diff --git a/.agents/plugins/marketplace.json b/.agents/plugins/marketplace.json index 62ca3e6a..6ce0c35f 100644 --- a/.agents/plugins/marketplace.json +++ b/.agents/plugins/marketplace.json @@ -40,6 +40,18 @@ }, "category": "Development" }, + { + "name": "aws-transform", + "source": { + "source": "local", + "path": "./plugins/aws-transform" + }, + "policy": { + "installation": "AVAILABLE", + "authentication": "ON_INSTALL" + }, + "category": "Migration" + }, { "name": "codebase-documentor-for-aws", "source": { diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index 58e7ae42..dc2da326 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -79,6 +79,30 @@ ], "version": "1.0.0" }, + { + "category": "migration", + "description": "Migrate, modernize, and upgrade codebases to AWS. Transforms .NET Framework to .NET 8/10, mainframe COBOL to Java, VMware VMs to EC2, SQL Server to Aurora, and upgrades Java/Python/Node.js versions and AWS SDKs.", + "keywords": [ + "aws", + "aws-transform", + "migration", + "modernization", + "dotnet", + "mainframe", + "cobol", + "vmware", + "ec2", + "sql-server", + "aurora", + "java", + "python", + "sdk" + ], + "name": "aws-transform", + "source": "./plugins/aws-transform", + "tags": ["aws", "migration", "modernization", "transform"], + "version": "1.0.0" + }, { "category": "development", "description": "Analyze codebases to generate structured technical documentation with source-of-truth citations. Optimized for AWS workloads using CDK, CloudFormation, and Terraform.", diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a33aeeb8..2e1377d8 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -33,6 +33,7 @@ tools/ @awslabs/agent-plugins-admins plugins/amazon-location-service @awslabs/agent-plugins-admins @awslabs/agent-plugins-maintainers @awslabs/agent-plugins-amazon-location-service plugins/aws-amplify @awslabs/agent-plugins-admins @awslabs/agent-plugins-maintainers @awslabs/agent-plugins-amplify plugins/aws-serverless @awslabs/agent-plugins-admins @awslabs/agent-plugins-maintainers @awslabs/agent-plugins-aws-serverless +plugins/aws-transform @awslabs/agent-plugins-admins @awslabs/agent-plugins-maintainers @awslabs/agent-plugins-aws-transform plugins/codebase-documentor-for-aws @awslabs/agent-plugins-admins @awslabs/agent-plugins-maintainers @awslabs/agent-plugins-codebase-documentor-for-aws plugins/databases-on-aws @awslabs/agent-plugins-admins @awslabs/agent-plugins-maintainers @awslabs/agent-plugins-dsql plugins/deploy-on-aws @awslabs/agent-plugins-admins @awslabs/agent-plugins-maintainers @awslabs/agent-plugins-deploy-on-aws diff --git a/README.md b/README.md index 93b9dbb7..c0ad3745 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,7 @@ To maximize the benefits of plugin-assisted development while maintaining securi | **amazon-location-service** | Add maps, geocoding, routing, places search, and geospatial features to applications with Amazon Location Service | Available | | **aws-amplify** | Build full-stack apps with AWS Amplify Gen 2 using guided workflows for auth, data, storage, and functions | Available | | **aws-serverless** | Build serverless applications with Lambda, API Gateway, EventBridge, Step Functions, and durable functions | Available | +| **aws-transform** | Migrate, modernize, and upgrade codebases to AWS — .NET to .NET 8/10, mainframe COBOL to Java, VMware to EC2, SQL Server to Aurora, and language/SDK upgrades | Available | | **codebase-documentor-for-aws** | Analyze 
AWS-deployed services and codebases to generate structured technical documentation with source-of-truth citations | Available | | **databases-on-aws** | Database guidance for the AWS database portfolio — schema design, queries, migrations, and multi-tenant patterns | Some Services Available (Aurora DSQL) | | **deploy-on-aws** | Deploy applications to AWS with architecture recommendations, cost estimates, and IaC deployment | Available | diff --git a/plugins/aws-transform/.claude-plugin/plugin.json b/plugins/aws-transform/.claude-plugin/plugin.json new file mode 100644 index 00000000..3d4b80d0 --- /dev/null +++ b/plugins/aws-transform/.claude-plugin/plugin.json @@ -0,0 +1,27 @@ +{ + "author": { + "name": "Amazon Web Services" + }, + "description": "Migrate, modernize, and upgrade codebases to AWS. Transforms .NET Framework to .NET 8/10, mainframe COBOL to Java, VMware VMs to EC2, SQL Server to Aurora, and upgrades Java/Python/Node.js versions and AWS SDKs.", + "homepage": "https://github.com/awslabs/agent-plugins", + "keywords": [ + "aws", + "aws-transform", + "migration", + "modernization", + "dotnet", + "mainframe", + "cobol", + "vmware", + "ec2", + "sql-server", + "aurora", + "java", + "python", + "sdk" + ], + "license": "Apache-2.0", + "name": "aws-transform", + "repository": "https://github.com/awslabs/agent-plugins", + "version": "1.0.0" +} diff --git a/plugins/aws-transform/.codex-plugin/plugin.json b/plugins/aws-transform/.codex-plugin/plugin.json new file mode 100644 index 00000000..64049619 --- /dev/null +++ b/plugins/aws-transform/.codex-plugin/plugin.json @@ -0,0 +1,51 @@ +{ + "name": "aws-transform", + "version": "1.0.0", + "description": "Migrate, modernize, and upgrade codebases to AWS. Transforms .NET Framework to .NET 8/10, mainframe COBOL to Java, VMware VMs to EC2, SQL Server to Aurora, and upgrades Java/Python/Node.js versions and AWS SDKs.", + "author": { + "name": "Amazon Web Services", + "email": "aws-agent-plugins@amazon.com", + "url": "https://github.com/awslabs/agent-plugins" + }, + "homepage": "https://github.com/awslabs/agent-plugins", + "repository": "https://github.com/awslabs/agent-plugins", + "license": "Apache-2.0", + "keywords": [ + "aws", + "aws-transform", + "migration", + "modernization", + "dotnet", + "mainframe", + "cobol", + "vmware", + "ec2", + "sql-server", + "aurora", + "java", + "python", + "sdk" + ], + "skills": "./skills/", + "mcpServers": "./.mcp.json", + "interface": { + "displayName": "AWS Transform", + "shortDescription": "Migrate and modernize codebases to AWS with assessment, planning, and execution.", + "longDescription": "AWS Transform brings AI-powered code and workload modernization into your coding agent. Supports .NET Framework to .NET 8/10, mainframe COBOL to Java, VMware to EC2, SQL Server/Oracle/MySQL to Aurora, and Java/Python/Node.js language and AWS SDK upgrades. Guides users through assessment, requirements, approval, tasks, and execution with just-in-time authentication.", + "defaultPrompt": [ + "Migrate this .NET Framework app to .NET 8 on AWS.", + "Assess this codebase for modernization with AWS Transform.", + "Plan a VMware-to-EC2 migration for my workload." 
+ ], + "developerName": "Amazon Web Services", + "category": "Migration", + "capabilities": [ + "Read", + "Write" + ], + "websiteURL": "https://github.com/awslabs/agent-plugins", + "privacyPolicyURL": "https://aws.amazon.com/privacy/", + "termsOfServiceURL": "https://aws.amazon.com/service-terms/", + "brandColor": "#FF9900" + } +} diff --git a/plugins/aws-transform/.mcp.json b/plugins/aws-transform/.mcp.json new file mode 100644 index 00000000..6894cdac --- /dev/null +++ b/plugins/aws-transform/.mcp.json @@ -0,0 +1,10 @@ +{ + "mcpServers": { + "aws-transform-mcp": { + "command": "uvx", + "args": [ + "awslabs.aws-transform-mcp-server@latest" + ] + } + } +} diff --git a/plugins/aws-transform/README.md b/plugins/aws-transform/README.md new file mode 100644 index 00000000..1f5f68ee --- /dev/null +++ b/plugins/aws-transform/README.md @@ -0,0 +1,47 @@ +# AWS Transform Agent Plugin + +Migrate and modernize codebases to AWS. Covers .NET Framework to .NET 8/10, mainframe COBOL to Java, VMware VMs to EC2, SQL Server/Oracle/MySQL to Aurora, and Java/Python/Node.js language and AWS SDK upgrades, plus custom transformations defined by the user. + +## Overview + +AWS Transform is AWS's AI-powered code and workload modernization service. This plugin brings its workflow guidance into AI coding agents: assess, plan, transform, and validate — routed through just-in-time authentication and workload-specific steering. + +## Skills + +| Skill | Description | +| --------------- | --------------------------------------------------------------------------------------------------- | +| `aws-transform` | Assessment, planning, and execution for .NET, mainframe, VMware, SQL, language/SDK upgrades, custom | + +## MCP Servers + +| Server | Description | +| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `aws-transform-mcp` | [awslabs.aws-transform-mcp-server](https://pypi.org/project/awslabs.aws-transform-mcp-server/) — tools for workspaces, jobs, agents, HITL tasks, and authentication | + +## Installation + +```bash +/plugin marketplace add awslabs/agent-plugins +/plugin install aws-transform@agent-plugins-for-aws +``` + +## Prerequisites + +- `uv` (required to launch the MCP server via `uvx`): + - macOS: `brew install uv` + - Linux: `curl -LsSf https://astral.sh/uv/install.sh | sh` +- AWS credentials for AWS Transform (SigV4) or an IAM Identity Center session +- AWS Transform CLI (`atx`) — only required for custom transformations: `curl -fsSL https://transform-cli.awsstatic.com/install.sh | bash` + +## Examples + +- "Migrate this .NET Framework app to .NET 8 on AWS" +- "Upgrade this Java 8 project to Java 21" +- "Move these VMware VMs to EC2" +- "Convert this SQL Server database to Aurora PostgreSQL" +- "Modernize this COBOL mainframe code" +- "Upgrade this Python 2 codebase to Python 3" + +## License + +Apache-2.0 diff --git a/plugins/aws-transform/skills/aws-transform/SKILL.md b/plugins/aws-transform/skills/aws-transform/SKILL.md new file mode 100644 index 00000000..c763a6c3 --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/SKILL.md @@ -0,0 +1,283 @@ +--- +name: aws-transform +description: Migrate, modernize, and upgrade codebases to AWS. Transforms .NET Framework to .NET 8/10, mainframe COBOL to Java, VMware VMs to EC2, SQL Server to Aurora, and upgrades Java/Python/Node.js versions and AWS SDKs. 
Use when the user says "migrate .NET to AWS", "upgrade Java to 17/21", "modernize COBOL", "modernize mainframe", "move VMware to EC2", "convert SQL Server to Aurora", "upgrade Python version", "migrate AWS SDK", or "transform this codebase". Don't use for infrastructure provisioning, CI/CD pipelines, or general coding tasks. +--- + +# AWS Transform + +## Overview + +Domain expertise for migrating and modernizing workloads using AWS Transform. Covers .NET Framework to .NET 8/10, mainframe COBOL to Java, VMware to EC2, SQL Server to Aurora PostgreSQL, and custom code transformations (Java, Python, Node.js version upgrades, SDK migrations). Orchestrates assessment, planning, and execution through Managed Agents and AWS Transform CLI with human-in-the-loop checkpoints. + +## Prerequisites + +This skill requires the AWS Transform MCP server (`aws-transform-mcp`). Configure it in your agent's MCP settings: + +```json +{ + "mcpServers": { + "aws-transform-mcp": { + "command": "uvx", + "args": [ + "awslabs.aws-transform-mcp-server@latest" + ] + } + } +} +``` + +The AWS Transform CLI is also required for custom transformations. Install via: + +```bash +curl -fsSL https://transform-cli.awsstatic.com/install.sh | bash +``` + +## Mandatory workflow + +Follow these phases in order. Do NOT skip ahead. Authentication is handled just-in-time — only when a chosen action actually needs it. Do NOT probe auth before the user has declared an intent. + +``` +Resume → Check .atx/context.json +Intent → Ask user what they want to do +Discovery → Scan workspace + query available agents +Scope → User selects what to modernize (GATE 1) +Assessment → Run workload assessment (NOT optional) +Requirements → Draft from assessment report +Approval → User approves requirements (GATE 2) +Tasks → Generate tasks.md +Execute → Run transforms, monitor, review diffs +``` + +**Discovery finds opportunities. Assessment produces detailed findings. Requirements come from the assessment — NOT from discovery.** + +You MUST NOT create requirements without an assessment report. +You MUST NOT start execution without requirements.md and tasks.md. + +## Resuming a prior session + +Check for `.atx/context.json` (workspace-relative). NEVER read `~/.aws/atx/kiro-power-context.json`. + +**This check is an internal bookkeeping operation. The user must never see it happen.** Do not announce the check, do not narrate what you are doing. No phrases like "checking for prior sessions", "let me check", "check for a prior session", "context file". On a fresh install, the first visible output must be the intent question — no preamble of any kind. + +- **No context found:** Proceed directly to intent. Produce no user-visible output for this step. +- **Context found:** If the context has an active job (`assessment.jobId` or entries in `execution.activeJobIds`), try to refresh live state from the service, but do so invisibly: + - **Check auth first** (no-auth-required). If sign-in is NOT configured, skip the refresh entirely — do not attempt service calls. Use local context only. + - **If sign-in is configured**, fetch each resource your resume message depends on — at minimum the job itself and all pending user tasks. Surface every pending task to the user; do not cherry-pick one and omit the others. `BLOCKING` HITL tasks hold up progress even when the job status is active; `NON_BLOCKING` tasks still need attention but don't stall the job. Name every pending task; flag blocking ones. Don't infer one resource from another. 
- **If any call fails** for any reason, silently fall back to local context. **Do NOT reveal your reasoning about the refresh to the user** — no "sign-in isn't configured so I'll skip", no "the service isn't reachable". The user should see only the resume message. Do NOT demand auth or block the flow. + + Then tell the user about their prior session. Frame the offer explicitly as a **continuation** of that same session — not a new one. The message should make clear: + - This is the specific session they previously worked on. Mention the phase reached, workspace/job identifiers if relevant. + - **Refresh succeeded** → speak in present tense about live state ("your assessment job is running", "I need your input on X to continue"). If there is a pending HITL task, surface it — don't bury it under "your job is running." + - **Refresh failed or was skipped** → use prior-session framing: "last time", "when you paused", "previously", "your last session had finished assessment." Do NOT make present-tense claims about job state — local context may be stale. Offer sign-in as the path to current status ("sign in to see the latest status"), not as a gate. + - **Resume** = continue where you left off, reusing the existing assessment report, workspace, and prior progress. + - **Start fresh** = discard the prior session (local artifacts deleted) and begin a brand-new migration. + + Use language like "continue where you left off" or "pick up from where you stopped" — not ambiguous phrasing like "start a similar session." If the user chooses start fresh, delete `.atx/context.json`, `.atx/discovery.json`, `.atx/assessment-report/`, and `.atx/specs/`, then proceed to intent. Otherwise follow the resume logic in [workflow reference](references/workflow.md). + +## Determining user intent + +Ask the user: "What would you like to focus on?" The first user-visible action in this phase is the question — no auth-probing tool calls precede it, no auth lecture precedes it. + +With projects: [Discover This Workspace] [Browse My Jobs] [Start a Specific Transform] +No projects: [Browse My Jobs] [Open a Project Folder] [Start from Scratch] + +**Just-in-time auth.** Once the user picks an intent, the next tool that action needs may require auth. If so, prompt for auth then, framed around the action the user just chose ("to browse your jobs, sign in to AWS Transform"). Which auth each MCP tool needs is reported by the MCP server — read it from the tool's description, `get_status`, or the error the tool returns. CLI transforms use AWS credentials only — do NOT prompt for sign-in for CLI-only intents, even when sign-in is unconfigured. If the user picks something that needs no service call (e.g., "Open a Project Folder"), do not probe auth. + +See [auth reference](references/auth.md) for the MCP-vs-CLI auth split and how to present sign-in options. + +## Discovery + +Fast scan (~10 sec). Three things happen in parallel: + +1. **Scan the workspace** — detect languages, frameworks, file types, and dependencies present in the project. +2. **Query available agents** — call `list_resources` with `resource: "agents"` (MCP). Skip if sign-in is not configured or the user's intent is CLI-only. This is a paginated API — fetch all pages to get the complete set. The results contain two levels: + - **Orchestrator agents** — top-level agents you create jobs with. Each orchestrator may have sub-agents that provide deeper workload-specific capabilities. + - **Sub-agents** — invoked through their orchestrator, not directly.
They represent specialized skills within a workload type. + - Some agents may not belong to a known orchestrator — treat these as standalone capabilities. +3. **List available transformation definitions** — call `atx custom def list` (CLI) to get the current set and what they transform. Skip if CLI is not available or the user's intent is MCP-only. + +For the "Discover This Workspace" intent, Discovery is where sign-in is first required (other intents like "Browse My Jobs" need sign-in even earlier, per the just-in-time rule — handle those there). If `list_resources` returns NOT_CONFIGURED, prompt the user to sign in for the auth system needed — do not demand both. + +Then **match** workspace signals against orchestrator capabilities and available transformation definitions. Before selecting an orchestratorAgent for any workload, read the matched workload's reference file — it may specify the exact agent to use. Save the matched results to `.atx/discovery.json` — include the orchestrator → sub-agent hierarchy so later steps know what deeper capabilities are available. + +See [workflow reference](references/workflow.md) for the workspace scanning framework. + +**Discovery is NOT assessment.** Discovery identifies opportunities and matches them to available agents. Assessment produces the detailed findings. + +## Scoping (GATE 1) + +**For each matched workload type, read ALL reference files with its prefix (e.g., [dotnet](references/dotnet.md)).** These contain the workload's capabilities, workflow, agent details, example requirements, and known limitations. The file prefix comes from the agent match in Discovery — not from a hardcoded list. + +Show migration table, then let the user select with multiSelect: + +``` +| Risk | Why | Component | Current | Target | AWS Target | Recommended Approach | +``` + +Always explain risk in plain language in the "Why" column — use the user-facing phrases from the Risk Classification table in [workflow reference](references/workflow.md). Never show a bare HIGH/MED/LOW label without explanation. + +User selects what to modernize. + +## Assessment + +**This is NOT optional. Run the workload's assessment BEFORE creating requirements.** + +Tell the user: "I'll assess your workload. The assessment report drives the migration plan." + +**How assessment runs depends on the workload's reference files.** Each workload type defines its own assessment approach — the agent to use, the objective format, and how to collect results. Consult the matched workload's reference files for specifics. + +General pattern for agent-based assessment: + +1. **Confirm the plan** — tell the user what you will do (create workspace, create job with which agent, what the objective is). WAIT for approval before calling any tools. +2. Create/select workspace +3. Create job with a **clear objective** — the workload's reference files define what a good objective looks like +4. Start the job (already started by `create_job`; use `control_job` to restart if stopped) +5. Send a **detailed follow-up message** with project specifics +6. **Ask before uploading** — ask how the user wants to share source code. WAIT. Then upload with `categoryType: "CUSTOMER_INPUT"`. +7. Handle agent requests (checkpoints, decisions) — always present to user, WAIT for user response +8. When assessment completes, download the report: `get_resource resource="artifact"` +9. 
Save report to `.atx/assessment-report/` + +**Rule: NEVER batch workspace creation, job creation, and uploads into a single turn without user confirmation at each decision point.** + +Use the orchestrator agent or transformation definition identified during Discovery. The match comes from `list_resources` (with `resource: "agents"`) and `atx custom def list`, not a hardcoded mapping. When creating a job, specify the orchestrator — sub-agents are invoked by the orchestrator as needed. + +Update `.atx/context.json` with `phase: "assessed"`, workspace ID, job ID. + +## Requirements (from assessment report) + +Now create `.atx/specs/requirements.md` using the **assessment report** — NOT discovery findings. + +- Read `.atx/assessment-report/` for detailed findings +- Load workload reference files for context +- Draft requirements grounded in the assessment (specific blockers, LOC, complexity, migration paths) +- Each requirement says WHO handles it: AWS Transform CLI / Managed Agents / IDE +- Multi-module: group by module with Module Overview table +- See [workflow reference](references/workflow.md) for format + +**Do NOT create tasks.md yet.** + +Show requirements summary and let the user choose: [Looks Good] [Edit] [Add Component] + +## Approval (GATE 2) + +Ask the user: "Requirements finalized. Ready to create the execution plan?" +[Create Plan] [Edit More] + +## Task generation + +Generate `tasks.md` from approved requirements: + +- Module Status table + per-module sections +- Sized: max 100 files/task +- Parallel groups verified +- Review-diffs after every code change +- See [workflow reference](references/workflow.md) for format + +Present options: [Start Execution] [Review Tasks] [Modify] + +## Execution + +See [workflow reference](references/workflow.md) for full details. + +**How execution runs depends on the workload's reference files.** Each workload type defines its own execution tooling — which agent or CLI command to use, how to parallelize, and how to collect results. Consult the matched workload's reference files. + +General pattern for agent-based execution: + +When creating new jobs, always: + +1. **Clear objective** in `create_job` — what to transform, from what, to what +2. **Detailed follow-up message** via `send_message` — project specifics, discovery findings, blockers +3. **Upload artifacts** if agent needs code — ask user first, `categoryType: "CUSTOMER_INPUT"` + +### Every agent request → user decides (NEVER auto-handle) + +When the AWS Transform agent asks for input, needs files, or hits a checkpoint: + +1. Read the task/message +2. Present to user +3. WAIT for user response +4. Relay user's decision back to agent + +### Uploading artifacts to agents + +Always use `categoryType: "CUSTOMER_INPUT"` when uploading files to an agent: + +```python +upload_artifact( + workspaceId="...", jobId="...", + content="/path/to/source.zip", + fileType="ZIP", + categoryType="CUSTOMER_INPUT" +) +``` + +| categoryType | When to Use | +| ----------------- | --------------------------------------------------------- | +| `CUSTOMER_INPUT` | Uploading files TO the agent (source code, configs, data) | +| `CUSTOMER_OUTPUT` | Downloading files FROM the agent (reports, migrated code) | +| `HITL_FROM_USER` | User responses to agent HITL tasks | + +See [workflow reference](references/workflow.md) for agent request handling patterns. + +### Progress + +Review diffs after every code change. User must approve. +Update tasks.md checkboxes + `.atx/context.json` after every step. 
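+A minimal sketch of that per-step bookkeeping, assuming `jq` is available; the tasks.md location under `.atx/specs/`, the checkbox text, and `$REPO_PATH` are illustrative stand-ins, not fixed names:
+
+```bash
+# Show the user what changed before marking the step done
+git -C "$REPO_PATH" diff --stat
+
+# Tick the completed task's checkbox (illustrative task name)
+sed -i.bak 's/^- \[ \] Migrate module-a$/- [x] Migrate module-a/' .atx/specs/tasks.md
+
+# Stamp the context file; other fields follow the schema in the next section
+tmp=$(mktemp)
+jq --arg now "$(date -u +%Y-%m-%dT%H:%M:%SZ)" '.updatedAt = $now' \
+  .atx/context.json > "$tmp" && mv "$tmp" .atx/context.json
+```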
+ +--- + +## Context persistence (.atx/context.json) + +Save `.atx/context.json` IMMEDIATELY after completing each phase — before presenting results to the user. Every phase transition must have a context save between them. Top-level keys: `phase`, `discovery`, `assessment`, `spec`, `workStyle`, `execution`, `updatedAt`. See [workflow reference](references/workflow.md) for the full schema. + +Resume: read `phase`, pick up from that phase. + +--- + +## Constraints + +- MUST use product, capability, and step names exactly as defined in this document. Never paraphrase or invent terminology. When describing this skill's capabilities, use: "Migrate, modernize, and upgrade codebases — .NET, mainframe COBOL, VMware, databases, and language/SDK upgrades — using AWS Transform CLI and Managed Agents, directly from your IDE." +- MUST present user choices as an explicit selectable list — never bury options in prose or proceed on an inferred answer +- MUST run CLI commands in background — never block the conversation +- MUST discover agents dynamically via `list_resources` with `resource: "agents"` (paginated) — do not hardcode agent names +- MUST create jobs with orchestrator agents — sub-agents are invoked by the orchestrator, not directly +- MUST refer to resources by name, not ID. When referencing a workspace, job, agent, or artifact in user-facing messages, use its human-readable name. Never surface raw UUIDs in prose. If a resource has no name, use a descriptive phrase ("your .NET modernization job") rather than the ID. +- MUST NOT expose internal mechanics to the user — do not name tools (get_status, list_resources), do not cite step numbers, do not reference files you are reading, and do not narrate what you are about to do. Just do it silently and present the outcome in user terms. +- MUST NOT mix workflow descriptions with actual questions in the same numbered list, and never use count language like "two questions" when some items are informational steps rather than questions. Keep what-I-will-do separate from what-I-need-from-you. +- MUST NOT frame HITL checkpoints, agent questions, or pending decisions as coming from "the web app", "the webapp", "the web UI", or a third-party "the agent is asking / the agent needs / the agent wants". The user is working with you in the IDE — you own the interaction. Present every checkpoint as your own first-person request, not a relayed message from elsewhere. **Wrong:** "The web app is asking how you want to deploy the landing zone." / "The agent is now asking about the replication subnet configuration." **Right:** "The next step is to choose how to deploy the landing zone." / "I need the replication subnet configuration to continue." +- MUST NOT explain what this skill does +- MUST NOT create requirements from discovery — wait for assessment +- MUST NOT skip from discovery to execution +- MUST NOT modify code, upgrade dependencies, or run analysis manually — always use AWS Transform tooling +- MUST NOT make decisions on behalf of the user +- MUST NOT editorialize or use subjective language — no "interesting", "fascinating", "notably", "impressive", "remarkable". State findings as facts. +- MUST NOT prompt for authentication before the user has declared an intent. Auth prompts come from the tool a chosen action needs, framed around that action. +- MUST NOT overclaim freshness. If you did NOT fetch a resource this turn, lead with "last I checked" (past tense throughout) and offer to refresh. 
Never promise proactive surfacing ("I'll let you know when…") unless actively polling — make the reactive model explicit. +- MUST NOT infer one resource's state from another — each MCP resource (job, tasks, artifacts) is its own source of truth. A job in an active state does NOT imply no pending user tasks. Fetch each resource directly when relevant. See [workflow reference](references/workflow.md). +- MUST NOT mix unrelated transformation goals in the same chat without warning. On every shift to a different goal, suggest the user start a new chat session (they start it themselves). Keep re-offers terse. If the user declines, proceed to answer their question about the other job — do not refuse or redirect back to the original goal. Just avoid mixing cached state (e.g., don't apply VMware findings to the .NET question). +- MUST store state in `.atx/context.json` + +--- + +## Reference + +### Core + +| Topic | File | +| ----------------------------------------------------------------------- | ------------------------------------------------ | +| Authentication (sign-in, AWS credentials, CLI credentials, errors) | [references/auth.md](references/auth.md) | +| Tools (MCP tools, CLI commands, connectors, HITL, troubleshooting) | [references/tools.md](references/tools.md) | +| Workflow (discovery, transforms, execution, planning, context, display) | [references/workflow.md](references/workflow.md) | + +### Workload Types + +| Workload | Files | +| ------------ | -------------------------- | +| .NET | `references/dotnet*.md` | +| SQL/Database | `references/sql*.md` | +| Mainframe | `references/mainframe*.md` | +| VMware | `references/vmware*.md` | +| Custom | `references/custom*.md` | + +Each workload type has a root reference file with its capabilities, workflow, and agent details. Additional files with the same prefix provide deeper guidance (e.g., `custom-cli-reference.md`, `custom-repo-analysis.md`). diff --git a/plugins/aws-transform/skills/aws-transform/references/auth.md b/plugins/aws-transform/skills/aws-transform/references/auth.md new file mode 100644 index 00000000..cf56a031 --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/auth.md @@ -0,0 +1,48 @@ +# Authentication + +There are two independent auth paths: + +- **AWS Transform (MCP tools)** — workspaces, jobs, tasks, artifacts, connectors, agents. The MCP server is authoritative: its tool descriptions, `get_status` response, and error messages describe supported methods, current state, and recovery. +- **Custom transformations (AWS Transform CLI)** — the `atx` CLI, which uses standard AWS credentials. No `atx auth` command, no MCP involvement. + +The paths do not block each other. A custom CLI intent proceeds with AWS credentials alone; an MCP intent does not require the CLI. Per the skill instructions, prompt for auth just-in-time for the chosen action — do not probe or demand both. + +## Signing in + +When sign-in is needed, `get_status` returns a message on the unconfigured connection that enumerates the currently-supported options. Present **every** option from that message — do not drop any, do not add any, do not reorder for emphasis. The MCP server is authoritative for which options are valid at a given moment (some options may be conditionally unavailable). + +Details the MCP message does not include, collect from the user only for the option they pick: + +- **Cookie mode** — need `origin` and `sessionCookie`. 
The cookie comes from the browser: log in to the AWS Transform tenant URL → DevTools (F12) → Application → Cookies → `aws-transform-session` → copy **Value**. +- **SSO mode** — need `startUrl` (looks like `https://d-xxxxxxxxxx.awsapps.com/start`, from IAM Identity Center) and `idcRegion`. +- **AWS Credentials** — no interactive detail to gather. `AWS_PROFILE` lives in the MCP client's env block; the MCP server picks it up on restart. + +When a session expires or a cookie is invalid, follow the recovery guidance in the MCP's error message. + +## AWS Transform CLI auth + +The CLI uses standard AWS credentials. There is no `atx auth` command — auth is whatever the AWS SDK / CLI provider chain resolves. + +```bash +aws sso login --profile my-profile +export AWS_PROFILE=my-profile +export AWS_REGION=us-east-1 +``` + +Verify: `AWS_REGION=us-east-1 atx custom def list --json`. + +Common CLI-side conditions: + +- `AccessDeniedException` → AWS credentials expired. Re-run `aws sso login` or refresh env vars. +- `command not found: atx` → CLI not installed. Use MCP-based transforms instead, or install the CLI. + +## Environment variables (MCP client config) + +Pre-set in `mcp.json` to skip an interactive `configure` call: + +| Variable | Description | +| ---------------- | ---------------------------------------------- | +| `ATX_REGION` | AWS region (default `us-east-1`) | +| `ATX_AUTH_MODE` | `cookie` or `sso` | +| `ATX_TENANT_URL` | Tenant URL (cookie mode) | +| `SESSION_COOKIE` | `aws-transform-session=<value>` (cookie mode) | diff --git a/plugins/aws-transform/skills/aws-transform/references/custom-cli-reference.md b/plugins/aws-transform/skills/aws-transform/references/custom-cli-reference.md new file mode 100644 index 00000000..a3a09b85 --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/custom-cli-reference.md @@ -0,0 +1,135 @@ +# AWS Transform CLI Reference + +## Execution Flags (`atx custom def exec`) + +| Flag | Long Form | Description | +| ------ | ------------------------------------ | ------------------------------------------------------------------ | +| `-n` | `--transformation-name <name>` | Transformation definition name (from `atx custom def list --json`) | +| `-p` | `--code-repository-path <path>` | Path to code repo (`.` for current dir) | +| `-x` | `--non-interactive` | No user prompts (always use this flag) | +| `-t` | `--trust-all-tools` | Auto-approve tool executions (required with `-x`) | +| `-d` | `--do-not-learn` | Prevent knowledge item extraction | +| `-g` | `--configuration <key=val>` | Inline configuration (`'key=val'`) | +| `--tv` | `--transformation-version <version>` | Specific transformation definition version | + +## Configuration + +Inline: `--configuration 'additionalPlanContext=Target Python 3.13'` + +Example: `atx custom def exec -n my-td -p /source/repo -g 'additionalPlanContext=Target Java 17' -x -t` + +- See [custom-single-transformation](custom-single-transformation.md) and [custom-multi-transformation](custom-multi-transformation.md) for how to wrap this in a runner/launcher script so it executes in a non-blocking way: the user gets progress updates while the process runs instead of waiting for it to exit. + +`--configuration` is optional. Omit if no extra context needed.
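+For instance, a non-learning run with inline plan context (`my-td` is a placeholder definition name):
+
+```bash
+# -x/-t make the run fully unattended; -d skips knowledge item extraction
+atx custom def exec -n my-td -p . -x -t -d \
+  -g 'additionalPlanContext=Target Python 3.13'
+```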
+ +## Other Commands + +| Action | Command | +| ---------------------------------- | ------------------------------------------------------------------------------------- | +| Start interactive conversation | `atx` | +| Resume most recent conversation | `atx --resume` | +| Resume specific conversation | `atx --conversation-id <id>` (30-day limit) | +| List transformation definitions | `atx custom def list --json` | +| Download transformation definition | `atx custom def get -n <name>` (optional: `--tv <version>`, `--td <value>`) | +| Delete transformation definition | `atx custom def delete -n <name>` | +| Save as draft | `atx custom def save-draft -n <name> --description "<description>" --sd <source-dir>` | +| Publish transformation definition | `atx custom def publish -n <name> --description "<description>" --sd <source-dir>` | +| List knowledge items | `atx custom def list-ki -n <name>` | +| View knowledge item | `atx custom def get-ki -n <name> --id <id>` | +| Enable/disable KI | `atx custom def update-ki-status -n <name> --id <id> --status ENABLED or DISABLED` | +| KI auto-approval on/off | `atx custom def update-ki-config -n <name> --auto-enabled TRUE or FALSE` | +| Export KIs | `atx custom def export-ki-markdown -n <name>` | +| Delete KI | `atx custom def delete-ki -n <name> --id <id>` | +| Update CLI | `atx update` | +| Check for CLI updates only | `atx update --check` | +| Tag a transformation definition | `atx custom def tag --arn <arn> --tags '{"key":"value"}'` | + +## Environment Variables + +| Variable | Default | Description | +| -------------------------- | ------------ | ------------------------------------- | +| `ATX_SHELL_TIMEOUT` | 900 (15 min) | Shell command timeout in seconds | +| `ATX_DISABLE_UPDATE_CHECK` | false | Disable version check | +| `AWS_PROFILE` | — | AWS credentials profile | +| `AWS_ACCESS_KEY_ID` | — | AWS access key | +| `AWS_SECRET_ACCESS_KEY` | — | AWS secret key | +| `AWS_SESSION_TOKEN` | — | Session token (temporary credentials) | + +## IAM Permissions + +Minimum: `transform-custom:*` on `Resource: "*"`. + +| Permission | Operation | +| ------------------------------------------------------ | ---------------------------------------------- | +| `transform-custom:ConverseStream` | Interactive conversations | +| `transform-custom:ExecuteTransformation` | Execute transforms | +| `transform-custom:ListTransformationPackageMetadata` | List transforms (`atx custom def list --json`) | +| `transform-custom:DeleteTransformationPackage` | Delete transforms | +| `transform-custom:CompleteTransformationPackageUpload` | Upload transformation definitions | +| `transform-custom:CreateTransformationPackageUrl` | Create upload URLs | +| `transform-custom:GetTransformationPackageUrl` | Download transformation definitions | +| `transform-custom:ListKnowledgeItems` | List knowledge items | +| `transform-custom:GetKnowledgeItem` | View knowledge item details | +| `transform-custom:DeleteKnowledgeItem` | Delete knowledge items | +| `transform-custom:UpdateKnowledgeItemConfiguration` | Configure auto-approval | +| `transform-custom:UpdateKnowledgeItemStatus` | Enable/disable items | +| `transform-custom:ListTagsForResource` | List tags | +| `transform-custom:TagResource` | Add tags | +| `transform-custom:UntagResource` | Remove tags | + +### Remote Mode Caller Permissions + +The caller's AWS credentials (the user or role running the session) need additional +permissions beyond `transform-custom:*` for remote mode.
Generate the policies, +then create and attach them: + +```bash +ATX_INFRA_DIR="$HOME/.aws/atx/custom/remote-infra" +if [ -d "$ATX_INFRA_DIR" ]; then + git -C "$ATX_INFRA_DIR" add -A + git -C "$ATX_INFRA_DIR" commit -m "Local customizations" -q 2>/dev/null || true + git -C "$ATX_INFRA_DIR" pull -q +else + git clone -b atx-remote-infra --single-branch https://github.com/aws-samples/aws-transform-custom-samples.git "$ATX_INFRA_DIR" +fi +cd "$ATX_INFRA_DIR" +npx ts-node generate-caller-policy.ts +``` + +This produces two policies: + +| Policy | Purpose | When Needed | +| ---------------------------- | ------------------------------------------------------------------------- | ---------------------------- | +| `atx-runtime-policy.json` | Invoke Lambdas, S3 upload/download, KMS, Secrets Manager, CloudWatch logs | Day-to-day remote operations | +| `atx-deployment-policy.json` | CloudFormation, ECR, IAM roles, Batch, VPC, KMS key creation | One-time CDK deploy/destroy | + +After generating, create and attach the runtime policy: + +```bash +ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) +CALLER_ARN=$(aws sts get-caller-identity --query Arn --output text) + +# Create the managed policy (ignore EntityAlreadyExists, fail on other errors) +if ! create_output=$(aws iam create-policy --policy-name ATXRuntimePolicy \ + --policy-document "file://$ATX_INFRA_DIR/atx-runtime-policy.json" 2>&1); then + echo "$create_output" | grep -q "EntityAlreadyExists" \ + || { echo "Failed to create policy: $create_output" >&2; exit 1; } +fi + +if echo "$CALLER_ARN" | grep -q ":user/"; then + IDENTITY_NAME=$(echo "$CALLER_ARN" | awk -F'/' '{print $NF}') + aws iam attach-user-policy --user-name "$IDENTITY_NAME" \ + --policy-arn "arn:aws:iam::${ACCOUNT_ID}:policy/ATXRuntimePolicy" +elif echo "$CALLER_ARN" | grep -Eq ":assumed-role/|:role/"; then + ROLE_NAME=$(echo "$CALLER_ARN" | sed 's/.*:\(assumed-\)\{0,1\}role\///' | cut -d'/' -f1) + aws iam attach-role-policy --role-name "$ROLE_NAME" \ + --policy-arn "arn:aws:iam::${ACCOUNT_ID}:policy/ATXRuntimePolicy" +fi +``` + +The runtime policy covers: `transform-custom:*` for AWS Transform CLI operations (transformation definition discovery, execution), +`lambda:InvokeFunction` on all `atx-*` functions, +`s3:PutObject`/`s3:GetObject` on source and output buckets, `kms:Encrypt`/`kms:Decrypt`/`kms:GenerateDataKey` +on the ATX encryption key, `secretsmanager:CreateSecret`/`PutSecretValue`/`DeleteSecret` on `atx/*` secrets, +`logs:GetLogEvents`/`FilterLogEvents` on the Batch log group, and `cloudformation:DescribeStacks` +for infrastructure status checks. diff --git a/plugins/aws-transform/skills/aws-transform/references/custom-multi-transformation.md b/plugins/aws-transform/skills/aws-transform/references/custom-multi-transformation.md new file mode 100644 index 00000000..6cc023ef --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/custom-multi-transformation.md @@ -0,0 +1,227 @@ +# Multi-Transformation + +Apply transformation definitions to multiple repositories in parallel. Transformation-definition-to-repo assignments and config +are already confirmed from the match report. Do NOT re-discover transformation definitions or re-prompt. + +## Input + +From the match report: repo list, transformation definition per repo, config per transformation definition, execution mode. + +## Prerequisite Check (Once Only) + +Verify AWS credentials ONCE. Do NOT repeat per repo. 
+ +```bash +aws sts get-caller-identity +``` + +Local mode also: `atx --version` + +## Local Execution + +If any repos were provided as git URLs (HTTPS or SSH), clone them locally first. +The user's local git config handles authentication — no Secrets Manager needed. + +```bash +CLONE_DIR=~/.aws/atx/custom/atx-agent-session/repos/<repo-name>-$SESSION_TS +git clone <repo-url> "$CLONE_DIR" +``` + +If repos were provided as an S3 bucket path with zips, download and extract locally: + +```bash +mkdir -p ~/.aws/atx/custom/atx-agent-session/repos +aws s3 sync s3://user-bucket/repos/ ~/.aws/atx/custom/atx-agent-session/repos/ --exclude "*" --include "*.zip" +for zip in ~/.aws/atx/custom/atx-agent-session/repos/*.zip; do + name=$(basename "$zip" .zip) + unzip -qo "$zip" -d "$HOME/.aws/atx/custom/atx-agent-session/repos/${name}-$SESSION_TS/" +done +``` + +Use the cloned/extracted paths as `<repo-path>` for each repo. + +For each repo, verify it's a git repo: + +```bash +ls -la <repo-path> +git -C <repo-path> status +``` + +If not a git repo: `cd <repo-path> && git init && git add . && git commit -m "Initial commit"` + +The active language runtime must match the transformation's target version so that +builds and tests run correctly. Check the current version, and if there is a +mismatch, first check whether the target version is already installed (e.g., +`/usr/libexec/java_home -V 2>&1` (macOS) or `ls /usr/lib/jvm/` (Linux), `pyenv versions`, `nvm ls`). If found, switch +to it (e.g., `export JAVA_HOME=<path> && export PATH="$JAVA_HOME/bin:$PATH"`, `pyenv shell 3.12`, `nvm use 22`). Only if +the target version is not installed at all, ask the user for permission before installing. Suggest: + +- Java: `brew install --cask corretto23` (macOS), `sudo yum install java-23-amazon-corretto-devel` (RHEL/AL2), or `sudo apt install java-23-amazon-corretto-jdk` (Debian/Ubuntu) +- Python: `pyenv install 3.15.0 && pyenv shell 3.15.0` +- Node.js: `nvm install 23 && nvm use 23` + +Do NOT proceed until the correct version is active. Verify the switch succeeded +before proceeding. + +### Telemetry + +When running `atx custom def exec`, always include the `--telemetry` flag (see the Telemetry section in [custom](custom.md)). Format: +`--telemetry "client=ide,agent=ide,executionMode=<local|remote>"` + +- `client` is always `ide` +- `agent` is always `ide` +- `executionMode` is `local` for direct CLI invocation, `remote` when submitting via Lambda + +Run transformations in parallel — maximum 3 concurrent repos at a time (the user +can override this, but 3 is recommended to avoid overloading the machine). If there +are more than 3 repos, process them in batches of 3 (wait for a batch to finish +before starting the next). Maximum 9 repos total for local mode (user can override, +but recommend remote mode for more). If the total repo count exceeds 9, suggest +remote mode instead. + +For each repo, use bash to create a runner script that captures the exit code, following this exact format: + +```bash +mkdir -p ~/.aws/atx/custom/atx-agent-session +cat > ~/.aws/atx/custom/atx-agent-session/run-<repo-name>.sh << 'RUNNER' +#!/bin/bash +atx custom def exec -n <transformation-name> -p <repo-path> -x -t \ + --configuration 'additionalPlanContext=<config>' \ + --telemetry "client=ide,agent=ide,executionMode=local" +echo $? > ~/.aws/atx/custom/atx-agent-session/<repo-name>.exit +RUNNER +chmod +x ~/.aws/atx/custom/atx-agent-session/run-<repo-name>.sh +nohup ~/.aws/atx/custom/atx-agent-session/run-<repo-name>.sh > ~/.aws/atx/custom/atx-agent-session/<repo-name>.log 2>&1 & +echo $! > ~/.aws/atx/custom/atx-agent-session/<repo-name>.pid +``` + +Omit `--configuration` if no config needed.
The `--telemetry` flag is always included — see [custom](custom.md) for details. Launch each repo's script in rapid +succession — do NOT wait between launches. Each runner script is backgrounded +via nohup; the exit code is captured to `~/.aws/atx/custom/atx-agent-session/<repo-name>.exit` when AWS Transform finishes. + +After launching all repos, find each repo's conversation log by grepping its +process log (AWS Transform outputs the path within 30-60 seconds of starting): + +```bash +grep "Conversation log:" ~/.aws/atx/custom/atx-agent-session/<repo-name>.log 2>/dev/null +``` + +If it hasn't appeared yet, wait 15 seconds and retry. Extract the full path from +each — do NOT use `ls -t` across all conversations, as that may match a different run. + +Then start monitoring. On each 60-second cycle: + +1. Check each PID: `kill -0 $(cat ~/.aws/atx/custom/atx-agent-session/<repo-name>.pid) 2>/dev/null && echo "RUNNING" || echo "DONE"` +2. Tail each repo's conversation log and relay progress to the user +3. For each repo, list the artifacts directory (`~/.aws/atx/custom/<conversation-id>/artifacts/`) + and open any new files as they appear (open each file only once). +4. Report which repos are still running, which have completed + +**You MUST continue polling without waiting for user input.** The user should see +continuous progress updates across all repos. + +A repo's transformation is done ONLY when its background process exits (i.e., +`kill -0` returns non-zero). Do NOT treat exit code 0 from any other command +(grep, cat, test, ls, etc.) as transformation completion. Do NOT treat log +messages like "TRANSFORMATION COMPLETE" as completion — AWS Transform performs additional +steps after that (validation summary generation). + +## Remote Execution + +Prepare each repo's source before submitting the batch. Follow the source prep +rules from [custom-single-transformation](custom-single-transformation.md): HTTPS and SSH git URLs (with credentials +configured) are passed directly; S3 zips from the user's bucket must be copied +to the managed source bucket (`atx-source-code-{account}`) first; local repos +must be zipped and uploaded to the same managed bucket. + +Submit jobs via the batch Lambda in chunks of up to 128. If there are more than +128 jobs, split them into multiple `atx-trigger-batch-jobs` calls (e.g., 500 repos += 4 calls of 128 + 128 + 128 + 116). Each call returns its own `batchId`. Track +all batch IDs for monitoring. + +Include the `environment` field on each job to set the language version matching the transformation's target (e.g., `"JAVA_VERSION":"21"` for a Java upgrade targeting 21).
The `--telemetry` flag is always included in each job's `command` string (see [custom](custom.md)): + +```bash +aws lambda invoke --function-name atx-trigger-batch-jobs \ + --payload '{"batchName":"<batch-name>-chunk-1","jobs":[{"source":"<repo-url-or-s3-path>","command":"atx custom def exec -n <transformation-name> -p /source/<repo-name> -x -t --telemetry \"client=ide,agent=ide,executionMode=remote\"","jobName":"<job-name>","environment":{"JAVA_VERSION":"<version>"}}]}' \ + --cli-binary-format raw-in-base64-out /dev/stdout +``` + +If the total exceeds 128, repeat with the next chunk: + +```bash +aws lambda invoke --function-name atx-trigger-batch-jobs \ + --payload '{"batchName":"<batch-name>-chunk-2","jobs":[...next 128 jobs...]}' \ + --cli-binary-format raw-in-base64-out /dev/stdout +``` + +Monitor each batch by its `batchId`: + +```bash +aws lambda invoke --function-name atx-get-batch-status \ + --payload '{"batchId":"<batch-id>"}' \ + --cli-binary-format raw-in-base64-out /dev/stdout +``` + +Polling: every 60 seconds for the first 10 polls, then every 5 minutes after. +Report only on status change. + +## Progress Reporting + +``` +[1/N] repo-name transformation-name Status +[2/N] repo-name transformation-name Status +``` + +## Result Collection + +Collect per repo: success/failure, transformed code path, error details. + +``` +Succeeded: +- repo-name: transformation-name (config) +Failed: +- repo-name: transformation-name (error) +``` + +For remote executions, include the CloudWatch dashboard link in the final output: + +```bash +REGION=${AWS_REGION:-${AWS_DEFAULT_REGION:-$(aws configure get region 2>/dev/null)}} +REGION=${REGION:-us-east-1} +echo "https://${REGION}.console.aws.amazon.com/cloudwatch/home#dashboards/dashboard/ATX-Transform-CLI-Dashboard" +``` + +Hand off to [custom-results-synthesis](custom-results-synthesis.md) for consolidated reporting. + +For local executions only, tell the user: "To review changes in each repo, open it in +your IDE and use the Source Control panel to see the full +commit history with diffs for each file AWS Transform modified." + +## Error Handling + +| Scenario | Action | +| -------------------- | ------------------------------------------------ | +| Git clone fails | Log error, continue with remaining repos | +| Transformation fails | Log repo and error, do not auto-retry | +| Partial results | Generate summary from successes, report failures | + +## MANDATORY: Cleanup + +Clean up session files **before starting** and **after completing** each batch: + +```bash +[ -d ~/.aws/atx/custom/atx-agent-session ] && find ~/.aws/atx/custom/atx-agent-session -maxdepth 1 -type f \( -name "*.sh" -o -name "*.log" -o -name "*.pid" -o -name "*.exit" -o -name "*.zip" \) -delete 2>/dev/null || true +``` + +For remote mode: after presenting results, also prompt the user about infrastructure +teardown. See the Cleanup section in [custom-remote-execution](custom-remote-execution.md) +for the exact prompt and flow. + +## Key Principles + +1. Single prerequisite check — never repeat for parallel tasks +2. Trust the match report — do not re-discover transformation definitions +3. Local parallel execution — maximum 3 concurrent repos (user-overridable); recommend remote for more than 9 +4. Remote parallel execution — submit in chunks of up to 128 jobs per `atx-trigger-batch-jobs` call; split larger sets into multiple calls (max 512 repos per session) +5.
Skip prerequisite checks in parallel task prompts diff --git a/plugins/aws-transform/skills/aws-transform/references/custom-remote-execution.md b/plugins/aws-transform/skills/aws-transform/references/custom-remote-execution.md new file mode 100644 index 00000000..910ea2e1 --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/custom-remote-execution.md @@ -0,0 +1,510 @@ +# Remote Execution + +Deploy and manage AWS Batch/Fargate infrastructure for running AWS Transform custom transformations at scale. +All Lambda calls are executed by you — users never interact with Lambdas directly. + +Remote mode deploys to the user's own AWS account. Key resources: + +- Results stored in S3 (`atx-custom-output-{accountId}`) with KMS encryption, 30-day lifecycle +- Source code uploaded to S3 (`atx-source-code-{accountId}`) with 7-day lifecycle +- CloudWatch dashboard: `ATX-Transform-CLI-Dashboard` for monitoring jobs +- 8 Lambda functions for job management (trigger, status, terminate, list) +- AWS Batch/Fargate for container execution — costs nothing when idle +- To find the account: `aws sts get-caller-identity --query Account --output text` + +## Table of Contents + +- [Infrastructure Check](#infrastructure-check) +- [User Consent Prompt](#user-consent-prompt) +- [Deployment](#deployment) +- [Lambda Function Names](#lambda-function-names) +- [MCP Configuration (Optional)](#mcp-configuration-optional) +- [Job Submission](#job-submission) +- [SSH URL Handling](#ssh-url-handling) +- [Polling](#polling) +- [Results Location](#results-location) +- [Private Repository Access](#private-repository-access) +- [Monitoring](#monitoring) +- [Container Customization](#container-customization) +- [Pricing](#pricing) +- [Cleanup](#cleanup) + +## Infrastructure Check + +Before checking, determine the active AWS region (from `AWS_REGION`, `AWS_DEFAULT_REGION`, +or `aws configure get region`) and tell the user which region is being used. + +Then check deployment status: + +```bash +aws cloudformation describe-stacks --stack-name AtxInfrastructureStack \ + --query 'Stacks[0].StackStatus' --output text || echo "NOT_DEPLOYED" +``` + +If deployed (`CREATE_COMPLETE` or `UPDATE_COMPLETE`): proceed to job submission. +If `NOT_DEPLOYED` or any other status: get explicit user consent before deploying. + +## User Consent Prompt + +Explain what gets created: AWS Batch (Fargate), 8 Lambda functions, S3 buckets (KMS encrypted), +CloudWatch dashboard, IAM roles. If using the pre-built image, no Docker is needed and no ECR +repository is created in their account. If using a custom image, an ECR repository is created +and the container is built locally. One-time setup. +Do NOT deploy until user confirms. + +## Deployment + +### Pre-built vs Custom Image + +The infrastructure supports two container modes: + +**Pre-built image (default):** A public ECR image with Java (8, 11, 17, 21, 25), +Python (3.8-3.14), Node.js (16-24), Maven, Gradle, and common build tools. +No Docker required on the user's machine. Use this when the pre-built image +has everything the transformation needs (source runtime, target runtime, build +tools, and any other dependencies). + +**Custom image (fallback):** If the transformation requires a language, tool, or +version not in the pre-built image, you clone the infrastructure repo, +customize the Dockerfile, and build locally. This requires Docker on the user's +machine. + +You determine which mode to use during the Verify Runtime Compatibility step +in [custom](custom.md). 
Do NOT ask the user to choose — you decide automatically based +on whether the pre-built image has everything needed for the transformation. + +### Pre-built Image Runtimes + +The pre-built image includes: + +- **Java**: 8, 11, 17, 21, 25 (Amazon Corretto) with Maven and Gradle 9.4 +- **Python**: 3.8, 3.9, 3.10, 3.11, 3.12, 3.13, 3.14 (dnf + pyenv) +- **Node.js**: 16, 18, 20, 22, 24 (nvm) with yarn, pnpm, TypeScript, ts-node +- **Build tools**: gcc, g++, make, patch +- **CLI tools**: AWS CLI v2, AWS Transform CLI, git, jq, curl, unzip, tar +- **OS**: Amazon Linux 2023 (x86_64) + +If the transformation target is in this list, use the pre-built image path. + +### Pre-built Image Path (No Docker Required) + +Clone and run setup — Docker is NOT required: + +```bash +ATX_INFRA_DIR="$HOME/.aws/atx/custom/remote-infra" +if [ -d "$ATX_INFRA_DIR" ]; then + git -C "$ATX_INFRA_DIR" add -A + git -C "$ATX_INFRA_DIR" commit -m "Local customizations" -q 2>/dev/null || true + git -C "$ATX_INFRA_DIR" pull -q +else + git clone -b atx-remote-infra --single-branch https://github.com/aws-samples/aws-transform-custom-samples.git "$ATX_INFRA_DIR" +fi +``` + +If `git pull` reports a merge conflict, resolve it by keeping both the upstream +changes and the user's customizations in the `CUSTOM LANGUAGES AND TOOLS` section +of the Dockerfile, then commit the merge. + +Ensure `prebuiltImageUri` is set in `cdk.json` (it should be set to "public.ecr.aws/d9h8z6l7/aws-transform:latest" by default). Then deploy: + +```bash +cd "$ATX_INFRA_DIR" && ./setup.sh +``` + +The setup script skips the Docker prerequisite check and container build when +`prebuiltImageUri` is configured. First deploy takes ~3-5 minutes (no image build). + +### Custom Image Path (Docker Required) + +If the transformation requires a runtime (source or target) or any other software not in the pre-built image, +clone/update the repo, clear the pre-built URI, customize the Dockerfile, and deploy: + +```bash +ATX_INFRA_DIR="$HOME/.aws/atx/custom/remote-infra" +if [ -d "$ATX_INFRA_DIR" ]; then + git -C "$ATX_INFRA_DIR" add -A + git -C "$ATX_INFRA_DIR" commit -m "Local customizations" -q 2>/dev/null || true + git -C "$ATX_INFRA_DIR" pull -q +else + git clone -b atx-remote-infra --single-branch https://github.com/aws-samples/aws-transform-custom-samples.git "$ATX_INFRA_DIR" +fi + +cd "$ATX_INFRA_DIR" && sed -i.bak 's|"prebuiltImageUri": ".*"|"prebuiltImageUri": ""|' cdk.json +``` + +Customize the Dockerfile (see Container Customization below), then deploy: + +```bash +cd "$ATX_INFRA_DIR" && ./setup.sh +``` + +This path requires Docker installed and running. First deploy takes ~5-10 minutes +(container build). CDK auto-detects Dockerfile changes and rebuilds the image. + +### Deployment Failures + +If `setup.sh` fails, it prints the specific prerequisite that's missing. Fix that +one thing and re-run — the script is idempotent. + +If deployment fails partway through (e.g., CloudFormation stack stuck in +`ROLLBACK_COMPLETE` or `UPDATE_ROLLBACK_FAILED`), run teardown first, then retry: + +```bash +cd "$ATX_INFRA_DIR" && rm -f cdk.context.json && ./teardown.sh && ./setup.sh +``` + +This cleans up the half-deployed state, clears cached CDK context, and starts fresh. +The teardown script handles stacks in any state, including failed rollbacks. 
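+A compact sketch of that recovery logic (the statuses singled out here are illustrative, not an exhaustive list):
+
+```bash
+STATUS=$(aws cloudformation describe-stacks --stack-name AtxInfrastructureStack \
+  --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "NOT_DEPLOYED")
+case "$STATUS" in
+  CREATE_COMPLETE|UPDATE_COMPLETE)
+    echo "Stack healthy ($STATUS): proceed to job submission" ;;
+  ROLLBACK_COMPLETE|UPDATE_ROLLBACK_FAILED)
+    # Half-deployed state: clear cached CDK context, tear down, redeploy
+    cd "$ATX_INFRA_DIR" && rm -f cdk.context.json && ./teardown.sh && ./setup.sh ;;
+  *)
+    echo "Not deployed ($STATUS): get user consent, then run ./setup.sh" ;;
+esac
+```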
+ +### Attach IAM Policies + +After deployment, generate and attach the runtime policy so the caller has +permissions to invoke Lambdas, upload/download from S3, use KMS, etc.: + +```bash +cd "$ATX_INFRA_DIR" && npx ts-node generate-caller-policy.ts +``` + +This produces two JSON files in `$ATX_INFRA_DIR`: + +- `atx-runtime-policy.json` — Day-to-day operations (Lambda invoke, S3, KMS, Secrets Manager, logs) +- `atx-deployment-policy.json` — One-time CDK deploy/destroy (CloudFormation, ECR, IAM, Batch, VPC) + +Attach the runtime policy to the caller: + +```bash +ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) +CALLER_ARN=$(aws sts get-caller-identity --query Arn --output text) + +# Create the managed policy (ignore EntityAlreadyExists, fail on other errors) +if ! create_output=$(aws iam create-policy --policy-name ATXRuntimePolicy \ + --policy-document "file://$ATX_INFRA_DIR/atx-runtime-policy.json" 2>&1); then + echo "$create_output" | grep -q "EntityAlreadyExists" \ + || { echo "Failed to create policy: $create_output" >&2; exit 1; } +fi + +# Attach to the caller (handles IAM users, IAM roles, and SSO/assumed roles) +if echo "$CALLER_ARN" | grep -q ":user/"; then + IDENTITY_NAME=$(echo "$CALLER_ARN" | awk -F'/' '{print $NF}') + aws iam attach-user-policy --user-name "$IDENTITY_NAME" \ + --policy-arn "arn:aws:iam::${ACCOUNT_ID}:policy/ATXRuntimePolicy" +elif echo "$CALLER_ARN" | grep -Eq ":assumed-role/|:role/"; then + ROLE_NAME=$(echo "$CALLER_ARN" | sed 's/.*:\(assumed-\)\{0,1\}role\///' | cut -d'/' -f1) + aws iam attach-role-policy --role-name "$ROLE_NAME" \ + --policy-arn "arn:aws:iam::${ACCOUNT_ID}:policy/ATXRuntimePolicy" +fi +``` + +If the attachment fails (insufficient IAM permissions, or an SSO-managed role with +name starting with `AWSReservedSSO_`), inform the user: + +- The policy JSON is at `$ATX_INFRA_DIR/atx-runtime-policy.json` +- They need their AWS administrator to create and attach it to their identity +- For SSO users, it must be added to their IAM Identity Center permission set + +Verify the policy is working by invoking a Lambda: + +```bash +aws lambda invoke --function-name atx-list-jobs --payload '{}' \ + --cli-binary-format raw-in-base64-out /dev/stdout +``` + +If this succeeds, the runtime policy is active. If not, the attachment hasn't +taken effect yet — wait a few seconds and retry. + +If the caller also needs to deploy/destroy infrastructure (not just run jobs), +repeat the above with `atx-deployment-policy.json` and policy name `ATXDeploymentPolicy`. + +## Lambda Function Names + +After deployment, the Lambda functions are available with these names: + +- `atx-trigger-job` — Submit a single transformation job +- `atx-get-job-status` — Get status of a single job +- `atx-terminate-job` — Terminate a running job +- `atx-list-jobs` — List all jobs +- `atx-trigger-batch-jobs` — Submit a batch of jobs +- `atx-get-batch-status` — Get batch status +- `atx-terminate-batch-jobs` — Terminate all jobs in a batch +- `atx-list-batches` — List all batches + +## MCP Configuration (Optional) + +If the user has a local AWS Transform MCP configuration, include it inline with job +submissions so the containers can use it. Check for a local config: + +```bash +cat ~/.aws/atx/mcp.json 2>/dev/null +``` + +If it exists, include the contents as the `mcpConfig` field in the `atx-trigger-job` +or `atx-trigger-batch-jobs` payload. 
For example:

```bash
aws lambda invoke --function-name atx-trigger-job \
  --payload '{"source":"...","command":"...","jobName":"...","mcpConfig":<contents of ~/.aws/atx/mcp.json>}' \
  --cli-binary-format raw-in-base64-out /dev/stdout
```

The MCP config travels with the job request — do NOT upload it separately via
`atx-configure-mcp`. Skip this step if no local MCP config exists.

## Job Submission

**Limits:** Maximum 512 repositories per session. Submit in batches of up to 128
jobs each via `atx-trigger-batch-jobs`. If you have more than 128 jobs, split them
into multiple Lambda calls (e.g., 500 repos = 4 calls of 128 + 128 + 128 + 116).
Each call returns its own `batchId` — track all of them for monitoring. AWS Batch
runs all jobs in a batch concurrently. If the total repo count exceeds 512, stop
and ask the user to reduce the list.

**Repo analysis:** Do NOT scan or inspect repository contents locally in remote
mode. The repos may not be available on the local machine. Let the user specify
which transformation definitions to apply, or use the one already selected in the plugin.

**Deployment failures:** If `setup.sh` or `cdk deploy` fails for any reason, run
`./teardown.sh` first to clean up the partial state, then retry `./setup.sh`.
Do not try to manually fix individual CloudFormation errors.

**Source restrictions:** The `source` field accepts HTTPS git URLs, SSH git URLs
(with `atx/ssh-key` configured), or S3 paths within the CDK-managed source bucket
(`atx-source-code-{account}`). The container's IAM role cannot read from arbitrary
S3 buckets. If the user provides zips in their own S3 bucket, copy them to the
managed source bucket first (see the Collect Repositories step in [custom](custom.md)).

Single job:

```bash
aws lambda invoke --function-name atx-trigger-job \
  --payload '{"source":"<git-url-or-s3-path>","command":"atx custom def exec -n <td-name> -p /source/<repo> -x -t","jobName":"<job-name>"}' \
  --cli-binary-format raw-in-base64-out /dev/stdout
```

Batch:

```bash
aws lambda invoke --function-name atx-trigger-batch-jobs \
  --payload '{"batchName":"<batch-name>","jobs":[{"source":"<git-url-or-s3-path>","command":"atx custom def exec -n <td-name> -p /source/<repo> -x -t","jobName":"<job-name>"}]}' \
  --cli-binary-format raw-in-base64-out /dev/stdout
```

## SSH URL Handling

SSH git URLs (`git@github.com:org/repo.git` or `ssh://git@github.com/org/repo.git`)
are passed directly to the Lambda — the container clones them remotely. This requires
an SSH private key stored in Secrets Manager as `atx/ssh-key`. See the Collect Repositories step in [custom](custom.md)
for setup instructions.

If the SSH key is not configured, the clone will fail inside the container. Do NOT
fall back to cloning locally — guide the user through SSH key setup instead.

## Polling

Poll every 60 seconds for the first 10 polls, then every 5 minutes thereafter.
Report only on status change.

```bash
aws lambda invoke --function-name atx-get-job-status \
  --payload '{"jobId":"<job-id>"}' \
  --cli-binary-format raw-in-base64-out /dev/stdout

aws lambda invoke --function-name atx-get-batch-status \
  --payload '{"batchId":"<batch-id>"}' \
  --cli-binary-format raw-in-base64-out /dev/stdout
```

## Results Location

Do NOT download results locally. Results stay in S3.
Present the S3 path to the user:

```
Results: s3://atx-custom-output-{account-id}/transformations/<job-name>/<conversation-id>/
  code.zip — zipped transformed source code
  logs.zip — AWS Transform conversation logs
```

If the user explicitly asks to download, provide the command but let them run it:

```
aws s3 cp s3://atx-custom-output-{account-id}/transformations/<job-name>/<conversation-id>/code.zip ./code.zip
```

## Private Repository Access

**Note:** If the user has private repos, credentials should already be configured
during the Collect Repositories step in [custom](custom.md). This section documents the
mechanism for reference.

The container fetches credentials from AWS Secrets Manager at startup. Three secret types:

**`atx/github-token`** — plain string GitHub PAT for private HTTPS repo cloning:

```bash
aws secretsmanager create-secret --name "atx/github-token" --secret-string "<token>"
```

**`atx/ssh-key`** — plain string SSH private key for private SSH repo cloning:

```bash
aws secretsmanager create-secret --name "atx/ssh-key" --secret-string "$(cat <path-to-private-key>)"
```

**`atx/credentials`** — JSON array of credential files for any tool/registry (see Container Customization below).

Setup (requires user consent):

1. Explain which secrets will be created in their AWS account
2. Get explicit confirmation and credentials from the user
3. Create the secret(s)
4. Container entrypoint auto-fetches at startup — no image rebuild needed
5. User can delete anytime: `aws secretsmanager delete-secret --secret-id "atx/github-token" --region "$REGION" --force-delete-without-recovery`

AWS credentials for the AWS Transform CLI are handled automatically by the IAM task role (refreshed every 45 min).

## Monitoring

CloudWatch dashboard: `ATX-Transform-CLI-Dashboard`

- Job Tracking: completion rates, success/failure trends
- Lambda Metrics: invocation counts, duration, errors
- Real-time Logs: stream transformation progress

Dashboard URL (construct dynamically using the caller's region):

```bash
REGION=${AWS_REGION:-${AWS_DEFAULT_REGION:-$(aws configure get region 2>/dev/null)}}
REGION=${REGION:-us-east-1}
echo "https://${REGION}.console.aws.amazon.com/cloudwatch/home#dashboards/dashboard/ATX-Transform-CLI-Dashboard"
```

Include this link in the final output when remote execution completes.

## Container Customization

The default container includes Java (8, 11, 17, 21, 25), Python (3.8-3.14), Node.js
(16-24), Maven, Gradle, gcc/g++, make, and common build tools.

If a transformation requires a language or tool not included, you handle this
automatically during the Verify Runtime Compatibility step — see [custom](custom.md). The
Dockerfile has a clearly marked `CUSTOM LANGUAGES AND TOOLS` section where new
`RUN` commands should be inserted. After editing, redeploy with `cd "$ATX_INFRA_DIR" && ./setup.sh` — CDK
auto-detects Dockerfile changes and rebuilds the image.

### Adding Languages or Tools

```dockerfile
# Example: Add Rust (install as atxuser so binaries land in /home/atxuser/.cargo)
USER atxuser
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
USER root
ENV PATH="/home/atxuser/.cargo/bin:$PATH"
```

### Private Package Registries

Credentials are fetched from AWS Secrets Manager at container startup — never baked into the image.

**`atx/github-token`** (plain string) — GitHub PAT for private repo cloning.

**`atx/credentials`** (JSON array) — Generic credential files for any tool or registry.
Each entry writes a file into the container at startup: + +```json +[ + { "path": "/home/atxuser/.npmrc", "content": "//npm.company.com/:_authToken=TOKEN" }, + { "path": "/home/atxuser/.m2/settings.xml", "content": "..." }, + { + "path": "/home/atxuser/.config/pip/pip.conf", + "content": "[global]\nindex-url = https://pypi.company.com/simple" + }, + { + "path": "/home/atxuser/.gem/credentials", + "content": "---\n:rubygems_api_key: KEY", + "mode": "0600" + }, + { "path": "/home/atxuser/.cargo/credentials.toml", "content": "[registry]\ntoken = \"TOKEN\"" }, + { "path": "/home/atxuser/.nuget/NuGet.Config", "content": "..." } +] +``` + +Create the secret: + +```bash +aws secretsmanager create-secret --name "atx/credentials" \ + --secret-string '[{"path":"/home/atxuser/.npmrc","content":"//npm.company.com/:_authToken=TOKEN"}]' +``` + +This works for any language or tool added to the Dockerfile — npm, Maven, pip, RubyGems, Cargo, NuGet, etc. The `mode` field is optional (defaults to `0644`). + +### Version Switching at Runtime + +The container supports runtime version switching via environment variables passed as container overrides. +The `environment` field on the job MUST match the exact target version of the +transformation — not the closest available version. For example, if upgrading to +Java 23, set `"JAVA_VERSION":"23"` (not `"21"`). If the target version was added +to the Dockerfile and entrypoint per the Verify Runtime Compatibility step, the switcher will activate it. + +Via Lambda (recommended): + +```bash +aws lambda invoke --function-name atx-trigger-job \ + --payload '{"source":"...","jobName":"...","command":"atx ...","environment":{"JAVA_VERSION":"23","NODE_VERSION":"22","PYTHON_VERSION":"3.13"}}' \ + --cli-binary-format raw-in-base64-out /dev/stdout +``` + +Via direct Batch submission: + +```bash +aws batch submit-job \ + --container-overrides '{ + "environment": [ + {"name": "JAVA_VERSION", "value": "23"}, + {"name": "PYTHON_VERSION", "value": "3.13"}, + {"name": "NODE_VERSION", "value": "22"} + ] + }' ... +``` + +Available: Java 8/11/17/21/25, Python 3.8-3.14, Node.js 16/18/20/22/24. +Python accepts both short (`13`) and full (`3.13`) formats. + +See `$ATX_INFRA_DIR/container/README.md` for full customization reference including Docker BuildKit secrets for secure credential handling. + +## Pricing + +Do NOT quote specific prices or cost estimates to the user. If the user asks about +pricing, direct them to: https://aws.amazon.com/transform/pricing/ + +The remote infrastructure (Batch, Lambda, S3) has no fixed costs — all services are +pay-per-use and cost nothing when idle. + +## Cleanup + +The remote infrastructure costs nothing when idle — Fargate is pay-per-task, +Lambdas are pay-per-invoke, and S3 storage is minimal. + +After every remote execution completes (all jobs finished or failed), prompt the +user with the following: + +> Your remote infrastructure is still deployed in your AWS account. All services +> are pay-per-use only — there are no fixed costs when idle. You can leave it in +> place for future transformations, or tear it down now. +> +> For pricing details: https://aws.amazon.com/transform/pricing/ +> +> If you tear down: +> +> - All AWS Transform resources are completely removed from your account +> - KMS key deletion is scheduled (7-day AWS minimum wait) +> - S3 buckets, secrets, IAM policies, log groups — all deleted +> - You'll need to re-run setup (~5-10 min) next time you use remote mode +> +> Would you like to keep the infrastructure or tear it down? 
+ +If the user chooses to tear down: + +```bash +cd "$ATX_INFRA_DIR" && ./teardown.sh +``` + +If the user chooses to keep it, confirm: "Infrastructure will stay deployed. Next +time you run remote transformations, everything will be ready immediately." diff --git a/plugins/aws-transform/skills/aws-transform/references/custom-repo-analysis.md b/plugins/aws-transform/skills/aws-transform/references/custom-repo-analysis.md new file mode 100644 index 00000000..5811cbe7 --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/custom-repo-analysis.md @@ -0,0 +1,138 @@ +# Repo Analysis & Transformation Definition Matching + +**Local mode only.** Repo analysis inspects files on the local filesystem — it +cannot run inside remote containers. For remote mode, skip this step and let the +user specify which transformation definitions to apply. If the user selected remote mode, do NOT attempt +to run the detection commands below. + +Inspect repositories and match them against available transformation definitions. + +## Transformation Definition Discovery (Required First Step) + +```bash +atx custom def list # Human-readable +atx custom def list --json # Programmatic parsing +``` + +Never hardcode transformation definition names. Only match repos against transformation definitions that appear in this output. +If `atx` is not installed, install it first — do not fall back to guessed names. + +## Known AWS-Managed Transformation Definitions (Reference Only) + +This table is a guide for signal detection, NOT a substitute for `atx custom def list --json`. +Names change over time. Always use actual names from the live output. + +| Name (may change) | Description | Key Config | +| --------------------------------------- | --------------------------------------------------- | ------------------------------------------------ | +| `AWS/java-version-upgrade` | Upgrade Java/JDK version (any source -> any target) | Target JDK version (e.g., 17, 21) | +| `AWS/python-version-upgrade` | Upgrade Python version (3.8/3.9 -> 3.11/3.12/3.13) | Target Python version | +| `AWS/nodejs-version-upgrade` | Upgrade Node.js version (any source -> any target) | Target Node.js version | +| `AWS/java-aws-sdk-v1-to-v2` | Migrate AWS SDK for Java v1 -> v2 (Maven or Gradle) | None required | +| `AWS/python-boto2-to-boto3` | Migrate Python boto2 -> boto3 | None required | +| `AWS/nodejs-aws-sdk-v2-to-v3` | Migrate AWS SDK for JavaScript v2 -> v3 | None required | +| `AWS/early-access-java-x86-to-graviton` | Migrate Java x86 code to ARM64/Graviton | None required | +| `AWS/comprehensive-codebase-analysis` | Tech debt analysis + documentation generation | Optional: `additionalPlanContext` for focus area | + +## Transformation Patterns + +| Pattern | Complexity | Examples | +| ------------------------------- | ----------- | ----------------------------------------------------------- | +| Language Version Upgrades | Low-Medium | Java 8->17, Python 3.9->3.13, Node.js 12->22 | +| API and Service Migrations | Medium | AWS SDK v1->v2, Boto2->Boto3, JUnit 4->5, javax->jakarta | +| Framework Upgrades | Medium | Spring Boot 2.x->3.x, React 17->18, Angular, Django | +| Framework Migrations | High | Angular->React, Redux->Zustand, Vue.js->React | +| Library and Dependency Upgrades | Low-Medium | Pandas 1.x->2.x, NumPy, Hadoop/HBase/Hive | +| Code Refactoring | Low-Medium | Print->Logging, string concat->f-strings, type hints | +| Script/File Translations | Low-Medium | CDK->Terraform, Terraform->CloudFormation, Bash->PowerShell | +| 
Architecture Migrations | Medium-High | x86->Graviton, on-prem->Lambda, server->containers |
| Language-to-Language Migrations | Very High | Java->Python, JavaScript->TypeScript, C->Rust |
| Custom/Org-Specific | Varies | Internal library migrations, coding standards enforcement |

Service routing: COBOL/mainframe -> use AWS Transform for Mainframe. .NET Framework -> consider AWS Transform for Windows. VMware -> consider AWS Transform for VMware.

## Detection Commands

### Python

```bash
cat <repo>/.python-version 2>/dev/null
cat <repo>/pyproject.toml 2>/dev/null | head -30
cat <repo>/setup.cfg 2>/dev/null | head -30
cat <repo>/requirements.txt 2>/dev/null | head -10
```

### Java

```bash
cat <repo>/pom.xml 2>/dev/null | head -60 # Look for <java.version>, <maven.compiler.source>
cat <repo>/build.gradle 2>/dev/null | head -40 # Look for sourceCompatibility
cat <repo>/.java-version 2>/dev/null
```

### Node.js

```bash
cat <repo>/package.json 2>/dev/null # Look for engines.node
cat <repo>/.nvmrc 2>/dev/null
cat <repo>/.node-version 2>/dev/null
```

## AWS SDK Detection

| Signal | Language | What It Means |
| -------------------------------------------- | -------- | ------------------------------ |
| `import boto` / `from boto` (NOT boto3) | Python | Legacy boto2 — needs migration |
| `com.amazonaws` or `aws-java-sdk` in pom.xml | Java | SDK v1 — needs migration |
| `"aws-sdk"` in package.json (NOT `@aws-sdk`) | Node.js | SDK v2 — needs migration |

```bash
# Python boto2
grep -rlE "import boto([^3]|$)|from boto([^3]|$)" --include="*.py" <repo> 2>/dev/null | head -3
# Java SDK v1
grep -rl "com.amazonaws" --include="*.java" <repo> 2>/dev/null | head -3
cat <repo>/pom.xml 2>/dev/null | grep -i "aws-java-sdk"
# Node.js SDK v2
cat <repo>/package.json 2>/dev/null | grep '"aws-sdk"'
```

## Graviton Detection

```bash
grep -rlE "x86_64|amd64|x86-64" --include="*.yml" --include="*.yaml" --include="Dockerfile" <repo> 2>/dev/null | head -3
```

Currently Java-only. Match against the Graviton migration transformation definition if available.

## Match Report Format

```
Transformation Match Report
=============================
Repository: <name> (<path>)
  Language: <language> <detected version>
  Matching transformation definitions:
    - <td-name>

  Other available transformation definitions (may also apply):
    - <td-name>

Summary: N repos analyzed, M have matches (T total jobs)
```

Group by repository. Show detected version. Include repos with no matches.
List custom transformation definitions (non-`AWS/` prefix) under "Other available transformation definitions".

## Edge Cases

| Case | Handling |
| -------------------------------------------- | ------------------------------------------------------------------------ |
| Repo already up-to-date | List upgrade transformation definition but note current version |
| Monorepo (multiple languages) | List all matching transformation definitions — each is a separate job |
| Mixed local + remote repos | Clone git URL repos locally for inspection, inspect local paths directly |
| Custom transformation definitions in account | Show under "Other available transformation definitions" per repo |
| Git clone fails | Report error, continue with remaining repos |

## Cleanup

Do NOT delete cloned repos after analysis — they are needed for local execution.
Track cloned repo paths and inform the user at session end so they can delete them
(one way to do this is sketched below).
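A minimal sketch of that tracking, assuming a plain-text list in the session directory (the `cloned-repos.txt` file name is illustrative, not something the skill mandates):

```bash
# Sketch: append each cloned repo path to a session tracking file so the
# user can be told what to delete at session end. cloned-repos.txt is an
# illustrative name; CLONE_DIR is the path used for the clone.
TRACK_FILE=~/.aws/atx/custom/atx-agent-session/cloned-repos.txt
mkdir -p "$(dirname "$TRACK_FILE")"
echo "$CLONE_DIR" >> "$TRACK_FILE"

# At session end: report the paths, but do not delete them
if [ -f "$TRACK_FILE" ]; then
  echo "Cloned repos you may want to delete:"
  cat "$TRACK_FILE"
fi
```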
diff --git a/plugins/aws-transform/skills/aws-transform/references/custom-results-synthesis.md b/plugins/aws-transform/skills/aws-transform/references/custom-results-synthesis.md
new file mode 100644
index 00000000..993f776b
--- /dev/null
+++ b/plugins/aws-transform/skills/aws-transform/references/custom-results-synthesis.md
@@ -0,0 +1,59 @@

# Results Synthesis

Generate a single summary file after bulk transformations complete.

## Output

Write one file: `~/.aws/atx/custom/atx-agent-session/transformation-summaries/transformation-summary-$SESSION_TS.md`

```bash
mkdir -p ~/.aws/atx/custom/atx-agent-session/transformation-summaries
```

**Important:** Do NOT use heredoc (`cat << EOF`) to write this file — heredoc
blocks can hang in shell environments. Use a command (e.g., `printf '%s'`) to write the content.

## Template

```markdown
# AWS Transform Summary

> Completed: <timestamp>
> Repositories: <total> | Succeeded: <n> | Failed: <n>

## Results

| Project | Transformation | Status | Notes |
| ------- | -------------- | ---------------- | ------------ |
| <name> | <td-name> | Succeeded/Failed | <short note> |

## Failed Transformations

### <project-name>

- **TD**: <td-name>
- **Error**: <error summary>
- **Suggested Fix**: <suggested fix>

## Next Steps

1. Review changes in each transformed repo
2. Run tests and deploy
```

## Presentation

Tell the user:

```
Results: <succeeded>/<total> succeeded, <failed> failed
Summary: ~/.aws/atx/custom/atx-agent-session/transformation-summaries/transformation-summary-$SESSION_TS.md
```

For remote mode executions, also include the CloudWatch dashboard link:

```bash
REGION=${AWS_REGION:-${AWS_DEFAULT_REGION:-$(aws configure get region 2>/dev/null)}}
REGION=${REGION:-us-east-1}
echo "CloudWatch Dashboard: https://${REGION}.console.aws.amazon.com/cloudwatch/home#dashboards/dashboard/ATX-Transform-CLI-Dashboard"
```

diff --git a/plugins/aws-transform/skills/aws-transform/references/custom-single-transformation.md b/plugins/aws-transform/skills/aws-transform/references/custom-single-transformation.md
new file mode 100644
index 00000000..511b4385
--- /dev/null
+++ b/plugins/aws-transform/skills/aws-transform/references/custom-single-transformation.md
@@ -0,0 +1,327 @@

# Single Transformation

Apply one transformation definition to one repo. Transformation definition, config, and repo are already confirmed from the match report.

## Table of Contents

- [Local Mode](#local-mode)
- [Remote Mode](#remote-mode)
- [Error Handling](#error-handling)
- [MANDATORY: Cleanup](#mandatory-cleanup)

## Local Mode

### Verify AWS Transform CLI (once per session, skip if already verified)

```bash
atx --version
```

### Verify Language Version

The active language runtime must match the transformation's target version so that builds and tests run correctly. For example, a Java 8 -> 17 upgrade needs Java 17 available locally.
Check that the installed version matches the target:

```bash
java -version # Java transformations
python3 --version # Python transformations
node --version # Node.js transformations
```

If there is a mismatch, resolve it before proceeding:

- Look for the correct version already installed (e.g., check `/usr/lib/jvm/`, `pyenv versions`, `nvm ls`)
- If found, switch to it (e.g., `export JAVA_HOME=<jdk-path> && export PATH="$JAVA_HOME/bin:$PATH"`, `pyenv shell 3.12`, `nvm use 22`)
- If not installed, ask the user for permission before installing (e.g., `brew install --cask corretto17` (macOS), `sudo yum install java-17-amazon-corretto-devel` (RHEL/AL2), or `sudo apt install java-17-amazon-corretto-jdk` (Debian/Ubuntu), `pyenv install 3.12`, `nvm install 22`)
- Verify the switch succeeded by re-checking the version before continuing

### Prepare Source

If the user provided a git URL (HTTPS or SSH) instead of a local path, clone it
locally first. The user's local git config handles authentication for private repos
— no Secrets Manager setup needed in local mode.

```bash
CLONE_DIR=~/.aws/atx/custom/atx-agent-session/repos/<repo-name>-$SESSION_TS
git clone <git-url> "$CLONE_DIR"
```

If the user provided an S3 path to a zip, download and extract it locally:

```bash
aws s3 cp s3://user-bucket/repos/<name>.zip ~/.aws/atx/custom/atx-agent-session/<name>-$SESSION_TS.zip
unzip -qo ~/.aws/atx/custom/atx-agent-session/<name>-$SESSION_TS.zip -d ~/.aws/atx/custom/atx-agent-session/repos/<name>-$SESSION_TS/
```

Use the cloned/extracted path as `<repo-path>` for all subsequent steps. If the
user provided a local path, use it directly.

### Validate Repository

```bash
ls -la <repo-path>
git -C <repo-path> status
```

If not a git repo: `cd <repo-path> && git init && git add . && git commit -m "Initial commit"`

### Telemetry

When running `atx custom def exec`, always include the `--telemetry` flag (see the Telemetry section in [custom](custom.md)). Format:
`--telemetry "client=ide,agent=ide,executionMode=<local|remote>"`

- `client` is always `ide`
- `agent` is always `ide`
- `executionMode` is `local` for direct CLI invocation, `remote` when submitting via Lambda

### Execute and Monitor

If the user is transforming the currently opened workspace project, `cd` into it
and run `pwd` to confirm the absolute path before using it with `-p`.

Launch the transformation in a way that returns control immediately. Some shell
tools block until all child processes exit, even with `&`. To avoid this, use bash to write
a launcher script and execute it, using exactly this:

```bash
mkdir -p ~/.aws/atx/custom/atx-agent-session
cat > ~/.aws/atx/custom/atx-agent-session/run.sh << 'RUNNER'
#!/bin/bash
atx custom def exec -n <td-name> -p <repo-path> -x -t \
  --configuration 'additionalPlanContext=<context>' \
  --telemetry "client=ide,agent=ide,executionMode=local"
echo $? > ~/.aws/atx/custom/atx-agent-session/transform.exit
RUNNER
chmod +x ~/.aws/atx/custom/atx-agent-session/run.sh
nohup ~/.aws/atx/custom/atx-agent-session/run.sh > ~/.aws/atx/custom/atx-agent-session/transform.log 2>&1 &
echo $! > ~/.aws/atx/custom/atx-agent-session/transform.pid
cat ~/.aws/atx/custom/atx-agent-session/transform.pid
```

Omit `--configuration` if no config is needed. The `--telemetry` flag is always included — see [custom](custom.md) for details.

This backgrounds the runner script (not the CLI directly), so the exit code is
captured to `~/.aws/atx/custom/atx-agent-session/transform.exit` when ATX finishes. The PID file tracks
the runner process.
**As soon as you have the PID, immediately run the next command** — do NOT stop
and wait for the user. The AWS Transform CLI outputs the conversation log path within
30-60 seconds of starting. Read it from the process log:

```bash
grep "Conversation log:" ~/.aws/atx/custom/atx-agent-session/transform.log 2>/dev/null
```

If it hasn't appeared yet, wait 15 seconds and retry (up to 4 attempts). The
output looks like:

```
Conversation log: /Users/<user>/.aws/atx/custom/20260319_063712_e3479843/logs/2026-03-19T06-37-26-conversation.log
```

Extract the full path from this line — this is the conversation log for THIS
specific run. Do NOT use `ls -t` to find the most recent log across all
conversations, as that may return a log from a previous run.

Then start a monitoring loop. On each cycle:

1. Check if the process is still running: `kill -0 $(cat ~/.aws/atx/custom/atx-agent-session/transform.pid) 2>/dev/null && echo "RUNNING" || echo "DONE"`
2. Read the latest lines from the conversation log and tell the user what's happening
3. Wait 60 seconds, then repeat

**You MUST continue polling without waiting for user input.** After each poll,
immediately schedule the next one. The user should see continuous progress updates
like "AWS Transform is planning changes...", "Applying changes to 3 files...", "Running build...".

CRITICAL rules:

1. **Extract conversation ID and log path.** After launching the process, look for
   the conversation log line in stdout:

   ```
   Conversation log: /Users/<user>/.aws/atx/custom/<conversation-id>/logs/<timestamp>-conversation.log
   ```

   Extract the `<conversation-id>` (e.g., `20260311_233325_21bb5ef0`) and the full
   log file path. Report the conversation ID to the user immediately. Example:
   "Transformation started — conversation ID: `20260311_233325_21bb5ef0`"

2. **Tail the conversation log.** Once the log path is known, read new lines from
   the conversation log on each polling cycle and relay meaningful progress to the
   user. This is the primary way to keep the user informed of what AWS Transform is doing
   (e.g., planning steps, applying changes, running builds, encountering errors).

3. **Filter out noise.** When reading the conversation log or process stdout,
   silently IGNORE any lines containing "Thinking" — these are animated spinner
   indicators that repeat dozens of times and must NOT be echoed to the user.
   Surface everything else: planning output, file changes, build results, errors,
   and completion summaries.

4. **Completion = process exit only.** The transformation is done ONLY when the
   background process exits (i.e., `kill -0` returns non-zero). Do NOT treat
   exit code 0 from any other command (grep, cat, test, etc.) as transformation
   completion. Do NOT treat log messages like "TRANSFORMATION COMPLETE" as
   completion — AWS Transform performs additional steps after that (validation summary
   generation). Check the process exit code — do NOT parse terminal
   output or log content to determine completion. AWS Transform prints progress messages
   and spinner animations throughout execution that do NOT indicate completion.

5. **Polling interval.** Check the background process status and tail the
   conversation log every 60 seconds. Do NOT use escalating backoff for local
   mode — a fixed 60-second interval is sufficient. Do NOT sleep in the foreground
   terminal.

6. **Exit code determines success.** Once `kill -0` confirms the process has
   exited, read the exit code: `cat ~/.aws/atx/custom/atx-agent-session/transform.exit`.
   Exit code 0 = success. Non-zero = failure. Only after reading the exit code
   should you report the transformation as complete or failed.

7. **Open artifacts in the IDE (local mode only).** Using the conversation ID
   from rule #1, the artifacts directory is
   `~/.aws/atx/custom/<conversation-id>/artifacts/`. During each polling cycle,
   list the directory and open any new files that appear. Open each file only
   once — track which ones you've already opened.

   Check and open during polling:

   ```bash
   ARTIFACTS_DIR=~/.aws/atx/custom/<conversation-id>/artifacts
   ls "$ARTIFACTS_DIR" 2>/dev/null
   ```

   When new files appear, open them in the current IDE window.

   If a file named `plan.json` appears, IMMEDIATELY after opening it — before
   doing anything else, before the next polling cycle — display this message:

   > ### Open Source Control to watch changes in real time
   >
   > **AWS Transform commits after each step — Source Control shows every file change with full diffs as they happen.**

   Do NOT defer this message. Do NOT batch it with other output. Send it
   right after opening plan.json.

   Continue polling and opening new artifacts until the process exits.

### Present Results

Show transformation definition, repo path, key changes. Also tell the user:
"You can review all changes in the Source Control panel — it shows the full
commit history with diffs for each file AWS Transform modified."

## Remote Mode

### Check Infrastructure

```bash
aws cloudformation describe-stacks --stack-name AtxInfrastructureStack \
  --query 'Stacks[0].StackStatus' --output text || echo "NOT_DEPLOYED"
```

If NOT_DEPLOYED: get user consent, then deploy. See [custom-remote-execution](custom-remote-execution.md).

### Prepare Source

| Source Type | Action |
| ------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| HTTPS git URL (public) | Use directly — container clones it |
| HTTPS git URL (private) | Verify `atx/github-token` exists in Secrets Manager (see Collect Repositories in [custom](custom.md)), then use directly — container fetches PAT and clones |
| SSH git URL (public or private) | Verify `atx/ssh-key` exists in Secrets Manager (see Collect Repositories in [custom](custom.md)), then use directly — container fetches SSH key and clones |
| S3 bucket with zips | Copy zips from user's bucket to managed source bucket (`atx-source-code-{account}`), then use managed S3 paths (staging sketch below) |
| Local repo | Zip -> upload to S3 -> use S3 path |

For local sources:

```bash
ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
mkdir -p ~/.aws/atx/custom/atx-agent-session
cd <repo-path> && zip -qr ~/.aws/atx/custom/atx-agent-session/<name>-$SESSION_TS.zip .
aws s3 cp ~/.aws/atx/custom/atx-agent-session/<name>-$SESSION_TS.zip s3://atx-source-code-${ACCOUNT_ID}/repos/<name>.zip
```

**Important:** Only the CDK-managed source bucket (`atx-source-code-{account}`) is
accessible to the remote container. Do NOT pass arbitrary S3 bucket paths as source —
the container's IAM role cannot read from them.
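For the "S3 bucket with zips" row, a minimal sketch of the staging copy (the source bucket and key are illustrative):

```bash
# Sketch: stage a zip from the user's own bucket into the CDK-managed
# source bucket so the container's IAM role can read it.
# s3://user-bucket/repos/my-repo.zip is an illustrative path.
ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
aws s3 cp s3://user-bucket/repos/my-repo.zip \
  "s3://atx-source-code-${ACCOUNT_ID}/repos/my-repo.zip"
```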
### Submit Job

```bash
aws lambda invoke --function-name atx-trigger-job \
  --payload '{"source":"<git-url-or-s3-path>","command":"atx custom def exec -n <td-name> -p /source/<repo> -x -t --telemetry \"client=ide,agent=ide,executionMode=remote\"","jobName":"<job-name>","environment":{"JAVA_VERSION":"<target-version>"}}' \
  --cli-binary-format raw-in-base64-out /dev/stdout
```

Add `--configuration \"additionalPlanContext=<context>\"` to the command string if config is needed.
The `--telemetry` flag is always included — see [custom](custom.md) for details.

Set the appropriate version environment variable to match the transformation's target version:

- `JAVA_VERSION` for Java transformations (e.g., `"21"` for a Java 8 -> 21 upgrade)
- `PYTHON_VERSION` for Python transformations (e.g., `"3.12"` for a Python 3.8 -> 3.12 upgrade)
- `NODE_VERSION` for Node.js transformations (e.g., `"22"` for a Node.js 18 -> 22 upgrade)

Only include the variable relevant to the transformation language. The Lambda accepts these keys and passes them as Batch container overrides; the entrypoint switches the active runtime at startup.

### Monitor

```bash
aws lambda invoke --function-name atx-get-job-status \
  --payload '{"jobId":"<job-id>"}' \
  --cli-binary-format raw-in-base64-out /dev/stdout
```

Poll every 60 seconds for the first 10 polls, then every 5 minutes thereafter.
Report only on status change. A loop sketch follows at the end of this section.

### Present Results (Remote)

Do NOT download results locally. Results stay in S3. Present the S3 path to the user:

```bash
ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
echo "Results: s3://atx-custom-output-${ACCOUNT_ID}/transformations/<job-name>/"
```

If the user wants to download results, first list the S3 path to discover the
conversation ID (generated at runtime inside the container). Use the actual
job name and account ID — do NOT leave placeholders in commands given to the user:

```bash
aws s3 ls s3://atx-custom-output-{account-id}/transformations/<job-name>/ --region <region>
```

Then provide the download command with the actual conversation ID:

```
aws s3 cp s3://atx-custom-output-{account-id}/transformations/<job-name>/<conversation-id>/code.zip ./code.zip
```

Include the CloudWatch dashboard link in the completion output:

```bash
REGION=${AWS_REGION:-${AWS_DEFAULT_REGION:-$(aws configure get region 2>/dev/null)}}
REGION=${REGION:-us-east-1}
echo "https://${REGION}.console.aws.amazon.com/cloudwatch/home#dashboards/dashboard/ATX-Transform-CLI-Dashboard"
```

Show transformation definition, repo, status, downloaded path, and the dashboard link for monitoring history and logs.

After presenting results, prompt the user about infrastructure teardown. See the
Cleanup section in [custom-remote-execution](custom-remote-execution.md) for the exact prompt.
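The Monitor cadence above can be written as a small loop. A sketch, assuming the job ID came back from `atx-trigger-job` and that the status payload contains a terminal `SUCCEEDED`/`FAILED` marker (the exact response shape is an assumption):

```bash
# Sketch of the polling cadence: every 60 s for the first 10 polls, then
# every 5 minutes. JOB_ID is assumed to come from the atx-trigger-job
# response; the SUCCEEDED/FAILED markers are an assumption about the
# status payload.
JOB_ID="my-job-id"   # illustrative
POLLS=0
while true; do
  aws lambda invoke --function-name atx-get-job-status \
    --payload "{\"jobId\":\"$JOB_ID\"}" \
    --cli-binary-format raw-in-base64-out /tmp/atx-status.json > /dev/null
  STATUS=$(cat /tmp/atx-status.json)
  case "$STATUS" in
    *SUCCEEDED*|*FAILED*) echo "Terminal status: $STATUS"; break ;;
  esac
  POLLS=$((POLLS + 1))
  if [ "$POLLS" -lt 10 ]; then sleep 60; else sleep 300; fi
done
```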
+ +## Error Handling + +| Issue | Resolution | +| -------------------------- | ----------------------------------------------------------------------------------- | +| Dependency incompatibility | Check package compatibility, may need manual update | +| Build failure (remote) | Check build command works locally, verify registry credentials in `atx/credentials` | +| ATX timeout | Set `ATX_SHELL_TIMEOUT=1800` or break into smaller transforms | + +## MANDATORY: Cleanup + +Clean up session files **before starting** and **after completing** each transformation: + +```bash +[ -d ~/.aws/atx/custom/atx-agent-session ] && find ~/.aws/atx/custom/atx-agent-session -maxdepth 1 -type f \( -name "*.sh" -o -name "*.log" -o -name "*.pid" -o -name "*.exit" -o -name "*.zip" \) -delete 2>/dev/null || true +``` diff --git a/plugins/aws-transform/skills/aws-transform/references/custom-troubleshooting.md b/plugins/aws-transform/skills/aws-transform/references/custom-troubleshooting.md new file mode 100644 index 00000000..3a65fec2 --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/custom-troubleshooting.md @@ -0,0 +1,133 @@ +# Troubleshooting + +## Quick Reference + +| Issue | Resolution | +| ----------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `atx` not found | Install: `curl -fsSL https://transform-cli.awsstatic.com/install.sh` piped to `bash` | +| AWS credentials error or expiry | Run `aws sts get-caller-identity`. Check `AWS_PROFILE` or access key env vars | +| Permission denied | Local mode: need `transform-custom:*` — see Prerequisites -> IAM Permissions in [custom](custom.md). Remote mode: generate and attach policies via `npx ts-node generate-caller-policy.ts` — see [custom-remote-execution](custom-remote-execution.md) | +| Network error | Resolve region: `REGION=${AWS_REGION:-${AWS_DEFAULT_REGION:-$(aws configure get region 2>/dev/null)}}; REGION=${REGION:-us-east-1}`. Check access to `transform-custom.${REGION}.api.aws` | +| Build fails during transform | Verify build command works locally first. Try interactive mode for debugging | +| Transform not found | Run `atx custom def list --json` to check available transformation definitions | +| Configuration fails with commas | Do not use commas inside `additionalPlanContext` values — they break the CLI parser. Rephrase to avoid commas | +| Conversation expired | Conversations expire after 30 days. Start a new one | +| Windows not supported | Tell user to use Windows Subsystem for Linux (WSL) | +| Git clone fails in remote container | See "Private Repo Credential Issues" section below | +| Timeout | Set `export ATX_SHELL_TIMEOUT=1800` (default: 900s) | +| Stale .exit file | The `.exit` file in `atx-agent-session/` may be left over from a previous run. Always use `kill -0 ` to check if the process is still running — do not rely solely on the `.exit` file | +| Poor quality results | See Improving Quality section below | + +## Private Repo Credential Issues + +If a git clone fails in the remote container (job status FAILED, logs show +authentication or 403 errors), work through these steps with the user: + +**1. 
Is the PAT/key stored?**

```bash
aws secretsmanager describe-secret --secret-id "atx/github-token" --region "$REGION" 2>/dev/null && echo "EXISTS" || echo "MISSING"
aws secretsmanager describe-secret --secret-id "atx/ssh-key" --region "$REGION" 2>/dev/null && echo "EXISTS" || echo "MISSING"
```

If missing, guide the user through setup — see the Collect Repositories step in [custom](custom.md).

**2. Does the PAT have the right scope?**
GitHub fine-grained PATs can be scoped to specific repos. If the user created a
PAT for repos A and B but is now transforming repo C, the clone will fail with 403.
Ask: "Does your GitHub PAT have access to [repo name]? Fine-grained PATs need
each repo explicitly listed."

Resolution: the user updates their PAT on GitHub to include the new repo, then
updates the stored secret:

```bash
aws secretsmanager put-secret-value --secret-id "atx/github-token" --region "$REGION" --secret-string "<new-token>"
```

**3. Has the PAT expired?**
GitHub PATs can have expiration dates. Ask: "When did you create this PAT? It may
have expired." Resolution: create a new PAT on GitHub, then update the secret:

```bash
aws secretsmanager put-secret-value --secret-id "atx/github-token" --region "$REGION" --secret-string "<new-token>"
```

**4. Is it the right credential type for the URL?**

- HTTPS URLs (`https://github.com/...`) need `atx/github-token` (PAT)
- SSH URLs (`git@github.com:...`) need `atx/ssh-key` (SSH private key)
  If the user provided SSH URLs but only has a PAT stored (or vice versa), guide
  them to set up the correct credential type.

**5. Classic vs fine-grained PAT?**
Classic PATs with `repo` scope work for all repos the user has access to.
Fine-grained PATs need each repo explicitly added. If the user is unsure, suggest
a classic PAT with `repo` scope as the simpler option.

## Local Mode Debugging

| Log | Path |
| ---------------- | ------------------------------------------------------------------------ |
| Developer logs | `~/.aws/atx/logs/debug*.log` and `~/.aws/atx/logs/error.log` |
| Conversation log | `~/.aws/atx/custom/<conversation-id>/logs/<timestamp>-conversation.log` |

Network errors may indicate VPN/firewall issues with AWS endpoints.

## Remote Mode Debugging

- CloudWatch logs: `/aws/batch/atx-transform`
- Check log streams for the failed conversation ID in AWS Console
- S3 output bucket contains artifacts even for failed jobs
- Check batch job status for error details

## Deployment Failures

CDK deployment handles most issues automatically. Common recovery:

```bash
ATX_INFRA_DIR="$HOME/.aws/atx/custom/remote-infra"
cd "$ATX_INFRA_DIR" && ./teardown.sh
cd "$ATX_INFRA_DIR" && ./setup.sh
```

Common causes: insufficient IAM permissions, service quota limits, no default VPC, Docker not running (only needed when using a custom container image, not the pre-built image).

## Improving Quality

Diagnose in this order:

1. **Reference materials**: Provide migration guides or API specs via `additionalPlanContext` (example below).
2. **Complexity**: Decompose very complex transforms into smaller steps.
3. **Knowledge items**: Review learnings from previous runs. Enable good ones, disable irrelevant ones.
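For item 1 above, a concrete shape for passing reference material (the URL and wording are illustrative; note the comma restriction from the Quick Reference table):

```bash
# Sketch: pass a migration guide as plan context. Avoid commas inside the
# additionalPlanContext value (they break the CLI parser); use semicolons
# or separate sentences instead. The URL and wording are illustrative.
atx custom def exec -n <td-name> -p <repo-path> -x -t \
  --configuration 'additionalPlanContext=Follow the migration guide at https://example.com/guide; prefer constructor injection over field injection' \
  --telemetry "client=ide,agent=ide,executionMode=local"
```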
+ +## Network Requirements + +| Endpoint | Purpose | +| ------------------------------------ | ---------------------------- | +| `transform-cli.awsstatic.com` | CLI installation and updates | +| `transform-custom.${REGION}.api.aws` | Transformation service API | + +## Pre-built Container Image + +The default pre-built image URI is `public.ecr.aws/d9h8z6l7/aws-transform:latest`. +This is configured via `prebuiltImageUri` in `cdk.json`. + +## Remote Infrastructure Repo Issues + +If `git pull`, `git commit`, or any other step on the remote-infra repo fails +(merge conflicts, corrupted state, detached HEAD, permission errors, etc.), rename +the existing directory and re-clone from scratch. This is safe — the repo is just +a working copy of the infrastructure scripts, and all deployed AWS resources are +unaffected. + +```bash +ATX_INFRA_DIR="$HOME/.aws/atx/custom/remote-infra" +if [ -d "$ATX_INFRA_DIR" ]; then + mv "$ATX_INFRA_DIR" "$ATX_INFRA_DIR.broken-$(date +%Y%m%d-%H%M%S)" +fi +git clone -b atx-remote-infra --single-branch https://github.com/aws-samples/aws-transform-custom-samples.git "$ATX_INFRA_DIR" +``` + +After re-cloning, continue with the normal flow (e.g., `cd "$ATX_INFRA_DIR" && ./setup.sh`). +The renamed directory can be deleted once you confirm the new clone works. diff --git a/plugins/aws-transform/skills/aws-transform/references/custom.md b/plugins/aws-transform/skills/aws-transform/references/custom.md new file mode 100644 index 00000000..79050d0d --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/custom.md @@ -0,0 +1,845 @@ +# AWS Transform custom + +## Table of Contents + +- [Overview](#overview) +- [Greeting](#greeting) +- [Usage](#usage) +- [Core Concepts](#core-concepts) +- [Philosophy](#philosophy) +- [Prerequisites](#prerequisites) +- [Workflow](#workflow) +- [Execution Modes](#execution-modes) +- [Critical Rules](#critical-rules) +- [Guardrails](#guardrails) +- [Telemetry](#telemetry) +- [Output Structure](#output-structure) +- [References](#references) +- [License](#license) +- [Issues](#issues) +- [Changelog](#changelog) + +## Overview + +Perform code upgrades, migrations, and transformations using AWS Transform custom. +Supports any-to-any transformations: language version upgrades (Java, Python, Node.js, etc.), +framework migrations, AWS SDK migrations, library upgrades, code refactoring, architecture +changes, and custom organization-specific transformations. + +Two execution modes: + +- **Local mode**: Runs the AWS Transform CLI directly on the user's machine. Best for 1-9 repos. +- **Remote mode**: Runs transformations at scale via AWS Batch/Fargate containers. + Best for 10+ repos or when the user prefers cloud execution. Infrastructure is + auto-deployed with user consent. + +You handle the full workflow: inspecting repos, matching them to available +transformation definitions, collecting configuration, and executing transformations +in either mode — the user just provides repos and confirms the plan. + +## Greeting + +"AWS Transform custom can help you: + +- Upgrade Java, Python, and Node.js to modern versions +- Migrate AWS SDKs (Java SDK v1->v2, boto2->boto3, JS SDK v2->v3) +- Handle framework migrations, library upgrades, and code refactoring +- Analyze codebases and generate documentation +- Define and run your own custom transformations using natural language, docs, + and code samples + +Run locally on a few repos for fast iteration, or at scale on hundreds of repos (up to 128 in-parallel). Note: this skill collects telemetry. 
To opt out, see [here](https://docs.aws.amazon.com/transform/latest/userguide/transform-usage-telemetry.html). + +What would you like to transform today?" + +Do NOT inspect any files, run any commands, or check prerequisites until the user responds. + +## Usage + +Use when the user wants to: + +- Transform, upgrade, or migrate code (Java, Python, Node.js, etc.) +- Migrate AWS SDKs (Java SDK v1->v2, boto2->boto3, JS SDK v2->v3, etc.) +- Run bulk code transformations at scale via AWS Batch/Fargate +- Analyze which AWS Transform transformations apply to their repositories +- Perform comprehensive codebase analysis +- Create a new custom Transformation definition + +## Core Concepts + +- **Transformation definition**: A reusable transformation recipe discovered via `atx custom def list --json` +- **Match Report**: Auto-generated mapping of repos to applicable transformation definitions based on code inspection +- **Local Mode**: Runs AWS Transform CLI on the user's machine (1-9 repos, max 3 concurrent) +- **Remote Mode**: Runs transformations in AWS Batch/Fargate (10+ repos, or by preference) + +## Philosophy + +Wait for the user. On activation, present what this skill can do and ask the user +what they'd like to accomplish. Do NOT automatically inspect the working directory, +open files, or any repository until the user explicitly provides repos to work with. + +Once the user provides repositories, match — don't ask. Inspect those repositories +and present which transformations apply automatically. Never show a raw transformation definition list and +ask the user to pick. + +## Prerequisites + +Prerequisite checks run ONCE at the start of a session. Do not repeat per repo. +Do NOT run prerequisite checks until the user has stated what they want to do. + +### Platform Check (Required — All Modes) + +Detect the user's operating system. If on Windows (not WSL), stop immediately and +inform the user: + +> AWS Transform custom does not support native Windows. You need to install +> Windows Subsystem for Linux (WSL) and run this from within WSL. +> +> Install WSL: `wsl --install` in PowerShell (as Administrator), then restart. +> After that, open a WSL terminal and re-run this skill from there. + +Check by running: + +```bash +uname -s +``` + +- `Linux` or `Darwin` -> proceed normally +- `MINGW*`, `MSYS*`, `CYGWIN*`, or any Windows-like output -> block and show the WSL message above +- Command fails, errors, or is not found -> treat as native Windows, block and show the WSL message above + +Do NOT proceed with any other steps on native Windows. + +### AWS CLI (Required — All Modes) + +```bash +aws --version +``` + +If not installed, guide the user: + +- macOS: `brew install awscli` or `curl "https://awscli.amazonaws.com/AWSCLIV2.pkg" -o "AWSCLIV2.pkg" && sudo installer -pkg AWSCLIV2.pkg -target /` +- Linux: `curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && unzip awscliv2.zip && sudo ./aws/install` + +Do NOT proceed until `aws --version` succeeds. + +### AWS Credentials (Required — All Modes) + +```bash +aws sts get-caller-identity +``` + +If credentials are NOT configured, walk the user through setup: + +``` +AWS Transform custom requires AWS credentials to authenticate with the service. Configure authentication using one of the following methods. + +1. AWS CLI Configure (~/.aws/credentials): + aws configure + +2. AWS Credentials File (manual). 
Configure credentials in ~/.aws/credentials: + +[default] +aws_access_key_id = your_access_key +aws_secret_access_key = your_secret_key + +3. Environment Variables. Set the following environment variables: + +export AWS_ACCESS_KEY_ID=your_access_key +export AWS_SECRET_ACCESS_KEY=your_secret_key +export AWS_SESSION_TOKEN=your_session_token + +You can also specify a profile using the AWS_PROFILE environment variable: + +export AWS_PROFILE=your_profile_name +``` + +Do NOT proceed until credentials are verified. Re-run `aws sts get-caller-identity` after setup. + +Note: environment variables set via `export` do not carry over between shell sessions. If you spawn a new shell, credentials set as env vars may be lost. Prefer `aws configure` or `~/.aws/credentials` for persistence. + +### AWS Transform CLI (Required — All Modes) + +Required in all modes for transformation definition discovery (`atx custom def list --json`). +Local mode also uses it for transformation execution. + +```bash +atx --version +# Install: curl -fsSL https://transform-cli.awsstatic.com/install.sh | bash +``` + +**Mandatory: always run `atx update` once at the start of every session**, even if you just ran it recently. This catches new AWS Transform CLI versions and new transformation definitions. Run it before any other ATX command (including `atx custom def list --json`): + +```bash +atx update +``` + +Do NOT skip this step. Do NOT ask the user whether to update. Do NOT condition it on whether the CLI "needs" an update. Run it unconditionally. + +### IAM Permissions (Required — All Modes) + +Local mode requires `transform-custom:*` minimum. Verify by running a transformation definition list: + +```bash +atx custom def list --json +``` + +If this succeeds, permissions are sufficient — skip the rest of this section. + +If it fails with a permissions error, the caller needs the `transform-custom:*` +IAM permission. Explain to the user what's needed and get confirmation before proceeding: + +> Your identity needs the `transform-custom:*` permission to use the AWS Transform CLI. +> I can attach the AWS-managed policy `AWSTransformCustomFullAccess` to your +> identity. Shall I proceed? + +Only after the user confirms, attach the managed policy: + +```bash +CALLER_ARN=$(aws sts get-caller-identity --query Arn --output text) +if echo "$CALLER_ARN" | grep -q ":user/"; then + IDENTITY_NAME=$(echo "$CALLER_ARN" | awk -F'/' '{print $NF}') + aws iam attach-user-policy --user-name "$IDENTITY_NAME" \ + --policy-arn "arn:aws:iam::aws:policy/AWSTransformCustomFullAccess" +elif echo "$CALLER_ARN" | grep -Eq ":assumed-role/|:role/"; then + ROLE_NAME=$(echo "$CALLER_ARN" | sed 's/.*:\(assumed-\)\{0,1\}role\///' | cut -d'/' -f1) + aws iam attach-role-policy --role-name "$ROLE_NAME" \ + --policy-arn "arn:aws:iam::aws:policy/AWSTransformCustomFullAccess" +fi +``` + +If the attachment command itself fails (e.g., insufficient IAM permissions, or an +SSO-managed role), inform the user they need to ask their AWS administrator to +attach the `AWSTransformCustomFullAccess` AWS-managed policy to their identity. +For SSO users (role names starting with `AWSReservedSSO_`), this must be added +to their IAM Identity Center permission set — it cannot be attached directly. + +Do NOT proceed until `atx custom def list --json` succeeds. + +Remote mode requires additional permissions (Lambda invoke, S3, KMS, Secrets Manager, +CloudWatch). 
These are generated and attached as part of the deployment flow — see +[custom-remote-execution](custom-remote-execution.md). + +See [custom-cli-reference](custom-cli-reference.md) for the full permission list. + +### AWS CDK (Remote Mode Only) + +Required for deploying remote infrastructure. Check if installed: + +```bash +cdk --version +``` + +If not installed, install it globally: + +```bash +npm install -g aws-cdk +``` + +Do NOT proceed with remote deployment until `cdk --version` succeeds. + +### Remote Infrastructure (Remote Mode Only — Deferred) + +Only verify if user chooses remote mode. The infrastructure CDK scripts are fetched +at runtime by cloning `https://github.com/aws-samples/aws-transform-custom-samples.git` (branch `atx-remote-infra`) — +they are not bundled with this skill. See [custom-remote-execution](custom-remote-execution.md). + +## Workflow + +Generate a session timestamp once and reuse it for all paths in this session: + +```bash +SESSION_TS=$(date +%Y%m%d-%H%M%S) +``` + +### Collect Repositories + +Ask the user for local paths or git URLs. Accept one or many. Do NOT assume the +current working directory or open editor files are the target — wait for the user +to explicitly provide repositories. + +Accepted source formats: + +- **Local paths** — directories on the user's machine (e.g., `/home/user/my-project`) +- **HTTPS git URLs** — public or private (e.g., `https://github.com/org/repo.git`) +- **SSH git URLs** — e.g., `git@github.com:org/repo.git` +- **S3 bucket path with zips** — e.g., `s3://my-bucket/repos/` + containing zip files of repositories. Each zip becomes one transformation job. + +#### S3 Bucket Input + +If the user provides an S3 path containing zip files, ask which execution mode +they prefer (if not already specified). S3 input works in both modes: + +**Remote mode:** Copy the zips from the user's bucket to the managed source bucket, +then submit jobs pointing to the managed copies: + +```bash +ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) +SOURCE_BUCKET="atx-source-code-${ACCOUNT_ID}" + +# List all zips in the user's bucket path +aws s3 ls s3://user-bucket/repos/ --recursive | grep '\.zip$' + +# Copy each zip to the managed source bucket +aws s3 sync s3://user-bucket/repos/ s3://${SOURCE_BUCKET}/repos/ --exclude "*" --include "*.zip" +``` + +Then submit a batch job with one job per zip, each pointing to +`s3://${SOURCE_BUCKET}/repos/.zip`. The container handles zip extraction +automatically. See [custom-multi-transformation](custom-multi-transformation.md) for batch submission. +The managed source bucket has a 7-day lifecycle — copied zips auto-delete. + +**Local mode:** Download and extract each zip locally: + +```bash +mkdir -p ~/.aws/atx/custom/atx-agent-session/repos +aws s3 sync s3://user-bucket/repos/ ~/.aws/atx/custom/atx-agent-session/repos/ --exclude "*" --include "*.zip" +for zip in ~/.aws/atx/custom/atx-agent-session/repos/*.zip; do + name=$(basename "$zip" .zip) + unzip -qo "$zip" -d "$HOME/.aws/atx/custom/atx-agent-session/repos/${name}-$SESSION_TS/" +done +``` + +Use the extracted directories as `` for local execution. Standard local +mode limits apply (max 3 concurrent repos). + +#### Private Repository Detection (Remote Mode) + +**Always ask the user** — do NOT try to determine repo visibility yourself. Never +attempt to clone, curl, or probe a URL to check if it's public or private. Simply +ask the user. 
As soon as the user provides git URLs and remote mode is selected
(or likely), ask:

> "Are any of these repositories private? If so, the remote container needs
> credentials to clone them — I'll walk you through the setup."

Do NOT skip this question. Do NOT try to infer visibility by attempting a clone,
curl, or any other network request. Just ask.

If the user confirms repos are private, determine the credential type based on URL format.

First, resolve the region (use it for all Secrets Manager commands below):

```bash
REGION=${AWS_REGION:-${AWS_DEFAULT_REGION:-$(aws configure get region 2>/dev/null)}}
REGION=${REGION:-us-east-1}
```

**For HTTPS URLs** — check whether a GitHub PAT is already configured:

```bash
aws secretsmanager describe-secret --secret-id "atx/github-token" --region "$REGION" 2>/dev/null \
  && echo "CONFIGURED" || echo "NOT_CONFIGURED"
```

If CONFIGURED, ask the user: "A GitHub PAT is already stored. Would you like to
keep using it, or replace it with a new one?" If they want to replace it, tell
them to run:

```
aws secretsmanager put-secret-value --secret-id "atx/github-token" --region "$REGION" --secret-string "YOUR_TOKEN_HERE"
```

If NOT_CONFIGURED, explain what's needed and tell the user to run the create command:

> "Private HTTPS repos need a GitHub Personal Access Token (PAT) stored in AWS
> Secrets Manager. The remote container fetches it at startup to clone your repos.
> The token stays in your AWS account — you can delete it anytime.
>
> The PAT needs the `repo` scope for private repositories. Create one at
> https://github.com/settings/tokens and then run:
>
> ```
> aws secretsmanager create-secret --name "atx/github-token" --region "$REGION" --secret-string "YOUR_TOKEN_HERE"
> ```
>
> Delete anytime: `aws secretsmanager delete-secret --secret-id atx/github-token --region "$REGION" --force-delete-without-recovery`"

Do NOT ask the user to paste their token in chat. They run the command themselves.
Wait for the user to confirm it's done, then verify:

```bash
aws secretsmanager describe-secret --secret-id "atx/github-token" --region "$REGION" 2>/dev/null \
  && echo "CONFIGURED" || echo "NOT_CONFIGURED"
```

**For SSH URLs** (`git@...` or `ssh://...`) — check whether an SSH key is configured:

```bash
aws secretsmanager describe-secret --secret-id "atx/ssh-key" --region "$REGION" 2>/dev/null \
  && echo "CONFIGURED" || echo "NOT_CONFIGURED"
```

If CONFIGURED, ask the user: "An SSH key is already stored. Would you like to
keep using it, or replace it with a new one?" If they want to replace it, tell
them to run:

```
aws secretsmanager put-secret-value --secret-id "atx/ssh-key" --region "$REGION" --secret-string "$(cat <path-to-private-key>)"
```

If NOT_CONFIGURED, explain what's needed and tell the user to run the create command:

> "SSH repos need an SSH private key stored in AWS Secrets Manager. The remote
> container fetches it at startup to clone your repos.
>
> Run:
>
> ```
> aws secretsmanager create-secret --name "atx/ssh-key" --region "$REGION" --secret-string "$(cat <path-to-private-key>)"
> ```
>
> Delete anytime: `aws secretsmanager delete-secret --secret-id atx/ssh-key --region "$REGION" --force-delete-without-recovery`"

Do NOT ask the user to paste their SSH key in chat. They run the command themselves.

For local mode, private repo credentials are not needed — the user's local git
config handles authentication. Skip this check entirely for local mode.
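A compact recap of the two checks above, reporting both secrets in one pass:

```bash
# Sketch: report which remote-mode git credentials are already stored,
# using the same describe-secret checks as above.
REGION=${AWS_REGION:-${AWS_DEFAULT_REGION:-$(aws configure get region 2>/dev/null)}}
REGION=${REGION:-us-east-1}
for SECRET in atx/github-token atx/ssh-key; do
  if aws secretsmanager describe-secret --secret-id "$SECRET" --region "$REGION" >/dev/null 2>&1; then
    echo "$SECRET: CONFIGURED"
  else
    echo "$SECRET: NOT_CONFIGURED"
  fi
done
```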
+
+### Discover Transformation Definitions (Silent)
+
+Run silently — do NOT show output to user:
+
+```bash
+atx custom def list --json
+```
+
+Inspect the JSON output directly to build an internal lookup of available transformation definitions.
+Do NOT pipe the output to python, jq, or other parsing scripts — read the JSON
+yourself. Never hardcode transformation definition names.
+
+#### Creating a New Transformation Definition
+
+Trigger this flow when the **user explicitly asks to create a transformation definition**, or when **no existing transformation definition matches the user's goal**.
+
+If no match, confirm with the user first:
+
+> "I didn't find an existing transformation definition that covers [describe the user's goal]. Would
+> you like to create a new one?"
+
+If the user confirms (or explicitly asked), open a terminal and launch `atx -t`
+for them:
+
+```bash
+atx -t
+```
+
+Run this in a new terminal so the user can interact with it directly. Then tell
+the user:
+
+> "I've opened a terminal with `atx -t` — describe the transformation you want
+> to build (e.g., 'migrate log4j to SLF4J') and AWS Transform will walk you through it.
+> Come back here once it's published and I'll pick it up automatically."
+
+After the user returns, re-run `atx custom def list --json` to pick up the newly
+published transformation definition and continue with the normal workflow.
+
+### Inspect Each Repository
+
+Perform lightweight inspection only — check config files for key signals:
+
+| Signal          | Files to Check                                                                                  | Likely Transformation Type |
+| --------------- | ----------------------------------------------------------------------------------------------- | -------------------------- |
+| Python version  | `.python-version`, `pyproject.toml`, `setup.cfg`, `requirements.txt`                            | Python version upgrade     |
+| Java version    | `pom.xml` (`<maven.compiler.source>`), `build.gradle` (`sourceCompatibility`), `.java-version`  | Java version upgrade       |
+| Node.js version | `package.json` (`engines.node`), `.nvmrc`, `.node-version`                                      | Node.js version upgrade    |
+| Python boto2    | `import boto` (NOT boto3)                                                                        | boto2->boto3 migration     |
+| Java SDK v1     | `com.amazonaws` imports, `aws-java-sdk` in pom.xml                                              | Java SDK v1->v2            |
+| Node.js SDK v2  | `"aws-sdk"` in package.json (NOT `@aws-sdk`)                                                    | JS SDK v2->v3              |
+| x86 Java        | `x86_64`/`amd64` in Dockerfiles, build configs                                                  | Graviton migration         |
+
+Cross-reference detected signals against transformation definitions from the discovery step. Only match transformation definitions that
+actually exist in the user's account.
+
+See [custom-repo-analysis](custom-repo-analysis.md) for full detection commands.
+
+### Present Match Report
+
+Format:
+
+```
+Transformation Match Report
+=============================
+Repository: <name> (<path>)
+  Language: <language>
+  Matching transformation definitions:
+    - <transformation-definition-name>
+
+Summary: N repos analyzed, M have applicable transformations (T total jobs)
+```
+
+Present the match report and wait for user confirmation before proceeding.
+Do NOT start any transformation without explicit user consent.
+
+### Collect Configuration
+
+Ask the user for any additional plan context (e.g., target version for upgrade transformation definitions).
+This is mandatory — always ask, even if the transformation definition doesn't strictly require config.
+The user may have preferences or constraints you don't know about.
+Skip only if the user explicitly says no additional context is needed.
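+
+Collected context is later passed to the CLI through the `--configuration` flag described in the Critical Rules below. A minimal sketch, assuming a Java upgrade (the context string is illustrative; the full invocation — definition name, repo path, telemetry — is covered in [custom-cli-reference](custom-cli-reference.md)):
+
+```bash
+# Single-quoted so the value survives shell parsing; no commas inside the context
+atx custom def exec --configuration 'additionalPlanContext=Target Java 21 and keep the existing logging framework'
+```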
+
+### Verify Runtime Compatibility (Remote and Local)
+
+#### Remote Mode
+
+**Infrastructure check:** To verify whether remote infrastructure is deployed, use CloudFormation — do NOT check individual Lambda functions by name:
+
+```bash
+aws cloudformation describe-stacks --stack-name AtxInfrastructureStack \
+  --query 'Stacks[0].StackStatus' --output text || echo "NOT_DEPLOYED"
+```
+
+Before submitting remote jobs, determine whether the pre-built image covers the
+target runtime or if a custom Docker build is needed.
+
+**Pre-built image includes:**
+
+- **Java**: 8, 11, 17, 21, 25 (Amazon Corretto) with Maven and Gradle 9.4
+- **Python**: 3.8, 3.9, 3.10, 3.11, 3.12, 3.13, 3.14 (dnf + pyenv)
+- **Node.js**: 16, 18, 20, 22, 24 (nvm) with yarn, pnpm, TypeScript, ts-node
+- **Build tools**: gcc, g++, make, patch
+- **CLI tools**: AWS CLI v2, AWS Transform CLI, git, jq, curl, unzip, tar
+- **OS**: Amazon Linux 2023 (x86_64)
+
+**Decision logic:**
+
+1. Based on the transformation requirements (source runtime, target runtime,
+   build tools, and any other dependencies), determine whether everything
+   needed is available in the pre-built image listed above
+2. If **yes** -> use the pre-built image path (no Docker required). Proceed to deployment
+   using the pre-built image instructions in [custom-remote-execution](custom-remote-execution.md).
+3. If **no** -> use the custom image path (Docker required). Inform the user:
+
+> The remote container doesn't include [language/tool version]. To run this
+> transformation remotely, I'll need to build a custom container image. This
+> requires Docker installed and running on your machine. It's a one-time setup
+> — about 5-10 minutes. Want me to proceed?
+
+If the user confirms, follow the custom image path in
+[custom-remote-execution](custom-remote-execution.md): clear `prebuiltImageUri`,
+customize the Dockerfile, and deploy.
+
+If the user declines, suggest local mode as an alternative (if the tools are
+available on their machine).
+
+**Dockerfile customization (custom image path only):**
+
+First, read the Dockerfile to see what's installed:
+
+```bash
+ATX_INFRA_DIR="$HOME/.aws/atx/custom/remote-infra"
+cat "$ATX_INFRA_DIR/container/Dockerfile" 2>/dev/null
+```
+
+1. Ensure the infrastructure repo is cloned and up to date:
+
+   ```bash
+   ATX_INFRA_DIR="$HOME/.aws/atx/custom/remote-infra"
+   if [ -d "$ATX_INFRA_DIR" ]; then
+     git -C "$ATX_INFRA_DIR" add -A
+     git -C "$ATX_INFRA_DIR" commit -m "Local customizations" -q 2>/dev/null || true
+     git -C "$ATX_INFRA_DIR" pull -q
+   else
+     git clone -b atx-remote-infra --single-branch https://github.com/aws-samples/aws-transform-custom-samples.git "$ATX_INFRA_DIR"
+   fi
+   ```
+
+   If `git pull` reports a merge conflict, resolve it by keeping both upstream
+   changes and the user's customizations in the `CUSTOM LANGUAGES AND TOOLS`
+   section of the Dockerfile, then commit the merge.
+
+2. Edit `$ATX_INFRA_DIR/container/Dockerfile`. Find the section marked
+   `# CUSTOM LANGUAGES AND TOOLS` and insert `RUN` commands after the comment
+   block, before the `USER root` line.
+
+   For missing versions of already-installed languages, add the version in the
+   custom section. Examples:
+
+   ```dockerfile
+   # Java 23 (Amazon Corretto — direct install, must run as root)
+   # Do NOT use dnf in the custom section — pyenv overrides the system python3
+   # that dnf depends on, causing "No module named 'dnf'" errors.
+
+   USER root
+   RUN curl -fsSL "https://corretto.aws/downloads/latest/amazon-corretto-23-x64-linux-jdk.tar.gz" -o /tmp/corretto23.tar.gz && \
+       mkdir -p /usr/lib/jvm && \
+       tar -xzf /tmp/corretto23.tar.gz -C /usr/lib/jvm && \
+       rm /tmp/corretto23.tar.gz && \
+       ln -sfn /usr/lib/jvm/amazon-corretto-23.* /usr/lib/jvm/corretto-23
+
+   # Node.js 23 (via nvm — must run as atxuser)
+   USER atxuser
+   RUN . /home/atxuser/.nvm/nvm.sh && nvm install 23
+   USER root
+
+   # Python 3.15 (via pyenv — must run as atxuser)
+   USER atxuser
+   RUN eval "$(/home/atxuser/.pyenv/bin/pyenv init -)" && \
+       MAKE_OPTS="-j$(nproc)" /home/atxuser/.pyenv/bin/pyenv install 3.15.0
+   USER root
+   ```
+
+   For entirely new languages, avoid `dnf` in the custom section — pyenv
+   overrides the system python3 that `dnf` depends on. Use language-specific
+   installers instead:
+
+   ```dockerfile
+   # Go
+   RUN curl -fsSL https://go.dev/dl/go1.22.0.linux-amd64.tar.gz | tar -C /usr/local -xz
+   ENV PATH="/usr/local/go/bin:$PATH"
+
+   # Ruby (via rbenv — must run as atxuser)
+   USER atxuser
+   RUN git clone --depth 1 https://github.com/rbenv/rbenv.git /home/atxuser/.rbenv && \
+       git clone --depth 1 https://github.com/rbenv/ruby-build.git /home/atxuser/.rbenv/plugins/ruby-build && \
+       /home/atxuser/.rbenv/bin/rbenv install 3.3.0 && \
+       /home/atxuser/.rbenv/bin/rbenv global 3.3.0
+   ENV PATH="/home/atxuser/.rbenv/shims:/home/atxuser/.rbenv/bin:$PATH"
+   USER root
+
+   # Rust
+   USER atxuser
+   RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+   ENV PATH="/home/atxuser/.cargo/bin:$PATH"
+   USER root
+   ```
+
+3. Update the version switcher in `$ATX_INFRA_DIR/container/entrypoint.sh`.
+   Find the relevant `switch_*_version` function and add a case for the new
+   version. For Java versions installed via direct download, find the extracted
+   directory name under `/usr/lib/jvm/`. For example, to add Java 23:
+
+   ```bash
+   # In switch_java_version(), add to the case statement:
+   23) java_home="/usr/lib/jvm/corretto-23" ;;
+   ```
+
+   Check the actual directory name: `ls /usr/lib/jvm/` — use the directory
+   that matches the version you installed.
+
+   For Node.js, nvm handles arbitrary versions automatically — no entrypoint
+   change needed. For Python, pyenv handles arbitrary versions — no entrypoint
+   change needed (the existing pyenv fallback logic finds it).
+
+4. Deploy (or redeploy): `cd "$ATX_INFRA_DIR" && ./setup.sh`
+   CDK hashes the `container/` directory — any file change triggers a rebuild
+   and push to ECR automatically.
+
+After redeployment, set the `environment` field on the job to the exact target
+version (e.g., `"JAVA_VERSION":"23"`, not `"21"`). The version switcher in the
+entrypoint reads this and activates the correct runtime.
+
+#### Local Mode
+
+Before running local transformations, verify the user has the target runtime
+version installed. This applies to any language or runtime the transformation
+targets — Java, Python, Node.js, Ruby, Go, Rust, .NET, etc. Check the current
+version of whatever runtime the transformation definition requires.
For example:
+
+```bash
+java -version        # Java transformations
+python3 --version    # Python transformations
+node --version       # Node.js transformations
+ruby --version       # Ruby transformations
+go version           # Go transformations
+```
+
+If the target version is not active, check whether it's already installed:
+
+```bash
+# Java: check common install locations
+/usr/libexec/java_home -V 2>&1      # macOS
+ls /usr/lib/jvm/ 2>/dev/null        # Linux
+# Python: check if the specific version binary exists
+which python3.12 2>/dev/null        # adjust version as needed
+# Node.js: check if nvm is available, or look for the binary
+command -v nvm &>/dev/null && nvm ls 2>/dev/null
+which node 2>/dev/null && node --version
+```
+
+If the target version is found, switch to it:
+
+- Java: `export JAVA_HOME=<path-to-jdk> && export PATH="$JAVA_HOME/bin:$PATH"`
+- Python: `pyenv shell 3.15.0`
+- Node.js: `nvm use 23`
+
+Only if the target version is not installed at all, ask the user for permission before installing. Do NOT install runtimes without explicit user confirmation.
+Suggest the appropriate version manager:
+
+- Java: `brew install --cask corretto23` (macOS), `sudo yum install java-23-amazon-corretto-devel` (RHEL/AL2), or `sudo apt install java-23-amazon-corretto-jdk` (Debian/Ubuntu)
+- Python: `pyenv install 3.15.0 && pyenv shell 3.15.0`, or `brew install python@3.15`
+- Node.js: `nvm install 23 && nvm use 23`
+
+The active runtime must match the transformation's target version so that builds
+and tests run correctly. Do NOT proceed with the transformation until the correct
+version is active.
+
+### Confirm Transformation Plan
+
+Present the final plan with repo, transformation definition, config, and execution mode. Do NOT proceed
+until the user confirms.
+
+### Execute
+
+- **1 repo**: See [custom-single-transformation](custom-single-transformation.md)
+- **Multiple repos**: See [custom-multi-transformation](custom-multi-transformation.md)
+
+## Execution Modes
+
+| Mode                                    | Best For                                                     | Prerequisites                    |
+| --------------------------------------- | ------------------------------------------------------------ | -------------------------------- |
+| **Local** (default for 1-9 repos)       | Quick transforms, dev machines with ATX                      | AWS Transform CLI installed      |
+| **Remote** (recommended for 10+ repos)  | Bulk transforms, up to 512 repos (128 concurrent per batch)  | AWS account, auto-deployed infra |
+
+Mode inference:
+
+- User says "local"/"here"/"on my machine" -> Local (honor the request regardless of repo count)
+- User says "remote"/"cloud"/"AWS"/"batch"/"at scale" -> Remote
+- 10+ repos without preference -> Recommend remote, explain the local cap of 3 concurrent repos
+- 1-9 repos without preference -> Local, note remote available
+
+See [custom-remote-execution](custom-remote-execution.md) for infrastructure setup.
+
+## Critical Rules
+
+1. **Discover transformation definitions dynamically** — Always run `atx custom def list --json`. Never hardcode transformation definition names.
+2. **Match, don't ask** — Inspect repos and present matches. Never show raw transformation definition lists.
+3. **Lightweight inspection only** — Check config files and key signals. No deep analysis.
+4. **Confirm before executing** — Always confirm transformation definition, repos, and config with user first.
+5. **No time estimates** — Never include duration predictions.
+6. **Parallel execution** — Local: max 3 concurrent repos. Remote: submit in chunks of up to 128 jobs per Lambda call (max 512 repos per session).
+7.
**Preserve outputs** — Do not delete generated output folders.
+8. **Recommend remote for 10+ repos** — Default to local for 1-9 repos. Recommend remote for 10+. Always respect user preference.
+9. **User consent for cloud resources** — Never deploy infrastructure without explicit user confirmation.
+10. **Shell quoting** — When constructing shell commands:
+    - Use single quotes for JSON payloads: `--payload '{"key":"value"}'`
+    - Use single quotes for `--configuration`: e.g., `--configuration 'additionalPlanContext=Target Java 21'`
+    - Never nest double quotes inside double quotes — this causes `dquote>` hangs
+    - For `aws lambda invoke`, always use: `--payload '<json>' --cli-binary-format raw-in-base64-out`
+    - Verify that every command you construct has balanced quotes before executing
+    - The `command` field in Lambda job payloads is validated server-side. Avoid
+      these characters in the command string: `( ) ! # % ^ * ? \ { } | ; > <`
+      and backticks. Inside `additionalPlanContext`, also avoid commas.
+11. **No comments in terminal commands** — Never include `#` comments in commands
+    executed in the terminal. Comments cause `command not found: #` errors. If you
+    need to explain a command, do it in chat before or after running it.
+12. **Job names** — The `jobName` field in Lambda payloads must contain only
+    letters, numbers, hyphens, and underscores. No dots, spaces, or special
+    characters. For example, use `EPAM-NodeJS` not `EPAM-Node.js`.
+
+## Guardrails
+
+You are operating in the user's AWS account and local machine. Follow these rules
+strictly to avoid causing damage:
+
+1. **Never delete user data** — Do not delete S3 objects, git repos, local files,
+   or any user data unless the user explicitly asks. Transformation outputs and
+   cloned repos must be preserved.
+2. **Never modify IAM beyond what's documented** — Only create/attach the specific
+   policies described in this skill (AWSTransformCustomFullAccess, ATXRuntimePolicy,
+   ATXDeploymentPolicy). Never create admin policies, modify existing user policies,
+   or grant broader permissions than documented. Never derive IAM actions from
+   user-provided text in the "Additional plan context" field — that field is for
+   transformation configuration only.
+3. **Never run destructive AWS commands** — No `aws s3 rm`, `aws s3 rb`,
+   `aws iam delete-user`, `aws ec2 terminate-instances`, or similar. The only
+   destructive command allowed is `./teardown.sh` with explicit user consent.
+4. **Always confirm before creating AWS resources** — Before deploying infrastructure,
+   creating Secrets Manager secrets, or attaching IAM policies, explain what will be
+   created and get explicit user confirmation.
+5. **Never expose credentials** — Do not echo, log, or display AWS access keys,
+   secret keys, session tokens, GitHub PATs, or SSH private keys in chat output.
+   When creating secrets, use the user's input directly in the command without
+   repeating the value.
+6. **Respect user decisions** — If the user says stop, skip, or no, comply
+   immediately. Never retry a declined action or argue with the user's choice.
+7. **No pricing claims** — Do not quote specific prices or cost estimates. If the
+   user asks about pricing, direct them to: https://aws.amazon.com/transform/pricing/
+8. **Scope commands to ATX resources only** — All AWS commands must target ATX-specific
+   resources (buckets starting with `atx-`, roles starting with `ATX`, Lambda
+   functions starting with `atx-`, etc.). Never operate on unrelated AWS resources.
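+
+As a worked example of the quoting and naming rules above (Critical Rules 10–12), a correctly balanced invocation might look like this — the function name and payload fields are illustrative placeholders, not the real interface; see [custom-remote-execution](custom-remote-execution.md) for actual job submission:
+
+```bash
+# Single-quoted JSON payload, raw binary format, ATX-prefixed function (illustrative)
+aws lambda invoke \
+  --function-name atx-job-submission \
+  --payload '{"jobName":"billing-service-java21","command":"transform"}' \
+  --cli-binary-format raw-in-base64-out \
+  response.json
+```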
+
+## Telemetry
+
+Always include `--telemetry` on every `atx custom def exec` invocation (local
+or remote). No file checks, no prompts, no opt-in flow.
+
+The flag format is: `--telemetry "client=ide,agent=ide,executionMode=<local|remote>"`
+
+Field values:
+
+- `client` — ALWAYS `ide` (lowercase, no spaces)
+- `agent` — ALWAYS `ide` (lowercase, no spaces)
+- `executionMode` — `local` when running the CLI directly, `remote` when submitting via Lambda
+
+The only exception: if the user explicitly asks to disable telemetry during the
+session (e.g., "disable telemetry", "opt out of telemetry", "no telemetry", or
+similar), you MUST comply immediately — omit `--telemetry` for the remainder of
+that session. Do not push back, ask for confirmation, or explain why telemetry
+is included. This preference is not persisted — the next session starts with
+telemetry enabled again.
+
+## Output Structure
+
+Local mode: transformed code is in the repo directory.
+
+Remote mode results stay in S3 — do NOT download automatically. Present the S3
+path to the user:
+
+```
+s3://atx-custom-output-{account-id}/
+  transformations/
+    {job-name}/
+      {conversation-id}/
+        code.zip    # Zipped transformed source code
+        logs.zip    # AWS Transform conversation logs
+```
+
+If the user explicitly asks to download, provide the command but let them run it:
+`aws s3 cp s3://atx-custom-output-{account-id}/transformations/{job-name}/{conversation-id}/code.zip ./code.zip`
+
+Bulk results summary: `~/.aws/atx/custom/atx-agent-session/transformation-summaries/` — see [custom-results-synthesis](custom-results-synthesis.md).
+
+## References
+
+| Reference                                                        | When to Use                                                           |
+| ---------------------------------------------------------------- | ---------------------------------------------------------------------- |
+| [custom-repo-analysis](custom-repo-analysis.md)                  | Detection commands, signal matching, match report format               |
+| [custom-single-transformation](custom-single-transformation.md)  | Applying one transformation definition to one repo (local or remote)   |
+| [custom-multi-transformation](custom-multi-transformation.md)    | Applying transformation definitions to multiple repos in parallel      |
+| [custom-remote-execution](custom-remote-execution.md)            | Infrastructure deployment, job submission, monitoring                  |
+| [custom-results-synthesis](custom-results-synthesis.md)          | Generating consolidated reports after bulk transforms                  |
+| [custom-cli-reference](custom-cli-reference.md)                  | AWS Transform CLI flags, commands, env vars, IAM permissions           |
+| [custom-troubleshooting](custom-troubleshooting.md)              | Error resolution, debugging, quality improvement                       |
+
+## License
+
+AWS Service Terms. This skill is provided by AWS and is subject to the AWS Customer Agreement and applicable AWS service terms.
+
+## Issues
+
+https://github.com/awslabs/agent-plugins/issues
+
+## Changelog
+
+Share if the user asks what changed, what's new, etc.
+ +### [1.0.0] - 2026-04-14 + +- Initial release of the AWS Transform skill +- Supported transformation definitions: + - AWS/java-version-upgrade + - AWS/python-version-upgrade + - AWS/nodejs-version-upgrade + - AWS/java-aws-sdk-v1-to-v2 + - AWS/nodejs-aws-sdk-v2-to-v3 + - AWS/python-boto2-to-boto3 + - AWS/comprehensive-codebase-analysis + - AWS/java-performance-optimization + - AWS/angular-version-upgrade + - AWS/vue.js-version-upgrade + - AWS/early-access-java-x86-to-graviton + - AWS/early-access-angular-to-react-migration + - AWS/early-access-log4j-to-slf4j-migration diff --git a/plugins/aws-transform/skills/aws-transform/references/dotnet.md b/plugins/aws-transform/skills/aws-transform/references/dotnet.md new file mode 100644 index 00000000..301addcd --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/dotnet.md @@ -0,0 +1,728 @@ +# .NET Modernization + +> **Last Updated:** 2026-05-13 + +## Table of Contents + +- [Capabilities](#capabilities) +- [Agents & Transforms](#agents--transforms) +- [Decision Points](#decision-points) + - [Target Version](#target-version) + - [Transformation Mode](#transformation-mode) + - [Source Code Upload](#source-code-upload) + - [Confirm & Launch](#confirm--launch) + - [Per-Project Review Toggles](#per-project-review-toggles-interactive-mode-only) +- [Status Check](#status-check) +- [Workflow (11 Steps)](#workflow-11-steps) + - [Verify Authentication](#verify-authentication) + - [Create or Reuse Workspace](#create-or-reuse-workspace) + - [Collect User Choices](#collect-user-choices) + - [Create and Start Job](#create-and-start-job) + - [Upload Source Code](#upload-source-code) + - [Monitor Assessment](#monitor-assessment) + - [Plan Approval](#plan-approval) + - [Checkpoint Config](#checkpoint-config-interactive-mode-only) + - [Monitor Transformation](#monitor-transformation) + - [Local Build Verification (Auto-Skip)](#local-build-verification-auto-skip) + - [Final Summary & Download](#final-summary--download) +- [Apply Transformation Changes](#apply-transformation-changes) +- [Handle Missing Packages](#handle-missing-packages) +- [Mode Behavior Reference](#mode-behavior-reference) +- [HITL Reference](#hitl-reference) +- [Artifacts Reference](#artifacts-reference) +- [Error Recovery](#error-recovery) +- [Known Limitations](#known-limitations) + +--- + +## Capabilities + +Modernize .NET applications to .NET 8, .NET 9, or .NET 10. Supports projects targeting .NET Framework (v2.0–v4.8), .NET Core (1.x–3.x), or .NET 5–7. + +| Source | Target | +| ---------------------- | ------------------------------------ | +| .NET Framework 2.0–4.8 | .NET 8 or .NET 10 | +| .NET Core 1.x–3.x | .NET 8 or .NET 10 | +| .NET 5–7 | .NET 8 or .NET 10 | +| VB.NET (.vbproj) | VB.NET on .NET 8 or .NET 10 | +| WPF (.NET Framework) | WPF on .NET 8 or .NET 10 | +| Xamarin | .NET MAUI | +| ASP.NET MVC 5 | ASP.NET Core | +| WCF Services | gRPC or REST APIs | +| Web Forms | Blazor or Razor Pages | +| Entity Framework 6 | EF Core | +| Web.config | appsettings.json | +| IIS deployment | ECS Fargate / App Runner | +| packages.config | PackageReference (SDK-style .csproj) | + +--- + +## Agents & Transforms + +| User-Facing Name | orchestratorAgent | Purpose | +| ------------------------ | --------------------- | ----------------------------------------------- | +| .NET Modernization Agent | `dotnet-chatty-agent` | .NET code assessment + transformation (primary) | + +The .NET Modernization Agent handles both assessment AND transformation in a single job. 
There is no separate assessment agent. + +In user-facing messages, refer to this as ".NET modernization agent" or "Managed Agent". Use `dotnet-chatty-agent` only in `create_job` tool calls — never in chat prose. + +--- + +## Decision Points + +All user-facing questions. Ask in this order: version → mode → source code → per-project toggles. + +### Target Version + +| Target | TFM | Support | Notes | +| ------- | --------- | ------------------------- | --------------------------------------- | +| .NET 10 | `net10.0` | LTS (Nov 2025 – Nov 2028) | Latest LTS, newest APIs and performance | +| .NET 9 | `net9.0` | STS (Nov 2024 – May 2026) | Latest features, shorter support window | +| .NET 8 | `net8.0` | LTS (Nov 2023 – Nov 2026) | Stable, widely adopted | + +MUST present all versions as explicit options. Mark .NET 10 as "(Recommended)". Default to .NET 10 if user has no preference. + +Present options to the user and wait for their selection: + +| Option | Description | Value Mapping | +| --------------------- | ------------------------------------------------------------- | ------------------------------ | +| .NET 10 (Recommended) | LTS until Nov 2028. Latest APIs and performance improvements. | `target_framework = "net10.0"` | +| .NET 9 | STS until May 2026. Latest features, shorter support window. | `target_framework = "net9.0"` | +| .NET 8 | LTS until Nov 2026. Stable, widely adopted. | `target_framework = "net8.0"` | + +### Transformation Mode + +Exactly TWO modes exist. No other modes exist. + +| Mode | Value | Behavior | Best For | +| ----------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | +| Auto | `auto` | Runs to completion without pausing. Failures logged and skipped. Transformations are applied to your code during status checks. Best for low-complexity projects resolvable mostly by package version upgrades. | Simple projects, small solutions | +| Interactive | `interactive` | Pauses after each project for review. Plan approval required. Approved changes are applied to your code. Users can optionally configure project checkpoints to review the transformation and iterate in chat for addressing feedback and any residual errors. | Medium and complex applications | + +Present options to the user and wait for their selection: + +| Option | Description | Value Mapping | +| -------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------- | +| Auto (Recommended for simple projects) | Runs to completion without pausing. Transformations are applied to your code during status checks. Failures logged and skipped. Fast. | `interactive_mode = "auto"` | +| Interactive | Pauses after each project for your review. Approved changes are applied to your code. You can inspect diffs, request changes, or skip projects. | `interactive_mode = "interactive"` | + +Default to `auto` if user has no preference. + +After the user selects a mode, inform them: "You can switch between auto and interactive mode at any time during the transformation — just ask." 
+
+### Source Code Upload
+
+Present options to the user and wait for their selection:
+
+| Option                | Description                                             |
+| --------------------- | ------------------------------------------------------- |
+| Upload from workspace | Zip and upload the solution from the current workspace  |
+| Specify a path        | User provides the path to the solution                  |
+
+MUST ask before uploading. MUST NOT auto-upload.
+
+### Confirm & Launch
+
+After collecting target version and mode, present a summary of the user's selections and ask for confirmation before creating the job. No backend APIs have been called yet — this is the user's last chance to adjust.
+
+Present options to the user and wait for their selection:
+
+| Option              | Description            |
+| ------------------- | ---------------------- |
+| Go ahead            | Start the migration    |
+| Wait, let me adjust | Change something first |
+
+The plan presented above the question MUST include these steps:
+
+1. Create a new workspace (or reuse existing)
+2. Create a job with the .NET modernization agent (target: `<version>`, mode: `<mode>`)
+3. Upload your source code
+4. Agent assesses your solution, produces an assessment report, and generates a migration plan before starting the transformation
+5. Once transformation completes, you receive the migrated source code and a summary of changes
+
+If user selects "Wait, let me adjust" — return to the relevant decision point (version or mode) and re-collect the choice. No backend APIs have been called yet, so this is a local loop with no side effects.
+
+### Per-Project Review Toggles (Interactive Mode Only)
+
+Presented after plan approval. Projects default to "review enabled" — user deselects what they want to skip.
+
+Present a multi-select list of projects from the plan. Each option shows the project name and complexity. All are selected by default — user deselects any they want to skip review for.
+
+| Option Pattern   | Description    |
+| ---------------- | -------------- |
+| `<project-name>` | `<complexity>` |
+
+---
+
+## Status Check
+
+When the user asks for status, progress, or "what's happening" — use this procedure regardless of which workflow step is active:
+
+1. `get_resource(resource="job", workspaceId=..., jobId=...)` → get job status + `_pollingGuidance` + `recentWorklogs`
+2. `list_resources(resource="tasks", workspaceId=..., jobId=...)` → any pending HITL tasks?
+3. Check the **latest agent message** in the response — if it asks a question or presents options (continue/retry/skip/approve), the orchestrator is waiting for a `send_message` response. Present those options to the user.
+
+**Present the full picture:**
+
+- Current phase and status
+- What the agent last did (from worklogs)
+- Any pending actions needed from user (HITL tasks OR agent message asking a question)
+- What's coming next
+
+**Rules:**
+
+- **AUTO mode:** Call `list_resources(resource="artifacts", pathPrefix="...Generated Outputs/")` to check for new diff artifacts (`fileType: "ZIP"` with `planStepId != "default"`). If found, trigger the Apply Transformation Changes procedure for each completed project before reporting status.
+- If `_pollingGuidance.hasPendingTasks=true` → check tasks, present to user
+- If `_pollingGuidance.isTerminal=true` → job done, present final summary
+- If latest agent message asks a question → present it to user, orchestrator is waiting
+- If no pending tasks AND no questions AND job is EXECUTING → inform user the agent is working, no action needed
+
+---
+
+## Workflow (11 Steps)
+
+The full lifecycle of a .NET modernization job.
+
+### Verify Authentication
+
+```python
+get_status()
+```
+
+If not authenticated, guide user through `configure()`. Do not proceed until auth is confirmed.
+
+### Create or Reuse Workspace
+
+```python
+create_workspace(name="dotnet-modernization", description="Modernize .NET Framework solution to <target>")
+```
+
+Save `workspaceId`. If user has a workspace from a previous session (check `.atx/context.json`), offer to reuse it instead of creating a new one. Users can create multiple jobs within the same workspace.
+
+### Collect User Choices
+
+MUST ask version FIRST, mode SECOND. MUST store exact values before calling `create_job`.
+
+1. Ask target version → store `target_framework` ("net10.0", "net9.0", or "net8.0")
+2. Ask transformation mode → store `interactive_mode` ("auto" or "interactive")
+
+See [Decision Points](#decision-points) for the exact formats and value mappings.
+
+### Create and Start Job
+
+Build `objective` as a JSON string from collected values:
+
+```python
+# Examples:
+objective_json = '{"target_framework": "net10.0", "interactive_mode": "interactive"}'
+objective_json = '{"target_framework": "net8.0", "interactive_mode": "auto"}'
+```
+
+Call `create_job`:
+
+```python
+create_job(
+    workspaceId="<workspace-id>",
+    jobName=".NET Assessment & Modernization",
+    objective=objective_json,  # MUST be valid JSON, not prose
+    intent="LANGUAGE_UPGRADE",
+    orchestratorAgent="dotnet-chatty-agent"
+)
+```
+
+Save `jobId`. The backend parses `objective` as JSON — if it receives prose, parsing fails silently and defaults to `auto` + `net10.0`.
+
+Immediately after:
+
+```python
+load_instructions(workspaceId="<workspace-id>", jobId="<job-id>")
+
+send_message(
+    workspaceId="<workspace-id>",
+    jobId="<job-id>",
+    text="Target: <target version> (<target_framework>). Mode: <interactive_mode>. Source code will be uploaded shortly."
+)
+```
+
+`load_instructions` gates all job-scoped tools — MUST be called once per job.
+
+### Upload Source Code
+
+Ask user first (see [Decision Points: Source Code Upload](#source-code-upload)). Then:
+
+```python
+# Zip (exclude .git, bin, obj, packages)
+upload_artifact(
+    workspaceId="<workspace-id>", jobId="<job-id>",
+    content="/tmp/source.zip", fileType="ZIP",
+    categoryType="CUSTOMER_INPUT", fileName="source.zip"
+)
+
+# Notify agent
+send_message(workspaceId="<workspace-id>", jobId="<job-id>",
+             text="Source code uploaded (artifact ID: <artifact-id>). Please proceed with assessment.")
+```
+
+Save the user's source path to `.atx/context.json` as `source_root`. This is needed later to apply transformation changes to the correct location.
+
+### Monitor Assessment
+
+The agent assesses the solution automatically. On user check-in:
+
+```python
+get_resource(resource="job", workspaceId="<workspace-id>", jobId="<job-id>")
+```
+
+Report status, any available artifacts (Assessment_Report.md, Modernization_Plan.md), and pending tasks.
+
+Use `_pollingGuidance` from the response:
+
+- `hasPendingTasks=true` → check for BLOCKING tasks
+- `isTerminal=true` → job done
+
+If `list_resources(resource="tasks")` returns a task with tag `missing-packages`, follow the Handle Missing Packages procedure. Transformation will not proceed until resolved.
+
+### Plan Approval
+
+**AUTO mode:** Skipped. Agent proceeds directly to transformation.
+
+**INTERACTIVE mode:**
+
+The backend orchestrator WAITS for user approval before starting transformation. It will NOT proceed on its own. You MUST detect the plan and get user approval.
+
+**How to detect "plan ready":** Job status is `PLANNING` AND `Modernization_Plan.md` artifact exists. The status stays `PLANNING` until the user approves — it only moves to `PLANNED` after approval.
+
+**Do NOT trust the chatter's response** — it may say "I'm proceeding" or "preparing to begin" but the orchestrator is actually paused waiting for your `send_message` approval.
+
+Flow:
+
+1. Detect plan is ready (artifact or worklog signal)
+2. Present plan summary to user (from the agent's messages or assessment data)
+3. Ask user to approve
+4. Send approval:
+
+```python
+send_message(workspaceId="<workspace-id>", jobId="<job-id>",
+             text="User approves the migration plan. Please proceed with transformation.")
+```
+
+Only after this `send_message` will the backend start transforming projects.
+
+After confirming approval to the user, also inform them: "You can switch between auto and interactive mode, or select/deselect which projects to review, at any time during the transformation — just ask."
+
+**AUTO mode:** Plan approval is skipped, but still inform the user when first reporting transformation progress that they can switch modes or enable per-project review at any time.
+
+### Checkpoint Config (Interactive Mode Only)
+
+Skip this step entirely in AUTO mode.
+
+After plan approval, extract project step_ids from the plan and present per-project toggles:
+
+**8a. Get plan and find project steps:**
+
+```python
+get_resource(resource="plan", workspaceId="<workspace-id>", jobId="<job-id>")
+# Find steps where parentStepId matches "Transform Projects" step
+# These are per-project steps with stepId and stepName
+```
+
+**8b. Find checkpoint config task ID:**
+
+```python
+list_resources(resource="tasks", workspaceId="<workspace-id>", jobId="<job-id>")
+# Find task where tag ends with "-checkpoint"
+```
+
+**8c. Present per-project toggles to user** (see [Decision Points: Per-Project Review Toggles](#per-project-review-toggles-interactive-mode-only))
+
+**8d. Submit user's choices:**
+
+```python
+complete_task(
+    workspaceId="<workspace-id>", jobId="<job-id>",
+    taskId="<checkpoint-task-id>",
+    content='{"interactive_mode": "interactive", "<step-id-1>": true, "<step-id-2>": false, ...}',
+    action="SAVE_DRAFT"
+)
+```
+
+Use `SAVE_DRAFT` (not `APPROVE`) for checkpoint config — this keeps the task open so it can be updated again later. The backend reads the humanArtifact regardless of task status.
+
+Rules:
+
+- Step IDs come from plan (`steps[].stepId` where parent is "Transform Projects")
+- `true` = pause for review; `false` = skip review
+- MUST include ALL project step_ids — omitted ones default to `false`
+- MUST include `interactive_mode` in every submission
+- The HITL does not close — can be updated anytime
+
+**8e. Save to `.atx/checkpoint-config.json`:**
+
+```json
+{
+  "checkpointTaskId": "<task-id>",
+  "mode": "interactive",
+  "projects": {
+    "<step-id>": { "label": "<project-name>", "review": true }
+  },
+  "lastUpdated": "<iso-timestamp>"
+}
+```
+
+On conversation resume, check this file first — reuse stored mapping instead of re-asking.
+
+**Mid-run updates:** When user asks to switch mode or change toggles:
+
+1. Read `.atx/checkpoint-config.json`
+2. Update values per user's request
+3. Save updated file
+4. Call `complete_task` with values from saved file, using `action="SAVE_DRAFT"`
+
+A `send_message` alone does NOT change the mode — only `complete_task` (with `SAVE_DRAFT`) on the checkpoint HITL changes backend behavior.
+
+### Monitor Transformation
+
+The agent transforms each project in dependency order (convert → upgrade packages → fix errors → checkpoint).
+
+Status progression: `PLANNING` → `PLANNED` → `EXECUTING` → `COMPLETED`
+
+Terminal states: `STOPPED`, `FAILED`
+
+**INTERACTIVE mode — when Project Review HITL appears:**
+
+1. Inform user which project completed + summary
+2.
Present options to the user:
+
+   | Option            | Description                                                            |
+   | ----------------- | ---------------------------------------------------------------------- |
+   | Approve           | Apply transformation to the project and continue to the next project.  |
+   | Download the diff | Inspect the exact file changes before deciding.                        |
+   | Reject            | Skip this project's transformation. Local code is not modified.        |
+
+3. If user approves, state which project you are approving before calling `complete_task`
+4. Submit decision:
+
+   ```python
+   complete_task(workspaceId="<workspace-id>", jobId="<job-id>", taskId="<task-id>",
+                 content='{}', action="APPROVE")
+   ```
+
+5. Immediately send follow-up:
+
+   ```python
+   send_message(workspaceId="<workspace-id>", jobId="<job-id>",
+                text="Review approved for <project>. Please continue with transformation.")
+   ```
+
+6. Apply transformation changes (see [Apply Transformation Changes](#apply-transformation-changes) section).
+
+**AUTO mode:** No review tasks appear. Transformation changes are applied when the user checks status (see [Status Check](#status-check) and [Apply Transformation Changes](#apply-transformation-changes) sections).
+
+### Local Build Verification (Auto-Skip)
+
+After all projects transform, a build verification HITL appears (BLOCKING, tag: `local-build-verification`). This applies to both auto and interactive modes.
+
+Auto-complete this HITL immediately with a skip response. The user does not need to approve — this workflow handles it silently. After auto-completing, inform the user: "Local build verification is optional and has been skipped. You can always run `dotnet build` on the migrated code yourself after downloading. If you encounter build errors, you can ask me to help fix them."
+
+```python
+complete_task(workspaceId="<workspace-id>", jobId="<job-id>", taskId="<task-id>",
+              content='{"status": "SUCCESS", "errorCount": 0, "errors": [], "rawOutput": "Build verification skipped."}',
+              action="APPROVE")
+
+send_message(workspaceId="<workspace-id>", jobId="<job-id>",
+             text="Build verification skipped. Please proceed with next steps.")
+```
+
+**Detection:** When `list_resources(resource="tasks")` returns a task with tag `local-build-verification`, auto-complete it using the pattern above. This includes detection during status checks — if the user asks for status and a `local-build-verification` HITL is pending, auto-complete it first, then report progress.
+
+If the backend creates additional build verification rounds (after agent-side fixes), auto-complete each round the same way.
+
+### Final Summary & Download
+
+After build verification, the agent generates final artifacts (report, next steps, migrated source ZIP) and sends a message asking whether to mark the job as complete. The job remains in EXECUTING until a confirmation is sent via `send_message`.
+
+When the agent's message indicates transformation is complete and artifacts are ready:
+
+1. List output artifacts: `list_resources(resource="artifacts")`
+2. Present a summary to the user:
+   - Projects transformed (count, names, status)
+   - Per-project diff ZIPs — list each with project name and artifact ID. These contain `metadata.json`, `diffs/*.diff`, `before/*`, and `after/*` so the user can review exactly what changed per project. Identify them by label `checkpoint-diff-{project-name}`.
+   - Transformation report available (`Transformation_Report.html` — detailed HTML report)
+   - Next steps available (`NextSteps.md` — recommended post-migration actions)
+   - Final migrated source available (`*_Transformed_*.zip` — complete migrated solution)
+3.
Present options to the user and wait for their selection:
+
+   | Option           | Description                                  |
+   | ---------------- | -------------------------------------------- |
+   | Complete the job | Mark the job as done and download artifacts  |
+   | Make adjustments | Request additional changes before completing |
+
+4. Based on user's choice:
+
+   ```python
+   # User confirms completion:
+   send_message(workspaceId="<workspace-id>", jobId="<job-id>",
+                text="Looks good, mark the job as complete.")
+
+   # User wants adjustments — relay their request:
+   send_message(workspaceId="<workspace-id>", jobId="<job-id>",
+                text="<the user's adjustment request>")
+   # Agent will handle the request, then ask again — repeat this step.
+   ```
+
+5. Once job reaches `COMPLETED`, download any artifacts the user requests:
+
+   ```python
+   get_resource(resource="artifact", workspaceId="<workspace-id>", jobId="<job-id>",
+                artifactId="<artifact-id>", savePath=".atx/")
+   ```
+
+6. Update `.atx/context.json` with `phase: "complete"`
+
+---
+
+## Apply Transformation Changes
+
+Apply transformed code from per-project diff artifacts to the user's local filesystem. Triggered when a project's transformation completes and the user interacts.
+
+**Trigger conditions:**
+
+- **AUTO mode:** When the user checks status and projects have completed transformation, trigger the Apply Transformation Changes procedure for each completed project
+- **INTERACTIVE mode:** After the user approves the project review HITL
+- MUST NOT apply if the user rejected the project in interactive mode
+
+**First-time consent (both modes):** Before applying changes for the first time, inform the user:
+
+"I'll apply the transformed code to your local codebase as each project completes. Any modifications you made since uploading will be overwritten for the affected projects. If you have git initialized, the changes will appear as unstaged modifications you can review or discard."
+
+In AUTO mode, ask for confirmation before proceeding. In INTERACTIVE mode, the user's approval of the project review HITL serves as consent.
+
+### Procedure
+
+**1. Find the latest diff artifact for the project:**
+
+```python
+# First call returns folder paths
+list_resources(resource="artifacts", workspaceId="<workspace-id>", jobId="<job-id>")
+# Response contains folders[] — use the "Generated Outputs/" path as pathPrefix
+
+# Second call with pathPrefix returns actual artifacts
+list_resources(resource="artifacts", workspaceId="<workspace-id>", jobId="<job-id>",
+               pathPrefix="AWSTransform/Workspaces/<workspace-id>/Jobs/<job-id>/Generated Outputs/")
+# Per-project diff artifacts are identified by:
+#   - fileType == "ZIP"
+#   - planStepId != "default"
+# Match artifact.planStepId to plan step.stepId to determine which project it belongs to.
+# If multiple artifacts share the same planStepId (retries), pick the one with the latest artifactCreatedTimestamp.
+```
+
+**2. Download the diff ZIP:**
+
+```python
+get_resource(resource="artifact", workspaceId="<workspace-id>", jobId="<job-id>",
+             artifactId="<artifact-id>",
+             savePath=".atx/diffs/<project>/checkpoint-diff-<project>.zip")
+```
+
+**3. Extract the ZIP and read `metadata.json`:**
+
+```json
+{
+  "filesAdded": ["path/to/NewFile.cs"],
+  "filesUpdated": ["path/to/Modified.csproj"],
+  "filesRemoved": ["path/to/Deleted.cs"]
+}
+```
+
+**4. Write files to the user's local codebase:**
+
+The `source_root` is the path the user provided during Source Code Upload, stored in `.atx/context.json`.
+
+| Action     | Source                            | Destination                   |
+| ---------- | --------------------------------- | ----------------------------- |
+| Update/Add | `after/{path}` from extracted ZIP | `{source_root}/{path}`        |
+| Remove     | —                                 | Delete `{source_root}/{path}` |
+
+MUST use the agent's native file-write capability. Do NOT use `git apply`, `patch`, or any OS-specific command. This ensures cross-platform compatibility (Windows, macOS, Linux).
+
+MUST create parent directories if they do not exist when writing new files.
+
+MUST preserve file encoding from the `after/` content (write bytes as-is).
+
+**5. Confirm to user:**
+
+```
+✅ Applied transformation for <project>:
+  • <N> files updated
+  • <N> files added
+  • <N> files removed
+```
+
+### Rules
+
+- MUST apply project-by-project as each completes — do NOT wait until all projects finish
+- MUST use the latest diff artifact when multiple exist (highest `artifactCreatedTimestamp`)
+- MUST preserve file encoding from the `after/` content (write bytes as-is)
+- If a file in `filesUpdated` does not exist locally, treat it as an add
+- If a file in `filesRemoved` does not exist locally, skip silently
+- If download or extraction fails, inform the user and offer to retry
+
+---
+
+## Handle Missing Packages
+
+When the agent detects private or unavailable NuGet packages during assessment, a BLOCKING HITL task appears. Transformation will not proceed until resolved.
+
+**Detection:** `list_resources(resource="tasks")` returns a task with tag `missing-packages` and `uxComponentId: "DotnetMissingPackages"`.
+
+**Procedure:**
+
+1. Get task details to see which packages are missing:
+
+   ```python
+   get_resource(resource="task", workspaceId="<workspace-id>", jobId="<job-id>", taskId="<task-id>")
+   ```
+
+2. Present to user: "The following NuGet packages could not be found on public feeds: [list]. Please provide the .nupkg files, or let me know if any can be removed."
+
+3. For each .nupkg file the user provides, upload it:
+
+   ```python
+   complete_task(workspaceId="<workspace-id>", jobId="<job-id>", taskId="<task-id>",
+                 filePath="/path/to/Package.1.0.0.nupkg",
+                 action="SAVE_DRAFT")
+   # Returns uploadedArtifactId — save this for the final submission
+   ```
+
+4. After all packages are uploaded, submit the final response with all artifact IDs:
+
+   ```python
+   complete_task(workspaceId="<workspace-id>", jobId="<job-id>", taskId="<task-id>",
+                 content='{"uploadedArtifactIds": [{"artifactId": "<artifact-id-1>", "name": "Package1.nupkg", "lastModified": 0, "size": 0}, {"artifactId": "<artifact-id-2>", "name": "Package2.nupkg", "lastModified": 0, "size": 0}]}',
+                 action="APPROVE")
+   ```
+
+5.
If user wants to remove a package from the missing list instead of uploading:
+
+   ```python
+   complete_task(workspaceId="<workspace-id>", jobId="<job-id>", taskId="<task-id>",
+                 content='{"removedPackages": [{"name": "PackageName", "version": "1.0.0"}]}',
+                 action="APPROVE")
+   ```
+
+**Rules:**
+
+- MUST collect all `uploadedArtifactId` values from each `SAVE_DRAFT` call for the final `APPROVE` submission
+- User uploads are stored under `CUSTOMER_INPUT` category
+- If only one package is missing, a single `complete_task` with `filePath` + `action="APPROVE"` is sufficient
+- Transformation remains blocked until this HITL is resolved
+
+**Upload packages anytime (outside the HITL):**
+
+If the user wants to upload private packages at any point during the job (e.g., after the HITL closes, or when a build fails due to a missing package):
+
+```python
+upload_artifact(workspaceId="<workspace-id>", jobId="<job-id>",
+                content="/path/to/Package.nupkg", fileType="ZIP",
+                categoryType="CUSTOMER_INPUT", fileName="Package.1.0.0.nupkg")
+
+send_message(workspaceId="<workspace-id>", jobId="<job-id>",
+             text="I uploaded the private package Package.1.0.0.nupkg. Please add it to the local feed and retry.")
+```
+
+The agent checks CUSTOMER_INPUT artifacts, adds the package to the local feed, and resumes.
+
+---
+
+## Mode Behavior Reference
+
+| Phase                        | AUTO                                         | INTERACTIVE                             |
+| ---------------------------- | -------------------------------------------- | --------------------------------------- |
+| Plan approval                | Skipped                                      | Required (via send_message)             |
+| Checkpoint config            | Ignore                                       | Present per-project toggles — optional  |
+| Per-project review           | No HITLs created                             | HITL per toggled-on project — optional  |
+| Apply transformation changes | On status check, for each completed project  | After user approves project review      |
+| On project failure           | Log + skip + continue                        | Present to user, wait for decision      |
+| Build verification           | Auto-skip                                    | Auto-skip                               |
+| Missing Packages             | Present to user (BLOCKING)                   | Present to user (BLOCKING)              |
+| Final diffs                  | All at end                                   | Per-project during + all at end         |
+
+---
+
+## HITL Reference
+
+| HITL Type                | Component ID             | Tag                        | Blocking     | Behavior                                                 |
+| ------------------------ | ------------------------ | -------------------------- | ------------ | --------------------------------------------------------- |
+| Checkpoint Config        | `FileUploadV2`           | `*-checkpoint`             | NON_BLOCKING | AUTO: ignore. INTERACTIVE: Checkpoint Config step.        |
+| Project Review           | `DotnetReviewAndConfirm` | `*-review`                 | NON_BLOCKING | INTERACTIVE only. Present diffs, wait for decision.       |
+| Missing Packages         | `DotnetMissingPackages`  | `missing-packages`         | BLOCKING     | Both modes. Always present to user. Severity: CRITICAL.   |
+| Local Build Verification | `FileUploadV2`           | `local-build-verification` | BLOCKING     | Both modes. Auto-skip with dummy SUCCESS.                 |
+
+Processing pattern for any HITL:
+
+1. `list_resources(resource="tasks")` → discover pending tasks
+2. `get_resource(resource="task", taskId=...)` → get full details (read `_outputSchema`, `_responseHint`)
+3. Present to user
+4. Show payload before submitting
+5. `complete_task(content=..., action="APPROVE")` → submit
+
+There is NO Assessment Review HITL. Plan approval uses `send_message`.
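+
+A minimal sketch of that processing pattern as a loop — IDs are placeholders, the field names on the returned objects are assumptions, and the real payload always comes from each task's `_outputSchema`:
+
+```python
+# Sketch only — present each task to the user before submitting anything
+tasks = list_resources(resource="tasks", workspaceId="<workspace-id>", jobId="<job-id>")
+for task in tasks:
+    detail = get_resource(resource="task", workspaceId="<workspace-id>",
+                          jobId="<job-id>", taskId=task["taskId"])  # assumed field name
+    # Read detail's _outputSchema / _responseHint, present to the user,
+    # show the payload they approved, then submit:
+    complete_task(workspaceId="<workspace-id>", jobId="<job-id>",
+                  taskId=task["taskId"], content='<approved-json-payload>',
+                  action="APPROVE")
+```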
+ +--- + +## Artifacts Reference + +| Type | Category | Contains | +| ---------------------------------------------------- | ----------------- | --------------------------------------------------------- | +| Source code ZIP | `CUSTOMER_INPUT` | User's original .NET solution | +| Assessment report (`Assessment_Report.md`) | `CUSTOMER_OUTPUT` | Complexity scores, dependency analysis | +| Modernization plan (`Modernization_Plan.md`) | `CUSTOMER_OUTPUT` | Migration plan with project ordering | +| Per-project diff ZIP | `CUSTOMER_OUTPUT` | `metadata.json` + `diffs/*.diff` + `before/*` + `after/*` | +| Final migrated source ZIP (`*_Transformed_*.zip`) | `CUSTOMER_OUTPUT` | Complete migrated solution after all projects transform | +| Transformation report (`Transformation_Report.html`) | `CUSTOMER_OUTPUT` | HTML summary of all transformations performed | +| Next steps (`NextSteps.md`) | `CUSTOMER_OUTPUT` | Recommended post-migration actions | + +Per-project diff ZIPs have `label: "checkpoint-diff-{project-name}"`. + +The final migrated source ZIP, Transformation_Report.html, and NextSteps.md appear after build verification completes (job near completion). + +Upload rules: + +- Use `categoryType: "CUSTOMER_INPUT"` for source code +- Exclude `.git/`, `bin/`, `obj/`, `packages/` from ZIP +- Notify agent via `send_message` after upload with artifact ID + +When multiple artifacts of the same type exist (e.g., multiple diff ZIPs from retries or re-runs), always use the most recent one. Sort by creation timestamp and pick the latest. + +--- + +## Error Recovery + +``` +Job not progressing? +├─ Check tasks → pending HITL? → present to user +├─ Check messages → agent asked something? → respond via send_message +├─ Check job status → FAILED? → offer restart or new job +├─ Send status query → agent responds? → continue +└─ None of above → offer restart +``` + +Key actions: + +- **Job creation fails:** Retry. If repeated, check auth and workspace. +- **Upload fails:** Check file size, verify ZIP, retry. +- **Job stuck:** Send status query via `send_message`. Check for hidden HITL tasks. +- **Task or project fails:** Ask the agent to retry the failed task in chat via `send_message`. The agent can re-attempt the transformation for that project. +- **Restart:** `control_job(action="stop")` then `control_job(action="start")`. Previously uploaded artifacts remain available. +- **Delete and start over:** `delete_job()` then new `create_job()`. 
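+
+A sketch of the restart path — this assumes `control_job` takes the same workspace/job scoping as the other job-scoped tools, which is an assumption, not a documented signature:
+
+```python
+# Stop, then start — previously uploaded artifacts remain available
+control_job(workspaceId="<workspace-id>", jobId="<job-id>", action="stop")
+control_job(workspaceId="<workspace-id>", jobId="<job-id>", action="start")
+```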
+ +--- + +## Known Limitations + +- COM interop references require manual replacement +- P/Invoke (native Windows DLL calls) need manual porting +- Web Forms → Blazor conversion is partial — complex controls may need redesign +- Windows Services → need manual conversion to BackgroundService +- GAC dependencies must be replaced with NuGet packages manually diff --git a/plugins/aws-transform/skills/aws-transform/references/mainframe.md b/plugins/aws-transform/skills/aws-transform/references/mainframe.md new file mode 100644 index 00000000..8e996b24 --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/mainframe.md @@ -0,0 +1,140 @@ +# Mainframe Modernization + +> **Last Updated:** 2026-04-01 + +## Table of Contents + +- [Capabilities Overview](#capabilities-overview) +- [Starting Workflow](#starting-workflow) +- [Agents and Transforms](#agents--transforms) +- [Supported File Types](#supported-file-types) +- [Assessment Signals](#assessment-signals-for-local-discovery) +- [Example Requirements](#example-requirements) +- [Example Tasks](#example-tasks) +- [Known API Behaviors](#known-api-behaviors) +- [Known Limitations](#known-limitations) + +AWS Transform for mainframe accelerates the modernization of legacy zOS mainframe applications (COBOL, JCL, CICS, VSAM, Db2, IMS) into cloud-native services on AWS. It orchestrates analysis, documentation, business logic extraction, decomposition, code transformation, and testing through an AI-driven workflow with human-in-the-loop checkpoints. The agent proposes a plan based on your stated objective, executes each step, and pauses for your input when decisions or approvals are needed. + +## Capabilities Overview + +| # | Capability | Description | Eligible Files | Requires | +| -- | ------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | --------------- | ------------------------------------------------------------ | +| 1 | Analyze code | Classifies files, counts LOC, maps dependencies, identifies missing files and duplicates | All | — | +| 2 | Data analysis | Data lineage (program/JCL → dataset mapping) and data dictionary (field-level metadata for copybooks and Db2) | All | Code analysis | +| 3 | Activity metrics analysis | Analyzes SMF records (type 30 batch, type 110 CICS) for job frequency, resource usage, unused code identification | SMF records | Recommend code analysis first | +| 4 | Generate technical documentation | PDF/JSON docs per file — summary or detailed functional specification with logic, flows, dependencies | COBOL, JCL | Code analysis + dependency analysis | +| 5 | Extract business logic | Extracts business rules, process flows, and logic — application-level (grouped by transactions/jobs) or file-level | COBOL, JCL | Code analysis + dependency + entry point analysis | +| 6 | Decompose code | Breaks codebase into functional domains using seed programs, produces dependency graphs | All | Code analysis. Recommend BRE first | +| 7 | Migration wave planning | Sequenced migration plan based on decomposed domains with recommended modernization order | Domains | Decomposition | +| 8 | Refactor code | Transforms COBOL → cloud-optimized Java. Configurable target DB, encoding, engine version | COBOL | Code analysis. Recommend decomposition + wave planning first | +| 9 | Reforge code | LLM-powered post-refactor improvement — replaces COBOL-style Java with idiomatic Java patterns | Refactored Java | Refactor. 
Quota: 3M LOC/job, 50M LOC/user/month |
+| 10 | Plan test cases                       | Creates test plans from code analysis and scheduler paths, prioritizes by complexity, maps business rules             | JCL, schedulers | Code analysis. Benefits from BRE      |
+| 11 | Generate test data collection scripts | Produces JCL scripts to collect before/after test data from mainframe (Db2 unloads, VSAM REPRO, sequential datasets)  | Test plan       | Test planning                         |
+| 12 | Test automation script generation     | Generates scripts to execute test cases on the modernized Java application with data setup and result comparison     | Test plan       | Test planning + test data collection  |
+
+## Starting Workflow
+
+1. **Inventory** — Scan for COBOL (.cbl, .cob), JCL (.jcl), copybooks (.cpy), and VSAM definitions — see the scan sketch after the Supported File Types section
+2. **Scope decision** — Ask user: full rewrite, partial modernization, or re-platform?
+3. **Complete analysis on AWS Transform** — Based on what the customer wants to do, run relevant agents in AWS Transform. Note: the agent always starts with a "Kick off modernization" step that requires connector setup and source code location before any analysis begins.
+4. **Build modernized applications with IDE** — Based on scope, draft modernization requirements from the agents' outputs
+
+**Key question to ask user:** "Can you tell me what you are looking to accomplish today on your mainframe modernization project? Is this a full re-architecture to microservices, or a lift-and-shift to run COBOL on AWS?"
+
+## Agents & Transforms
+
+| Agent                               | How to Discover                            | Purpose                                   |
+| ----------------------------------- | ------------------------------------------ | ----------------------------------------- |
+| Mainframe agent                     | `list_resources` with `resource: "agents"` | End-to-end COBOL → Java/C# modernization  |
+| AWS/comprehensive-codebase-analysis | CLI: `atx custom def exec`                 | Static analysis of COBOL programs         |
+
+**Discover the agent dynamically** — do not hardcode the agent name:
+
+```python
+# First, discover available agents
+list_resources(resource="agents")
+# Or ask the chat agent
+send_message(workspaceId="...", text="What agents are available for mainframe modernization?")
+# Then create job — two approaches work:
+# Option A: using jobType enum (e.g. MAINFRAME_V2)
+create_job(workspaceId="...", jobName="...", jobType="MAINFRAME_V2", objective="...", intent="...")
+# Option B: using orchestratorAgent name
+create_job(workspaceId="...", jobName="...", orchestratorAgent="<agent-name>", objective="...", intent="...")
+```
+
+## Supported File Types
+
+zOS: COBOL + copybooks, JCL + PROC, CSD, BMS, Db2, VSAM, IMS TM, PL/I (BRE and docs only — not refactoring).
+Fujitsu GS21: PSAM, ADL, NDB.
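+
+A minimal local scan sketch for the inventory step above — extensions and search strings follow the Assessment Signals table in the next section; paths and output format are illustrative:
+
+```bash
+# Count mainframe asset types in the workspace by extension
+find . -type f \( -iname '*.cbl' -o -iname '*.cob' -o -iname '*.jcl' \
+  -o -iname '*.cpy' -o -iname '*.bms' \) | awk -F. '{print tolower($NF)}' | sort | uniq -c
+
+# Flag CICS calls and embedded SQL in COBOL sources
+grep -rliE 'EXEC +(CICS|SQL)' --include='*.cbl' --include='*.cob' .
+```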
+
+## Assessment Signals (for local discovery)
+
+These patterns help identify mainframe assets during local workspace scanning, before the AWS Transform agent runs its own analysis:
+
+| File Pattern | What to Look For | Indicates |
+| --------------------- | ---------------------------- | ----------------------------- |
+| `*.cbl`, `*.cob` | COBOL source | Mainframe COBOL programs |
+| `*.jcl` | JCL job cards, DD statements | Batch processing |
+| `*.cpy` | COBOL copybooks | Shared data structures |
+| `*.bms` | BMS maps | CICS screen definitions |
+| `EXEC CICS` in source | CICS API calls | Online transaction processing |
+| `EXEC SQL` in source | Embedded SQL | Database access (DB2/IMS) |
+
+## Example Requirements
+
+```
+## Requirement 1: COBOL to Java Conversion
+
+**User Story:** As a developer, I want COBOL batch programs converted to Java services
+so that we can run them on AWS without mainframe infrastructure.
+**Acceptance Criteria:**
+
+1. WHEN conversion is applied, ALL COBOL PERFORM logic SHALL be converted to equivalent Java methods
+2. WHEN conversion is applied, VSAM file I/O SHALL be replaced with database calls
+3. WHEN the Java service runs, output SHALL match COBOL program output for test cases
+   **Handled by:** AWS Transform Mainframe Agent
+```
+
+## Example Tasks
+
+```
+- [ ] 1. Inventory and dependency analysis
+  - [ ] 1.1 Scan COBOL sources and JCL
+  - [ ] 1.2 Map CALL chains and COPY dependencies
+- [ ] 2. Convert COBOL programs to Java (AWS Transform)
+  - [ ] 2.1 Start mainframe modernization job
+  - [ ] 2.2 Handle Collaborator Requests (data mapping decisions)
+  - [ ] 2.3 Review diffs — user approves converted code
+- [ ] 3. Migrate data stores
+  - [ ] 3.1 Convert VSAM to Aurora PostgreSQL schema
+  - [ ] 3.2 Migrate data
+- [ ] 4. Validation
+  - [ ] 4.1 Run test cases comparing COBOL vs Java output
+```
+
+## Known API Behaviors
+
+These behaviors differ between the MCP API and the AWS Transform webapp.
+
+### Source Code Upload
+
+The agent requires source code as a **single .zip file** in S3. When the "Specify resource location" task appears, `assetLocation` must point to a `.zip` file.
+
+### Business Logic Extraction (BRE) Configuration
+
+When the "Configure settings" task appears for BRE (`MainframeBreInputComponent`), you MUST always populate the `userSelectedFiles` array — regardless of `reportScope`.
+
+- `applicationLevel` — produces a single application-wide business rules summary
+- `fileLevel` — produces per-file business rules reports
+
+Both scopes require the file list. The webapp auto-selects all files for `applicationLevel`, but the API does not — you must explicitly list them.
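+
+For illustration, a `complete_task` content payload for this BRE task might look like the sketch below — `reportScope` and `userSelectedFiles` are the documented fields; treat the exact shape as defined by the task's `_outputSchema`, and the file paths here are placeholders:
+
+```json
+{
+  "reportScope": "applicationLevel",
+  "userSelectedFiles": ["src/ACCT001.cbl", "src/ACCT002.cbl", "copybooks/ACCTREC.cpy"]
+}
+```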
+
+## Known Limitations
+
+- Assembler programs (ASM) are not handled by AWS Transform agents — the IDE can analyze but not convert
+- PL/I is supported for BRE and documentation only — not for refactoring
+- CICS BMS screen conversion may need manual UI design decisions
+- Complex SORT/MERGE JCL steps may need manual review
+- Performance tuning of converted Java code is not automated
+- Reforge quota: 3M lines of code per job, 50M lines of code per user per month
diff --git a/plugins/aws-transform/skills/aws-transform/references/sql.md b/plugins/aws-transform/skills/aws-transform/references/sql.md
new file mode 100644
index 00000000..5c5d08dc
--- /dev/null
+++ b/plugins/aws-transform/skills/aws-transform/references/sql.md
@@ -0,0 +1,787 @@
+# SQL Database Migration
+
+> **Last Updated:** 2026-04-13
+
+## Capabilities
+
+This domain handles **SQL database migration via the IDE** using the AWS Transform MCP server. It supports two workflows:
+
+1. **From-Scratch Workflow** — Start a new AWS Transform conversion job entirely from the IDE: authenticate, create a workspace/job, upload source SQL, monitor the conversion, review assessment results, and retrieve converted artifacts.
+2. **Handoff Workflow** — Pick up where an AWS Transform conversion job left off: download converted artifacts and the validation report, then interactively fix all critical and high-severity issues the AWS Transform agent could not fully resolve.
+
+Both workflows converge at the fix-application phase once converted artifacts and a validation report are available.
+
+```
+From-Scratch Workflow                Handoff Workflow
+┌──────────────────────────┐        ┌──────────────────────────┐
+│ 1. Authenticate          │        │ 1. Collect job IDs       │
+│ 2. Create workspace/job  │        │ 2. Download artifacts    │
+│ 3. Upload source SQL     │        └───────────┬──────────────┘
+│ 4. Monitor conversion    │                    │
+│ 5. Read assessment       │                    │
+│ 6. Trigger schema conv.  │                    │
+│ 7. Retrieve artifacts    │                    │
+└───────────┬──────────────┘                    │
+            │      ┌────────────────────────────┘
+            ▼      ▼
+┌──────────────────────────────────────────────────┐
+│ Common: Parse report → Fix critical/high issues  │
+│ → Validate → Upload → Deploy                     │
+└──────────────────────────────────────────────────┘
+```
+
+## ⚠️ Agent Behavior Rules (READ FIRST)
+
+These rules are MANDATORY. Violating any of them is a failure.
+
+> **Cross-platform note.** Commands below are given in two variants where they differ: **macOS/Linux/Git Bash** (POSIX shell: `cp`, `sed`, `perl`, etc.) and **Windows (PowerShell)**. Detect the OS and pick one variant — do not mix. In all PowerShell examples, paths are written with forward slashes (`/`) for readability; PowerShell accepts `/` and `\` interchangeably, so real Windows paths like `C:\Users\alice\work` can be used verbatim without re-quoting.
+
+1. **Three-file workflow.** Keep the original source MSSQL file, the converted PG file (untouched), and a working copy of the converted PG file. ALL edits go to the working copy. NEVER modify the original source or the converted PG file.
+2. **Navigate before AND after every fix. Skipping navigation is a rule violation.**
+   - Detect the IDE from the environment: Kiro → `kiro --goto`, VS Code → `code --goto`, DBeaver → manual navigation.
+   - **Before showing a fix**, open the file at the exact line that will change:
+
+     ```
+     code --goto <file>:<line>
+     ```
+
+   - **After applying a fix**, navigate back to the changed line so the user sees the result:
+
+     ```
+     code --goto <file>:<line>
+     ```
+
+3. **For every fix, follow this exact sequence:**
+   a. **Navigate** to the line (rule 2 above). Do NOT skip this.
+   b. **Validate the proposed fix** — Before showing the fix to the user, execute a syntax check (e.g., `EXPLAIN` or parse-only) on the proposed SQL block. If invalid, revise and re-validate. Do NOT propose an invalid fix.
+   c. **Show in chat:** object name, line number, problem (from report), proposed before/after fix, and syntax validation result (✅ Passed).
+   d. **Ask** `Approve this fix? [Yes / No / Modify]` and **STOP**. Do NOT proceed until the user responds.
+   e. **After user approves**, apply the fix using `execute_bash` (NOT `fs_write`). Use `sed -i ''` or `perl -i -pe 's/old/new/g'` on macOS/Linux (Perl works cross-platform when available). On Windows without Git Bash, use PowerShell's literal-string `.Replace()` method (NOT `-replace`, which is a regex operator and will silently mis-match SQL tokens containing regex metacharacters like `[ ] . ( ) $ ^`). Write with `[System.IO.File]::WriteAllLines` and an explicit no-BOM UTF-8 encoding — `Set-Content -Encoding UTF8` on Windows PowerShell 5.1 prepends a UTF-8 BOM (`0xEF 0xBB 0xBF`) that Rule 10's ASCII verification step will flag. Wrap the `-Command` argument in **single quotes** so bash/Git Bash does not expand `$c` / `$false` before PowerShell parses them, and prepend `$ErrorActionPreference='Stop'` so intermediate failures surface instead of silently producing an empty working copy:
+
+     ```
+     powershell -Command '$ErrorActionPreference="Stop"; $c = (Get-Content "<working_copy>" -Encoding UTF8).Replace("old","new"); [System.IO.File]::WriteAllLines("<working_copy>", $c, [System.Text.UTF8Encoding]::new($false))'
+     ```
+
+     If regex matching is actually required, use `-replace [regex]::Escape("old"),"new"`. Note: `-replace`'s replacement string interprets `$1`, `$2`, `$$`, `$&`, etc. as regex substitution tokens, so if the replacement text contains literal `$` characters (common in PostgreSQL dollar-quoting like `$$`, `$BODY$`), double them: `-replace [regex]::Escape("old"), "new".Replace("$","$$")`.
+   f. **Navigate back** to the changed line (rule 2 above). Do NOT skip this.
+4. **Diff after every cluster.** After completing all fixes in a cluster, show a three-way diff comparing the original source MSSQL, the converted PG file, and the working copy.
+
+   **Empty-diff guard (used by every IDE branch below):** Before showing the second diff (converted PG vs working copy), check whether the two files are identical. If they are, skip the second diff and tell the user: "No fixes have been applied yet — the converted PG file and working copy are still identical."
+   - macOS/Linux/Git Bash: `diff --brief <converted_pg> <working_copy>` (exit 0 = identical)
+   - Windows (PowerShell): `powershell -Command "if ((Get-FileHash '<converted_pg>').Hash -eq (Get-FileHash '<working_copy>').Hash) { exit 0 } else { exit 1 }"` (exit 0 = identical)
+
+   - **VS Code:** Open two diffs side by side:
+     `code --diff <source_mssql> <converted_pg>`
+     Only if the converted PG file and working copy differ (empty-diff guard above):
+     `code --diff <converted_pg> <working_copy>`
+   - **Kiro:** Open two diffs side by side:
+     `kiro --diff <source_mssql> <converted_pg>`
+     Only if the converted PG file and working copy differ (empty-diff guard above):
+     `kiro --diff <converted_pg> <working_copy>`
+   - **DBeaver:** Use the SQL Compare feature or an external diff tool
+   - **Fallback:** If the IDE diff commands do not produce visible results, generate text-based diffs in chat.
+     - macOS/Linux/Git Bash:
+       `diff -u <source_mssql> <converted_pg>`
+       Only if the converted PG file and working copy differ (empty-diff guard above):
+       `diff -u <converted_pg> <working_copy>`
+     - Windows (PowerShell): use `Compare-Object`:
+       `powershell -Command "Compare-Object (Get-Content '<source_mssql>' -Encoding UTF8) (Get-Content '<converted_pg>' -Encoding UTF8)"`
+       Only if the converted PG file and working copy differ (empty-diff guard above):
+       `powershell -Command "Compare-Object (Get-Content '<converted_pg>' -Encoding UTF8) (Get-Content '<working_copy>' -Encoding UTF8)"`
+       Note: `Compare-Object` output is a side-indicator list (`<=` for left-only, `=>` for right-only) rather than unified-diff format. Show the output to the user as-is — it conveys the same information in a different layout.
+
+   If two diffs were shown, tell the user:
+   > "Two diff views have been opened: one comparing the source MSSQL with the converted PG file (showing what the conversion changed), and one comparing the converted PG file with the working copy (showing what fixes have been applied). You may need to arrange the diff tabs side by side."
+   > Then ask the user:
+   > "Cluster fixes applied. Would you like to upload the working copy to AWS Transform for re-validation before proceeding to the next cluster? [Yes / No]"
+   - If **Yes**: Prepare a ZIP artifact for re-validation:
+     1. Create the staging directory:
+        - macOS/Linux/Git Bash: `execute_bash: mkdir -p <working_dir>/zip_staging`
+        - Windows (PowerShell): `execute_bash: powershell -Command "New-Item -ItemType Directory -Force -Path '<working_dir>/zip_staging'"`
+     2. Copy the working copy as `postgres-completed-deployment.sql`:
+        - macOS/Linux/Git Bash: `execute_bash: cp <working_copy> <working_dir>/zip_staging/postgres-completed-deployment.sql`
+        - Windows (PowerShell): `execute_bash: powershell -Command "Copy-Item '<working_copy>' '<working_dir>/zip_staging/postgres-completed-deployment.sql'"`
+     3. Copy the custom rules file(s) from the working directory:
+        - macOS/Linux/Git Bash: `execute_bash: cp <working_dir>/<custom_rules_file> <working_dir>/zip_staging/`
+        - Windows (PowerShell): `execute_bash: powershell -Command "Copy-Item '<working_dir>/<custom_rules_file>' '<working_dir>/zip_staging/'"`
+     4. Create `manifest.json` in `zip_staging/` with database name, execution order (`file: "postgres-completed-deployment.sql"`, `type: "all"`, `object_count: <count>`), and `"custom_rules"` listing the custom rules filenames
+     5. Create ZIP:
+        - macOS/Linux: `execute_bash: cd <working_dir>/zip_staging && rm -f ../IDE_CONVERTED_DB_ARTIFACT.zip && zip ../IDE_CONVERTED_DB_ARTIFACT.zip *`
+        - Windows: `execute_bash: powershell -Command "Compress-Archive -Path '<working_dir>/zip_staging/*' -DestinationPath '<working_dir>/IDE_CONVERTED_DB_ARTIFACT.zip' -Force"`
+     6. Clean up:
+        - macOS/Linux/Git Bash: `execute_bash: rm -rf <working_dir>/zip_staging`
+        - Windows (PowerShell): `execute_bash: powershell -Command "Remove-Item -Recurse -Force '<working_dir>/zip_staging'"`
+     7. Upload via `upload_artifact` with `content="<working_dir>/IDE_CONVERTED_DB_ARTIFACT.zip"`, `fileName="IDE_CONVERTED_DB_ARTIFACT"`, `fileType="ZIP"`, `categoryType="CUSTOMER_INPUT"`.
+     8. Send `send_message` with text "IDE agent completed all critical and high-severity fixes. Uploaded corrected file as IDE_CONVERTED_DB_ARTIFACT. Ready for re-validation through invoke_validation_after_ide with new artifact id:`<artifactId>`". Poll messages for "Validation complete", download new report, and present results. If new issues are found in this cluster, fix them before moving on.
+   - If **No**: Proceed to the next cluster.
+5. **No scripts.** Do NOT write Python, Bash, or any scripts to batch-process fixes.
+6. **Use `sed`, `perl`, or PowerShell for file edits, NOT `fs_write`.** Large SQL files (10K+ lines) cause the editor to freeze when using `fs_write`. Use `execute_bash` to edit the working copy directly on disk:
+   - macOS/Linux/Git Bash: `sed -i '' 's/old/new/g' <working_copy>` or `perl -i -pe 's/old/new/g' <working_copy>`
+   - Windows (PowerShell): use the literal-string `.Replace()` method (NOT `-replace`, which is a regex operator). Read with `Get-Content -Encoding UTF8` to avoid Windows PowerShell 5.1's ANSI code-page corruption, and write with `[System.IO.File]::WriteAllLines` + no-BOM UTF-8 (NOT `Set-Content -Encoding UTF8`, which prepends a UTF-8 BOM on PS 5.1 that Rule 10's ASCII verification will flag). Wrap the `-Command` argument in **single quotes** so bash/Git Bash does not expand `$c` / `$false` before PowerShell parses them, and prepend `$ErrorActionPreference='Stop'` so intermediate failures surface instead of silently producing an empty working copy: `powershell -Command '$ErrorActionPreference="Stop"; $c = (Get-Content "<working_copy>" -Encoding UTF8).Replace("old","new"); [System.IO.File]::WriteAllLines("<working_copy>", $c, [System.Text.UTF8Encoding]::new($false))'`. Use `-replace [regex]::Escape("old"),"new"` only if regex matching is actually required; when using `-replace`, also double any literal `$` in the replacement string (`"new".Replace("$","$$")`) because `-replace` interprets `$1`, `$$`, etc. as regex substitution tokens — which silently corrupts PostgreSQL dollar-quoting like `$$` and `$BODY$`.
+7. **No bulk operations.** Fix one object at a time.
+8. **No invented fixes.** Only apply fixes described in the validation report.
+9. **Report every fix in chat** with: object name, line number in working copy, problem (from report), proposed fix, and updated progress table.
+10. **Encoding safety before upload.** Before creating a ZIP for upload, sanitize non-ASCII characters that can corrupt during ZIP packaging and cause the validator to fail with `total_checks: 0`.
+
+**Sanitize and verify (cross-platform, using Perl):**
+
+```
+perl -i -CSD -pe "s/\x{2014}/--/g; s/\x{2013}/-/g; s/[\x{2018}\x{2019}]/'/g; s/[\x{201c}\x{201d}]/\"/g; s/\x{2026}/.../g; s/\x{a0}/ /g" <file>
+perl -ne 'if(/[^\x00-\x7F]/){print "$ARGV:$.: $_"; $f++} END{exit !$f}' <file> || echo "Clean: ASCII only"
+```
+
+Perl is available on macOS and Linux by default, and on Windows via Git Bash.
+
+**Windows alternative (PowerShell):**
+
+```
+powershell -Command "
+  $text = [IO.File]::ReadAllText('<file>', [Text.Encoding]::UTF8);
+  $text = $text -replace '\u2014','--' -replace '\u2013','-' -replace '[\u2018\u2019]',\"'\" -replace '[\u201c\u201d]','\"' -replace '\u2026','...' -replace '\u00a0',' ';
+  $utf8NoBom = New-Object System.Text.UTF8Encoding $false;
+  [IO.File]::WriteAllText('<file>', $text, $utf8NoBom);
+  if ($text -match '[^\x00-\x7F]') { Write-Host 'WARNING: non-ASCII characters remain' } else { Write-Host 'Clean: ASCII only' }
+"
+```
+
+Common offenders: em-dash (`—`), en-dash (`–`), smart quotes (`""''`). These appear in MSSQL-generated comment headers and corrupt from UTF-8 to Windows-1252 `0x97` during ZIP packaging.
+
+---
+
+## From-Scratch Workflow
+
+Use this workflow when starting a new MSSQL → PostgreSQL conversion entirely from the IDE. If you already have a completed AWS Transform job with artifacts, skip to the **Handoff Workflow** section.
+
+### Step 1: Authentication
+
+- **Cookie auth requires the Transform app URL** — Ask the user for their Transform app URL (e.g., `https://xxxxxxxx.transform.us-east-1.on.aws`).
+- Do **not** use the SSO/IdC start URL (`https://d-xxx.awsapps.com/start`).
+- If the auth cookie has expired, ask the user for a new one.
+
+### Step 2: Create Workspace and Job
+
+- **Unless the user explicitly says to create a new workspace/job, always ask first:**
+  > "Do you have an existing AWS Transform workspace or job you'd like to reuse, or should I create new ones?"
+- **If reusing an existing workspace:** List all available workspaces so the user can see names and IDs, then ask which one to use. Then ask if they also have an existing job to reuse or need a new job in that workspace.
+- **If reusing an existing job:** List all jobs in the selected workspace so the user can see job names and IDs, then ask which one to use. Use artifact store tools to check what outputs the agent has already produced — the job may already be past the upload or assessment phase.
+- **If creating a new workspace:** Names must match `[a-zA-Z0-9]+(?:[-_\.][a-zA-Z0-9]+)*` — no spaces allowed.
+- **If creating a new job:** MSSQL to PostgreSQL conversion uses `WINDOWS_DATABASE` (SQL Server modernization) as the job type.
+- **Always choose MSSQL file upload (NOT the connect-to-database option) after starting the job.**
+
+### Step 3: SQL File Upload
+
+- The source SQL file can be either a `.sql` file or a `.zip` archive containing SQL files. Both formats are accepted by AWS Transform. **Upload the file as-is — do NOT zip a `.sql` file before uploading.**
+- If the ActiveFile is an MSSQL file, use that.
+- If not, ask for the user's permission and search locally for files — use `fileSearch` to find `.sql` or `.zip` files on the user's machine instead of asking for the full path.
+- Upload the file as an artifact first using `upload_artifact`.
+- Then send the artifact reference in chat using the URI format:
+
+  ```
+  aws-transform://workspaces/{workspaceId}/jobs/{jobId}/artifacts/{artifactId}
+  ```
+
+- **Don't send bare artifact IDs** — the agent won't recognize them. It needs the full `aws-transform://` URI.
+
+### Step 4: Interacting with the Agent
+
+- **Always use `send_message` as the primary form of communication with the job agent.**
+- **Messages can have interaction buttons** — Agent messages include `SELECT` interactions with options. Respond by sending the option's `value` as a chat message.
+
+### Step 5: Monitoring Job Progress
+
+- The job is carried out by AWS Transform agents; all communication and updates are wired through them.
+- **Always check all three**: messages, worklogs, and HITL tasks.
+- **Worklogs** are the most granular progress indicator — they update more frequently than messages and show the agent's internal reasoning/actions step by step.
+- **Messages** show user-facing interactions with SELECT options (e.g. "MSSQL to PostgreSQL", "Upload DDL files").
+- **HITL tasks** may or may not appear — the agent can request input via chat messages instead.
+- **No HITL tasks doesn't mean no input needed** — The job can be in `AWAITING_HUMAN_INPUT` with no HITL tasks; input is expected via chat messages.
+- Jobs go through phases: `STARTING` → `PLANNING` → `AWAITING_HUMAN_INPUT` → active processing.
+- The `input_setup` step handles file ingestion before moving to `discovery`.
+- Planning can take a few minutes — no tasks or messages will appear until it completes.
+
+**Polling loop (mandatory throughout the job lifecycle):**
+
+After any action that triggers agent work (file upload, schema conversion, etc.), enter a polling loop:
+
+1. Use **mcp-sleep** to wait 10 seconds.
+2. Check messages, worklogs, and HITL tasks.
+3. **Format timestamps** — Worklog and message timestamps may be returned as epoch milliseconds or numeric values (e.g., `140638`). Always convert these to human-readable format (e.g., `2026-04-13 21:06:38 UTC`) before displaying to the user.
+4. If the agent sent a message requiring a response (SELECT options, questions, or `AWAITING_HUMAN_INPUT` status), respond or prompt the user.
+5. If the agent is still processing (no new actionable messages), go back to step 1.
+6. Continue polling until the current phase completes (e.g., assessment report is ready, schema conversion finishes, validation completes).
+
+Do NOT stop polling prematurely — always keep the loop running until there is a clear completion signal or user input is needed.
+
+### Step 6: Reading Assessment Results
+
+- Download the assessment report zip artifact.
+- Extract the zip file and review all contents — it typically contains PDF reports and CSV data files.
+- **PDF reports:**
+  - `assessment_summary.pdf` is a short document with a summary of the assessment.
+  - The other report PDF is a longer document with detailed findings of migration issues with the database.
+- **CSV files:** Contain structured data such as object inventories, issue lists, and migration action items. Read and present these to the user — they are useful for programmatic analysis and tracking.
+
+### Step 7: Schema Conversion
+
+- When the user is ready to proceed to schema conversion, send this intent to the agent.
+- **Custom transformation rules:** During conversion, the AWS Transform agent may ask about custom transformation rules. **Do NOT assume default rules.** Always ask the user:
+  > "The conversion agent is asking about custom transformation rules. Do you have custom rules to upload, or should I proceed with default rules?"
+  > Only proceed with defaults if the user explicitly confirms.
+- Schema conversion is a long-running process — expect to poll for job progress (see Step 5).
+- After the schema conversion report is available, review it with the user. **Do NOT skip ahead to downloading the converted PostgreSQL file** — it is not available yet.
+- The converted PostgreSQL SQL file is only produced after the **target DB deployment workflow** completes. After reviewing the conversion report, continue interacting with the AWS Transform agent to proceed through the deployment workflow (see Step 8).
+
+### Step 8: Target DB Deployment
+
+- After schema conversion and report review, the AWS Transform agent will guide the workflow toward deploying the converted schema to the target PostgreSQL database.
+- Continue polling and responding to the agent's messages through this phase (see Step 5).
+- **Do NOT assume the user already has a target database cluster.** The deployment flow requires setting up a DB connector first.
+
+**DB Connector Setup:**
+
+The AWS Transform agent will issue a HITL task to set up a DB connector. Before creating a new one, check if the workspace already has a connector configured:
+
+1. **List existing connectors** in the workspace first. If connectors exist, present them to the user:
+   > "This workspace already has the following DB connector(s) configured:
+   >
+   > - `<connectorName>` — `<connectorId>`
+   >
+   > Would you like to use an existing connector, or set up a new one?"
+2. **If the user picks an existing connector**, use that and skip connector creation.
+3. **If a new connector is needed**, the HITL task will require details from the user. Ask for:
+   - AWS Account ID
+   - Any other connection parameters the HITL task requests (e.g., database endpoint, credentials, VPC details)
+
+   Do NOT guess or assume any of these values — always ask the user.
+
+- **The converted PostgreSQL SQL file (`ATX_CONVERTED_DB_ARTIFACT` or `postgres-completed-deployment.sql`) is only available as an artifact after the deployment workflow completes.** Do NOT attempt to download it before this point — it won't exist yet.
+- Once deployment finishes, proceed to retrieve the artifacts (see Step 9).
+
+### Step 9: Retrieving Conversion Artifacts
+
+```
+# List all artifacts from the completed AWS Transform job
+list_resources resource="artifacts" workspaceId="<workspaceId>" jobId="<jobId>"
+```
+
+Scan the returned list for matching artifact names/labels. AWS Transform MCP does not support filtering by label — you must list all, then match.
+
+- **Scan for converted database artifacts:** Look for all artifacts with `fileMetadata.path` starting with `ATX_CONVERTED_DB_ARTIFACT`. Each artifact represents a converted database, with the database name as the suffix: `ATX_CONVERTED_DB_ARTIFACT_<DatabaseName>`.
+
+  Extract the database names and present them to the user:
+  > "Converted artifacts are available for **N** database(s):
+  >
+  > 1. `OrdersDB`
+  > 2. `InventoryDB`
+  > 3. `AuditDB`
+  >
+  > Which database would you like to work on?"
+
+  Wait for the user to choose. Then download the selected artifact:
+
+  ```
+  get_resource resource="artifact" workspaceId="<workspaceId>" jobId="<jobId>" artifactId="<artifactId>" savePath="/local/path/ATX_CONVERTED_DB_ARTIFACT_<DatabaseName>.zip"
+  ```
+
+  Extract the ZIP — contains: `sourcesql/`, `targetsql/`, `validationreport/`. Read `targetsql/manifest.json` to discover the converted SQL files and custom rules — file paths in the manifest are relative to `targetsql/`.
+
+- **Fallback (single artifact):** If no `ATX_CONVERTED_DB_ARTIFACT_*` artifacts are found, look for a single `ATX_CONVERTED_DB_ARTIFACT` (no suffix) and use it directly.
+
+- **Fallback (individual artifacts):** If no ZIP artifacts are found, look for:
+  - `postgres-completed-deployment.sql` → converted PG file
+  - `Schema Conversion Report` → validation report
+  - Then ask user for the source SQL file path
+
+Once artifacts are retrieved, proceed to the **Common Workflow** section below.
+
+---
+
+## Handoff Workflow
+
+Use this workflow when an AWS Transform conversion job has already completed and you need to pick up the remaining fixes in the IDE.
+
+### Why IDE Handover
+
+The IDE agent complements the AWS Transform agentic flow by providing capabilities that a managed batch pipeline cannot:
+
+- **Interactive fixing** — Apply a fix, show the user the exact change, wait for approval or adjustment, then proceed.
+- **Cross-object awareness** — The IDE has the full converted file open and can cross-reference across procedures, views, and tables while editing.
+- **Granular human-in-the-loop** — Pause at every individual object for user approval instead of coarse-grained job checkpoints.
+- **Local tooling** — Run PostgreSQL syntax validation locally, show diffs, and use language intelligence for SQL editing.
+- **Long-tail remediation** — Remaining broken objects each have unique issues requiring case-by-case judgment.
+- **Full transparency** — Every fix is reported in chat with what changed and why.
+
+### What AWS Transform Handles vs What the IDE Agent Handles
+
+| Responsibility | AWS Transform Agent | IDE Agent (this steering) |
+| ---------------------------------- | ------------------- | ---------------------------------------------- |
+| Schema conversion (DDL) | ✅ | — |
+| Bulk stored proc rewrite | ✅ | Fix remaining broken procs flagged in report |
+| Data type mapping | ✅ | Fix type mapping violations flagged in report |
+| View creation | ✅ | Resolve failed views flagged in report |
+| Index migration | ✅ | Re-create missing indexes flagged in report |
+| Validation report generation | ✅ | Parse and act on report |
+| Critical/high-severity remediation | — | ✅ |
+| Application code changes | — | Out of scope (flag for user) |
+
+### Step 1: Collect AWS Transform Job Context
+
+Prompt user for AWS Transform job identifiers:
+
+- `workspaceId` — The AWS Transform workspace where the conversion job ran
+- `jobId` — The AWS Transform job that performed the MSSQL → PostgreSQL conversion
+- `agentInstanceId` — The agent instance that produced the conversion artifacts
+
+### Step 2: Retrieve Artifacts from AWS Transform
+
+Once the user provides identifiers, list all job artifacts and match by name/label:
+
+```
+# List all artifacts
+list_resources resource="artifacts" workspaceId="<workspaceId>" jobId="<jobId>"
+```
+
+- **Scan for converted database artifacts:** Look for all artifacts with `fileMetadata.path` starting with `ATX_CONVERTED_DB_ARTIFACT`. Each artifact represents a converted database: `ATX_CONVERTED_DB_ARTIFACT_<DatabaseName>`.
+
+  Extract the database names and present them:
+  > "This job has converted artifacts for **N** database(s):
+  >
+  > 1. `OrdersDB`
+  > 2. `InventoryDB`
+  >
+  > Which database would you like to fix?"
+
+  Wait for the user to choose. Then download:
+
+  ```
+  get_resource resource="artifact" workspaceId="<workspaceId>" jobId="<jobId>" artifactId="<artifactId>" savePath="/local/path/ATX_CONVERTED_DB_ARTIFACT_<DatabaseName>.zip"
+  ```
+
+  Extract the ZIP — contains: `sourcesql/`, `targetsql/`, `validationreport/`. Read `targetsql/manifest.json` to discover the converted SQL files and custom rules — file paths in the manifest are relative to `targetsql/`.
+
+- **Fallback (single artifact):** If no `ATX_CONVERTED_DB_ARTIFACT_*` found, look for a single `ATX_CONVERTED_DB_ARTIFACT` (no suffix) and use it directly.
+
+- **Fallback (individual artifacts):** If no ZIP artifacts are found, scan for:
+  - `postgres-completed-deployment.sql` → converted PG file
+  - `Schema Conversion Report` → validation report
+  - Ask user for the source SQL file separately
+
+- **Fallback (no artifacts):** If no artifacts are found or no AWS Transform job is referenced, ask user for local file paths to:
+  - Original source SQL file (T-SQL / SQL Server)
+  - Converted PostgreSQL SQL file
+  - Conversion validation report (HTML or text)
+
+Once artifacts are retrieved, proceed to the **Common Workflow** section below.
+
+---
+
+## Common Workflow (Both Paths Converge Here)
+
+Once you have the source SQL, converted PostgreSQL SQL, and validation report — regardless of whether you arrived via the from-scratch or handoff path — follow these steps.
+
+### Step 1: Check PostgreSQL Extension
+
+Verify the user has a PostgreSQL extension installed in their IDE. If not installed, ask:
+
+> "A PostgreSQL extension is required to validate fixes before applying them. Please install one:
+>
+> - **VS Code:** Install `ms-ossdata.vscode-postgresql` or `ckolkman.vscode-postgres` from the Extensions marketplace
+> - **Kiro:** Install the PostgreSQL extension from the Extensions panel
+> - **DBeaver:** Built-in — no additional install needed
+>
+> Once installed, configure a connection profile for the target Aurora PostgreSQL cluster and let me know the profile name."
+
+### Step 2: Set Fix Autonomy Mode
+
+Ask the user:
+
+> "How should I handle fix approvals?
+>
+> - **per-fix** (default) — I propose each fix and wait for your approval before applying it
+> - **per-cluster** — I show proposed fixes for the first 2–3 issues in a cluster so you can review the approach, then apply all fixes in the cluster after your approval. A cluster diff is shown after applying.
+> - **autonomous** — I apply all critical/high fixes end-to-end and present a full diff and summary at the end
+>
+> Choose [per-fix / per-cluster / autonomous]:"
+
+Store the chosen mode for the session. The user can switch mid-session by saying e.g. `"Switch to per-cluster mode"`. Mode affects approval behavior:
+
+- `per-fix` — Rule 3d (approval prompt before every fix) is enforced.
+- `per-cluster` — Rule 3d is skipped. The agent shows proposed before/after fixes for the first 2–3 issues as a preview, asks for approval, then applies all fixes in the cluster. Rule 4 (cluster diff) is shown after applying.
+- `autonomous` — The Rule 3d and Rule 4 approval prompts are skipped. Fixes are still logged in chat (Rule 9) and a final summary is shown.
+
+### Step 3: Present Available Commands
+
+Before starting fixes, inform the user:
+
+> "Before we begin, here's what you can ask me to do at any time:
+>
+> - **Show diff** — Display a diff of all changes made so far (three-way diff of source MSSQL, converted PG, and working copy). The converted PG vs working copy diff is skipped if the files are still identical.
+> - **Run validation** — Upload the working copy to AWS Transform for functional re-validation and get a new report
+> - **Show progress** — Print the current progress table with cluster statuses
+> - **Switch autonomy mode** — Change fix approval mode (per-fix / per-cluster / autonomous)
+> - **Skip cluster** — Skip the current cluster and move to the next one
+> - **Revert cluster** — Undo all fixes in the current cluster
+> - **Fix a medium/low item** — Ask me to fix a specific advisory item
+> - **I'm done** — End the session, upload the final file to AWS Transform, and optionally deploy
+>
+> Let's start."
+
+### Step 4: Parse Report and Prioritize
+
+1. **Parse the Issue Clusters table** — Each row has a severity emoji (🔴🟠🟡🟢), a description, and a score deduction.
+2. **Filter to critical and high** — Only 🔴 Critical and 🟠 High clusters are in scope for auto-fixing.
+3. **For each in-scope cluster, read "What Needs Immediate Attention"** — Contains affected objects, risk description, and recommended SQL fixes.
+4. **For medium/low clusters** — Present as advisory items. Do not auto-fix unless user explicitly requests.
+5. **For info/warning items** — Note in summary but take no action.
+6. **Separate load failures from conversion defects** — Schema Loading Notes distinguish objects that failed to load due to missing dependencies vs true migration issues.
+7. **Read the custom rules file** — Parse the custom rules JSON from `targetsql/` (listed in `manifest.json`'s `custom_rules` array). Understand the current type mappings, extensions, and naming conventions. This context is needed when applying fixes — if a fix contradicts or extends these rules, you'll update the custom rules file in Step 6 (and verify in Step 7).
+
+### Step 5: Set Up Three-File Workflow
+
+1. **Ensure the working directory is trusted** — Ask the user:
+   > "Which directory should I use for the working files? It should be a directory already open and trusted in your IDE. This avoids permission prompts that can freeze the chat."
+
+2. Read `targetsql/manifest.json` to identify the converted SQL file(s) from `execution_order` and the custom rules file(s) from `custom_rules`. File paths in the manifest are relative to `targetsql/`.
+3. Copy the converted PG file (the first `file` in `execution_order`) to a working copy (e.g., `postgres-deployment-fixing.sql`). Keep the original converted PG file as a read-only reference. All fixes go to the working copy only.
+4. Copy the custom rules file(s) to the working directory. All custom rules edits go to this working copy — the original in `targetsql/` stays untouched.
+   - macOS/Linux/Git Bash: `cp <targetsql_dir>/<custom_rules_file> <working_dir>/`
+   - Windows (PowerShell): `powershell -Command "Copy-Item '<targetsql_dir>/<custom_rules_file>' '<working_dir>/'"`
+5. Open the source SQL, converted PG file, and working copy in the editor:
+   - **Kiro:** `kiro <source_mssql>` then `kiro --reuse-window <converted_pg>` then `kiro --reuse-window <working_copy>`
+   - **VS Code:** `code <source_mssql> <converted_pg> <working_copy>`
+   - **DBeaver:** Open all files via File → Open
+6. Tell the user:
+   > "I've opened three files: the source MSSQL, the converted PG file, and the working copy. You can arrange them side by side. The source MSSQL and converted PG files are for reference only — all fixes go to the working copy."
+
+### Step 6: Apply Fixes in Priority Order
+
+Apply fixes in dependency order to avoid cascading failures:
+
+1. **Prerequisites** — Extensions, schemas, and any infrastructure the report identifies as missing
+2. **Type/column fixes** — Data type corrections flagged as critical or high
+3. **Views** — Resolve dependency issues, then re-create failed views in order
+4. **Stored procedures and functions** — Apply each fix pattern described in the report
+5. **Indexes** — Re-create missing indexes
+6. **Validate** — Confirm syntax validity and that all critical/high items are addressed
+
+**Custom rules — update as you go:** When a fix establishes a reusable conversion pattern (type mapping, extension, naming), update the custom rules file immediately:
+
+- **Overwriting an existing rule** (e.g., changing `BIT → BOOLEAN` to `BIT → NUMERIC`) → **ask the user for confirmation** before modifying. Do NOT overwrite silently.
+- **Adding a new rule** → In per-fix mode, ask confirmation. In per-cluster/autonomous mode, just inform the user and add it.
+
+This keeps custom rules in sync with the actual conversion. Validation uses these rules to score — inaccurate rules cause false positives.
+
+### Step 7: Verify Custom Rules
+
+**Why this matters:** The validation sub-agent uses the custom rules file to score the conversion. If the custom rules don't reflect the actual type mappings and patterns used in the converted SQL, validation will flag false positives and lower the score. Keeping custom rules accurate ensures validation results are meaningful.
+
+Before uploading, verify that the custom rules file reflects all conversion patterns applied during this session:
+
+1. **Review changes made** — For each fix that changed a type mapping, added an extension, or established a naming pattern, confirm the custom rules file was updated during Step 6.
+2. **Check for missed updates** — If any fixes were applied that should have updated custom rules but didn't (e.g., you changed a `BIT` column to `NUMERIC` but the rules still say `BIT → BOOLEAN`), update them now following the rules below.
+3. **Confirm with user if overwriting** — If any rule was modified (not just added), confirm the user approved the change during Step 6. If not, ask now.
+
+**Custom rules file format:**
+
+```json
+{
+  "extensions": {
+    "base_extensions": ["citext", "uuid-ossp", "pgcrypto"],
+    "additional_extensions": []
+  },
+  "type_mappings": [
+    {
+      "rule-id": "<rule-id>",
+      "rule-name": "<rule-name>",
+      "source-type": "<source-type>",
+      "target-type": "<target-type>",
+      "precision": null,
+      "scale": null
+    }
+  ],
+  "naming": {
+    "casing": "lowercase",
+    "schema_mappings": {},
+    "strip_schema_prefixes": []
+  }
+}
+```
+
+**What to update:**
+
+- **Type mapping fixes** → Add or modify entries in `type_mappings`. Use a descriptive `rule-id` (e.g., "bit", "money").
+- **Extension requirements** → Add to `extensions.additional_extensions` if a fix requires a PostgreSQL extension not in `base_extensions`.
+- **Naming conventions** → Update `naming.schema_mappings` or `naming.casing` if applicable.
+
+Save the updated custom rules file to the working directory. It will be included in the next upload ZIP (Step 8 uses the updated version).
+
+### Step 8: Upload Fixed File to AWS Transform
+
+#### Copy working copy and upload to artifact store
+
+- macOS/Linux/Git Bash:
+
+  ```
+  execute_bash: cp <working_copy> <working_dir>/postgres-completed-deployment.sql
+  ```
+
+- Windows (PowerShell):
+
+  ```
+  execute_bash: powershell -Command "Copy-Item '<working_copy>' '<working_dir>/postgres-completed-deployment.sql'"
+  ```
+
+```
+upload_artifact(
+  workspaceId="<workspaceId>",
+  jobId="<jobId>",
+  content="<working_dir>/postgres-completed-deployment.sql",
+  fileName="IDE_CONVERTED_DB_ARTIFACT",
+  fileType="TXT",
+  categoryType="CUSTOMER_INPUT"
+)
+```
+
+#### Notify the AWS Transform job chat
+
+```
+send_message(
+  workspaceId="<workspaceId>",
+  jobId="<jobId>",
+  text="IDE agent completed all critical and high-severity fixes. Uploaded corrected file as IDE_CONVERTED_DB_ARTIFACT. Ready for re-validation through invoke_validation_after_ide with new artifact id:<artifactId>"
+)
+```
+
+### Step 9: Monitor Validation and Continue
+
+Poll AWS Transform job messages for validation status updates:
+
+```
+list_resources(
+  resource="messages",
+  workspaceId="<workspaceId>",
+  jobId="<jobId>"
+)
+```
+
+Filter for messages containing "validation". Display matching messages to show progress. Re-poll every 10 seconds until a message containing **"Validation complete"** appears. Use the **mcp-sleep** tool between polls.
+
+When "Validation complete" is found, extract the artifact ID from the message and download the new report:
+
+```
+get_resource(
+  resource="artifact",
+  workspaceId="<workspaceId>",
+  jobId="<jobId>",
+  artifactId="<artifactId>",
+  savePath="<working_dir>/validation_report_latest.html"
+)
+```
+
+Prompt the user:
+
+> "Validation is complete. New report downloaded.
+>
+> Would you like to:
+>
+> 1. **Review & fix** — Parse the new report and apply another round of fixes
+> 2. **Deploy** — Accept the current state and deploy to the target database
+>
+> Choose [1] or [2]:"
+
+- **If 1:** Go back to Step 4 (Parse Report and Prioritize).
+- **If 2:** Prepare and upload the final converted artifact before deployment:
+  1. Prepare a ZIP bundle in a staging directory:
+     - Create the staging directory:
+       - macOS/Linux/Git Bash: `execute_bash: mkdir -p <working_dir>/zip_staging`
+       - Windows (PowerShell): `execute_bash: powershell -Command "New-Item -ItemType Directory -Force -Path '<working_dir>/zip_staging'"`
+     - Copy working copy as `postgres-completed-deployment.sql`:
+       - macOS/Linux/Git Bash: `execute_bash: cp <working_copy> <working_dir>/zip_staging/postgres-completed-deployment.sql`
+       - Windows (PowerShell): `execute_bash: powershell -Command "Copy-Item '<working_copy>' '<working_dir>/zip_staging/postgres-completed-deployment.sql'"`
+     - Copy the custom rules file(s) from the working directory:
+       - macOS/Linux/Git Bash: `execute_bash: cp <working_dir>/<custom_rules_file> <working_dir>/zip_staging/`
+       - Windows (PowerShell): `execute_bash: powershell -Command "Copy-Item '<working_dir>/<custom_rules_file>' '<working_dir>/zip_staging/'"`
+     - Create `manifest.json` with database name, execution order (`file: "postgres-completed-deployment.sql"`, `type: "all"`, `object_count: <count>`), and `"custom_rules"` listing the custom rules filenames
+  2. Create ZIP:
+     - macOS/Linux: `execute_bash: cd <working_dir>/zip_staging && rm -f ../IDE_CONVERTED_DB_ARTIFACT.zip && zip ../IDE_CONVERTED_DB_ARTIFACT.zip *`
+     - Windows: `execute_bash: powershell -Command "Compress-Archive -Path '<working_dir>/zip_staging/*' -DestinationPath '<working_dir>/IDE_CONVERTED_DB_ARTIFACT.zip' -Force"`
+  3. Clean up:
+     - macOS/Linux/Git Bash: `execute_bash: rm -rf <working_dir>/zip_staging`
+     - Windows (PowerShell): `execute_bash: powershell -Command "Remove-Item -Recurse -Force '<working_dir>/zip_staging'"`
+  4. Upload via `upload_artifact` with `fileName="IDE_CONVERTED_DB_ARTIFACT"`, `fileType="ZIP"`, `categoryType="CUSTOMER_INPUT"`.
+  5. Send a message to the AWS Transform job with the artifact ID:
+
+     ```
+     send_message(
+       workspaceId="<workspaceId>",
+       jobId="<jobId>",
+       text="IDE agent completed all fixes. Final converted artifact uploaded as IDE_CONVERTED_DB_ARTIFACT (artifactId: <artifactId>). Ready for deployment."
+     )
+     ```
+
+  6. Proceed with deployment via AWS Transform job or IDE PostgreSQL extension if the user prefers.
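+
+For reference, the `manifest.json` described in the bundling steps above might look like the sketch below — `execution_order`, `file`, `type`, `object_count`, and `custom_rules` are the documented keys; the `database` key name and all values are illustrative assumptions:
+
+```json
+{
+  "database": "OrdersDB",
+  "execution_order": [
+    { "file": "postgres-completed-deployment.sql", "type": "all", "object_count": 142 }
+  ],
+  "custom_rules": ["custom_rules.json"]
+}
+```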
+
+---
+
+## Report Structure
+
+The validation report follows this general structure:
+
+| Section | What It Contains | How to Use |
+| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- |
+| Executive Summary | Quantitative Score, Expert Assessment, production readiness grade | Determine overall severity — 🔴 NOT READY means critical issues exist |
+| Migration Overview table | Per-category counts (Tables, Views, Procs, Triggers, Columns, Constraints, Indexes, Type Mappings) with Pass/Warn/Fail rates | Identify which categories have failures |
+| Issue Clusters table | Severity (🔴🟠🟡🟢), cluster description, score deduction | Prioritize fixes — work critical clusters first |
+| Schema Loading Notes | Objects that failed to load due to missing dependencies vs true migration issues | Distinguish load failures from conversion defects |
+| What Needs Immediate Attention | Detailed per-cluster breakdown with affected objects, risk description, and recommended SQL fixes | Primary source of fix instructions |
+| Recommended Action Plan | Ordered steps: 🔴 Before testing → 🟡 Before go-live → 🟢 Post-migration | Follow this sequence when applying fixes |
+| What We Could Not Verify | Items not tested (runtime correctness, data accuracy, performance, collation) | Flag as remaining risks after fixes |
+
+---
+
+## Progress Reporting
+
+Maintain a two-tier progress display: a summary table for overall status, plus an active context block.
+
+### Progress Table
+
+```
+## Conversion Progress
+
+| # | Severity | Cluster | Status |
+|---|----------|---------|--------|
+| 1 | 🔴 Crit | <cluster> (<N> objects) | ✅ Fixed |
+| 2 | 🟠 High | <cluster> (<N> objects) | 🔄 M/N |
+| 3 | 🟠 High | <cluster> (<N> objects) | ⏳ Pending |
+| 4 | 🟡 Med | <cluster> (<N> objects) | ℹ Advisory |
+
+### Currently fixing:
+`<object>` — <issue summary>
+
+### Last completed:
+`<object>` — <fix summary>
+```
+
+### Status Values
+
+| Status | Meaning |
+| ---------------------------- | --------------------------------------------------------- |
+| `⏳ Pending` | Cluster not yet started |
+| `🔄 M/N` | In progress — M of N objects fixed so far |
+| `✅ Fixed` | All objects in cluster resolved |
+| `⚠ Partial (M/N, K skipped)` | Some objects could not be fixed — requires user attention |
+| `ℹ Advisory` | Medium/low severity — presented to user, not auto-fixed |
+
+### Per-Fix Chat Response
+
+For every individual fix, the chat response MUST include:
+
+**1. Fix summary with line reference:**
+
+```
+✅ Fixed `schema.object_name` (line N in working copy)
+Problem: <problem from report>
+Fix applied: <description of the change>
+Syntax validation: ✅ Passed (validated via PostgreSQL extension)
+```
+
+**2. Updated progress table** (as shown above).
+
+**3. Approval prompt — STOP and wait:**
+
+```
+Approve this fix? [Yes / No / Modify]
+```
+
+Do NOT proceed to the next fix until the user responds.
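+
+A filled-in instance of that response might read as follows — the object, line number, and issue are illustrative:
+
+```
+✅ Fixed `dbo.usp_GetOrders` (line 412 in working copy)
+Problem: OUTPUT parameter converted without INOUT mode, so callers receive no value
+Fix applied: Changed parameter p_total to INOUT p_total NUMERIC
+Syntax validation: ✅ Passed (validated via PostgreSQL extension)
+```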
+ +After all critical/high clusters are done, print a final summary: + +``` +🔴 Critical: X/X done · 🟠 High: Y/Y done · Overall: Z/Z clusters resolved +``` + +--- + +## Known Limitations + +- The IDE agent applies fixes based on the report's recommendations — it does not independently discover new issues +- Runtime correctness of fixed procedures cannot be verified without production-equivalent test data +- Application code changes (e.g., cursor-based result sets, connection strings) are outside scope — flag for user +- Collation differences (MSSQL case-insensitive vs PostgreSQL case-sensitive) need separate testing +- Performance validation requires production-equivalent load testing +- CLR stored procedures, linked server queries, and SSIS packages require architectural changes beyond SQL file fixes + +--- + +## Example Requirements + +``` +## Requirement 1: Fix All Critical Issues + +**User Story:** As a DBA, I want all critical-severity issues from the validation report +resolved in the PostgreSQL file so that the schema is safe for production. +**Acceptance Criteria:** + +1. WHEN fixes are applied, ALL 🔴 critical clusters from the report SHALL be resolved +2. WHEN fixes are applied, the PostgreSQL file SHALL be syntactically valid +3. WHEN fixes are applied, previously converted logic SHALL NOT be altered + +## Requirement 2: Fix All High-Severity Issues + +**User Story:** As a developer, I want all high-severity issues from the validation report +resolved so that runtime failures are eliminated. +**Acceptance Criteria:** + +1. WHEN fixes are applied, ALL 🟠 high clusters from the report SHALL be resolved +2. WHEN fixes are applied, semantic behavior SHALL match the original source SQL +3. WHEN fixes are applied, the fix SHALL follow the report's recommended approach +``` + +## Example Tasks + +``` +### From-Scratch Tasks + +- [ ] 1. Authenticate and set up AWS Transform job + - [ ] 1.1 Authenticate to AWS Transform using cookie auth + - [ ] 1.2 Create workspace and job (or use existing) + - [ ] 1.3 Upload source SQL artifact (.sql or .zip as-is) + - [ ] 1.4 Send artifact URI to agent via chat +- [ ] 2. Monitor conversion and review assessment + - [ ] 2.1 Poll messages, worklogs, and HITL tasks for progress + - [ ] 2.2 Download and review assessment report PDFs and CSVs + - [ ] 2.3 Trigger schema conversion when user is ready + - [ ] 2.4 Poll for conversion completion + - [ ] 2.5 Retrieve conversion artifacts (ZIP or individual files) + +### Handoff Tasks + +- [ ] 1. Collect AWS Transform job context and inputs + - [ ] 1.1 Prompt user for workspaceId, jobId, and agentInstanceId + - [ ] 1.2 List all artifacts from AWS Transform job + - [ ] 1.3 Download ATX_CONVERTED_DB_ARTIFACT ZIP or individual artifacts + - [ ] 1.4 If no artifacts found, ask user for local file paths + +### Common Tasks (both workflows) + +- [ ] 3. Check PostgreSQL extension prerequisite +- [ ] 4. Parse report and prioritize + - [ ] 4.1 Extract all issue clusters by severity + - [ ] 4.2 Identify critical and high-severity clusters + - [ ] 4.3 Separate load failures from true migration defects + - [ ] 4.4 Create working copy of converted PG file +- [ ] 5. Apply critical-severity fixes + - [ ] 5.1 For each 🔴 critical cluster, validate proposed fix via PostgreSQL extension, then apply +- [ ] 6. Apply high-severity fixes + - [ ] 6.1 For each 🟠 high cluster, validate proposed fix via PostgreSQL extension, then apply +- [ ] 7. Present advisory items + - [ ] 7.1 Summarize 🟡 medium and 🟢 low clusters for user review +- [ ] 8. 
Validate and upload + - [ ] 8.1 Confirm all critical and high clusters are addressed + - [ ] 8.2 Present diff of all changes + - [ ] 8.3 Create ZIP and upload to AWS Transform artifact store +- [ ] 9. Monitor validation and continue + - [ ] 9.1 Poll AWS Transform job messages for validation progress + - [ ] 9.2 Download new validation report + - [ ] 9.3 Ask user: apply another round of fixes or deploy +``` diff --git a/plugins/aws-transform/skills/aws-transform/references/tools.md b/plugins/aws-transform/skills/aws-transform/references/tools.md new file mode 100644 index 00000000..3dd2472e --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/tools.md @@ -0,0 +1,161 @@ +# Table of Contents + +- [Jobs](#jobs) +- [Connectors](#connectors) +- [HITL Tasks (Collaborator Requests)](#hitl-tasks-collaborator-requests) + - [Building the content payload](#building-the-content-payload) + - [Severity](#severity) + - [TOOL_APPROVAL tasks (separate flow)](#tool_approval-tasks-separate-flow) +- [AWS Transform CLI Commands](#aws-transform-cli-commands) + - [Core Commands](#core-commands) + - [Execution Flags](#execution-flags) +- [Troubleshooting (Non-Auth)](#troubleshooting-non-auth) + - [Job Issues](#job-issues) + - [Connector Issues](#connector-issues) + - [HITL Task Issues](#hitl-task-issues) + - [MCP Server Issues](#mcp-server-issues) + - [Rollback](#rollback) + +--- + +## Tools + +Canonical tool names, parameters, and per-resource requirements come from the MCP server's `tools/list` response — read the tool descriptions directly. This file covers cross-tool workflows, behavioral rules, and the AWS Transform CLI, which the schemas don't provide. For auth behavior, see [auth](auth.md). + +## Jobs + +`create_job` creates AND starts the job in one call — no separate `control_job` start is needed. Only orchestrator agents can create jobs; discover them via `list_resources resource="agents" agentType="ORCHESTRATOR_AGENT"`. Use `jobType` OR `orchestratorAgent`, not both — if one fails, retry with the other. + +## Connectors + +Connector lifecycle: + +``` +PENDING → ACTIVE → COMPLETED + ↓ ↓ +REJECTED FAILED +``` + +Connectors start `PENDING`. An AWS admin must approve via the verification link returned by `create_connector`. Do NOT proceed with dependent tasks until the user confirms admin approval. Check status with `get_resource resource="connector"`. + +## HITL Tasks (Collaborator Requests) + +**CRITICAL: Never auto-submit. Always present to the user first.** + +Workflow for a regular HITL task: + +1. `list_resources resource="tasks"` — find tasks needing human action. Three human-actionable states: + - `AWAITING_HUMAN_INPUT` — first input required from the user + - `IN_PROGRESS` — user has engaged but not submitted + - `AWAITING_APPROVAL` — submitted for admin/approver decision + Surface all three to the user — the person in front of you may be the approver. Whether a task blocks the job depends on its `blockingType` (`BLOCKING` vs `NON_BLOCKING`); surface the state regardless, but let the user know when a blocking task is holding progress. +2. `get_resource resource="task"` — returns two top-level objects you must read together: + - `task` — enriched with `_outputSchema`, `_responseTemplate`, `_responseHint`, `uxComponentId`, `severity`. Tells you the **submission shape**. + - `agentArtifactContent` — downloaded from S3. 
Tells you the **user-visible context**: current field values, items to select from, component-specific extras (toggles, feature flags, resource identifiers) that may not appear in `_outputSchema`. +3. Present the task to the user. Two things must be surfaced: + - The content of `agentArtifactContent` — this is what the user sees in the web UI. Do not paraphrase it down to a single field. + - Any fields in `_outputSchema` the user needs to provide. +4. Follow `_responseHint` — it is authoritative per `uxComponentId`. If the hint says "Only provide fields you want to change" or similar merge language, the server merges your payload onto the existing artifact. Send **only fields the user explicitly changed** — omit any field the user confirmed as-is or did not modify. Including unchanged fields violates the merge contract and can produce unintended overwrites. +5. Never silently substitute a value the user didn't see. This applies especially to boolean toggles and opt-ins — surface the current value and ask, even if the server has a safe default. +6. Wait for the user's decision. +7. Before calling `complete_task`, show the full payload you are about to submit and confirm. +8. `complete_task` — submit with the user's response. + +### Building the `content` payload + +Shape the payload based on the task's `_outputSchema` (returned by `get_resource resource="task"`): + +- **TextInput** — `{"data": "your text"}` +- **AutoForm** — `{"fieldName": "value", ...}` — flat JSON matching the schema +- **File upload** — call `upload_artifact` first, then pass the returned `artifactId` in `content` +- **Display-only** — omit `content` (the server submits `{}`) + +### Severity + +- `STANDARD` — APPROVE/REJECT submits immediately. +- `CRITICAL` — non-admins must use `SEND_FOR_APPROVAL`; admins can APPROVE directly. + +### TOOL_APPROVAL tasks (separate flow) + +When `list_resources resource="tasks" category="TOOL_APPROVAL"` returns items, use `list_tool_approvals` / `approve_tool_approval` / `deny_tool_approval` — NOT `complete_task`. The backend rejects `complete_task` for TOOL_APPROVAL tasks. + +--- + +## AWS Transform CLI Commands + +The CLI uses standard AWS credentials (see [auth](auth.md)). Always set `AWS_REGION`. 
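+
+For example, a typical discover-then-run sequence (the region value and repository path are illustrative; the flags are documented below):
+
+```
+export AWS_REGION=us-east-1                # region where your Transform resources live
+atx custom def list --json                 # discover available transformation definitions
+atx custom def exec -n <name> -p . -x -t   # run one non-interactively on the current repo
+```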
+ +### Core Commands + +| Action | Command | +| ------------------------------------- | ----------------------------------------------------------------------- | +| List transformation definitions | `atx custom def list --json` | +| Execute transformation definition | `atx custom def exec -n -p -x -t` | +| Get transformation definition details | `atx custom def get -n ` | +| Save draft | `atx custom def save-draft -n --description "" --sd ` | +| Publish | `atx custom def publish -n --description "" --sd ` | +| Delete transformation definition | `atx custom def delete -n ` | +| Interactive mode | `atx` | +| Resume conversation | `atx --resume` | +| Update CLI | `atx update` | + +### Execution Flags + +| Flag | Description | +| ----------------------------------- | ------------------------------------------------------------------ | +| `-n` / `--transformation-name` | Transformation definition name (from `atx custom def list --json`) | +| `-p` / `--code-repository-path` | Path to code repo (`.` for current dir) | +| `-x` / `--non-interactive` | No user prompts | +| `-t` / `--trust-all-tools` | Auto-approve tool executions (required with `-x`) | +| `-c` / `--build-command` | Build/validation command (optional, auto-detected) | +| `-d` / `--do-not-learn` | Prevent knowledge item extraction | +| `-g` / `--configuration` | Config file (`file://config.yaml`) or inline (`'key=val'`) | +| `--tv` / `--transformation-version` | Specific transformation definition version | + +**NEVER hardcode transformation definition names.** Always fetch from `atx custom def list --json`. + +--- + +## Troubleshooting (Non-Auth) + +### Job Issues + +| Problem | Resolution | +| --------------------- | ----------------------------------------------------------------------------------------------------------------- | +| Job stuck in RUNNING | Check `list_resources resource="tasks"` for pending HITL tasks | +| Job fails immediately | Check `get_resource resource="job"` for `errorDetails`. Common: connectors not set up or not ACTIVE | +| Job type not found | Use `list_resources resource="agents"` to discover available agents. Try `orchestratorAgent` instead of `jobType` | +| Missing artifacts | Job may not be complete — check status first | + +### Connector Issues + +| Problem | Resolution | +| ------------------------ | ---------------------------------------------------------------------------------------------------------------------- | +| Stuck in PENDING | Admin hasn't approved — share verification link | +| FAILED | Check IAM role permissions. Trust policy must allow `transform.amazonaws.com` | +| `accept_connector` fails | Needs AWS Credentials available in the environment; see the tool's description and `get_status` for current auth state | + +### HITL Task Issues + +| Problem | Resolution | +| ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| `VALIDATION_ERROR` on submit | Check `_outputSchema` from `get_resource resource="task"`. Don't wrap in `{"data":...}` or `{"properties":...}` unless schema requires it | +| Empty `agentArtifactContent` | Agent still generating. Check worklogs, wait 30-60s, retry | +| File upload fails | Verify path exists. Specify `fileType` explicitly | + +### MCP Server Issues + +| Problem | Resolution | +| ----------------------- | -------------------------------------------------------------------------- | +| Tools not available | Restart your IDE. 
+| MCP server not starting | Path in `mcp.json` may be relative — it must be absolute. Check that the binary exists |
+| Region mismatch         | Ensure `region` in `configure` matches the Region where your resources are             |
+
+### Rollback
+
+Transform works on copies — original code is untouched until you apply changes.
+
+- **Haven't applied changes:** Safe. Delete the job/artifacts if unwanted.
+- **Applied via git:** `git log` to find the pre-transform commit, then `git checkout <commit>` or `git revert <commit>`.
+- **Code connector branch:** Transform creates a separate branch. Delete it with `git branch -D atx/transform-<id>`.
+- **Manually copied files:** Use `git checkout HEAD~1 -- path/to/file` or IDE local history.
diff --git a/plugins/aws-transform/skills/aws-transform/references/vmware-containerization.md b/plugins/aws-transform/skills/aws-transform/references/vmware-containerization.md
new file mode 100644
index 00000000..09667658
--- /dev/null
+++ b/plugins/aws-transform/skills/aws-transform/references/vmware-containerization.md
@@ -0,0 +1,132 @@
+# VMware Containerization
+
+> **Last Updated:** 2026-05-10
+
+## Table of Contents
+
+- [Capabilities](#capabilities)
+- [Starting Workflow](#starting-workflow)
+- [Agents and Transforms](#agents--transforms)
+- [Decision Points](#decision-points)
+- [Example Requirements](#example-requirements)
+- [Example Tasks](#example-tasks)
+- [Known Limitations](#known-limitations)
+
+## Capabilities
+
+Containerize applications from VMware environments for deployment on Amazon ECS or Amazon EKS using AWS Transform's AI-powered agent. Analyzes source code, generates Docker artifacts, builds and publishes container images, and generates Infrastructure as Code for container orchestration platforms.
+
+- Source code analysis → Dockerfiles + configuration files (AI-generated)
+- Container image building → Amazon ECR (with automated vulnerability scanning)
+- Infrastructure as Code generation → Amazon EKS (Helm charts) or Amazon ECS (Terraform modules)
+- Private dependency support → AWS CodeArtifact (Maven, PyPI, npm) + private ECR base images
+- Iterative test deployment → validate before production cutover
+- Standalone containerization or end-to-end VMware migration with containerize strategy
+
+## Starting Workflow
+
+1. **Review security disclaimer** — accept before proceeding
+2. **Clone source code** — Git repository (CodeConnections) or zip upload
+3. **Containerize** — AI analyzes source, generates Docker artifacts
+4. **Review artifacts** — approve generated Dockerfiles and configuration (see the sketch after the questions below)
+5. **Publish images** — build and push to Amazon ECR with vulnerability scanning
+6. **Generate IaC** — EKS (Helm charts) or ECS (Terraform modules)
+7. **Deploy test** — validate before production
+8. **Clean up test** — tear down test resources
+9. **Deploy cutover** — production deployment
+
+**Key questions to ask user:**
+
+- "Do you want standalone containerization or end-to-end migration with containerization?"
+- "How would you like to provide your source code — Git repository or zip upload?"
+- "Where do you want to deploy — Amazon EKS or Amazon ECS?"
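+
+A minimal sketch of surfacing that artifact-review step (step 4), assuming the `aws-transform-mcp` wrapper tools (`list_resources`, `get_resource`, `complete_task`) and the HITL field names (`taskId`, `agentArtifactContent`) described in this skill — verify both against the live tool schemas before relying on it:
+
+```python
+# Sketch: surface the Dockerfile-review HITL task to the user.
+# Tool and field names are assumptions drawn from this skill's HITL guidance.
+tasks = list_resources(resource="tasks", workspaceId="<workspace-id>", jobId="<job-id>")
+for summary in tasks:
+    task = get_resource(resource="task", taskId=summary["taskId"])
+    # Show the full generated artifact content — do not paraphrase it down to one field.
+    print(task["agentArtifactContent"])
+    # After the user approves or requests changes, submit their decision, e.g.:
+    # complete_task(taskId=summary["taskId"], response="APPROVE", content={...})
+```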
+ +## Agents & Transforms + +| Agent | How to Discover | Purpose | +| ------------------------------------- | ------------------------------------------ | -------------------------------------------------------------------------------- | +| VMware Migration Agent (orchestrator) | `list_resources` with `resource: "agents"` | Orchestrates containerization workflow within a VMware migration job | +| Containerization sub-agent | _(invoked by orchestrator)_ | Source code analysis, Docker artifact generation, image building, IaC generation | + +**Discover the agent dynamically:** + +```python +list_resources(resource="agents") +create_job(workspaceId="...", jobName="VMware Containerization", + objective="Containerize application source code for deployment on ECS/EKS", + orchestratorAgent="") +``` + +## Decision Points + +| Step | Question to Ask User | Options | +| -------------------- | ------------------------------------------------------ | --------------------------------------------------------------- | +| Mode | "Standalone containerization or end-to-end migration?" | Standalone / End-to-end migration | +| Source code | "How would you like to provide your source code?" | Git repository (CodeConnections) / Zip upload | +| Artifact review | "Do the generated Docker artifacts look correct?" | Approve / Request changes | +| Private dependencies | "Does your application use private dependencies?" | Configure CodeArtifact / Configure private ECR base images / No | +| Deployment target | "Where do you want to deploy?" | Amazon EKS (Helm charts) / Amazon ECS (Terraform modules) | +| Test validation | "Has the test deployment been validated?" | Proceed to cutover / Re-deploy test / Modify configuration | +| Cutover | "Ready to deploy production infrastructure?" | Deploy cutover / Go back to test | + +## Example Requirements + +``` +## Requirement 1: Source Code Containerization + +**User Story:** As a platform engineer, I want my VMware-hosted application containerized +so that it can run on Amazon EKS or Amazon ECS. +**Acceptance Criteria:** + +1. WHEN containerization completes, a Dockerfile SHALL be generated for each application component +2. WHEN containerization completes, container images SHALL be published to Amazon ECR +3. WHEN containerization completes, vulnerability scanning SHALL report no critical findings + **Handled by:** AWS Transform VMware Migration Agent (Containerization sub-agent) + +## Requirement 2: Infrastructure as Code Generation + +**User Story:** As a DevOps engineer, I want deployment infrastructure generated +so that I can deploy containerized applications to my target platform. +**Acceptance Criteria:** + +1. WHEN IaC generation completes for EKS, Helm charts SHALL be generated with security scanning passed +2. WHEN IaC generation completes for ECS, Terraform modules SHALL be generated with validation passed + **Handled by:** AWS Transform VMware Migration Agent (Containerization sub-agent) +``` + +## Example Tasks + +``` +- [ ] 1. Setup + - [ ] 1.1 Create VMware migration job + - [ ] 1.2 Select containerization mode + - [ ] 1.3 Review and accept security disclaimer +- [ ] 2. Source code provisioning + - [ ] 2.1 Provide source code (Git repo or zip upload) + - [ ] 2.2 Configure private dependencies (if applicable) +- [ ] 3. Containerization + - [ ] 3.1 AI agent analyzes source code + - [ ] 3.2 Review generated Docker artifacts + - [ ] 3.3 Approve code changes +- [ ] 4. 
Image publishing + - [ ] 4.1 Build and publish to Amazon ECR + - [ ] 4.2 Review vulnerability scan results +- [ ] 5. IaC generation + - [ ] 5.1 Select deployment target (EKS or ECS) + - [ ] 5.2 Generate and review IaC +- [ ] 6. Test deployment + - [ ] 6.1 Deploy and validate test infrastructure + - [ ] 6.2 Clean up test infrastructure +- [ ] 7. Production cutover + - [ ] 7.1 Deploy cutover infrastructure + - [ ] 7.2 Confirm migration complete +``` + +## Known Limitations + +- Containerization is accessed through a VMware migration job — cannot be started independently +- Individual source files must not exceed 1 GB; total source code must not exceed 8 GB +- Private dependencies require pre-configured AWS CodeArtifact repositories or private ECR base images +- EKS deployments require an existing cluster or permissions to create one +- AI-generated Dockerfiles may need manual tuning for complex builds +- TOOL_APPROVAL tasks (image publishing, deployments, cleanup) must be approved in the web UI — cannot be completed via API diff --git a/plugins/aws-transform/skills/aws-transform/references/vmware-landing-zone.md b/plugins/aws-transform/skills/aws-transform/references/vmware-landing-zone.md new file mode 100644 index 00000000..81d3dda9 --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/vmware-landing-zone.md @@ -0,0 +1,142 @@ +# Landing Zone + +> **Last Updated:** 2026-05-10 + +## Table of Contents + +- [Capabilities](#capabilities) +- [Starting Workflow](#starting-workflow) +- [Agents and Transforms](#agents--transforms) +- [Decision Points](#decision-points) +- [Example Requirements](#example-requirements) +- [Example Tasks](#example-tasks) +- [Known Limitations](#known-limitations) + +## Capabilities + +Build an AWS landing zone as the foundation for your migration project. AWS Transform analyzes your migration inventory and business requirements to recommend an Organizational Unit (OU) and account structure, apply Service Control Policies (SCPs), and generate or deploy the infrastructure as code (IaC). + +The landing zone agent operates in two phases: + +1. **Foundation setup** — Establish the core landing zone: AWS Control Tower, foundational OUs (Security, Infrastructure, Sandbox, Workloads), and core accounts (Log Archive, Audit). +2. **Workload account design** — Design and create workload OUs and accounts based on migration waves, business units, and environment separation requirements. + +Supports both **greenfield** (no existing landing zone) and **brownfield** (existing OUs and accounts already deployed) environments. + +## Starting Workflow + +1. **Connector setup** — Create a target AWS account connector pointing to the organization management account in the Control Tower home Region +2. **Confirm organization context** — AWS Transform presents the management account ID and target Region for confirmation +3. **Foundation setup** — Design and deploy (or generate IaC for) the core OU structure, Control Tower initialization, and SCPs +4. **Workload account design** — Answer discovery questions; AWS Transform proposes OU and account structure based on migration waves and business requirements +5. **Workload deployment** — Deploy workload OUs, accounts, and SCPs, or download IaC artifacts + +**Key questions to ask user:** + +- "Do you already have AWS Control Tower or AWS Organizations set up (brownfield), or are we starting from scratch (greenfield)?" +- "What is your Control Tower home Region?" +- "What email prefix and domain for account email addresses?" 
+- "How many business units or teams will use AWS?" +- "Do you have compliance requirements that affect account isolation?" + +## Agents & Transforms + +| Agent | How to Discover | Purpose | +| ------------------ | ------------------------------------------ | ---------------------------------------------------------------------------- | +| Landing zone agent | `list_resources` with `resource: "agents"` | Foundation setup, workload account design, SCP configuration, IaC generation | + +**Discover the agent dynamically:** + +```python +list_resources(resource="agents") +create_job(workspaceId="...", jobName="Landing Zone Setup", + objective="Build AWS landing zone foundation and workload account structure", + orchestratorAgent="") +``` + +## Decision Points + +| Decision | Options | When to Ask | +| ------------------------------ | ---------------------------------------------------------------------- | ----------------------------------------- | +| Greenfield vs brownfield | Greenfield (new) / Brownfield (existing OUs/accounts) | Start — determines what gaps to fill | +| Deployment method (foundation) | Deploy for me / I'll deploy on my own / Design workload accounts first | Phase 1 Step 5 | +| Deployment method (workload) | Deploy for me / I'll deploy on my own | Phase 2 Step 10 | +| IaC format (if self-deploying) | AWS CDK / Landing Zone Accelerator (LZA) | When user selects "I'll deploy on my own" | +| Foundation OU customization | Accept recommended structure / Customize OUs and accounts | Phase 1 Step 1 | +| SCP selection | Which SCPs to apply and to which OUs | Phase 1 Step 4 and Phase 2 Step 9 | +| Account strategy | Single app per account / Grouped / Environment-based | Phase 2 discovery | +| Environment separation | Separate accounts per env / Shared accounts | Phase 2 discovery | +| Compliance sub-OUs | Regulated / Standard separation | Phase 2 — if frameworks identified | + +## Example Requirements + +``` +## Requirement 1: Foundation Setup + +**User Story:** As a cloud platform engineer, I want the core landing zone foundation deployed +so that governance controls, centralized logging, and account isolation are in place before any workloads arrive. +**Acceptance Criteria:** + +1. WHEN foundation setup completes, Control Tower SHALL be initialized with Security OU, Log Archive account, and Audit account +2. WHEN foundation setup completes, Infrastructure, Sandbox, and Workloads OUs SHALL exist in the organization +3. WHEN SCPs are applied, member accounts SHALL be unable to exceed the boundaries defined by the SCPs + **Handled by:** AWS Transform Landing Zone Agent + +## Requirement 2: Workload Account Structure + +**User Story:** As a cloud platform engineer, I want workload OUs and accounts designed around my migration waves +so that servers can be migrated into correctly isolated accounts without splitting waves. +**Acceptance Criteria:** + +1. WHEN workload structure is proposed, ALL servers in a migration wave SHALL map to the same target account +2. WHEN environment isolation is required, Workloads/Production and Workloads/Non-Production sub-OUs SHALL be created +3. WHEN sensitive-data applications are identified, they SHALL each receive a dedicated account + **Handled by:** AWS Transform Landing Zone Agent + +## Requirement 3: IaC Generation + +**User Story:** As a platform engineer, I want IaC artifacts generated for the landing zone +so that I can review, version-control, and deploy the infrastructure through my own pipeline. +**Acceptance Criteria:** + +1. 
WHEN IaC generation completes, artifacts SHALL be available in CDK (TypeScript) or LZA (YAML) format +2. WHEN artifacts are downloaded, a checksum SHALL be provided to verify file integrity +3. WHEN LZA format is selected, the generated YAML SHALL be compatible with LZA Universal Configuration version 1.1.0 + **Handled by:** AWS Transform Landing Zone Agent +``` + +## Example Tasks + +``` +- [ ] 1. Connector setup + - [ ] 1.1 Create target AWS account connector for the management account + - [ ] 1.2 Confirm connector Region matches Control Tower home Region + - [ ] 1.3 Approve connector via AWS Console verification link +- [ ] 2. Foundation design + - [ ] 2.1 Review recommended foundation OU structure + - [ ] 2.2 Confirm email prefix and domain + - [ ] 2.3 Review and select SCPs +- [ ] 3. Foundation deployment + - [ ] 3.1 Choose deployment method + - [ ] 3.2 Submit deployment for approval + - [ ] 3.3 Confirm OUs and accounts created +- [ ] 4. Workload account design + - [ ] 4.1 Check for migration planning artifacts + - [ ] 4.2 Answer discovery questions + - [ ] 4.3 Review proposed workload structure + - [ ] 4.4 Select workload SCPs +- [ ] 5. Workload deployment + - [ ] 5.1 Choose deployment method + - [ ] 5.2 Submit for approval or download IaC + - [ ] 5.3 Confirm all OUs and accounts created +``` + +## Known Limitations + +- Once an OU or account is deployed, it cannot be removed through the landing zone agent +- The Security OU is managed by Control Tower — cannot be modified through this tool +- All servers in a migration wave must go to the same account — waves cannot be split across accounts +- The connector Region must match the Control Tower home Region and IAM Identity Center Region +- LZA deployment requires the AWS Transform account and LZA installation to be in the same AWS Organization +- SCPs cannot grant permissions — they only restrict what IAM policies allow +- Brownfield environments may require remediation before Control Tower can be initialized diff --git a/plugins/aws-transform/skills/aws-transform/references/vmware-network.md b/plugins/aws-transform/skills/aws-transform/references/vmware-network.md new file mode 100644 index 00000000..93d83fdc --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/vmware-network.md @@ -0,0 +1,490 @@ +# VMware Network Migration + +> **Last Updated:** 2026-05-05 + +## Table of Contents + +- [Capabilities](#capabilities) +- [Workflow](#workflow) +- [Agents and Transforms](#agents--transforms) +- [Decision Points](#decision-points) +- [Hub and Spoke Architecture](#hub-and-spoke-architecture) +- [VPC and Subnet Operations](#vpc-and-subnet-operations) +- [Multi-Account Considerations](#multi-account-considerations) +- [Deployment Approvals](#deployment-approvals) +- [Known Limitations](#known-limitations) +- [Troubleshooting](#troubleshooting) +- [Example Requirements](#example-requirements) +- [Example Tasks](#example-tasks) + +## Capabilities + +Migrate on-premises network infrastructure to AWS. Translates source environment configuration into AWS-equivalent network resources — VPCs, subnets, security groups, NAT gateways, transit gateways, elastic IPs, routes, and route tables. 
+ +- Network segments → AWS VPCs + subnets +- Firewall rules → AWS Security Groups +- Routing → AWS Transit Gateway (Hub and Spoke) or VPC route tables (Isolated) +- Review and modify generated network configuration before deployment +- VPC operations: rename, resize, merge, split, delete, exclude/include, change IP address +- Subnet operations: resize, delete, change IP address +- Security group referencing (within-VPC and cross-VPC/cross-account) +- IP migration: keep existing ranges or update to new CIDRs, static or DHCP assignment +- Guided network recommendations (naming, right-sizing, consolidation, security review) +- Creates network diagrams (PNG and Mermaid) +- Custom tags, MAP 2.0 tags, and automatic launch/replication tags +- Deploy the network using AWS CloudFormation with approval workflow +- Run reachability analysis on deployed VPCs +- Delete deployed network resources (rollback) +- Generates CloudFormation, CDK (TypeScript), Terraform, or Landing Zone Accelerator IaC + +### Supported Source Formats + +| Format | Produces | +| ------------------------ | ------------------------------------------------------ | +| VMware NSX export (.zip) | VPCs + subnets + security groups | +| Cisco ACI | VPCs + subnets + security groups | +| Palo Alto Networks | VPCs + subnets + security groups | +| Fortinet FortiGate | VPCs + subnets + security groups | +| RVTools (.xlsx/.zip) | VPCs + subnets only (no security groups) | +| modelizeIT | VPCs + subnets + security groups (hybrid environments) | + +### Topology Options + +| Topology | When to use | +| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | +| **Hub and Spoke** | Multi-tier apps, cross-VPC traffic, shared services, centralized egress/ingress. Creates Transit Gateway + Inspection/Outbound/Inbound VPCs. | +| **Isolated VPCs** | Independent workloads with no cross-VPC communication. Each VPC gets its own internet gateway. | + +### Security Group Strategies + +| Strategy | When to use | +| ------------ | ----------------------------------------------------------------------------------------------------------- | +| **MAP** | Static IP environments. Translates source firewall rules to SGs with IP-based rules. | +| **MAP_DHCP** | Dynamic IP / DHCP environments. Produces broader rules to accommodate IP changes. Use with Transit Gateway. | +| **SKIP** | Manual SG configuration post-migration. Use when source rules are too complex or need redesign. | + +--- + +## Workflow + +The network migration workflow is one sequence across three phases — Phase 1's 5 steps, Phase 2's 9 steps, and Phase 3's 4 steps run in order (18 steps total). Each phase's numbering restarts at 1, but the phases execute sequentially without gaps. + +### Phase 1: Target Account Setup + +1. Select "Network Migration" workflow +2. Confirm plan → "Proceed" +3. Select migration type: Single-account or Multi-account +4. MAP agreement (optional) +5. Configure connector (HITL task) — must include `connectorType` in payload + +### Phase 2: Network Migration + +1. Upload source file (NSX export, RVTools, etc.) — requires `planStepId` in `upload_artifact` +2. Select topology: Hub and Spoke or Isolated VPCs +3. Select security group strategy: MAP, MAP_DHCP, or SKIP +4. Agent presents configuration summary — **this is NOT the mapping step** +5. User confirms → mapping actually begins (2-5 min) +6. Guided Modernization or Direct Edits +7. 
Apply changes → IaC regeneration
+8. Done with network design
+9. Network diagram generation (image + Mermaid)
+
+### Confirmation Before Mapping
+
+After the user selects topology and security group strategy, the agent presents a configuration summary and asks for confirmation before starting the actual network mapping. Do NOT tell the user that mapping has started until the user confirms and the agent explicitly begins the mapping process.
+
+The sequence is:
+
+1. Select topology → agent acknowledges
+2. Select security group strategy → agent presents **configuration summary** with all selections
+3. User confirms "Yes, start network mapping" → mapping actually begins (2-5 min)
+
+The summary step is NOT the mapping step. Do not conflate them.
+
+### Polling and Waiting
+
+Two types of waits occur during the workflow:
+
+**Machine-gated steps** (mapping, job processing, agent responses):
+
+- Poll automatically and silently every 30 seconds
+- Do NOT ask the user for permission to poll
+- Only surface results to the user (completion, error, or progress update)
+
+**Human-gated steps** (connector approval, deployment approval):
+
+- Do NOT auto-poll — these depend on a human action that may take minutes or hours
+- Present the action needed (link, instructions) and ask the user to confirm when done
+- When user confirms → verify status once
+- If not yet complete → remind them what's needed
+
+### Phase 3: Deployment
+
+1. Tagging (optional)
+2. Deployment strategy: self-deploy or AWS Transform deploy
+3. Additional IaC format selection (CDK, Terraform, LZA)
+4. Download artifacts
+
+### Artifact Handling
+
+The job generates several artifact types. Each has different download behavior:
+
+| Artifact                                             | Location                                         | Download Behavior                                                                                                                                                                                                |
+| ---------------------------------------------------- | ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **Network diagram** (PNG, Mermaid)                   | Managed artifact store                           | **Always download automatically** — use `get_resource(resource="artifact", savePath=...)` to save to the user's Downloads directory (full absolute path). Never send the user to the web console for diagrams.     |
+| **IaC files** (CloudFormation, Terraform, CDK, LZA)  | Connector S3 bucket (`code_generation/` prefix)  | **Ask the user** — present the S3 console link, then ask if they want a local copy. If yes, run `aws s3 cp` recursively to a local directory (full absolute path).                                                 |
+| **Checksums**                                        | Inline in agent message (text)                   | **No download needed** — checksums are presented as text in the agent's response. Present them to the user as-is.                                                                                                  |
+
+**Diagram download (mandatory, via `aws-transform-mcp`):**
+
+> Note: `get_resource` is an `aws-transform-mcp` wrapper tool — not the platform's native `create_artifact_download_url` + `download_artifact` flow. It accepts `savePath` (full file path), not `output_dir`.
+
+```python
+get_resource(resource="artifact", artifactId="<artifact-id>", savePath="/Users/<username>/Downloads/network_diagram.png")
+```
+
+**IaC download (user choice):**
+
+```bash
+aws s3 cp s3://<bucket-name>/code_generation/ /Users/<username>/Downloads/network-iac/ --recursive --region us-east-1
+```
+
+Extract the bucket name and prefix from the S3 link in the agent's response. Requires AWS CLI configured with access to the target account.
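+
+If you need to script that extraction, the following is a minimal sketch. The console-link shape it parses is an assumption — confirm it against the actual link in the agent's message before relying on it:
+
+```python
+# Sketch: pull the bucket and prefix out of an S3 console link so they can be
+# passed to `aws s3 cp`. The URL format below is an assumption.
+from urllib.parse import urlparse, parse_qs
+
+def bucket_and_prefix(console_url: str) -> tuple[str, str]:
+    # e.g. https://s3.console.aws.amazon.com/s3/buckets/<bucket>?prefix=code_generation/
+    parsed = urlparse(console_url)
+    bucket = parsed.path.rstrip("/").split("/")[-1]
+    prefix = parse_qs(parsed.query).get("prefix", [""])[0]
+    return bucket, prefix
+```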
+
+### Connector Setup
+
+The VMware Migration Agent creates a HITL task requesting connector configuration. Two steps are needed:
+
+**Step A: Create the connector** (via `aws-transform-mcp` server's `create_connector` tool):
+
+> Note: `aws-transform-mcp` is a wrapper MCP server that provides a unified interface over the platform's customer-facing and agentic APIs. Tool names and parameters may differ from the underlying platform tools.
+
+```python
+create_connector(
+    workspaceId="<workspace-id>",
+    connectorName="network-migration-connector",
+    connectorType="vmware_migration|infra_provisioning|2",  # Registered partner type in ConnectorTypeConfigurationProvider; not in base enum (S3|CODE_CONNECTION). Do not use arbitrary values.
+    configuration={"encryptionKeyArn": "<kms-key-arn>"},  # MCP wrapper accepts encryptionKeyArn (rejects kmsKeyArn); maps to platform's KMS_KEY_ARN_KEY internally
+    awsAccountId="<target-account-id>",
+    targetRegions=["us-east-1"]
+)
+```
+
+Resolve the KMS key ARN beforehand:
+
+```bash
+aws kms describe-key --key-id alias/aws/s3 --region us-east-1 --query 'KeyMetadata.Arn' --output text
+```
+
+After creation, the user must approve via the verification link returned in the response. This creates an IAM role with the correct permissions. Once the user confirms approval, verify status via `get_resource(resource="connector")`.
+
+**Prerequisites for approval:** The user must be logged into the target AWS account in their browser before opening the verification link. If not logged in, the console will redirect to a sign-in page.
+
+**Connector approval is human-gated** — do NOT auto-poll. Instead:
+
+1. Present the verification link to the user
+2. Ask the user to confirm once they've approved
+3. When user confirms → check status once via `get_resource(resource="connector")`
+4. If `ACTIVE` → proceed. If still `PENDING` → remind them to approve. If `REJECTED` → report error.
+
+**Step B: Complete the HITL task** (via `complete_task`):
+
+- Submit the connector ID and `connectorType: "vmware_migration|infra_provisioning|2"` in the task payload
+
+### File Upload
+
+Upload the source file (NSX export, RVTools, etc.) via the `aws-transform-mcp` server's `upload_artifact` tool with the `planStepId` parameter. The `planStepId` ties the artifact to the correct plan step so the agent can find it.
+
+**CRITICAL: The `planStepId` parameter is REQUIRED in the `upload_artifact` call. Without it, the file uploads but the agent cannot find it.**
+
+**CRITICAL: Resolve ALL paths to absolute form BEFORE calling `upload_artifact`.** The tool does NOT expand `~` or shell variables. `~/file.zip` will fail silently — use `/Users/<username>/file.zip` instead.
+
+> Note: `upload_artifact` is an `aws-transform-mcp` wrapper tool that combines the platform's 3-step flow (`create_artifact_upload_url` → `upload_artifact` → `complete_artifact_upload`) into a single call.
+
+```python
+upload_artifact(
+    workspaceId="<workspace-id>",
+    jobId="<job-id>",
+    content="/Users/<username>/Downloads/<source-file>.zip",  # MUST be absolute — no ~ or relative paths
+    fileName="<source-file>.zip",
+    fileType="ZIP",
+    categoryType="CUSTOMER_INPUT",
+    planStepId="<plan-step-id>"  # REQUIRED — get from list_resources(resource="plan")
+)
+```
+
+Get the `planStepId` from `list_resources(resource="plan")` — find the step for "Upload source file."
+
+**Path resolution (all tools):** Always resolve `~` and relative paths to their full absolute form before passing to any MCP tool parameter (`content`, `savePath`, etc.). Tools do NOT expand `~` or shell variables — they treat paths as raw strings.
+Example: `~/Downloads/file.zip` → `/Users/<username>/Downloads/file.zip`
+
+⚠️ **Without `planStepId`**, the file uploads successfully but the agent cannot find it in "User Uploads." The `planStepId` is required for the mapping engine to access the file.
+
+---
+
+## Agents & Transforms
+
+| Agent                                  | How to Discover                             | Purpose                                                   |
+| -------------------------------------- | ------------------------------------------- | ---------------------------------------------------------- |
+| VMware Migration Agent (orchestrator)  | `list_resources` with `resource: "agents"`  | Orchestrates full workflow, invokes NMA sub-agent          |
+| Network Migration Agent (NMA)          | _(sub-agent, not directly invocable)_       | File processing, mapping, modernization, IaC generation    |
+| MGN Backend                            | _(external service)_                        | Runs `StartNetworkMigrationMapping` and code generation    |
+
+**Discover the orchestrator agent dynamically** — do not hardcode agent names:
+
+```python
+list_resources(resource="agents")
+# Find the VMware network migration orchestrator from results
+```
+
+**Selection criteria** when multiple VMware agents exist: choose the agent whose `description` mentions "network migration" or "infra provisioning." Prefer the highest version number if multiple matches exist. Ignore agents marked as deprecated in their description. Present the matched agent to the user for confirmation before creating a job.
+
+### Job Creation
+
+```python
+create_job(
+    workspaceId="<workspace-id>",
+    jobName="Network Migration",
+    objective="Migrate on-premises network to AWS",
+    intent="Migrate on-premises network to AWS VPC",
+    orchestratorAgent="<discovered-agent-id>"
+)
+```
+
+### Monitoring
+
+```python
+# Check for agent messages (primary signal)
+list_resources(resource="messages", workspaceId="...", jobId="...")
+
+# Check for HITL tasks
+list_resources(resource="tasks", workspaceId="...", jobId="...")
+
+# Check plan step status
+list_resources(resource="plan", workspaceId="...", jobId="...")
+```
+
+---
+
+## Decision Points
+
+| Step            | Question to Ask User                                        | Options                                                          |
+| --------------- | ----------------------------------------------------------- | ----------------------------------------------------------------- |
+| Topology        | "Do your workloads need cross-VPC communication?"           | Hub and Spoke / Isolated VPCs                                     |
+| Security groups | "How should security groups be created?"                    | MAP (static IPs) / MAP_DHCP (dynamic IPs) / SKIP (manual setup)   |
+| Modernization   | "Would you like guided optimization or direct control?"     | Guided Modernization / Direct Edits / Skip                        |
+| Naming          | "What naming convention do you use for cloud resources?"    | User provides pattern (e.g., `{env}-{workload}-{type}`) or skip   |
+| Deployment      | "Do you want AWS Transform to deploy, or deploy yourself?"  | Let AWS Transform deploy / I'll deploy myself                     |
+| IaC format      | "Which additional IaC format do you need?"
| CDK Project / Terraform / LZA / None | + +### Guided Modernization Recommendations + +When user selects Guided Modernization, present each recommendation and ask: + +- Rename VPCs → user provides new names or skips +- Right-sizing (resize CIDRs) → user approves or skips +- Security posture review → informational, user acknowledges +- Remove unused subnets → user approves or keeps + +After all recommendations: "Apply all staged changes" → wait for IaC regeneration → "Done with network design" + +--- + +## Hub and Spoke Architecture + +When the user selects Hub and Spoke topology, the agent generates: + +### Generated VPCs + +| VPC | Purpose | +| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Spoke VPCs** | One per detected source network segment. Connected to Transit Gateway. | +| **Inspection VPC** | Routes all cross-VPC traffic through this VPC for inspection. User must deploy a firewall appliance (e.g., AWS Network Firewall) here post-migration. TGW attachment uses appliance mode (symmetric routing). | +| **Inbound VPC** | Handles public internet → workload traffic (north-south inbound). Includes internet gateway + public subnets across AZs. | +| **Outbound VPC** | Handles workload → public internet traffic (north-south outbound). Includes internet gateway + NAT gateways with elastic IPs per AZ. | + +### Transit Gateway Route Tables + +| Route Table | Associated With | Routes | Purpose | +| --------------- | ------------------------------------- | --------------------------------------- | --------------------------------------------------------------------- | +| **Uninspected** | Spoke VPCs, Inbound VPC, Outbound VPC | `0.0.0.0/0` → Inspection VPC attachment | Default association table. Forces all traffic through inspection. | +| **Inspected** | Inspection VPC | Propagated routes from all spoke VPCs | Default propagation table. Lets inspected traffic reach destinations. | + +### Traffic Flow + +1. Spoke VPC → Transit Gateway (default route `0.0.0.0/0`) +2. Uninspected route table → Inspection VPC +3. Inspection VPC forwards traffic back to Transit Gateway (firewall appliance inspects here if deployed) +4. Inspected route table → destination spoke VPC (via propagated routes) + +Outbound internet: Inspected table routes to Outbound VPC → NAT gateway → internet gateway. +Inbound internet: Internet gateway → Inbound VPC → same inspection path → spoke VPC. + +**Note:** Cross-VPC traffic routes through Inspection VPC but is not inspected until the user deploys a firewall appliance (e.g., AWS Network Firewall) there. + +For multi-account deployments, Transit Gateway is shared across accounts via AWS Resource Access Manager (RAM). + +--- + +## VPC and Subnet Operations + +Operations available during the review and optimization phase (Guided Modernization or Direct Edits). + +### VPC Operations + +| Operation | What It Does | Constraints | +| ------------- | ---------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Rename** | Change VPC name | — | +| **Resize** | Change CIDR prefix length | Must be /16–/28. Subnets must fit within new CIDR. SG rules matching old CIDR auto-update. 
| +| **Change IP** | Change base IP, keep prefix length | Subnets shift by same offset. SG rules matching old CIDR auto-update. | +| **Merge** | Combine two VPCs into one | No subnet CIDR overlap. Merged result must be /16 or smaller (i.e., prefix ≥ /16). Same account (multi-account). | +| **Split** | Divide VPC into two | Exactly two CIDRs, no overlap, each /16–/28. Every subnet must fit entirely within exactly one of the two new CIDRs (split is blocked if any subnet spans the boundary). SG rules NOT auto-updated (manual review required). | +| **Delete** | Permanently remove | Cannot undo. | +| **Exclude** | Temporarily remove from migration | Can re-include later. | +| **Include** | Re-add excluded VPC | — | + +### Subnet Operations + +| Operation | What It Does | Constraints | +| ------------- | ---------------------------------- | -------------------------------------------------------------------------------------------- | +| **Resize** | Change CIDR prefix length | Must be /16–/28. No overlap with other subnets in same VPC. Must fit within parent VPC CIDR. | +| **Delete** | Permanently remove | Cannot undo. Does not affect parent VPC. | +| **Change IP** | Change base IP, keep prefix length | — | + +**Note:** After each operation (except Split), security group referencing is re-evaluated — CIDR-based rules may convert to SG references or vice versa. Split requires manual SG review since rules are not auto-updated. + +**Appliance VPC restrictions:** For Inspection, Inbound, and Outbound VPCs in Hub and Spoke, only Change IP address is supported. + +--- + +## Multi-Account Considerations + +| Aspect | What to Know | +| ------------------------------ | ------------------------------------------------------------------------------------------------------------- | +| **Migration type selection** | User chooses Single-account or Multi-account during target account setup. Present via user question. | +| **Cross-account IAM roles** | Must be configured before starting network migration. Agent handles setup guidance. | +| **AWS Organizations** | Required for multi-account. LZA deployment requires same Organization as the AWS Transform account. | +| **Transit Gateway sharing** | In Hub and Spoke, TGW is shared across accounts via RAM. Agent configures this. | +| **VPC merge constraint** | Both VPCs must be assigned to the same account. Present this constraint if user attempts cross-account merge. | +| **Security group referencing** | Cross-account ingress rules use SG references (Hub and Spoke). Cross-account egress uses CIDR-based rules. | + +--- + +## Deployment Approvals + +When the user selects AWS Transform-managed deployment: + +1. **Submission** — Confirm deployment intent, agent submits CloudFormation templates for review +2. **Routing** — Request routes automatically to authorized approvers via the AWS Transform Approvals tab +3. **Review** — Approvers validate CloudFormation templates and network configurations against security standards +4. **Decision** — Approver approves or denies: + - **Approved** → deployment proceeds automatically + - **Denied** → inform user, suggest contacting approver for required modifications +5. 
**Audit** — All approval decisions are tracked for audit purposes + +**Behavior during approval:** + +- After submission, inform user that deployment requires approval +- Inform user that approval is pending — when user confirms approval has been granted, check job status once +- If denied, present the denial to the user and offer to modify the network design and resubmit + +**Rollback:** After deployment completes, resources can be deleted (requires separate approval). If resources were modified after deployment, automatic deletion is not available — user must delete manually via Console or CLI. + +--- + +## Known Limitations + +1. **File upload requires `planStepId`** — `upload_artifact` without `planStepId` uploads successfully but the agent can't find the file. Always include the plan step ID for the "Upload source file" step. +2. **Connector KMS validation** — `create_connector` accepts invalid KMS key ARNs without validation. Fails silently at mapping time. +3. **MCP requires `encryptionKeyArn`** — Cannot create connector without it, even though webapp doesn't require it. +4. **No artifact deletion** — Cannot delete or overwrite files in User Uploads via MCP. +5. **Connector role reuse** — Reusing a role from a deleted connector may have KMS permissions on the wrong key. Always use the verification link. +6. **Existing mapping definitions** — If a previous job created a mapping in the same workspace, agent asks "existing vs new configuration." + +--- + +## Troubleshooting + +| Symptom | Likely Cause | Resolution | +| ---------------------------------------------- | --------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | +| Mapping fails silently after connector setup | Invalid KMS key ARN passed to `create_connector` | Resolve real ARN via `aws kms describe-key --key-id alias/aws/s3` and recreate connector | +| Agent can't find uploaded source file | `upload_artifact` called without `planStepId` | Re-upload with `planStepId` from `list_resources(resource="plan")` — find the "Upload source file" step | +| Connector status stuck on PENDING | User hasn't approved via verification link | Present verification link again — approval creates IAM role with correct permissions | +| "Existing configuration found" prompt | Previous job created a mapping in same workspace | Ask user: reuse existing or start new configuration | +| Deployment fails with Organization ID mismatch | LZA deployment account not in same AWS Organization | Verify Organization membership or choose a different IaC format | +| Reachability analysis shows no connectivity | Security groups or route tables missing rules | Review generated SGs and route tables; check Transit Gateway route table associations | +| Deployment approval denied | Approver rejected CloudFormation templates | Contact approver for required modifications, update network design, resubmit | +| Connector creation fails with validation error | Wrong configuration key name | Use `encryptionKeyArn` (not `kmsKeyArn`) in the `configuration` parameter | +| Cannot replace uploaded file | MCP has no artifact deletion/overwrite | Create a new job or upload with a different filename; existing uploads cannot be removed via MCP | + +--- + +## Example Requirements + +``` +## Requirement 1: Network Mapping + +**User Story:** As a network engineer, I want my on-premises network configuration +translated to AWS VPC infrastructure so that workload connectivity is preserved 
after migration. +**Acceptance Criteria:** + +1. WHEN mapping completes, EACH source network segment SHALL map to a distinct AWS VPC +2. WHEN mapping completes, source firewall rules SHALL be translated to AWS Security Groups +3. WHEN mapping completes, routing between segments SHALL be preserved via Transit Gateway or VPC route tables + **Handled by:** AWS Transform VMware Migration Agent (NMA sub-agent) + +## Requirement 2: Network Optimization + +**User Story:** As a cloud architect, I want to review and optimize the generated network +before deployment so that it follows our naming conventions and right-sizing standards. +**Acceptance Criteria:** + +1. WHEN optimization completes, ALL VPCs SHALL follow the organization's naming convention +2. WHEN optimization completes, VPC CIDRs SHALL be right-sized for actual subnet usage +3. WHEN optimization completes, unused subnets SHALL be removed with user approval + **Handled by:** AWS Transform VMware Migration Agent (Guided Modernization) + IDE (presenting recommendations) + +## Requirement 3: Network Deployment + +**User Story:** As an infrastructure engineer, I want the approved network configuration +deployed to my AWS account so that VPCs are ready for VM migration. +**Acceptance Criteria:** + +1. WHEN deployment completes, ALL VPCs, subnets, and security groups SHALL exist in the target account +2. WHEN reachability analysis runs, cross-VPC connectivity SHALL be confirmed +3. WHEN deployment completes, IaC templates SHALL be available for download in the selected format + **Handled by:** AWS Transform VMware Migration Agent (CloudFormation deployment) +``` + +--- + +## Example Tasks + +``` +- [ ] 1. Target account setup (AWS Transform) + - [ ] 1.1 Select migration type (single-account or multi-account) + - [ ] 1.2 MAP agreement (if applicable) + - [ ] 1.3 Configure connector (KMS key + verification link approval) + - [ ] 1.4 Verify connector status = ACTIVE +- [ ] 2. Network mapping (AWS Transform) + - [ ] 2.1 Upload source file (`upload_artifact` with `planStepId`) + - [ ] 2.2 Select topology (Hub and Spoke or Isolated VPCs) + - [ ] 2.3 Select security group strategy (MAP / MAP_DHCP / SKIP) + - [ ] 2.4 Review configuration summary and confirm → mapping begins + - [ ] 2.5 Wait for mapping to complete (2-5 min) +- [ ] 3. Network optimization + - [ ] 3.1 Review generated VPCs and subnets + - [ ] 3.2 Guided Modernization or Direct Edits + - [ ] 3.3 Apply changes → IaC regeneration + - [ ] 3.4 Confirm "Done with network design" +- [ ] 4. Deployment + - [ ] 4.1 Configure custom tags + - [ ] 4.2 Select deployment strategy (AWS Transform deploy or self-deploy) + - [ ] 4.3 If self-deploy: select additional IaC format (CDK / Terraform / LZA) + - [ ] 4.4 If AWS Transform deploy: wait for approval → deployment → reachability analysis + - [ ] 4.5 Download artifacts +- [ ] 5. 
Validation + - [ ] 5.1 Review reachability analysis results + - [ ] 5.2 Verify network diagram matches expected topology + - [ ] 5.3 Confirm VPCs ready for VM migration waves +``` diff --git a/plugins/aws-transform/skills/aws-transform/references/vmware-server.md b/plugins/aws-transform/skills/aws-transform/references/vmware-server.md new file mode 100644 index 00000000..8db881d9 --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/vmware-server.md @@ -0,0 +1,150 @@ +# Server Migration + +> **Last Updated:** 2026-05-10 + +## Table of Contents + +- [Capabilities](#capabilities) +- [Starting Workflow](#starting-workflow) +- [Agents and Transforms](#agents--transforms) +- [Decision Points](#decision-points) +- [Example Requirements](#example-requirements) +- [Example Tasks](#example-tasks) +- [Known Limitations](#known-limitations) + +## Capabilities + +Rehost VMware servers to Amazon EC2 using AWS Application Migration Service (MGN). AWS Transform orchestrates the full wave-based migration lifecycle — wave setup, inventory validation, replication agent deployment, data replication monitoring, test instance launch, and production cutover. This workload covers VMware-sourced servers only; for VMware infrastructure (vSphere networking, vSAN storage, vCenter) see [vmware.md](vmware.md). + +- VMware servers → Amazon EC2 instances (rehost/lift-and-shift via MGN) +- Continuous block-level data replication via AWS Replication Agent +- Automated replication agent installation via MGN connector (SSH/WinRM) — reusable across waves and target accounts +- Multi-wave migration with per-wave configuration +- Single-account and multi-account migration support +- Test instance launch and validation before production cutover +- Selective or full-wave cutover with finalization + +## Starting Workflow + +1. **Confirm prerequisites** — target AWS accounts ready, VPCs/subnets/security groups deployed and tagged, inventory file prepared with wave assignments +2. **Configure execution defaults** — EC2 recommendation preferences and default launch settings (apply to all target accounts) +3. **Set up migration wave** — configure target account, migration mode (single vs multi-account), IP assignment strategy, verify resource tags +4. **Validate inventory** — review and confirm server-to-EC2 mapping, licensing options, and network configuration before loading into MGN +5. **Deploy replication agents** — choose installation method (organization tools, MGN connector, or manual), deploy to all servers in the wave +6. **Monitor replication** — track initial sync and continuous replication until all servers reach Ready for testing +7. **Test** — obtain approval before launching test instances, then launch, validate, mark applications ready for cutover +8. **Cutover** — obtain approval before launching cutover instances, then launch, verify, finalize (stops replication), optionally archive source servers + +Individual servers can advance to test and cutover independently of the rest of the wave. 
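+
+Replication monitoring (step 6) is a machine-gated wait and can be polled silently. A minimal sketch, assuming the wrapper tools named in this skill; the step name, `status` field, and `COMPLETED` value are assumptions — inspect a real plan-step payload to find the actual "Ready for testing" signal:
+
+```python
+# Sketch: poll plan-step status until the replication step completes.
+# `list_resources(resource="plan", ...)` follows this skill's wrapper tools;
+# the "name"/"status"/"COMPLETED" fields and values are assumptions.
+import time
+
+def wait_for_replication(workspace_id: str, job_id: str, poll_seconds: int = 30) -> None:
+    while True:
+        steps = list_resources(resource="plan", workspaceId=workspace_id, jobId=job_id)
+        replication = next((s for s in steps if "replication" in s.get("name", "").lower()), None)
+        if replication and replication.get("status") == "COMPLETED":  # assumed field and value
+            return
+        time.sleep(poll_seconds)  # poll silently; surface only results to the user
+```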
+ +## Agents & Transforms + +| Agent | How to Discover | Purpose | +| --------------------------------------- | ------------------------------------------ | -------------------------------------------------------------------------------------------- | +| Server migration agent | `list_resources` with `resource: "agents"` | Wave setup, inventory validation, agent deployment, replication monitoring, testing, cutover | +| AWS Application Migration Service (MGN) | External | Actual server replication, testing, and cutover execution | +| AWS Migration Hub | External | Migration tracking and orchestration | + +**Discover the agent dynamically:** + +```python +list_resources(resource="agents") +# Then create job with discovered orchestratorAgent +create_job(workspaceId="...", jobName="Server Migration", + objective="Migrate VMware servers to EC2 using MGN", orchestratorAgent="") +``` + +## Decision Points + +| Decision | Options | When to Ask | +| ------------------------- | -------------------------------------------------------------------- | -------------------------------------- | +| Migration mode | Single-account / Multi-account | Wave setup | +| IP assignment strategy | Static IP / Dynamic IP (DHCP) | Wave setup | +| Agent installation method | Organization tools / MGN connector / Manual | Before agent deployment | +| Credential configuration | Single secret (Linux) / Single secret (Windows) / Per-server secrets | Connector setup (MGN connector method) | +| Testing scope | Full wave / Selective | Before launching test instances | +| Cutover scope | Full wave / Selective | Before launching cutover instances | + +## Example Requirements + +``` +## Requirement 1: Wave Setup and Inventory Validation + +**User Story:** As an infrastructure engineer, I want each migration wave configured and validated +so that servers are correctly mapped to target accounts, subnets, and EC2 instance types. +**Acceptance Criteria:** + +1. WHEN wave setup completes, EACH server SHALL have a target account, subnet, security group, and EC2 instance type assigned +2. WHEN inventory is validated, required resource tags SHALL be verified +3. WHEN inventory is loaded, MGN SHALL create source server records for each server in the wave + **Handled by:** AWS Transform Server Migration Agent + +## Requirement 2: Replication Agent Deployment + +**User Story:** As an operations engineer, I want replication agents deployed to all source servers +so that continuous block-level replication to AWS begins automatically. +**Acceptance Criteria:** + +1. WHEN agents are deployed, ALL servers in the wave SHALL show replication state INITIATING or INITIAL_SYNC +2. WHEN initial sync completes, ALL servers SHALL reach Ready for testing state +3. WHEN a server fails agent installation, the failure reason SHALL be displayed and retry SHALL be available + **Handled by:** AWS Transform Server Migration Agent + +## Requirement 3: Testing and Cutover + +**User Story:** As an operations engineer, I want to validate migrated servers before cutover +so that production workloads are moved to AWS with verified functionality. +**Acceptance Criteria:** + +1. WHEN test instances are launched, instance IDs SHALL be provided for each server +2. WHEN all applications are marked ready for cutover, replication alerts SHALL be resolved +3. WHEN cutover is finalized, source machine replication SHALL stop and lifecycle state SHALL be locked +4. 
WHEN cutover completes, downtime SHALL be limited to the window between source shutdown and cutover instance availability + **Handled by:** AWS Transform Server Migration Agent +``` + +## Example Tasks + +``` +- [ ] 1. Prerequisites verification + - [ ] 1.1 Confirm target AWS accounts are ready + - [ ] 1.2 Verify VPC, subnets, and security groups are deployed and tagged + - [ ] 1.3 Confirm inventory file is prepared with wave assignments and EC2 preferences + - [ ] 1.4 Configure migration execution defaults +- [ ] 2. Wave setup + - [ ] 2.1 Configure migration mode (single-account or multi-account) + - [ ] 2.2 Verify MGN is initialized in target accounts + - [ ] 2.3 Verify resource tagging + - [ ] 2.4 Configure IP assignment strategy +- [ ] 3. Inventory validation + - [ ] 3.1 Download and review inventory file (CSV/XLSX) + - [ ] 3.2 Adjust EC2 instance types, licensing options, tenancy if needed + - [ ] 3.3 Upload modified inventory or accept as-is + - [ ] 3.4 Confirm MGN source server records created +- [ ] 4. Deploy replication agents + - [ ] 4.1 Choose installation method + - [ ] 4.2 Set up MGN connector if selected + - [ ] 4.3 Deploy agents to all servers in the wave + - [ ] 4.4 Verify all agents connected +- [ ] 5. Data replication + - [ ] 5.1 Monitor initial sync progress per server + - [ ] 5.2 Confirm all servers reach Ready for testing state +- [ ] 6. Testing + - [ ] 6.1 Launch test instances (full wave or selective) + - [ ] 6.2 Validate test instances + - [ ] 6.3 Mark applications as ready for cutover +- [ ] 7. Cutover + - [ ] 7.1 Launch cutover instances within maintenance window + - [ ] 7.2 Verify cutover instances + - [ ] 7.3 Finalize cutover + - [ ] 7.4 Archive source servers (optional) +``` + +## Known Limitations + +- Agentless replication is not supported — the AWS Replication Agent must be installed on all servers in a wave +- SSM Hybrid Activations for the MGN connector expire after 30 days — a new connector is required if installing on a new machine after expiration +- Only one inventory import to a given target account and Region is allowed at a time +- IP assignment is constrained by the security group mapping strategy: MAP supports static IP only; MAP_DHCP and SKIP support both static and DHCP +- Deployment approvals require Admin or Approver role in AWS Transform +- Downtime is unavoidable between source shutdown and cutover instance availability — plan maintenance windows accordingly diff --git a/plugins/aws-transform/skills/aws-transform/references/vmware.md b/plugins/aws-transform/skills/aws-transform/references/vmware.md new file mode 100644 index 00000000..ff8b0669 --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/vmware.md @@ -0,0 +1,205 @@ +# VMware Migration + +> **Last Updated:** 2026-05-10 + +## Table of Contents + +- [Capabilities](#capabilities) +- [Job Types](#job-types) +- [Starting Workflow](#starting-workflow) +- [Agents and Transforms](#agents--transforms) +- [Decision Points](#decision-points) +- [Example Requirements](#example-requirements) +- [Example Tasks](#example-tasks) +- [Known Limitations](#known-limitations) + +## Capabilities + +Migrate VMware environments to AWS using generative AI-driven planning and execution. AWS Transform orchestrates the full migration lifecycle — discovery, migration planning, landing zone setup, network migration, and server rehosting to EC2. 
Supports Windows and Linux servers on supported operating systems (see [MGN supported OS list](https://docs.aws.amazon.com/mgn/latest/ug/Supported-Operating-Systems.html)). + +- VMware VMs → Amazon EC2 instances (rehost/lift-and-shift via MGN) +- AI-driven conversion of VMware network configuration → AWS VPC architecture (VPCs, subnets, security groups, Transit Gateway) +- AI-driven migration plan generation — application grouping and wave planning +- Three discovery options: AWS Application Discovery Service collectors, Export for vCenter tool, or independently collected data import +- Landing zone setup for target AWS accounts +- Multi-wave migration with per-wave configuration +- Single-account and multi-account migration support + +For detailed execution guidance see: + +- [vmware-server.md](vmware-server.md) — replication agent deployment, data replication, testing, cutover +- [vmware-network.md](vmware-network.md) — network mapping, topology, IaC generation, deployment +- [vmware-landing-zone.md](vmware-landing-zone.md) — landing zone foundation and workload account design +- [vmware-containerization.md](vmware-containerization.md) — source code containerization, Docker artifacts, ECR publishing, EKS/ECS IaC + +## Job Types + +AWS Transform offers the following VMware migration job types. Steps can be dynamically added or removed at any time to customize the workflow. + +| Job Type | Steps Included | +| ----------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------- | +| **End-to-end migration** | Perform discovery → Build migration plan → Connect target accounts → Build landing zone → Migrate network → Migrate servers | +| **Discovery and migration planning** | Perform discovery → Build migration plan | +| **Network migration** | Connect target accounts → Migrate network | +| **Landing zone** | Connect target accounts → Build landing zone | +| **Landing zone, network, and server migration** | Connect target accounts → Build landing zone → Migrate network → Migrate servers | +| **Migration planning and server migration** | Perform discovery → Build migration plan → Connect target accounts → Migrate servers | +| **Source code containerization** | Connect target accounts → Containerize applications → Publish to ECR → Deploy to EKS/ECS | + +> One target AWS Region per VMware migration job. To migrate to different Regions, create multiple jobs. + +## Starting Workflow + +**Before starting:** Confirm job type — determine which steps are in scope based on what the user already has (existing network, existing landing zone, etc.) + +1. **Perform discovery** — identify VM count, OS types, resource usage (CPU, memory, storage), and network dependencies using one of the three discovery options +2. **Build migration plan** — AI-driven application grouping, wave planning, and right-sizing recommendations +3. **Connect target accounts** — configure target AWS accounts and verify permissions +4. **Build landing zone** — set up AWS account structure, IAM roles, and baseline infrastructure in target accounts +5. **Migrate network** — translate VMware network configuration to AWS VPC architecture; deploy via CloudFormation +6. **Migrate servers** — deploy replication agents, replicate data, test, and cut over wave by wave + +**Key questions to ask user:** + +- "Do you already have a landing zone and network set up in the target account(s), or do we need to build those?" 
+- "Which discovery method do you have available — ADS collectors, Export for vCenter, or an existing data export?" + +## Agent Interaction Rules + +**After every job interaction** (sending a message, completing a task, uploading an artifact) — always read the latest messages from the job and surface any agent responses or questions to the user immediately. Do not assume silence means the agent is still processing — it may have already responded. + +**Polling priority** — check in this order: + +1. Messages (agent chat responses and questions) — check first +2. Tasks (formal HITL tasks awaiting human input) +3. Worklogs (agent activity and progress) + +**Target account operations — always delegate to the agent:** +When the user asks about resources in the target AWS account (subnets, VPCs, security groups, instances, IAM roles), do NOT query the target account directly. Forward the request to the agent via `send_message` — the agent has connector-based access to the target account. + +**Console links — never construct, always use agent-provided:** +Console URLs are dynamically generated and scoped to specific connectors, workspaces, and accounts. Always use links provided by the agent in its messages or HITL tasks. If the agent hasn't provided a link, ask it via `send_message` rather than guessing the URL format. + +## Agents & Transforms + +| Agent | How to Discover | Purpose | +| ------------------------------------------------------- | ------------------------------------------ | ---------------------------------------------------------------------------------------------------- | +| VMware Migration Agent v2 (`vmware-migration-agent-v2`) | `list_resources` with `resource: "agents"` | End-to-end orchestration: discovery, planning, network migration, server migration, containerization | +| Server Migration Agent | `list_resources` with `resource: "agents"` | Wave setup, replication agent deployment, replication monitoring, testing, cutover | +| Landing Zone Agent | `list_resources` with `resource: "agents"` | Foundation setup, workload account design, SCP configuration, IaC generation | +| Network Migration Agent (NMA) | _(sub-agent, invoked by orchestrator)_ | Network mapping, optimization, and IaC generation | +| Containerization sub-agent | _(sub-agent, invoked by orchestrator)_ | Source code analysis, Docker artifact generation, image building, IaC generation | +| AWS Application Migration Service (MGN) | External | Actual server replication, testing, and cutover execution | +| AWS Migration Hub | External | Migration tracking and orchestration | + +**Discover agents dynamically:** + +```python +list_resources(resource="agents") +# Or ask the chat agent +send_message(workspaceId="...", text="What agents are available for VMware migration?") +# Then create job with discovered orchestratorAgent +create_job(workspaceId="...", jobName="VMware Migration", + objective="Migrate VMware workloads to EC2", orchestratorAgent="vmware-migration-agent-v2") +``` + +## Decision Points + +| Decision | Options | When to Ask | +| ------------------------- | ------------------------------------------------------------------------ | ----------------------------------------------------------------------- | +| Job type | End-to-end / Discovery and planning / Network only / Landing zone / etc. 
| Before starting — based on what the user already has | +| Discovery method | ADS collectors / Export for vCenter / Independent import | Step 1 — before discovery | +| Migration mode | Single-account / Multi-account | Step 3 — connecting target accounts | +| Network topology | Hub and Spoke / Isolated VPCs | Step 5 — network migration | +| Security group strategy | MAP / MAP_DHCP / SKIP | Step 5 — network migration. Determines IP assignment options in Step 6. | +| IP assignment strategy | Static IP / Dynamic IP (DHCP) | Step 6 — wave setup. Constrained by SG strategy. | +| Agent installation method | Organization tools / MGN connector / Manual | Step 6 — before replication agent deployment | +| Testing scope | Full wave / Selective | Step 6 — before launching test instances | +| Cutover scope | Full wave / Selective | Step 6 — before launching cutover instances | + +## Example Requirements + +``` +## Requirement 1: VM Discovery and Migration Planning + +**User Story:** As an infrastructure engineer, I want all VMware VMs assessed and grouped into waves +so that I have a clear, prioritized migration plan with right-sized EC2 targets. +**Acceptance Criteria:** + +1. WHEN discovery completes, EACH VM SHALL have a recommended EC2 instance type +2. WHEN discovery completes, network dependencies between VMs SHALL be documented +3. WHEN migration plan is built, VMs SHALL be grouped into migration waves with dependency ordering + **Handled by:** AWS Transform VMware Migration Agent v2 + +## Requirement 2: Network Migration + +**User Story:** As a network engineer, I want VMware network configuration translated to AWS VPC +so that VM communication patterns are preserved after migration. +**Acceptance Criteria:** + +1. WHEN network mapping completes, EACH source network segment SHALL map to a distinct AWS VPC +2. WHEN network mapping completes, source firewall rules SHALL be translated to AWS Security Groups +3. WHEN deployment completes, ALL VPCs, subnets, and security groups SHALL exist in the target account + **Handled by:** AWS Transform VMware Migration Agent v2 (NMA sub-agent) + +## Requirement 3: Server Migration + +**User Story:** As an operations engineer, I want VMware servers rehosted to EC2 +so that production workloads run natively on AWS with verified functionality. +**Acceptance Criteria:** + +1. WHEN replication agents are deployed, ALL servers SHALL show replication state INITIATING or INITIAL_SYNC +2. WHEN test instances are launched (after approval), instance IDs SHALL be provided for each server +3. WHEN cutover is finalized (after approval), source machine replication SHALL stop and lifecycle state SHALL be locked + **Handled by:** AWS Transform Server Migration Agent +``` + +## Example Tasks + +``` +- [ ] 1. Job setup + - [ ] 1.1 Confirm job type (end-to-end or subset of steps) + - [ ] 1.2 Confirm single-account or multi-account migration + - [ ] 1.3 Create and start VMware migration job +- [ ] 2. Discovery (Step 1) + - [ ] 2.1 Choose discovery method (ADS / Export for vCenter / independent import) + - [ ] 2.2 Run discovery and collect VM inventory + - [ ] 2.3 Review discovery results +- [ ] 3. Migration planning (Step 2) + - [ ] 3.1 Review AI-generated application groupings + - [ ] 3.2 Review and adjust wave assignments + - [ ] 3.3 Review right-sizing recommendations + - [ ] 3.4 Approve migration plan +- [ ] 4. 
Connect target accounts (Step 3) + - [ ] 4.1 Provide target AWS account IDs + - [ ] 4.2 Verify MGN initialized in each target account + - [ ] 4.3 Verify cross-account IAM roles +- [ ] 5. Build landing zone (Step 4) + - [ ] 5.1 Configure landing zone settings + - [ ] 5.2 Deploy landing zone (approval required) + - [ ] 5.3 Verify baseline infrastructure in target accounts +- [ ] 6. Network migration (Step 5) — see vmware-network.md + - [ ] 6.1 Upload source network file + - [ ] 6.2 Select topology and security group strategy + - [ ] 6.3 Review and optimize network design + - [ ] 6.4 Deploy network (approval required) +- [ ] 7. Server migration (Step 6) — see vmware-server.md + - [ ] 7.1 Set up migration wave per wave + - [ ] 7.2 Validate and confirm inventory + - [ ] 7.3 Deploy replication agents + - [ ] 7.4 Monitor data replication + - [ ] 7.5 Launch test instances (approval required) + - [ ] 7.6 Mark applications ready for cutover + - [ ] 7.7 Launch cutover instances (approval required) + - [ ] 7.8 Finalize cutover and archive source servers +``` + +## Known Limitations + +- One target AWS Region per VMware migration job — create multiple jobs to migrate to different Regions +- Stopping a running migration job is irreversible — VMWARE_V2 jobs cannot be restarted once stopped. A new job must be created to start over. Artifacts from the stopped job are preserved but job progress is lost. +- NSX imports are only supported for end-to-end migration jobs +- Physical servers (non-virtualized) are not in scope +- VMware-specific features (vMotion, DRS, HA) have no direct AWS equivalents — require architectural redesign +- License mapping (Windows Server, SQL Server on VMs) requires manual review +- AWS Transform generates network configurations and migration strategies based on environment assessment — review with stakeholders before proceeding to ensure security and compliance requirements are met diff --git a/plugins/aws-transform/skills/aws-transform/references/workflow.md b/plugins/aws-transform/skills/aws-transform/references/workflow.md new file mode 100644 index 00000000..4bdc8e35 --- /dev/null +++ b/plugins/aws-transform/skills/aws-transform/references/workflow.md @@ -0,0 +1,484 @@ +# Table of Contents + +- [Quick Start](#quick-start) +- [CLI vs Managed Agents](#cli-vs-managed-agents) +- [Choosing a Transformation](#choosing-a-transformation) + - [By Tech Stack](#by-tech-stack) + - [By Goal](#by-goal) +- [Discovery](#discovery) + - [How It Works](#how-it-works) + - [Signal Detection](#signal-detection) + - [Risk Classification](#risk-classification) + - [Discovery Output](#discovery-output) +- [Execution Lifecycle](#execution-lifecycle) + - [Managed Agent Jobs (MCP)](#managed-agent-jobs-mcp) + - [CLI Transforms](#cli-transforms) + - [Monitoring](#monitoring) + - [Parallel Execution](#parallel-execution) + - [Mandatory Diff Review](#mandatory-diff-review) + - [Hybrid Workflow](#hybrid-workflow) +- [Plan Building](#plan-building) + - [Spec Structure](#spec-structure) + - [Two Gates](#two-gates) + - [Iterative Planning](#iterative-planning) +- [Context Management](#context-management) + - [Location](#location) + - [Schema](#schema) + - [Resume Logic](#resume-logic) +- [Freshness and Source of Truth](#freshness-and-source-of-truth) + - [Source of truth: fetch each resource directly](#source-of-truth-fetch-each-resource-directly) + - [Freshness framing](#freshness-framing) + - [No false promises of proactive surfacing](#no-false-promises-of-proactive-surfacing) + - [Transformation goal 
switching](#transformation-goal-switching) +- [Display Conventions](#display-conventions) + - [Interactive Choices](#interactive-choices) + - [Consultant-Style Observations](#consultant-style-observations) + - [Status Icons](#status-icons) + - [Progress](#progress) + +--- + +## Workflow + +## Quick Start + +| Say This | What Happens | +| ------------------------------------- | ---------------------------------------------------------------- | +| "Analyze this codebase" | Quick local analysis of architecture and dependencies | +| "Start .NET modernization" | Launch AWS Transform agents to transform .NET Framework → .NET 8 | +| "Check my job" | See job status and progress | +| "Review pending requests" | Handle collaborator requests the agent needs from you | +| "Download artifacts" | Get transformed code, reports, and build outputs | +| "Show my workspaces" | List your AWS Transform workspaces | +| "What transformations are available?" | See available agents for your account | + +--- + +## CLI vs Managed Agents + +| | **AWS Transform CLI** | **Managed Agents** | +| ----------------- | -------------------------------------------------------------------- | ------------------------------------------------ | +| **Runs on** | Your machine | AWS infrastructure | +| **Auth** | AWS credentials | Sign in to AWS Transform | +| **Scope** | Single repo | Multi-repo, specialized workload types | +| **Best for** | Analysis, small upgrades, custom transformations, applying standards | .NET, mainframe, VMware, SQL, full modernization | +| **Offline** | Yes | No | +| **Human-in-loop** | No | Yes (collaborator requests) | +| **Team features** | No | Yes (shared workspaces) | + +**Decision tree:** + +``` +What do you want to do? +│ +├─ Quick analysis or standards check? → CLI +├─ .NET Framework modernization? → Managed Agents +├─ Mainframe (COBOL/JCL)? → Managed Agents +├─ VMware → EC2? → Managed Agents +├─ Database modernization (SQL Server, Oracle)? → Managed Agents +├─ Java/Python version upgrade? → CLI +├─ Team collaboration needed? → Managed Agents +├─ Not sure? → Start with CLI analysis, escalate to Managed Agents if needed +│ +└─ Best results on complex project? → Hybrid (CLI assess → Managed Agents transform → CLI validate) +``` + +--- + +## Choosing a Transformation + +### By Tech Stack + +| Stack | Approach | Agent | +| ------------------------ | -------------- | ---------------------------------------------------------------------------- | +| .NET Framework 4.x | Managed Agents | `dotnet-chatty-agent` (hardcoded) | +| .NET Core 3.1 / .NET 5/6 | Managed Agents | Same .NET agent (simpler upgrade) | +| Java 8/11/17 | CLI | Find Java transformation definitions via `atx custom def list --json` | +| Spring Boot 2.x → 3.x | CLI | Find Spring Boot transformation definitions via `atx custom def list --json` | +| COBOL / JCL | Managed Agents | Discover via `list_resources resource="agents"` | +| VMware VMs | Managed Agents | Discover via `list_resources resource="agents"` | +| SQL Server / Oracle | Managed Agents | Discover via `list_resources resource="agents"` | +| Already modern | CLI | Run analysis or standards transformation definitions | + +**.NET agent is the only hardcoded name. 
All others: discover dynamically.**
+
+### By Goal
+
+| Goal                   | Approach                                                         |
+| ---------------------- | ---------------------------------------------------------------- |
+| Understand a codebase  | CLI: run analysis transformation definition                      |
+| Modernize legacy app   | Identify stack → CLI assessment → Managed Agents transformation  |
+| Upgrade a version      | CLI for Java/Python; Managed Agents for .NET                     |
+| Apply coding standards | CLI: find standards transformation definition                    |
+| Migrate to AWS         | Managed Agents (.NET → dotnet-chatty-agent, mainframe, VMware)   |
+
+---
+
+## Discovery
+
+Discovery is a fast scan (~10 sec) that finds what's in the workspace and maps to agents. It is NOT assessment.
+
+### How It Works
+
+1. Glob for project files (signal detection)
+2. Read key files for framework/version
+3. Classify risk: HIGH / MED / LOW
+4. Map to recommended agent
+5. Save to `.atx/discovery.json`
+
+### Signal Detection
+
+| Signal File                            | What to Extract                           | Opportunity             |
+| -------------------------------------- | ----------------------------------------- | ----------------------- |
+| `pom.xml`                              | `<java.version>`, spring-boot version     | Java upgrade            |
+| `build.gradle`                         | `sourceCompatibility`, spring boot plugin | Java upgrade            |
+| `pom.xml` / `build.gradle`             | `com.amazonaws` group                     | AWS SDK v1 → v2         |
+| `.csproj`                              | `v4.x`                                    | .NET modernization      |
+| `.csproj`                              | `netcoreapp2.x` / `net5.0`                | .NET upgrade            |
+| `packages.config`, `Web.config`        | Legacy NuGet, `system.web`                | .NET modernization      |
+| `*.cbl`, `*.cob`                       | COBOL source                              | Mainframe modernization |
+| `*.jcl`                                | JCL job cards                             | Mainframe modernization |
+| `*.sql` with T-SQL (`GO`, `sp_`)       | SQL Server                                | SQL migration           |
+| `*.sql` with PL/SQL (`BEGIN`, `DBMS_`) | Oracle                                    | Oracle migration        |
+
+### Risk Classification
+
+| Risk     | Criteria            | Examples                                                          | Say This to Users                                             |
+| -------- | ------------------- | ----------------------------------------------------------------- | ------------------------------------------------------------- |
+| **HIGH** | EOL/deprecated      | Java 8, .NET FW 4.x, COBOL, Spring Boot 1.x, Spring Framework 5.x | No longer receiving security updates — migration recommended  |
+| **MED**  | Patched or near-EOL | Java 11, .NET 9                                                   | Approaching end of life — plan migration soon                 |
+| **LOW**  | Minor version lag   | Java 17→21                                                        | Current but not latest — optional upgrade for new features    |
+
+### Discovery Output
+
+Save to `.atx/discovery.json`:
+
+```json
+{
+  "discoveredAt": "...",
+  "components": [
+    {
+      "path": "order-service/",
+      "stack": "Java 8, Spring Boot 1.5.22",
+      "risk": "HIGH",
+      "reason": "Java 8 EOL Jan 2019",
+      "recommendedAgent": "AWS Transform CLI (find Java transformation definition via atx custom def list --json)"
+    }
+  ]
+}
+```
+
+Present as migration table:
+
+```
+| Risk | Why | Component | Current | Target | AWS Target | Recommended Approach |
+|------|-----|-----------|---------|--------|------------|---------------------|
+| HIGH | No longer receiving security updates | order-service/ | Java 8 | Java 25 | — | CLI |
+| HIGH | No longer receiving security updates | storefront/ | .NET FW 4.7.2 | .NET 8 | — | Managed Agents |
+```
+
+---
+
+## Execution Lifecycle
+
+All transformations follow this pattern:
+
+### Managed Agent Jobs (MCP)
+
+```
+1. Create/select workspace create_workspace
+2. Set up connectors create_connector (if agent requires them)
+3. Create and start job create_job
+4. Drive conversation send_message
+5. Handle collaborator requests complete_task (see tools.md)
+6. 
Download results get_resource resource="artifact" +``` + +**Your IDE is the bridge between user and AWS Transform agent:** + +- Agent asks a question → present options and wait for user decision +- User answers → relay via `send_message` or `complete_task` +- Agent needs files → upload via `upload_artifact` +- Agent produces results → download via `get_resource resource="artifact"` + +**Rule: Present options to user, user decides, relay decision. Never shortcut this.** + +### CLI Transforms + +```bash +# Always run in background +AWS_REGION=us-east-1 atx custom def exec -n -p -x -t +``` + +Use `run_in_background=true` in your IDE. + +### Monitoring + +For job status and progress, ask the agent directly — it has full job context: + +``` +send_message # Scoped to the job: "What's the current status?" +list_resources resource="worklogs" # Recent activity log +list_resources resource="tasks" # Pending collaborator requests — always check +``` + +Fall back to lower-level resources only if the agent's answer is unclear or you need specifics it didn't cover: + +``` +get_resource resource="job" # Status: CREATED, STARTING, ASSESSING, PLANNING, PLANNED, EXECUTING, AWAITING_HUMAN_INPUT, COMPLETED, FAILED, STOPPING, STOPPED +list_resources resource="plan" # Phases and current step +list_resources resource="messages" # Raw messages from agent +``` + +**Monitoring loop:** + +- Ask the agent via `send_message` + check `worklogs` +- Always check `list_resources resource="tasks"` — active job status does not imply no pending user tasks +- Always check `list_resources resource="messages"` — messages with a non-null `interactions` array (selection menus, confirmations) may be awaiting a user response via `send_message`. These interactive prompts do NOT appear as tasks and do NOT change the job status. A job can remain in EXECUTING status with no pending tasks while the agent is waiting on a user reply to an interactive message. +- When a collaborator request appears → present to user, relay decision +- When job completes → download artifacts +- When job fails → show error, offer retry + +**Waiting between re-checks.** When a resource is in a transitional state and you need to re-check after a delay, use `adaptive_poll` rather than responding with stale data or silently stalling. Follow the tool's own description for terminal states and approval requirements. During an active, user-approved polling loop, present-tense status framing is fine (see Freshness below); outside that loop, do not promise proactive surfacing. + +### Parallel Execution + +- CLI and Managed Agents on **different** components → run in parallel +- Two CLI transforms on different projects → both `run_in_background` +- Same component → sequential +- Each workspace can only run **one job at a time** + +### Mandatory Diff Review + +After ANY transform that changes code, show `git diff` summary then present options and wait for user decision: + +- "Accept Changes" +- "Revert" +- "Review File-by-File" + +User MUST approve before next task. + +### Hybrid Workflow + +For best results, combine CLI and Managed Agents: + +1. **Assess locally** (CLI) — run analysis transformation definition, understand codebase +2. **Transform with Managed Agents** — use findings to guide agents (pass in `intent` field) +3. 
**Validate locally** (CLI) — apply org standards to output + +--- + +## Plan Building + +### Spec Structure + +One spec at `.atx/specs/`: + +``` +.atx/specs/ + .config # {"specId":"aws-transform","workflowType":"requirements-first","specType":"feature"} + requirements.md # Numbered requirements with user stories and acceptance criteria + tasks.md # Hierarchical checkboxes referencing requirements +``` + +For multi-module projects, use **sections within the same spec** — not separate specs. + +### Two Gates + +**Gate 1: Scope Confirmation** (after discovery, before requirements) + +- Present scope summary: components, risk levels, estimated task count +- User confirms or adjusts + +**Gate 2: Plan Approval** (after requirements, before execution) + +- Present full plan: task count, phases, parallel groups +- User confirms "Start Execution" or adjusts + +Between gates: agent works autonomously. After Gate 2: execution proceeds with diff review as the ongoing control mechanism. + +### Iterative Planning + +``` +while not done: + 1. Read requirements.md + tasks.md + .atx/context.json + 2. Pick the most important incomplete task + 3. Execute ONE task + 4. Review diffs — get user approval if code changed + 5. Mark task [x], update context + 6. If new issues found, add to tasks.md +``` + +One task per iteration. Fresh analysis each time. State on disk. + +--- + +## Context Management + +### Location + +``` +.atx/context.json ← workspace-relative, source of truth +.atx/discovery.json ← discovery findings +.atx/specs/ ← requirements + tasks +``` + +**NEVER read from `~/.aws/atx/context.json`** — that's the MCP server's internal state, not this skill's. Context is always relative to the workspace directory. + +Add `.atx/` to `.gitignore`. + +### Schema + +```json +{ + "phase": "intent|discovery|scoped|assessed|requirements|planning|executing|complete", + "discovery": { "completedAt": "...", "components": 3, "discoveryFile": ".atx/discovery.json" }, + "assessment": { + "completedAt": "...", + "workspaceId": "...", + "jobId": "...", + "reportDir": ".atx/assessment-report/" + }, + "spec": { "folder": ".atx/specs", "requirementsApproved": false, "tasksGenerated": false }, + "workStyle": null, + "execution": { + "currentTask": "1.2", + "completedTasks": ["1.1"], + "workspaceId": null, + "activeJobIds": [] + }, + "updatedAt": "..." +} +``` + +### Resume Logic + +Silently check for `.atx/context.json`. + +**No context found:** Proceed directly to the Intent step. Never reference internal step numbers in user-facing text — no "Step 1", "Step 2", or similar. Your first user-facing message must be the intent menu itself, with zero preamble. + +**Context found:** Before resuming, silently try to refresh live state from the service: + +1. **Check auth first** (no-auth-required). Use the MCP tool that reports auth/sign-in status (discover it from `tools/list`). If sign-in is NOT configured, skip the refresh entirely and use local context only. Do NOT attempt further service calls, do NOT mention auth to the user, do NOT demand sign-in. +2. **If sign-in is configured**, fetch each resource your resume message depends on. Each resource has its own source of truth — do NOT infer one from another (e.g., a job in an active state like `EXECUTING` does not mean no pending user tasks). At minimum: + - The **job** itself — what phase is it in, has it completed or failed. + - Any **pending tasks** — HITL tasks requiring user action. Fetch ALL pending tasks, not just one. 
**Surface every pending task to the user — do NOT cherry-pick the most prominent and omit the rest.** Each task (input-needed, approval-pending, etc.) is something the user needs to know about. `BLOCKING` tasks hold up progress even when the job is in an active state; `NON_BLOCKING` tasks still need attention but don't stall the job. Name every pending task in the resume message; flag which ones are blocking. +3. **If any call fails**, silently fall back to local context. Do NOT mention the failure to the user. + +Tool names come from the server's `tools/list` response; read tool descriptions directly rather than hardcoding names in resume logic. + +Then tell the user about their prior session, framing the offer as a **continuation of that same session** — not a similar new one: + +- Use explicit continuation language: "continue where you left off", "pick up from where you stopped". +- What phase was reached (e.g., "last time, your session finished assessment") +- What key artifacts exist (e.g., workspace ID, assessment report) +- **Refresh succeeded** → speak in present tense about live state ("your assessment job is running", "I need your input on X to continue"). If there is a pending HITL task, surface it — don't bury it under "your job is running." +- **Refresh failed or was skipped** → use prior-session framing ("last time", "when you paused", "previously"). Do NOT present-tense claims about job state; local context may be stale. Offer sign-in as the path to current status — a benefit, not a gate ("sign in to see the latest status"). +- Clarify what resume vs. start-fresh means in user terms: + - **Resume** = continue the same session, reusing the existing assessment report, workspace, and prior progress. + - **Start fresh** = discard the prior session (local artifacts deleted) and begin a brand-new migration. + +If user chooses **start fresh**: delete `.atx/context.json`, `.atx/discovery.json`, `.atx/assessment-report/`, and `.atx/specs/`, then proceed to the Intent step. + +If user chooses **resume**, resume based on `phase`: + +| Phase | Resume Action | +| -------------- | ------------------------------------------------------- | +| `intent` | Present intent options again, continue based on choice | +| `discovery` | Show migration table, continue to scope | +| `scoped` | Show selected scope, continue to assessment | +| `assessed` | Show assessment summary, draft requirements from report | +| `requirements` | Show current requirements, ask to approve or edit | +| `planning` | Show tasks, ask to start execution | +| `executing` | Show progress, pick next task | +| `complete` | Show summary, ask what's next | + +--- + +## Freshness and Source of Truth + +Resume Logic (above) dictates how you frame status at session start. The same discipline applies to **every in-session turn**. + +### Source of truth: fetch each resource directly + +Each MCP resource (job, tasks, messages, artifacts, connectors, ...) is its own source of truth on the server. Do NOT infer one resource's state from another's. In particular: + +- An active job status (`ASSESSING`, `PLANNING`, `EXECUTING`) ≠ no pending user tasks. The agent may be blocked waiting on a checkpoint decision while the job status still reads as active. The job itself can also enter `AWAITING_HUMAN_INPUT`, but ONLY for `BLOCKING` tasks tied to a plan step — `NON_BLOCKING` tasks never trigger this transition. A job in an active state may still have multiple pending `NON_BLOCKING` tasks. 
So always fetch the tasks resource; don't rely on job status alone. +- Job status COMPLETED ≠ no artifacts pending review. +- An absence of recent messages ≠ no pending tasks. + +When the user-facing message depends on a resource, fetch THAT resource. Don't synthesize. The MCP tool surface is the ground truth — discover the right tool name from the server's `tools/list` response (see [tools](tools.md)), not from memory. + +### Freshness framing + +A "turn" is one user message → one of your responses. Tool calls made while composing the response count as part of the same turn; calls made for a prior user message do not. + +Any user-facing claim about job state, messages, tasks, or artifacts must be either: + +- **Just-fetched** — you called the relevant read tool (`get_resource`, `list_resources`) in THIS turn, before answering → present tense is OK ("your job is running"). +- **Cached** — no fresh fetch this turn → frame as cached AND offer to refresh. + +A fetch from a prior turn, resume, or the initial session refresh does NOT count as fresh. You have no clock — the user may have been away for hours; the job may have changed. + +**Cached framing must scope the ENTIRE claim.** Lead with the cached marker; every status verb must be past-tense. A trailing "as of earlier" does not retroactively qualify a present-tense leading clause. + +- WRONG: "Your job is handling the assessment phase. As of the last check, it was running." +- RIGHT: "As of my last check, your job was handling the assessment phase and running VM tasks. Want me to pull the latest?" + +Exception: during an active polling loop (see workload steering files), present tense is fine — the fetch really is happening on each cycle. + +### No false promises of proactive surfacing + +When NOT polling, do NOT imply background monitoring. Phrases like "I'll let you know when...", "I'll surface those as they come up", "I'll ping you if..." mislead users into assuming this skill is watching the job. + +When not polling, make the reactive model explicit: "You'll need to ask me — I don't watch in the background" / "Say 'check status' and I'll pull the latest." + +"I'll update you as the job progresses" is only acceptable during an active polling loop. + +### Transformation goal switching + +Track the "active goal" — what the user is trying to accomplish (e.g., "modernize this VMware fleet"). Two jobs serving the same goal are ONE goal. + +When the user's message shifts to a DIFFERENT goal (different workload, different migration target, or a clearly different body of work), before answering: + +1. Recognize the shift. Trigger is a change in what the user is accomplishing, not a field-by-field ID comparison. +2. Ask the user and suggest they start a new chat session with fresh context (they start it themselves — you cannot). Give a brief reason: mixing unrelated goals causes cross-contaminated answers. +3. Wait for their choice. If accepted, stop answering about the new goal in this chat. If declined, **answer the user's question in this chat** — respect their choice. Just do not mix cached state from the prior goal into your answers. Do not re-push the new-chat suggestion until the next goal shift. + +**Re-offer on every shift**, because cross-contamination compounds. But keep re-offers terse — "Different goal again — want a new chat session? [Yes] [Stay here]" — don't re-lecture. + +Carve-out: historical, past-tense questions about a prior goal ("what did my .NET modernization produce last week?") do NOT trigger the suggestion. 
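+
+To make the source-of-truth and freshness rules above concrete, here is a minimal sketch in the same pseudo-call style this skill uses elsewhere. Only the tool names (`get_resource`, `list_resources`, the `workspaceId` parameter) and the `BLOCKING`/`NON_BLOCKING` task types come from this document; the `status`, `type`, and `title` response fields are illustrative assumptions, not a confirmed response shape:
+
+```python
+# Fresh fetches in THIS turn — job and tasks are separate sources of truth,
+# so fetch both; never infer "no pending tasks" from an active job status.
+job = get_resource(workspaceId="...", resource="job")        # e.g. status EXECUTING
+tasks = list_resources(workspaceId="...", resource="tasks")  # ALL pending HITL tasks
+
+pending = [t for t in tasks if t.get("status") == "PENDING"]    # assumed field name
+blocking = [t for t in pending if t.get("type") == "BLOCKING"]  # BLOCKING vs NON_BLOCKING
+
+# Just fetched → present tense is OK; surface EVERY pending task and flag blockers.
+print(f"Your job is {job['status'].lower()}.")
+for t in pending:
+    marker = " (BLOCKING)" if t in blocking else ""
+    print(f"- Pending task: {t.get('title', 'untitled')}{marker}")
+```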
+ +--- + +## Display Conventions + +### Interactive Choices + +Always present options and wait for user decision when offering choices. + +Example interaction format: + +- Question: "How would you like to proceed?" +- Options with descriptions: + - "Approve (Recommended)" - Start executing + - "Modify" - Change the plan + - "Explain" - Deep dive into why this order + +**Do NOT use markdown bullets or numbered lists for choices.** Create clickable UI elements. + +### Consultant-Style Observations + +2-3 sentences max. State what you found, then what's possible. No data dumps. Never narrate tool calls — describe outcomes, not mechanics. + +### Status Icons + +``` +[ ] Pending [Running] Running [Done] Done [Failed] Failed +``` + +### Progress + +``` +Steps: 2 of 5 complete +``` diff --git a/tools/generate_codex_manifests.py b/tools/generate_codex_manifests.py index 70ed1cec..37e0e310 100644 --- a/tools/generate_codex_manifests.py +++ b/tools/generate_codex_manifests.py @@ -53,6 +53,16 @@ "Review this repo for AWS serverless best practices.", ], }, + "aws-transform": { + "displayName": "AWS Transform", + "shortDescription": "Migrate and modernize codebases to AWS with assessment, planning, and execution.", + "longDescription": "AWS Transform brings AI-powered code and workload modernization into your coding agent. Supports .NET Framework to .NET 8/10, mainframe COBOL to Java, VMware to EC2, SQL Server/Oracle/MySQL to Aurora, and Java/Python/Node.js language and AWS SDK upgrades. Guides users through assessment, requirements, approval, tasks, and execution with just-in-time authentication.", + "defaultPrompt": [ + "Migrate this .NET Framework app to .NET 8 on AWS.", + "Assess this codebase for modernization with AWS Transform.", + "Plan a VMware-to-EC2 migration for my workload.", + ], + }, "databases-on-aws": { "displayName": "Databases on AWS", "shortDescription": "Design schemas, query data, and choose the right AWS database path.",