Skip to content
125 changes: 99 additions & 26 deletions pkg/destroy/powervs/power-dhcp.go
Original file line number Diff line number Diff line change
Expand Up @@ -189,11 +189,66 @@ func (o *ClusterUninstaller) listDHCPNetworksByName() ([]string, error) {
return result, nil
}

// extractNetworkIDFromError extracts network ID from error message if present.
// Error format: "network xxx-xxx-xxxxx still attached to pvm-instances".
func extractNetworkIDFromError(err error) string {
	if err == nil {
		return ""
	}
	// Split on every "network " occurrence so an ID is still found when the
	// first occurrence is not immediately followed by a UUID.
	parts := strings.Split(err.Error(), "network ")
	for _, remaining := range parts[1:] {
		// The candidate ID is the token up to the next space, or the rest of
		// the string when the UUID is the final token of the message.
		candidate := remaining
		if spaceIdx := strings.Index(remaining, " "); spaceIdx >= 0 {
			candidate = remaining[:spaceIdx]
		}
		// UUID format validation (36 chars with 4 dashes).
		if len(candidate) == 36 && strings.Count(candidate, "-") == 4 {
			return candidate
		}
	}
	return ""
}

// findNetworkIDByName finds a network ID by matching the network name.
// Returns the empty string when the client is unset, listing fails, or no
// network with the given name (and a non-nil ID) exists.
func (o *ClusterUninstaller) findNetworkIDByName(networkName string) string {
	if o.networkClient == nil {
		return ""
	}
	networks, err := o.networkClient.GetAll()
	if err != nil {
		o.Logger.Debugf("Failed to list networks to find ID for %q: %v", networkName, err)
		return ""
	}
	for _, network := range networks.Networks {
		// Skip entries that either have no name or do not match.
		if network.Name == nil || *network.Name != networkName {
			continue
		}
		if network.NetworkID != nil {
			return *network.NetworkID
		}
	}
	return ""
}

// isDHCPNetworkAttachedError checks if an error indicates the DHCP network is attached to PVM instances.
func isDHCPNetworkAttachedError(err error) bool {
	if err == nil {
		return false
	}
	errStr := err.Error()
	// "still attached to" also covers the more specific
	// "still attached to pvm-instances" message, so a single check suffices.
	return strings.Contains(errStr, "still attached to") ||
		strings.Contains(errStr, "pcloudDhcpDeleteBadRequest")
}

// destroyDHCPNetwork deletes a PowerVS DHCP network.
func (o *ClusterUninstaller) destroyDHCPNetwork(item cloudResource) error {
var err error

_, err = o.dhcpClient.Get(item.id)
dhcpServer, err := o.dhcpClient.Get(item.id)
if err != nil {
o.deletePendingItems(item.typeName, []cloudResource{item})
o.Logger.Infof("Deleted DHCP Network %q", item.name)
Expand All @@ -202,8 +257,46 @@ func (o *ClusterUninstaller) destroyDHCPNetwork(item cloudResource) error {

o.Logger.Debugf("Deleting DHCP network %q", item.name)

// Before deleting the DHCP server, check if its network has attached network interfaces
// that need to be deleted first. This prevents errors like "network still attached to pvm-instances, that fail early before subnet network interfaces are deleted in the destroyPowerSubnets() stage."
var networkID string
if dhcpServer.Network != nil {
// Try to find network ID by name
if dhcpServer.Network.Name != nil {
networkID = o.findNetworkIDByName(*dhcpServer.Network.Name)
if networkID != "" {
o.Logger.Debugf("Found network ID %s for DHCP subnet %q. Checking for network interfaces...", networkID, item.name)
// Try to delete network interfaces from the subnet
if nicErr := o.deleteNetworkInterfaces(networkID); nicErr != nil {
o.Logger.Debugf("Note: Could not delete network interfaces for DHCP subnet %q: %v (will attempt DHCP deletion anyway)", item.name, nicErr)
}
}
}
}

err = o.dhcpClient.Delete(item.id)
if err != nil {
// If deletion failed because network is still attached to instances, try deleting network interfaces
if isDHCPNetworkAttachedError(err) {
// Try to extract network ID from error message if we don't have it yet
if networkID == "" {
networkID = extractNetworkIDFromError(err)
}
// If still no network ID, try finding by name again
if networkID == "" && dhcpServer.Network != nil && dhcpServer.Network.Name != nil {
networkID = o.findNetworkIDByName(*dhcpServer.Network.Name)
}

if networkID != "" {
o.Logger.Debugf("DHCP subnet %q is still attached to instances. Attempting to delete network interfaces from network %s...", item.name, networkID)
if nicErr := o.deleteNetworkInterfaces(networkID); nicErr != nil {
o.Logger.Warnf("Failed to delete network interfaces for DHCP subnet %q: %v", item.name, nicErr)
}
// Return error to trigger retry after NIC deletion
return fmt.Errorf("DHCP server deletion blocked by attached network interfaces: %w", err)
Comment on lines +292 to +296
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

🏁 Script executed:

# First, let's examine the DHCP code around lines 292-296
cd /tmp && find . -name "power-dhcp.go" -type f 2>/dev/null | head -5

Repository: openshift/installer

Length of output: 45


🏁 Script executed:

# List relevant files
git ls-files | grep -E "(power-dhcp|power-subnet)" | head -10

Repository: openshift/installer

Length of output: 132


🏁 Script executed:

# Read the DHCP code around the flagged lines
cat -n pkg/destroy/powervs/power-dhcp.go | sed -n '285,305p'

Repository: openshift/installer

Length of output: 1179


🏁 Script executed:

# Get more context around destroyDHCPNetwork function
cat -n pkg/destroy/powervs/power-dhcp.go | sed -n '250,300p'

Repository: openshift/installer

Length of output: 2773


🏁 Script executed:

# Get the full destroyDHCPNetwork function
cat -n pkg/destroy/powervs/power-dhcp.go | sed -n '260,390p'

Repository: openshift/installer

Length of output: 5369


🏁 Script executed:

# Check the subnet code for comparison
cat -n pkg/destroy/powervs/power-subnet.go | grep -A 50 "deleteNetworkInterfaces\|ExponentialBackoff" | head -80

Repository: openshift/installer

Length of output: 3405


🏁 Script executed:

# Check imports in power-dhcp.go to confirm which wait package is used
head -50 pkg/destroy/powervs/power-dhcp.go | grep -E "^import|wait"

Repository: openshift/installer

Length of output: 108


🏁 Script executed:

# Get all imports from power-dhcp.go
cat -n pkg/destroy/powervs/power-dhcp.go | sed -n '1,30p'

Repository: openshift/installer

Length of output: 1064


🌐 Web query:

k8s.io/apimachinery/pkg/util/wait ExponentialBackoffWithContext return value semantics bool error

💡 Result:

The function ExponentialBackoffWithContext in k8s.io/apimachinery/pkg/util/wait has the signature: func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionWithContextFunc) error It returns a single error value, not (bool, error). The condition function (ConditionWithContextFunc) is called repeatedly with exponential backoff until: - the condition returns (true, nil): function returns nil - the condition returns (false, err): function returns that err - context is cancelled: returns ctx.Err - backoff.Steps exhausted: returns ErrWaitTimeout There is no bool return value from the function itself; success is indicated by nil error, timeout by ErrWaitTimeout, condition error by the condition's error, or context error by ctx.Err.

Citations:


🏁 Script executed:

# Verify the actual k8s source code to double-check the semantics
web_search "kubernetes wait.ExponentialBackoffWithContext condition function false error exit immediately"

Repository: openshift/installer

Length of output: 113


🌐 Web query:

k8s.io/apimachinery/pkg/util/wait ConditionWithContextFunc (false, error) behavior retry

💡 Result:

In the k8s.io/apimachinery/pkg/util/wait package, ConditionWithContextFunc is defined as func(context.Context) (done bool, err error). It returns true if the condition is satisfied, or an error if the loop should be aborted. This type was added in v0.22.0. When used in polling functions like ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionWithContextFunc) error (or similar functions like PollImmediateUntilWithContext), the behavior for returning (false, error) is: the error is immediately propagated and returned by the polling function, terminating the retry loop without further attempts. Returning (false, nil) continues retrying according to the backoff policy until success (true, nil), timeout, context cancellation, or max steps. This mirrors the non-context ConditionFunc behavior: errors abort immediately, false continues polling. Context variants respect ctx.Done for cancellation. Example usage pattern from client-go/util/retry: err := wait.ExponentialBackoff(backoff, func (bool, error) { err := fn if err == nil { return true, nil } if retriable(err) { return false, nil } // retry return false, err // abort }) Returning (false, error) aborts retry immediately with that error.

Citations:


Return value exits retry loop immediately instead of triggering retry

Lines 295-296 return an error expecting the retry loop to continue, but wait.ExponentialBackoffWithContext aborts on (false, error) instead of retrying. The error propagates immediately, bypassing the intended retry after NIC deletion. Use (false, nil) to signal retry continuation.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@pkg/destroy/powervs/power-dhcp.go` around lines 292 - 296, The current error
return in the DHCP server deletion block causes
wait.ExponentialBackoffWithContext to abort because it receives (false, error) —
change the closure to signal a retry by returning (false, nil) after attempting
NIC deletion; keep the o.Logger.Warnf call and remove/replace the
fmt.Errorf(...) return with a retry signal so the backoff loop continues (see
the closure around wait.ExponentialBackoffWithContext and the call to
o.deleteNetworkInterfaces and the DHCP server deletion logic).

}
o.Logger.Warnf("Could not determine network ID for DHCP subnet %q to delete network interfaces", item.name)
}
o.Logger.Infof("Error: o.dhcpClient.Delete: %q", err)
return err
}
Expand Down Expand Up @@ -329,31 +422,11 @@ func (o *ClusterUninstaller) destroyDHCPNetworks() error {
o.Logger.Fatal("destroyDHCPNetworks: ExponentialBackoffWithContext (list) returns ", err)
}

// PowerVS hack:
// We were asked to query for the subnet still existing as a test for the DHCP network to be
// finally destroyed. Even though we can't list it anymore, it is still being destroyed. :(
backoff = wait.Backoff{
Duration: 15 * time.Second,
Factor: 1.1,
Cap: leftInContext(ctx),
Steps: math.MaxInt32}
err = wait.ExponentialBackoffWithContext(ctx, backoff, func(context.Context) (bool, error) {
secondPassList, err2 := o.listPowerSubnets()
if err2 != nil {
return false, err2
}
if len(secondPassList) == 0 {
// We finally don't see any remaining instances!
return true, nil
}
for _, item := range secondPassList {
o.Logger.Debugf("destroyDHCPNetworks: found %s in second pass", item.name)
}
return false, nil
})
if err != nil {
o.Logger.Fatal("destroyDHCPNetworks: ExponentialBackoffWithContext (list) returns ", err)
}
// Note: DHCP server subnets will be deleted in the destroyPowerSubnets() stage.
// We no longer wait for them here since:
// 1. Network interfaces are now properly deleted during DHCP deletion
// 2. Subnet deletion happens in a later stage with its own retry logic
// 3. Waiting here was causing timeouts since subnets are deleted in a different stage

return nil
}
43 changes: 43 additions & 0 deletions pkg/destroy/powervs/power-subnet.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package powervs

import (
"context"
"errors"
"fmt"
"math"
"strings"
Expand Down Expand Up @@ -51,6 +52,38 @@ func (o *ClusterUninstaller) listPowerSubnets() (cloudResources, error) {
return cloudResources{}.insert(result...), nil
}

// deleteNetworkInterfaces deletes all network interfaces attached to a subnet.
// Individual deletion failures are logged and collected; the joined error (or
// nil when everything succeeded) is returned after all interfaces are tried.
func (o *ClusterUninstaller) deleteNetworkInterfaces(subnetID string) error {
	interfaces, err := o.networkClient.GetAllNetworkInterfaces(subnetID)
	if err != nil {
		return fmt.Errorf("failed to list network interfaces: %w", err)
	}

	var deleteErrs []error
	for _, nic := range interfaces.Interfaces {
		if nic.ID == nil {
			continue
		}
		nicID := *nic.ID
		o.Logger.Debugf("Deleting network interface %q from subnet %q", nicID, subnetID)
		if delErr := o.networkClient.DeleteNetworkInterface(subnetID, nicID); delErr != nil {
			o.Logger.Warnf("Failed to delete network interface %q: %v", nicID, delErr)
			deleteErrs = append(deleteErrs, fmt.Errorf("failed to delete network interface %q: %w", nicID, delErr))
		}
	}

	return errors.Join(deleteErrs...)
}

// isNetworkInterfaceError checks if an error indicates network interfaces are blocking deletion. (i.e 409 Conflict).
func isNetworkInterfaceError(err error) bool {
	if err == nil {
		return false
	}
	msg := err.Error()
	// Any of these markers in the message means deletion was blocked by
	// attached network interfaces (or an HTTP 409 conflict).
	markers := []string{
		"one or more network interfaces have an IP allocation",
		"status 409",
		"409 Conflict",
	}
	for _, marker := range markers {
		if strings.Contains(msg, marker) {
			return true
		}
	}
	return false
}

func (o *ClusterUninstaller) deletePowerSubnet(item cloudResource) error {
if _, err := o.networkClient.Get(item.id); err != nil {
o.deletePendingItems(item.typeName, []cloudResource{item})
Expand All @@ -61,6 +94,16 @@ func (o *ClusterUninstaller) deletePowerSubnet(item cloudResource) error {
o.Logger.Debugf("Deleting Power Network %q", item.name)

if err := o.networkClient.Delete(item.id); err != nil {
// If deletion failed due to attached network interfaces, delete them and retry
if isNetworkInterfaceError(err) {
o.Logger.Debugf("Subnet %q has attached network interfaces. Deleting them...", item.name)
if nicErr := o.deleteNetworkInterfaces(item.id); nicErr != nil {
o.Logger.Warnf("Failed to delete network interfaces for subnet %q: %v", item.name, nicErr)
}
// Return error to trigger retry after NIC deletion
return fmt.Errorf("subnet deletion blocked by network interfaces: %w", err)
Comment on lines +100 to +104
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
set -euo pipefail

version=$(sed -nE 's/^[[:space:]]*k8s\.io\/apimachinery[[:space:]]+(v[^[:space:]]+).*/\1/p' go.mod | head -n1)

if [[ -z "${version}" ]]; then
  echo "Could not determine k8s.io/apimachinery version from go.mod" >&2
  exit 1
fi

for path in pkg/util/wait/backoff.go pkg/util/wait/wait.go; do
  url="https://raw.githubusercontent.com/kubernetes/apimachinery/${version}/${path}"
  if curl -fsSL "$url" | rg -n -C2 'func ExponentialBackoffWithContext|err != nil \|\| ok'; then
    exit 0
  fi
done

echo "Unable to locate ExponentialBackoffWithContext source for ${version}" >&2
exit 1

Repository: openshift/installer

Length of output: 740


Fix retry semantics: returning a non-nil error terminates the backoff instead of retrying

ExponentialBackoffWithContext stops immediately when the condition returns a non-nil error. Line 104 returns fmt.Errorf(...) after attempting NIC cleanup, which will exit the backoff loop and proceed directly to the fatal error handler on line 155-157, skipping any retry. To make this state retriable, signal retry intent by returning (false, nil) instead—use a sentinel value or error type that the callback maps to this return pair.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@pkg/destroy/powervs/power-subnet.go` around lines 100 - 104, The current
callback used with ExponentialBackoffWithContext returns a non-nil error after
calling deleteNetworkInterfaces (via the fmt.Errorf(...) return), which causes
the backoff to stop; change the callback so that when NIC cleanup is attempted
but subnet is still blocked you return the retriable signal instead (i.e.,
return (false, nil) from the callback) rather than a non-nil error — or
alternatively return a sentinel error value that your backoff wrapper maps to
(false, nil). Update the code path around deleteNetworkInterfaces and the
fmt.Errorf(...) return so the backoff receives the retriable indication and will
retry the subnet deletion.

}

o.Logger.Infof("Error: o.networkClient.Delete: %q", err)
return err
}
Expand Down
70 changes: 64 additions & 6 deletions pkg/destroy/powervs/powervs.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (
"fmt"
"math"
gohttp "net/http"
"strconv"
"strings"
"sync"
"time"
Expand Down Expand Up @@ -385,6 +386,40 @@ func (o *ClusterUninstaller) newAuthenticator(apikey string) (core.Authenticator
return authenticator, nil
}

// waitForRetryAfter sleeps for the duration specified in the "Retry-After" header of the response (in seconds).
// returns true if it actually waited.
func (o *ClusterUninstaller) waitForRetryAfter(ctx context.Context, response *core.DetailedResponse) bool {
if response == nil {
return false
}
headers := response.GetHeaders()

if headers == nil {
return false
}

retryAfterHeader := headers.Get("Retry-After")

if retryAfterHeader == "" {
o.Logger.Debugf("waitForRetryAfter: Retry-After header is not present, or could not be retrieved")
return false
}
secs, err := strconv.Atoi(retryAfterHeader)
if err != nil || secs <= 0 {
o.Logger.Debugf("waitForRetryAfter: Invalid Retry-After header value: %s", retryAfterHeader)
return false
}
d := time.Duration(secs) * time.Second
o.Logger.Debugf("waitForRetryAfter: Rate limited, honoring Retry-After header, waiting for %v", secs)
Comment on lines +401 to +413
Copy link

@coderabbitai coderabbitai bot Mar 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

cat -n pkg/destroy/powervs/powervs.go | sed -n '390,420p'

Repository: openshift/installer

Length of output: 1218


🌐 Web query:

RFC 7231 Retry-After header format specification delta-seconds HTTP-date

💡 Result:

The RFC 7231 Retry-After header format specification defines it as either an HTTP-date or delta-seconds (number of seconds to delay). ABNF: Retry-After = HTTP-date / delay-seconds delay-seconds = 1*DIGIT HTTP-date is defined in RFC 7231 Section 7.1.1.1 as IMF-fixdate / obs-date (preferred fixed-length format or obsolete formats for compatibility). delta-seconds is a non-negative decimal integer (1 or more digits) representing seconds after the response is received. It matches the general delta-seconds rule used across HTTP/1.1 specs (defined in RFC 7230 Section 3.2.6 or equivalent, but locally in Retry-After context). Examples from RFC: Retry-After: Fri, 31 Dec 1999 23:59:59 GMT Retry-After: 120 (2 minutes) Note: RFC 7231 is obsoleted by RFC 9110 (HTTP Semantics, 2022), but the format remains unchanged.

Citations:


🏁 Script executed:

# Check if net/http provides ParseTime function
python3 << 'EOF'
import subprocess
result = subprocess.run(['go', 'doc', 'net/http', 'ParseTime'], 
                       capture_output=True, text=True, cwd='.')
print(result.stdout)
print(result.stderr)
EOF

Repository: openshift/installer

Length of output: 319


🏁 Script executed:

head -30 pkg/destroy/powervs/powervs.go

Repository: openshift/installer

Length of output: 964


🏁 Script executed:

rg "gohttp|import" pkg/destroy/powervs/powervs.go | head -40

Repository: openshift/installer

Length of output: 549


Support the full Retry-After header format per RFC 7231.

The Retry-After header can be sent as either an HTTP-date or delta-seconds format, and 0 is a valid delay value. The current implementation using strconv.Atoi() followed by secs <= 0 check only handles delta-seconds. This causes valid HTTP-date responses (e.g., Retry-After: Fri, 31 Dec 1999 23:59:59 GMT) to be silently dropped as errors, breaking HTTP specification compliance.

Suggested fix
 	retryAfterHeader := headers.Get("Retry-After")
 
 	if retryAfterHeader == "" {
 		o.Logger.Debugf("waitForRetryAfter: Retry-After header is not present, or could not be retrieved")
 		return false
 	}
-	secs, err := strconv.Atoi(retryAfterHeader)
-	if err != nil || secs <= 0 {
-		o.Logger.Debugf("waitForRetryAfter: Invalid Retry-After header value: %s", retryAfterHeader)
-		return false
-	}
-	d := time.Duration(secs) * time.Second
-	o.Logger.Debugf("waitForRetryAfter: Rate limited, honoring Retry-After header, waiting for %v", secs)
+	retryAfterHeader = strings.TrimSpace(retryAfterHeader)
+	var d time.Duration
+	if secs, err := strconv.Atoi(retryAfterHeader); err == nil {
+		if secs < 0 {
+			o.Logger.Debugf("waitForRetryAfter: Invalid Retry-After header value: %s", retryAfterHeader)
+			return false
+		}
+		d = time.Duration(secs) * time.Second
+	} else if t, err := gohttp.ParseTime(retryAfterHeader); err == nil {
+		d = time.Until(t)
+		if d < 0 {
+			d = 0
+		}
+	} else {
+		o.Logger.Debugf("waitForRetryAfter: Invalid Retry-After header value: %s", retryAfterHeader)
+		return false
+	}
+	o.Logger.Debugf("waitForRetryAfter: Rate limited, honoring Retry-After header, waiting for %v", d)
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@pkg/destroy/powervs/powervs.go` around lines 401 - 413, The Retry-After
handling in waitForRetryAfter incorrectly rejects valid HTTP-date values and
treats 0 as invalid; update the logic that reads headers.Get("Retry-After") so
it first tries strconv.Atoi and accepts 0 (remove the secs <= 0 rejection), and
if Atoi fails then attempt to parse the header as an HTTP-date (use time.Parse
with RFC1123/RFC1123Z as appropriate) and compute secs as the max of 0 and
int(parsedTime.Sub(time.Now()).Seconds()); then use secs to build the duration
(d := time.Duration(secs) * time.Second) and log the header and computed wait.
Reference symbols: retryAfterHeader, headers.Get, strconv.Atoi, time.Parse,
secs, d, and the waitForRetryAfter logging calls.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree — we need to incorporate something along these lines

 retryTime, err := time.Parse(time.RFC1123, Retryheader)

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@taliandre49, great — that's the right direction. One suggestion: since the file already imports gohttp "net/http", you can use gohttp.ParseTime() instead of time.Parse(time.RFC1123, ...) directly. net/http.ParseTime handles all three HTTP date formats (RFC1123, RFC850, and ANSI C asctime) per the HTTP spec, making it more robust:

retryAfterHeader = strings.TrimSpace(retryAfterHeader)
var d time.Duration
if secs, err := strconv.Atoi(retryAfterHeader); err == nil {
    if secs < 0 {
        o.Logger.Debugf("waitForRetryAfter: Invalid Retry-After header value: %s", retryAfterHeader)
        return false
    }
    d = time.Duration(secs) * time.Second
} else if t, err := gohttp.ParseTime(retryAfterHeader); err == nil {
    d = time.Until(t)
    if d < 0 {
        d = 0
    }
} else {
    o.Logger.Debugf("waitForRetryAfter: Invalid Retry-After header value: %s", retryAfterHeader)
    return false
}
o.Logger.Debugf("waitForRetryAfter: Rate limited, honoring Retry-After header, waiting for %v", d)
select {
case <-ctx.Done():
    return false
case <-time.After(d):
    return true
}

Key points vs. the original:

  • secs < 0 replaces secs <= 0 — a Retry-After: 0 is valid and means "retry immediately" (the time.After(0) fires right away and returns true).
  • gohttp.ParseTime already imports cleanly since gohttp is aliased in this file.

select {
case <-ctx.Done():
// Context is done, return to avoid waiting the full duration
return false
case <-time.After(d):
return true
}
}

func (o *ClusterUninstaller) loadSDKServices() error {
var (
err error
Expand Down Expand Up @@ -516,9 +551,21 @@ func (o *ClusterUninstaller) loadSDKServices() error {

// Get the Zone ID
zoneOptions := o.zonesSvc.NewListZonesOptions()
zoneResources, detailedResponse, err := o.zonesSvc.ListZonesWithContext(ctx, zoneOptions)
if err != nil {
return fmt.Errorf("loadSDKServices: Failed to list Zones: %w and the response is: %s", err, detailedResponse)
var (
zoneResources *zonesv1.ListZonesResp
detailedResponse *core.DetailedResponse
)
for {
zoneResources, detailedResponse, err = o.zonesSvc.ListZonesWithContext(ctx, zoneOptions)
if err != nil {
// check if error is a 429 too many requests
if detailedResponse != nil && detailedResponse.StatusCode == gohttp.StatusTooManyRequests && o.waitForRetryAfter(ctx, detailedResponse) {
o.Logger.Debugf("loadSDKServices: ListZonesWithContext was rate limited, honoring RetryAfter for ListZones...")
continue // we have waited the appropriate amount of time, now retry the request
}
return fmt.Errorf("loadSDKServices: Failed to list Zones: %w and the response is: %s", err, detailedResponse)
}
break // no error, reached on successful response, break the loop
}

for _, zone := range zoneResources.Result {
Expand Down Expand Up @@ -562,9 +609,20 @@ func (o *ClusterUninstaller) loadSDKServices() error {
return fmt.Errorf("failed to parse DNSInstanceCRN: %w", err)
}
options := o.dnsZonesSvc.NewListDnszonesOptions(dnsCRN.ServiceInstance)
listZonesResponse, detailedResponse, err := o.dnsZonesSvc.ListDnszones(options)
if err != nil {
return fmt.Errorf("loadSDKServices: Failed to list Zones: %w and the response is: %s", err, detailedResponse)
var (
listZonesResponse *dnszonesv1.ListDnszones
detailedResponse *core.DetailedResponse
)
for {
listZonesResponse, detailedResponse, err = o.dnsZonesSvc.ListDnszones(options)
if err != nil {
if detailedResponse != nil && detailedResponse.StatusCode == gohttp.StatusTooManyRequests && o.waitForRetryAfter(ctx, detailedResponse) {
o.Logger.Debugf("loadSDKServices: ListDnszones was rate limited, honoring RetryAfter for ListDnszones...")
continue
}
return fmt.Errorf("loadSDKServices: Failed to list Zones: %w and the response is: %s", err, detailedResponse)
}
break
}

for _, zone := range listZonesResponse.Dnszones {
Expand Down
Loading