Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ These tests are run regularly against our public infrastructure as well as our i
| | [test_ping](./test_load_balancer.py#L855) | default |
| **Nested Virtualization** | [test_virtualization_support](./test_nested_virtualization.py#L14) | default |
| | [test_run_nested_vm](./test_nested_virtualization.py#L40) | default |
| **Objects** | [test_bucket_urls](./test_objects.py#L21) | default |
| | [test_notifications](./test_objects.py#L65) | default |
| **Private Network** | [test_private_ip_address_on_all_images](./test_private_network.py#L14) | all |
| | [test_private_network_connectivity_on_all_images](./test_private_network.py#L35) | all |
| | [test_multiple_private_network_interfaces](./test_private_network.py#L88) | default |
Expand Down
152 changes: 129 additions & 23 deletions api.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
import boto3
import re
import requests
import time

Expand All @@ -11,6 +13,12 @@
from filelock import FileLock
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from resources import ObjectsUser
from urllib.parse import urlparse


# Contains delete handlers, see delete_handler below
DELETE_HANDLERS = {}


class CloudscaleHTTPAdapter(HTTPAdapter):
Expand Down Expand Up @@ -80,6 +88,7 @@ def __init__(self, scope, zone=None, read_only=False):
self.hooks = {'response': self.on_response}
self.scope = scope
self.read_only = read_only
self.zone = zone

# 8 Retries @ 2.5 backoff_factor = 10.6 minutes
retry_strategy = RetryStrategy(
Expand All @@ -93,6 +102,10 @@ def __init__(self, scope, zone=None, read_only=False):

self.mount("https://", adapter)

# This is None, when running "invoke cleanup"
if self.zone:
self.objects_endpoint = self.objects_endpoint_for(self.zone)

def post(self, url, data=None, json=None, add_tags=True, **kwargs):
assert not data, "Please only use json, not data"

Expand All @@ -101,34 +114,13 @@ def post(self, url, data=None, json=None, add_tags=True, **kwargs):
'runner': RUNNER_ID,
'process': PROCESS_ID,
'scope': self.scope,
'zone': self.zone,
}

return super().post(url, data=data, json=json, **kwargs)

def delete(self, url):
    """ Deletes the given URL by dispatching to a registered delete
    handler (see delete_handler below), or the default handler, which
    simply issues a DELETE request.

    Resource-specific cleanup (e.g. waiting for volume-snapshots to
    vanish, or emptying buckets of objects users) lives in the handlers,
    not here.

    """
    delete_handler_for_url(url)(api=self, url=url)

def on_response(self, response, *args, **kwargs):
trigger('request.after', request=response.request, response=response)
Expand Down Expand Up @@ -172,6 +164,7 @@ def resources(path):
yield from resources('/networks')
yield from resources('/server-groups')
yield from resources('/custom-images')
yield from resources('/objects-users')

def cleanup(self, limit_to_scope=True, limit_to_process=True):
""" Deletes resources created by this API object. """
Expand All @@ -194,3 +187,116 @@ def cleanup(self, limit_to_scope=True, limit_to_process=True):

if exceptions:
raise ExceptionGroup("Failures during cleanup.", exceptions)

def objects_endpoint_for(self, zone):
    """ Returns the objects (S3) endpoint host for the given zone.

    The public API host ("api.…") maps to "objects.<region>.cloudscale.ch",
    while any other host ("<env>-api.…") maps to
    "<env>-objects.<region>.cloudscale.zone".

    """
    netloc = urlparse(self.api_url).netloc

    if netloc.startswith("api"):
        prefix = ""
        tld = "ch"
    else:
        prefix = f"{netloc.split('-')[0]}-"
        tld = "zone"

    # Strip the trailing zone number to get the region ("rma1" -> "rma").
    # Bug fix: the strip set previously read '012345679', omitting '8',
    # so a zone ending in 8 would not have been stripped.
    region = zone.rstrip('0123456789')

    return f"{prefix}objects.{region}.cloudscale.{tld}"

Comment on lines +191 to +202
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I would prefer not to encode our naming scheme here. I would prefer a new environment variable for the URL with a default of https://objects.<region>.cloudscale.ch.

We can still enforce some sanity checks like setting a CLOUDSCALE_API_URL requires setting the objects URL and that the URL contains the region name.

Copy link
Contributor Author

@href href Dec 18, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

As discussed, you will follow-up with this internally. Ideally I think we would have some kind of API path that has all this information per API endpoint. In the absence of that, I prefer this solution over introducing additional environment variables.


def delete_handler(path):
    """ Registers the decorated function as delete handler for the given
    path. The path is treated as a regular expression pattern that must
    fully match the URL path (not the whole URL) — see
    delete_handler_for_url, which uses re.fullmatch.

    The decorated function is supposed to delete the given URL.

    """

    def delete_handler_decorator(fn):
        DELETE_HANDLERS[path] = fn
        return fn
    return delete_handler_decorator


def delete_handler_for_url(url):
    """ Picks the registered delete handler whose pattern fully matches
    the path of the given URL, falling back to a plain DELETE request.

    The order of the evaluation is not strictly defined, handlers are
    expected to not overlap.

    """

    def default_handler(api, url):
        # Use the low-level method, as we are downstream from api.delete
        # and cannot call api.delete, lest we want an infinite loop.
        return api.request("DELETE", url)

    path = urlparse(url).path

    matching = (
        fn for pattern, fn in DELETE_HANDLERS.items()
        if re.fullmatch(pattern, path)
    )

    return next(matching, default_handler)


@delete_handler(path='/v1/volume-snapshots/.+')
def delete_volume_snapshots(api, url):
    """ When deleting volume-snapshots, we need to wait for the snapshots to
    be deleted, or we won't be able to delete the servers later.

    Raises Timeout if the snapshot still exists after 60 seconds.

    """

    # Delete the snapshot first (low-level request, to avoid recursing
    # into api.delete, which dispatched to this handler)
    api.request("DELETE", url)

    # Poll until the snapshot is gone (the API answers with 404)
    deadline = time.monotonic() + 60

    while time.monotonic() < deadline:
        time.sleep(1)

        try:
            snapshot = api.get(url)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                # The snapshot is gone, stop waiting
                break

            # Re-raise with the original traceback intact (bare raise,
            # instead of the previous `raise e`)
            raise

    else:
        raise Timeout(
            f'Snapshot failed to delete within 60 seconds. Status '
            f'is still "{snapshot.json()["status"]}".'
        )


@delete_handler(path='/v1/objects-users/.+')
def delete_objects_users(api, url):
    """ Before deleting an objects user, we have to delete owned buckets,
    as well as any SNS topics created by the tests.

    """

    user = ObjectsUser.from_href(None, api, url, name="")

    # If the key is usable this returns quickly; if not, waiting here makes
    # the cleanup more robust.
    user.wait_for_access()

    # A single session serves both the S3 resource and the SNS client
    # (previously the credentials were passed to each client separately).
    session = boto3.Session(
        aws_access_key_id=user.keys[0]['access_key'],
        aws_secret_access_key=user.keys[0]['secret_key'],
    )

    objects_endpoint = api.objects_endpoint_for(zone=user.tags['zone'])
    s3 = session.resource('s3', endpoint_url=f"https://{objects_endpoint}")

    # Buckets have to be emptied before they can be deleted
    for bucket in s3.buckets.all():
        bucket.objects.all().delete()
        bucket.delete()

    sns = session.client(
        'sns',
        endpoint_url=f"https://{objects_endpoint}",
        region_name='default',
    )

    for topic in sns.list_topics().get('Topics', ()):
        arn = topic["TopicArn"]

        # Security measure: only ever delete topics created by the
        # acceptance tests ("at-" prefix)
        assert re.match(r'arn:aws:sns:(rma|lpg)::at-.+', arn)

        sns.delete_topic(TopicArn=arn)

    api.request("DELETE", url)
68 changes: 68 additions & 0 deletions conftest.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import boto3
import os
import pytest
import random
Expand All @@ -21,6 +22,7 @@
from resources import FloatingIP
from resources import LoadBalancer
from resources import Network
from resources import ObjectsUser
from resources import Server
from resources import ServerGroup
from resources import Volume
Expand Down Expand Up @@ -829,3 +831,69 @@ def factory(num_backends,
return load_balancer, listener, pool, backends, private_network

return factory


@pytest.fixture(scope='session')
def create_objects_user(request, session_api):
    """ Factory to create an objects user.

    The returned callable forwards its arguments to the ObjectsUser
    factory and blocks until the user's S3 key is actually usable.

    Note: the unused `objects_endpoint` fixture dependency was removed.

    """

    factory = ObjectsUser.factory(
        request=request,
        api=session_api,
    )

    def wrapper(*args, **kwargs):
        user = factory(*args, **kwargs)

        # We need to wait for a moment for the key to become available.
        user.wait_for_access()

        return user

    return wrapper


@pytest.fixture(scope='session')
def objects_user(create_objects_user):
    """ An object user that can be used with objects_endpoint. """

    name = f'at-{secrets.token_hex(8)}'
    return create_objects_user(name=name)


@pytest.fixture(scope='session')
def objects_endpoint(session_api):
    """ The objects (S3) endpoint host of the session's zone. """

    endpoint = session_api.objects_endpoint
    return endpoint


@pytest.fixture(scope='session')
def access_key(objects_user):
    """ An S3 access key for the objects endpoint. """

    first_key, *_ = objects_user.keys
    return first_key["access_key"]


@pytest.fixture(scope='session')
def secret_key(objects_user):
    """ An S3 secret key for the objects endpoint. """

    first_key, *_ = objects_user.keys
    return first_key["secret_key"]


@pytest.fixture(scope='function')
def bucket(objects_user, objects_endpoint):
    """ A bucket wrapped in a boto3.S3.Bucket class. """

    credentials = objects_user.keys[0]

    session = boto3.Session(
        aws_access_key_id=credentials['access_key'],
        aws_secret_access_key=credentials['secret_key'],
    )

    s3 = session.resource('s3', endpoint_url=f"https://{objects_endpoint}")

    created = s3.create_bucket(Bucket=f"at-{secrets.token_hex(8)}")
    yield created

    # Buckets must be emptied before they can be deleted
    created.objects.all().delete()
    created.delete()
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
boto3
dnspython
filelock
flake8
Expand Down
39 changes: 39 additions & 0 deletions resources.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
import boto3
import botocore
import re
import secrets
import textwrap
Expand Down Expand Up @@ -1454,3 +1456,40 @@ def verify_backend(self, prober, backend, count=1, port=None):
for i in range(count):
assert (prober.http_get(self.build_url(url='/hostname', port=port))
== backend.name)


class ObjectsUser(CloudscaleResource):
    """ An objects (S3) user created through the cloudscale API. """

    def __init__(self, request, api, name):
        super().__init__(request, api)

        # Request body for the objects-users endpoint
        self.spec = {
            'display_name': f'{RESOURCE_NAME_PREFIX}-{name}',
        }

    @with_trigger('objects-user.create')
    def create(self):
        """ Creates the objects user through the API. """
        self.info = self.api.post('/objects-users', json=self.spec).json()

    def wait_for_access(self, timeout=30):
        """ Waits until the user's first S3 key is accepted by the objects
        endpoint, raising TimeoutError if that does not happen within
        `timeout` seconds.

        """
        objects_endpoint = self.api.objects_endpoint_for(self.tags['zone'])

        s3 = boto3.client(
            's3',
            endpoint_url=f"https://{objects_endpoint}",
            aws_access_key_id=self.keys[0]["access_key"],
            aws_secret_access_key=self.keys[0]["secret_key"],
        )

        # Bug fix: honor the `timeout` parameter — it was previously
        # shadowed by a hard-coded 30 second deadline.
        deadline = time.monotonic() + timeout
        exception = None

        while time.monotonic() < deadline:
            try:
                s3.list_buckets()
                break
            except botocore.exceptions.ClientError as e:
                exception = e
                time.sleep(1)
        else:
            raise TimeoutError from exception
Loading