From 8c3ee11d96dbef1094b972451ebb01b7ad95e1f1 Mon Sep 17 00:00:00 2001 From: JosepSampe Date: Mon, 23 Mar 2026 16:47:35 +0100 Subject: [PATCH 1/5] Update cloud run registry --- CHANGELOG.md | 14 ++++++-- docs/source/compute_config/gcp_cloudrun.md | 13 +++++++ .../backends/gcp_cloudrun/cloudrun.py | 34 +++++++++++++------ .../backends/gcp_cloudrun/config.py | 3 +- lithops/utils.py | 9 ++--- runtime/gcp_cloudrun/README.md | 6 ++-- 6 files changed, 58 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c09327c66..8637ed744 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,15 +1,23 @@ # Changelog +## [v3.6.5.dev0] + +### Added +- [Core] Support var-len params in func passed to executor. +- [GCP Cloud Run] Default runtimes use Artifact Registry; added `artifact_registry_repository` (default `lithops`). + + ## [v3.6.4] ### Fixed - [Executor] Support use of `functools.partial` with FunctionExecutor's `call_async` and `map` methods + ## [v3.6.3] ### Fixed -- Fixed memory available options for aws batch: 4 cpus -- Fixed race condition and improving monitor stability +- [AWS Batch] Fixed memory available options for aws batch: 4 cpus +- [Monitor] Fixed race condition and improving monitor stability ## [v3.6.2] @@ -977,7 +985,7 @@ ## [v1.7.2] ### Added -- [GCR] Added Google Cloud Run Backend +- [GCP Cloud Run] Added Google Cloud Run backend ### Changed diff --git a/docs/source/compute_config/gcp_cloudrun.md b/docs/source/compute_config/gcp_cloudrun.md index a36454356..04aa3fd91 100644 --- a/docs/source/compute_config/gcp_cloudrun.md +++ b/docs/source/compute_config/gcp_cloudrun.md @@ -31,6 +31,17 @@ python3 -m pip install lithops[gcp] 10. Enable the **Artifact Registry API**: Navigate to *APIs & services* tab on the menu. Click *ENABLE APIS AND SERVICES*. Look for "Artifact Registry API" at the search bar. Click *Enable*. +11. 
Create a **Docker** repository in Artifact Registry (in the same region as Cloud Run), for example named `lithops`, or set `artifact_registry_repository` in config to match your repository name. From a shell with `gcloud` and the correct project:
+
+```bash
+gcloud artifacts repositories create lithops \
+    --repository-format=docker \
+    --location=REGION \
+    --description="Lithops Cloud Run runtimes"
+```
+
+Grant the service account **Artifact Registry Writer** (or **Artifact Registry Create-on-push Writer** if you prefer) so it can push images.
+
## Configuration

1. Edit your lithops config and add the following keys:
@@ -67,6 +78,8 @@ python3 -m pip install lithops[gcp]
|gcp_cloudrun | trigger | https | no | Currently it supports 'https' trigger|
|gcp_cloudrun | invoke_pool_threads | 100 |no | Number of concurrent threads used for invocation |
|gcp_cloudrun | runtime_include_function | False | no | If set to true, Lithops will automatically build a new runtime, including the function's code, instead of transferring it through the storage backend at invocation time. This is useful when the function's code size is large (in the order of 10s of MB) and the code does not change frequently |
+|gcp_cloudrun | docker_server | pkg.dev | no | Marker for [Artifact Registry](https://cloud.google.com/artifact-registry/docs/docker/names) default image names (`REGION-docker.pkg.dev/PROJECT/REPOSITORY/IMAGE`). |
+|gcp_cloudrun | artifact_registry_repository | lithops | no | Docker repository name in Artifact Registry (must exist under your project in the configured region). 
| ## Test Lithops Once you have your compute and storage backends configured, you can run a hello world function with: diff --git a/lithops/serverless/backends/gcp_cloudrun/cloudrun.py b/lithops/serverless/backends/gcp_cloudrun/cloudrun.py index 758385cd4..bb22513dc 100644 --- a/lithops/serverless/backends/gcp_cloudrun/cloudrun.py +++ b/lithops/serverless/backends/gcp_cloudrun/cloudrun.py @@ -75,16 +75,30 @@ def _get_default_runtime_image_name(self): self.name, self.cr_config, 'lithops-cloudrun-default' ) + def _full_container_image_reference(self, name): + """True if name already includes a registry hostname (user-supplied image).""" + if '/' not in name: + return False + host = name.split('/', 1)[0] + if host.startswith('./') or host.startswith('../'): + return False + # hostname or host:port (e.g. REGION-docker.pkg.dev, docker.io) + return '.' in host or (':' in host and not host.startswith('/')) + + def _docker_login_registry_host(self): + """Artifact Registry hostname for docker login / push.""" + return f'{self.region}-docker.pkg.dev' + def _format_image_name(self, runtime_name): """ - Formats GCR image name from runtime name + Formats Artifact Registry image reference from runtime name. 
""" - if 'gcr.io' not in runtime_name: - country = self.region.split('-')[0] - return f'{country}.gcr.io/{self.project_name}/{runtime_name}' - else: + if self._full_container_image_reference(runtime_name): return runtime_name + repo = self.cr_config.get('artifact_registry_repository', 'lithops') + return f'{self.region}-docker.pkg.dev/{self.project_name}/{repo}/{runtime_name}' + def _build_api_resource(self): """ Instantiate and authorize admin discovery API session @@ -229,16 +243,16 @@ def build_runtime(self, runtime_name, dockerfile, extra_args=[]): finally: os.remove(config.FH_ZIP_LOCATION) - logger.debug('Authorizing Docker client with GCR permissions') - country = self.region.split('-')[0] - cmd = f'cat {self.credentials_path} | {docker_path} login {country}.gcr.io -u _json_key --password-stdin' + registry_host = self._docker_login_registry_host() + logger.debug(f'Authorizing Docker client for registry {registry_host}') + cmd = f'cat {self.credentials_path} | {docker_path} login {registry_host} -u _json_key --password-stdin' if logger.getEffectiveLevel() != logging.DEBUG: cmd = cmd + f" >{os.devnull} 2>&1" res = os.system(cmd) if res != 0: - raise Exception('There was an error authorizing Docker for push to GCR') + raise Exception(f'There was an error authorizing Docker for push to {registry_host}') - logger.debug(f'Pushing runtime {image_name} to GCP Container Registry') + logger.debug(f'Pushing runtime {image_name} to {registry_host}') if utils.is_podman(docker_path): cmd = f'{docker_path} push {image_name} --format docker --remove-signatures' else: diff --git a/lithops/serverless/backends/gcp_cloudrun/config.py b/lithops/serverless/backends/gcp_cloudrun/config.py index d0340cf45..08818a5fe 100644 --- a/lithops/serverless/backends/gcp_cloudrun/config.py +++ b/lithops/serverless/backends/gcp_cloudrun/config.py @@ -32,7 +32,8 @@ 'worker_processes': 1, 'invoke_pool_threads': 100, 'trigger': 'https', - 'docker_server': 'gcr.io' + 'docker_server': 'pkg.dev', + 
'artifact_registry_repository': 'lithops', } MAX_RUNTIME_MEMORY = 32768 # 32 GiB diff --git a/lithops/utils.py b/lithops/utils.py index cd314fdb7..e8a018a6a 100644 --- a/lithops/utils.py +++ b/lithops/utils.py @@ -401,15 +401,16 @@ def get_default_container_name(backend, backend_config, runtime_name): f'in config under "{backend}" section') return f'{docker_server}/{docker_namespace}/{img}' - elif 'gcr.io' in docker_server: - # Google container registry + elif 'pkg.dev' in docker_server: + # Google Artifact Registry (Docker) try: - country = backend_config['region'].split('-')[0] + region = backend_config['region'] project_name = backend_config['project_name'] + repository = backend_config.get('artifact_registry_repository', 'lithops') except Exception: raise Exception('You must provide "region" and "project_name" params' 'in config under "gcp" section') - return f'{country}.gcr.io/{project_name}/{img}' + return f'{region}-docker.pkg.dev/{project_name}/{repository}/{img}' else: return f'{docker_server}/{img}' diff --git a/runtime/gcp_cloudrun/README.md b/runtime/gcp_cloudrun/README.md index 1ff6c117e..6e7bd1a61 100644 --- a/runtime/gcp_cloudrun/README.md +++ b/runtime/gcp_cloudrun/README.md @@ -2,9 +2,9 @@ The runtime is the place where the functions are executed. In Google Cloud Run, runtimes are based on container images. -Google Cloud Run requires container images to be pushed to Google Cloud Container Registry (images pushed to Dockerhub are not permitted). +Google Cloud Run requires container images to be in a registry allowed by your project (typically [Artifact Registry](https://cloud.google.com/artifact-registry/docs/docker/overview); images on Docker Hub are not permitted unless you configure [private registries](https://cloud.google.com/run/docs/deploying#images)). -Lithops automatically tags and pushes the image to GCR with authentication from the service account key file. 
+Lithops automatically tags and pushes the image to Artifact Registry (`REGION-docker.pkg.dev/PROJECT/REPOSITORY/...`) using the service account key file. Create a Docker repository in Artifact Registry (see the main docs) or set `artifact_registry_repository` in config. If you don't have an already built runtime, the default runtime is built the first time you execute a function. Lithops automatically detects the Python version of your environment and deploys the default runtime based on it. @@ -47,7 +47,7 @@ gcp_cloudrun: If you need some Python modules (or other system libraries) which are not included in the default container image, it is possible to build your own Lithops runtime with all of them. - This alternative usage is based on to build a local container image, deploy it to GCR and use it as a Lithops base runtime. + This alternative usage is based on building a local container image, deploying it to Artifact Registry, and using it as a Lithops base runtime. Project provides some skeletons of Docker images, for example: * [Dockerfile](Dockerfile) From fcb83321f1645972729414f2985176ac3f8bc7b6 Mon Sep 17 00:00:00 2001 From: JosepSampe Date: Mon, 23 Mar 2026 16:58:29 +0100 Subject: [PATCH 2/5] Update GCR limits --- .../backends/gcp_cloudrun/config.py | 47 ++++++++++++++----- 1 file changed, 35 insertions(+), 12 deletions(-) diff --git a/lithops/serverless/backends/gcp_cloudrun/config.py b/lithops/serverless/backends/gcp_cloudrun/config.py index 08818a5fe..22acb4f94 100644 --- a/lithops/serverless/backends/gcp_cloudrun/config.py +++ b/lithops/serverless/backends/gcp_cloudrun/config.py @@ -23,23 +23,45 @@ CLOUDRUN_API_VERSION = 'v1' SCOPES = ('https://www.googleapis.com/auth/cloud-platform',) +# Defaults align with Cloud Run service limits (memory, CPU, request timeout). 
+# See https://cloud.google.com/run/docs/configuring/services/memory-limits +# and https://cloud.google.com/run/docs/configuring/services/cpu DEFAULT_CONFIG_KEYS = { - 'runtime_timeout': 300, # Default: 300 seconds => 5 minutes - 'runtime_memory': 256, # Default memory: 256 MB - 'runtime_cpu': 0.25, # 0.25 vCPU + 'runtime_timeout': 300, # seconds (max 3600 for services) + 'runtime_memory': 256, # MiB (max 32768) + 'runtime_cpu': 0.25, # vCPU: 0.08–<1 in 0.001 steps, or 1, 2, 4, 6, 8 'max_workers': 1000, 'min_workers': 0, 'worker_processes': 1, 'invoke_pool_threads': 100, 'trigger': 'https', + # Substring marker so utils.get_default_container_name() builds an Artifact Registry image name. 'docker_server': 'pkg.dev', 'artifact_registry_repository': 'lithops', } -MAX_RUNTIME_MEMORY = 32768 # 32 GiB -MAX_RUNTIME_TIMEOUT = 3600 # 1 hour - -AVAILABLE_RUNTIME_CPUS = [x / 100.0 for x in range(8, 100)] + [1, 2, 4, 6, 8] +MAX_RUNTIME_MEMORY = 32768 # 32 GiB (Cloud Run service maximum) +MAX_RUNTIME_TIMEOUT = 3600 # 60 minutes (Cloud Run request timeout maximum) + + +def is_valid_cloud_run_cpu(cpu): + """ + True if cpu is allowed for Cloud Run: 1, 2, 4, 6, 8, or 0.08–<1.00 in 0.001 increments. 
+ """ + if cpu in (1, 2, 4, 6, 8): + return True + try: + c = float(cpu) + except (TypeError, ValueError): + return False + if not (0.08 <= c < 1.0): + return False + r = round(c, 3) + if abs(c - r) > 1e-5: + return False + n = int(round((r - 0.08) / 0.001 + 1e-9)) + expected = round(0.08 + n * 0.001, 3) + return abs(r - expected) < 1e-5 and 0 <= n <= 919 FH_ZIP_LOCATION = os.path.join(os.getcwd(), 'lithops_cloudrun.zip') @@ -162,14 +184,15 @@ def load_config(config_data): if config_data['gcp_cloudrun']['runtime_timeout'] > MAX_RUNTIME_TIMEOUT: logger.warning('Runtime timeout {} exceeds maximum - ' - 'Runtime timeout set to {}'.format(config_data['gcp_cloudrun']['runtime_memory'], + 'Runtime timeout set to {}'.format(config_data['gcp_cloudrun']['runtime_timeout'], MAX_RUNTIME_TIMEOUT)) config_data['gcp_cloudrun']['runtime_timeout'] = MAX_RUNTIME_TIMEOUT - if config_data['gcp_cloudrun']['runtime_cpu'] not in AVAILABLE_RUNTIME_CPUS: - raise Exception('{} vCPUs is not available - ' - 'choose one from {} vCPUs'.format(config_data['gcp_cloudrun']['runtime_cpu'], - AVAILABLE_RUNTIME_CPUS)) + if not is_valid_cloud_run_cpu(config_data['gcp_cloudrun']['runtime_cpu']): + raise Exception( + '{} vCPU is not valid for Cloud Run — use 1, 2, 4, 6, 8, or ' + '0.08 to less than 1.0 in steps of 0.001'.format(config_data['gcp_cloudrun']['runtime_cpu']) + ) if config_data['gcp_cloudrun']['runtime_cpu'] == 4 and config_data['gcp_cloudrun']['runtime_memory'] < 4096: raise Exception('For {} vCPUs, runtime memory must be at least 4096 MiB' .format(config_data['gcp_cloudrun']['runtime_cpu'])) From f9e0a9f4be5cb3b5f1fb551c736600a5ba1985db Mon Sep 17 00:00:00 2001 From: JosepSampe Date: Wed, 6 May 2026 16:26:40 +0200 Subject: [PATCH 3/5] Update gcp cloudrun --- docs/source/compute_config/gcp_cloudrun.md | 12 ++ .../backends/gcp_cloudrun/cloudrun.py | 162 ++++++++++++++++-- .../backends/gcp_cloudrun/config.py | 22 ++- runtime/gcp_cloudrun/Dockerfile.conda | 1 - runtime/gcp_cloudrun/README.md 
| 1 - 5 files changed, 177 insertions(+), 21 deletions(-) diff --git a/docs/source/compute_config/gcp_cloudrun.md b/docs/source/compute_config/gcp_cloudrun.md index 04aa3fd91..38768d125 100644 --- a/docs/source/compute_config/gcp_cloudrun.md +++ b/docs/source/compute_config/gcp_cloudrun.md @@ -42,6 +42,18 @@ gcloud artifacts repositories create lithops \ Grant the service account **Artifact Registry Writer** (or **Artifact Registry Create-on-push Writer** if you prefer) so it can push images. +Example command: + +```bash +gcloud artifacts repositories add-iam-policy-binding gcf-artifacts \ + --location=us-east1 \ + --project=lithops-dev \ + --member="serviceAccount:lithops-executor@lithops-dev.iam.gserviceaccount.com" \ + --role="roles/artifactregistry.writer" +``` + +Replace `gcf-artifacts`, `us-east1`, `lithops-dev`, and the service account email with your own values. + ## Configuration 1. Edit your lithops config and add the following keys: diff --git a/lithops/serverless/backends/gcp_cloudrun/cloudrun.py b/lithops/serverless/backends/gcp_cloudrun/cloudrun.py index c278e9998..2a5ec56b6 100644 --- a/lithops/serverless/backends/gcp_cloudrun/cloudrun.py +++ b/lithops/serverless/backends/gcp_cloudrun/cloudrun.py @@ -17,17 +17,19 @@ import os import time import json -import urllib import yaml import hashlib import logging import httplib2 +import requests import google.auth import google.oauth2.id_token +import google.auth.transport.requests from threading import Lock from google.oauth2 import service_account from google_auth_httplib2 import AuthorizedHttp from googleapiclient.discovery import build +from googleapiclient.errors import HttpError from lithops import utils from lithops.constants import COMPUTE_CLI_MSG @@ -51,6 +53,7 @@ def __init__(self, cloudrun_config, internal_storage): self.credentials_path = cloudrun_config.get('credentials_path') self._build_api_resource() + self._resolve_artifact_registry_repository_fallback() self._service_url = None 
self._id_token = None @@ -121,10 +124,122 @@ def _build_api_resource(self): 'api_endpoint': f'https://{self.region}-run.googleapis.com' } ) + self._ar_resource = build( + 'artifactregistry', 'v1', + http=http, cache_discovery=False + ) self.cr_config['project_name'] = self.project_name self.cr_config['service_account'] = self.service_account + def _parse_artifact_registry_image_name(self, image_name): + """ + Parse Artifact Registry image format: + REGION-docker.pkg.dev/PROJECT/REPOSITORY/IMAGE[:TAG] + """ + parts = image_name.split('/') + if len(parts) < 4: + return None + host, project, repository = parts[0], parts[1], parts[2] + if not host.endswith('-docker.pkg.dev'): + return None + location = host.replace('-docker.pkg.dev', '') + return project, location, repository + + def _artifact_registry_uploader_identity(self): + if self.credentials_path and os.path.isfile(self.credentials_path): + try: + with open(self.credentials_path, 'r') as f: + cred_data = json.load(f) + return cred_data.get('client_email') + except Exception: + return None + return self.service_account + + def _list_docker_repositories(self): + parent = f'projects/{self.project_name}/locations/{self.region}' + repos = [] + page_token = None + while True: + req = self._ar_resource.projects().locations().repositories().list( + parent=parent, pageToken=page_token + ) + res = req.execute() + for repo in res.get('repositories', []): + if repo.get('format') == 'DOCKER': + repos.append(repo['name'].rsplit('/', 1)[-1]) + page_token = res.get('nextPageToken') + if not page_token: + break + return repos + + def _resolve_artifact_registry_repository_fallback(self): + """ + Minimal repository resolution: + - Use configured repository if accessible + - Otherwise fallback to an existing DOCKER repository (prefer gcf-artifacts) + """ + repository = self.cr_config.get('artifact_registry_repository', 'lithops') + name = f'projects/{self.project_name}/locations/{self.region}/repositories/{repository}' + + try: + 
self._ar_resource.projects().locations().repositories().get(name=name).execute() + return + except Exception: + pass + + try: + docker_repos = self._list_docker_repositories() + except Exception: + docker_repos = [] + + if not docker_repos: + return + + fallback = 'gcf-artifacts' if 'gcf-artifacts' in docker_repos else sorted(docker_repos)[0] + if fallback != repository: + self.cr_config['artifact_registry_repository'] = fallback + logger.info( + f'Using Artifact Registry repository "{fallback}" ' + f'(configured "{repository}" is not accessible).' + ) + + def _ensure_artifact_registry_upload_permission(self, image_name): + """ + Check that current principal can upload artifacts to the target repo. + """ + parsed = self._parse_artifact_registry_image_name(image_name) + if not parsed: + return + + project, location, repository = parsed + resource = f'projects/{project}/locations/{location}/repositories/{repository}' + + try: + result = self._ar_resource.projects().locations().repositories().testIamPermissions( + resource=resource, + body={ + 'permissions': ['artifactregistry.repositories.uploadArtifacts'] + } + ).execute() + except HttpError: + # If we cannot test permissions (forbidden/unavailable), fail later on push. + return + + granted = set(result.get('permissions', [])) + if 'artifactregistry.repositories.uploadArtifacts' in granted: + return + + principal = self._artifact_registry_uploader_identity() or 'current credentials principal' + raise Exception( + 'Missing Artifact Registry permission to push runtime image. ' + f'Principal "{principal}" does not have ' + f'"artifactregistry.repositories.uploadArtifacts" on "{resource}". ' + 'Grant role "roles/artifactregistry.writer" on the repository/project or configure ' + 'a repository where this principal has write access via ' + '`gcp_cloudrun.artifact_registry_repository`.' 
+ ) + def _get_url_and_token(self, service_name): """ Generates a connection token @@ -207,17 +322,24 @@ def invoke(self, runtime_name, runtime_memory, payload, return_result=False): else: logger.debug('Invoking function') - req = urllib.request.Request(service_url + route, data=json.dumps(payload, default=str).encode('utf-8')) - req.add_header("Authorization", f"Bearer {id_token}") - res = urllib.request.urlopen(req) + headers = { + "Authorization": f"Bearer {id_token}", + "Content-Type": "application/json" + } + response = requests.post( + service_url + route, + data=json.dumps(payload, default=str), + headers=headers, + timeout=120 + ) - if res.getcode() in (200, 202): - data = json.loads(res.read()) + if response.status_code in (200, 202): + data = response.json() if return_result: return data return data["activationId"] else: - raise Exception(res.text) + raise Exception(response.text) def build_runtime(self, runtime_name, dockerfile, extra_args=[]): """ @@ -253,11 +375,19 @@ def build_runtime(self, runtime_name, dockerfile, extra_args=[]): raise Exception(f'There was an error authorizing Docker for push to {registry_host}') logger.debug(f'Pushing runtime {image_name} to {registry_host}') + self._ensure_artifact_registry_upload_permission(image_name) if utils.is_podman(docker_path): cmd = f'{docker_path} push {image_name} --format docker --remove-signatures' else: cmd = f'{docker_path} push {image_name}' - utils.run_command(cmd) + try: + utils.run_command(cmd) + except Exception as e: + raise Exception( + f'Unable to push runtime image to Artifact Registry ({image_name}). ' + 'Verify the repository exists and that your identity has Artifact Registry write permissions ' + '(artifactregistry.repositories.uploadArtifacts).' 
+ ) from e def _create_service(self, runtime_name, runtime_memory, timeout): """ @@ -292,9 +422,19 @@ def _create_service(self, runtime_name, runtime_memory, timeout): container['resources']['requests']['cpu'] = str(self.cr_config['runtime_cpu']) logger.debug(f"Creating service: {service_name}") - res = self._api_resource.namespaces().services().create( - parent=f'namespaces/{self.project_name}', body=svc_res - ).execute() + try: + res = self._api_resource.namespaces().services().create( + parent=f'namespaces/{self.project_name}', body=svc_res + ).execute() + except HttpError as e: + if e.resp.status == 409: + logger.debug(f'Service {service_name} already exists. Recreating it') + self._delete_service(service_name) + res = self._api_resource.namespaces().services().create( + parent=f'namespaces/{self.project_name}', body=svc_res + ).execute() + else: + raise logger.debug(f'Ok -- service created {service_name}') # Wait until service is up diff --git a/lithops/serverless/backends/gcp_cloudrun/config.py b/lithops/serverless/backends/gcp_cloudrun/config.py index 22acb4f94..0601fca49 100644 --- a/lithops/serverless/backends/gcp_cloudrun/config.py +++ b/lithops/serverless/backends/gcp_cloudrun/config.py @@ -30,7 +30,7 @@ 'runtime_timeout': 300, # seconds (max 3600 for services) 'runtime_memory': 256, # MiB (max 32768) 'runtime_cpu': 0.25, # vCPU: 0.08–<1 in 0.001 steps, or 1, 2, 4, 6, 8 - 'max_workers': 1000, + 'max_workers': 100, 'min_workers': 0, 'worker_processes': 1, 'invoke_pool_threads': 100, @@ -85,7 +85,6 @@ def is_valid_cloud_run_cpu(cpu): cloudpickle \ ps-mem \ tblib \ - namegenerator \ cryptography \ httplib2 \ google-cloud-storage \ @@ -96,20 +95,20 @@ def is_valid_cloud_run_cpu(cpu): psutil -ENV PORT 8080 -ENV PYTHONUNBUFFERED TRUE +ENV PORT=8080 +ENV PYTHONUNBUFFERED=TRUE -ENV CONCURRENCY 1 -ENV TIMEOUT 600 +ENV CONCURRENCY=1 +ENV TIMEOUT=600 # Copy Lithops proxy and lib to the container image. 
-ENV APP_HOME /lithops +ENV APP_HOME=/lithops WORKDIR $APP_HOME COPY lithops_cloudrun.zip . RUN unzip lithops_cloudrun.zip && rm lithops_cloudrun.zip -CMD exec gunicorn --bind :$PORT --workers $CONCURRENCY --timeout $TIMEOUT lithopsproxy:proxy +CMD ["sh", "-c", "exec gunicorn --bind :$PORT --workers $CONCURRENCY --timeout $TIMEOUT lithopsproxy:proxy"] """ service_res = """ @@ -162,6 +161,13 @@ def load_config(config_data): if 'credentials_path' in config_data['gcp']: config_data['gcp']['credentials_path'] = os.path.expanduser(config_data['gcp']['credentials_path']) + if 'credentials_path' not in config_data['gcp']: + raise Exception("'credentials_path' parameter is mandatory under the 'gcp' section for gcp_cloudrun") + if not os.path.isfile(config_data['gcp']['credentials_path']): + raise Exception( + 'A valid "gcp.credentials_path" service account JSON file is required ' + f'({config_data["gcp"]["credentials_path"]})' + ) temp = copy.deepcopy(config_data['gcp_cloudrun']) config_data['gcp_cloudrun'].update(config_data['gcp']) diff --git a/runtime/gcp_cloudrun/Dockerfile.conda b/runtime/gcp_cloudrun/Dockerfile.conda index d374bc6bf..79c554cc2 100644 --- a/runtime/gcp_cloudrun/Dockerfile.conda +++ b/runtime/gcp_cloudrun/Dockerfile.conda @@ -25,7 +25,6 @@ RUN pip install --upgrade setuptools six pip \ cloudpickle \ ps-mem \ tblib \ - namegenerator \ cryptography \ httplib2 \ google-cloud-storage \ diff --git a/runtime/gcp_cloudrun/README.md b/runtime/gcp_cloudrun/README.md index 7ca341a45..eced23913 100644 --- a/runtime/gcp_cloudrun/README.md +++ b/runtime/gcp_cloudrun/README.md @@ -80,7 +80,6 @@ gcp_cloudrun: cloudpickle \ ps-mem \ tblib \ - namegenerator \ torch \ torchvision \ google-cloud-storage \ From 19d99d6d75d1a756ab500ad74fd4b4c3ec599eba Mon Sep 17 00:00:00 2001 From: JosepSampe Date: Wed, 6 May 2026 16:55:59 +0200 Subject: [PATCH 4/5] Update docs --- CHANGELOG.md | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 8637ed744..3f67c8518 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,8 +3,17 @@ ## [v3.6.5.dev0] ### Added -- [Core] Support var-len params in func passed to executor. -- [GCP Cloud Run] Default runtimes use Artifact Registry; added `artifact_registry_repository` (default `lithops`). +- [Core] Added support for variable-length parameters in functions passed to the executor. + +### Changed +- [K8s] Auto-detect cluster architecture when building runtimes. +- [Runtimes] Updated runtime images and related version references across backends. +- [K8s] Added configuration for pod and container `securityContext`. +- [Docs] Corrected MinIO/Ceph config template keys and removed obsolete Kubernetes image references. + +### Fixed +- [K8s] Fixed default runtime builds impacted by Debian Buster end-of-life. +- [GCP Cloud Run] Added Artifact Registry (`pkg.dev`) runtime deployment support ## [v3.6.4] From 4dfaa55b460755feb5701bab96232e6e397de412 Mon Sep 17 00:00:00 2001 From: JosepSampe Date: Wed, 6 May 2026 17:00:49 +0200 Subject: [PATCH 5/5] Fix linting --- lithops/serverless/backends/gcp_cloudrun/config.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lithops/serverless/backends/gcp_cloudrun/config.py b/lithops/serverless/backends/gcp_cloudrun/config.py index 0601fca49..5cdcdf53e 100644 --- a/lithops/serverless/backends/gcp_cloudrun/config.py +++ b/lithops/serverless/backends/gcp_cloudrun/config.py @@ -63,6 +63,7 @@ def is_valid_cloud_run_cpu(cpu): expected = round(0.08 + n * 0.001, 3) return abs(r - expected) < 1e-5 and 0 <= n <= 919 + FH_ZIP_LOCATION = os.path.join(os.getcwd(), 'lithops_cloudrun.zip') DEFAULT_DOCKERFILE = """