-
Notifications
You must be signed in to change notification settings - Fork 5
CH-262 / CH-263 / CH-264 - Tilt and some other minor changes #847
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: develop
Are you sure you want to change the base?
Changes from all commits
35e794e
b371161
fb26db8
50d4db0
85ecd94
c003f50
ac07aa2
462e999
0ee7681
dbb05a9
78f250e
5bb5adc
03c447b
cffa4a3
fe2a481
4037a96
6882671
9ad54b0
0ba9fbe
383bbc1
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
| @@ -0,0 +1,67 @@ | ||||||
| load('ext://namespace', 'namespace_create', "namespace_inject") | ||||||
|
|
||||||
|
|
||||||
| def deploy(name, namespace, extra_env, watch): | ||||||
|
|
||||||
| # create namespaces | ||||||
| namespace_create(namespace) | ||||||
|
|
||||||
| # load helm chart | ||||||
| yaml = decode_yaml_stream(helm( | ||||||
| "deployment/helm", | ||||||
| # The release name, equivalent to helm --name | ||||||
| name=name, | ||||||
| # The namespace to install in, equivalent to helm --namespace | ||||||
| namespace=namespace, | ||||||
| # The values file to substitute into the chart. | ||||||
| values=["deployment/helm/values.yaml"], | ||||||
| # Values to set from the command-line | ||||||
| set=["service.port=1234", "ingress.enabled=true"] | ||||||
| ) | ||||||
| ) | ||||||
|
|
||||||
| source_root = os.path.abspath(os.getcwd()) | ||||||
| # modify deployments | ||||||
| for r in yaml: | ||||||
| if r.get("kind") == "Deployment": | ||||||
| deployment_name = r["metadata"]["name"] | ||||||
| print("+ patching deployment:", deployment_name) | ||||||
| r["spec"]["template"]["spec"].setdefault("volumes", []).append({ | ||||||
| "name": name + "-root", | ||||||
| "hostPath": { | ||||||
| "path": source_root | ||||||
| } | ||||||
| }) | ||||||
| for container in r["spec"]["template"]["spec"]["containers"]: | ||||||
| print(" + modifying container:", container["name"]) | ||||||
| print(" - add " + name + " root folder") | ||||||
| container.setdefault("volumeMounts", []).append({ | ||||||
| "mountPath": "/usr/src/" + name, | ||||||
| "name": name + "-root" | ||||||
| }) | ||||||
| if "resources" in container: | ||||||
| print(" - modifying resource requests and limits") | ||||||
| if "limits" not in container["resources"]: | ||||||
| container["resources"]["limits"] = {} | ||||||
| if "requests" not in container["resources"]: | ||||||
| container["resources"]["requests"] = {} | ||||||
| container["resources"]["requests"]["cpu"] = "100m" | ||||||
| container["resources"]["requests"]["memory"] = "256Mi" | ||||||
| container["resources"]["limits"]["cpu"] = "8000m" | ||||||
| container["resources"]["limits"]["memory"] = "4096Mi" | ||||||
|
|
||||||
| if deployment_name in extra_env and len(extra_env[deployment_name]) > 0: | ||||||
| print("Adding tasks images dependencies to env ", deployment_name) | ||||||
| for env in extra_env[deployment_name]: | ||||||
| container["env"].append({ | ||||||
| "name": env, "value": env | ||||||
| }) | ||||||
|
|
||||||
| if not watch: | ||||||
| # don't watch mnp folder | ||||||
|
||||||
| # don't watch mnp folder | |
| # don't watch npm folder |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -4,3 +4,30 @@ | |
| class cloudharness_djangoConfig(AppConfig): | ||
| default_auto_field = 'django.db.models.BigAutoField' | ||
| name = 'cloudharness_django' | ||
|
|
||
| def ready(self): | ||
| # imports | ||
| import sys | ||
| for skip_cmd in [ | ||
| "--help", | ||
| "collectstatic", | ||
| "compilemessages", | ||
| "compress", | ||
| "dbshell", | ||
| "dumpdata", | ||
| "loaddata", | ||
| "makemessages", | ||
| "makemigrations", | ||
| "migrate", | ||
| "reset_db", | ||
| "showmigrations", | ||
| "sqlmigrate", | ||
| "squashmigrations", | ||
| "test", | ||
| ]: | ||
| # for these commands we skip initializing the event listener | ||
| if skip_cmd in sys.argv: | ||
| return | ||
|
|
||
| from cloudharness_django.services.events import init_listener_in_background | ||
| init_listener_in_background() | ||
|
Comment on lines
+8
to
+33
|
||
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
| @@ -1,3 +1,5 @@ | ||||||
| import time | ||||||
|
|
||||||
| from cloudharness.applications import ConfigurationCallException | ||||||
|
|
||||||
| from django.conf import settings | ||||||
|
|
@@ -26,26 +28,31 @@ def event_handler(app, event_client, message): | |||||
| log.info(f"{event_client} {message}") | ||||||
| if resource in ["CLIENT_ROLE_MAPPING", "GROUP", "USER", "GROUP_MEMBERSHIP", "ORGANIZATION_MEMBERSHIP"]: | ||||||
| try: | ||||||
| time.sleep(1) # wait a bit to make sure the transaction is committed in Keycloak before trying to fetch the updated data | ||||||
| init_services() | ||||||
| user_service = get_user_service() | ||||||
|
Comment on lines
29
to
33
|
||||||
| auth_client = get_auth_service().get_auth_client() | ||||||
|
|
||||||
| if resource == "GROUP": | ||||||
| kc_group = auth_client.get_group(resource_path[1]) | ||||||
| user_service.sync_kc_group(kc_group) | ||||||
| return | ||||||
| if resource == "USER": | ||||||
| kc_user = auth_client.get_user(resource_path[1]) | ||||||
| user_service.sync_kc_user(kc_user, delete=operation == "DELETE") | ||||||
| return | ||||||
| if resource == "CLIENT_ROLE_MAPPING": | ||||||
| # adding/deleting user client roles | ||||||
| # set/user user is_superuser | ||||||
| kc_user = auth_client.get_user(resource_path[1]) | ||||||
| user_service.sync_kc_user(kc_user) | ||||||
| return | ||||||
| if resource == "GROUP_MEMBERSHIP" or resource == "ORGANIZATION_MEMBERSHIP": | ||||||
| # adding / deleting users from groups, update the user | ||||||
| # updating the user will also update the user groups | ||||||
| kc_user = auth_client.get_user(resource_path[1]) | ||||||
| user_service.sync_kc_user(kc_user) | ||||||
| return | ||||||
| except Exception as e: | ||||||
| log.error(e) | ||||||
| raise e | ||||||
|
||||||
| raise e | |
| raise |
Copilot
AI
Apr 16, 2026
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This retry loop runs indefinitely on any exception. Since it is executed in a background thread (started below), make sure failures don’t prevent clean shutdown and aren’t retried forever unintentionally. Consider (a) catching Exception instead of bare except:, (b) adding a max retry count / circuit breaker, and (c) starting the thread as a daemon so the process can exit if initialization never succeeds.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -24,8 +24,11 @@ def get_dsn(appname): | |
| dsn = get_dsn('notifications') | ||
| """ | ||
| url = get_common_service_cluster_address() + f'/api/sentry/getdsn/{appname}' | ||
| response = requests.get(url, verify=False).json() | ||
| dsn = response['dsn'] | ||
| try: | ||
| response = requests.get(url, verify=False, timeout=5).json() | ||
| dsn = response.get('dsn') | ||
| except Exception: | ||
| return None | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Should at least log this error |
||
| if dsn and len(dsn) > 0: | ||
| return dsn | ||
| else: | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -213,7 +213,7 @@ def __finish_helm_values(self, values, defer_task_images=False): | |
| values['local'] = self.local | ||
| if self.local: | ||
| try: | ||
| values['localIp'] = get_cluster_ip() | ||
| values['localIp'] = get_cluster_ip(local=True) | ||
| except subprocess.TimeoutExpired: | ||
| logging.warning("Minikube not available") | ||
| except: | ||
|
Comment on lines
215
to
219
|
||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
container["env"]may not exist on all rendered Deployments. This will raise a KeyError when adding task-image env vars. Usecontainer.setdefault("env", []).append(...)(or initializeenvif missing) before appending.