From 3ab626bc92b37d2e1ac5ed74baa6bef29ff91103 Mon Sep 17 00:00:00 2001
From: Arthur Passos
Date: Thu, 19 Mar 2026 13:30:11 -0300
Subject: [PATCH 1/2] small adjustments to test suite

---
 .../test.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/tests/integration/test_export_replicated_mt_partition_to_object_storage/test.py b/tests/integration/test_export_replicated_mt_partition_to_object_storage/test.py
index af899b91ff70..5ee41a165f50 100644
--- a/tests/integration/test_export_replicated_mt_partition_to_object_storage/test.py
+++ b/tests/integration/test_export_replicated_mt_partition_to_object_storage/test.py
@@ -66,6 +66,17 @@ def wait_for_export_to_start(
     raise TimeoutError(f"Export did not start within {timeout}s. ")
 
 
+def skip_if_remote_database_disk_enabled(cluster):
+    """Skip test if any instance in the cluster has remote database disk enabled.
+
+    Tests that block MinIO cannot run when remote database disk is enabled,
+    as the database metadata is stored on MinIO and blocking it would break the database.
+    """
+    for instance in cluster.instances.values():
+        if instance.with_remote_database_disk:
+            pytest.skip("Test cannot run with remote database disk enabled (db disk), as it blocks MinIO which stores database metadata")
+
+
 @pytest.fixture(scope="module")
 def cluster():
     try:
@@ -1116,6 +1127,11 @@ def test_export_partition_from_replicated_database_uses_db_shard_replica_macros(
     before the expand call, and the pattern resolves correctly.
     """
 
+    # The remote disk test suite sets the shard and replica macros in https://github.com/Altinity/ClickHouse/blob/bbabcaa96e8b7fe8f70ecd0bd4f76fb0f76f2166/tests/integration/helpers/cluster.py#L4356
+    # When expanding the macros, the configured ones are preferred over the ones from the DatabaseReplicated definition.
+    # Therefore, this test fails. It is easier to skip it than to fix it.
+    skip_if_remote_database_disk_enabled(cluster)
+
     node = cluster.instances["replica1"]
     watcher_node = cluster.instances["watcher_node"]
 

From 7110773bd303eb3e21db03feb194b354d0e58796 Mon Sep 17 00:00:00 2001
From: Arthur Passos
Date: Fri, 20 Mar 2026 09:30:58 -0300
Subject: [PATCH 2/2] one last attempt

---
 .../test.py | 25 +++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/tests/integration/test_export_replicated_mt_partition_to_object_storage/test.py b/tests/integration/test_export_replicated_mt_partition_to_object_storage/test.py
index 5ee41a165f50..67229f592034 100644
--- a/tests/integration/test_export_replicated_mt_partition_to_object_storage/test.py
+++ b/tests/integration/test_export_replicated_mt_partition_to_object_storage/test.py
@@ -139,6 +139,31 @@ def cluster():
         cluster.shutdown()
 
 
+@pytest.fixture(autouse=True)
+def drop_tables_after_test(cluster):
+    """Drop all tables in the default database after every test.
+
+    Without this, ReplicatedMergeTree tables from completed tests remain alive and keep
+    running ZooKeeper background threads (merge selector, queue log, cleanup, export manifest
+    updater). With many tables alive simultaneously the ZooKeeper session becomes overwhelmed
+    and subsequent tests start seeing operation-timeout / session-expired errors.
+    """
+    yield
+    for instance_name, instance in cluster.instances.items():
+        try:
+            tables_str = instance.query(
+                "SELECT name FROM system.tables WHERE database = 'default' FORMAT TabSeparated"
+            ).strip()
+            if not tables_str:
+                continue
+            for table in tables_str.split('\n'):
+                table = table.strip()
+                if table:
+                    instance.query(f"DROP TABLE IF EXISTS default.`{table}` SYNC")
+        except Exception as e:
+            logging.warning(f"drop_tables_after_test: cleanup failed on {instance_name}: {e}")
+
+
 def create_s3_table(node, s3_table):
     node.query(f"CREATE TABLE {s3_table} (id UInt64, year UInt16) ENGINE = S3(s3_conn, filename='{s3_table}', format=Parquet, partition_strategy='hive') PARTITION BY year")
 