From c492b92d71e2e7899e4c97b8d9ad8a4cca43e83e Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Tue, 5 May 2026 10:33:25 +0530 Subject: [PATCH 1/8] Add server-side SAFE flag for UseQueryForMetadata rollout on DBSQL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for server-side feature flag enableUseQueryForThriftJdbc to control SHOW commands rollout for Thrift metadata operations on DBSQL warehouses. Priority order: 1. Client-side param (UseQueryForMetadata in JDBC URL) — honoured first 2. Server-side SAFE flag (DBSQL warehouses only) 3. Default value (0 = disabled) Extract resolveFeatureFlag() helper for client-first, server-fallback pattern reusable across similar feature flags. Co-authored-by: Isaac Signed-off-by: Gopal Lal --- .../api/impl/DatabricksConnectionContext.java | 48 ++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) diff --git a/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java b/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java index 9dc8d4d02..1937446fb 100644 --- a/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java +++ b/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java @@ -45,6 +45,9 @@ public class DatabricksConnectionContext implements IDatabricksConnectionContext private static final String SQL_EXEC_FLAG_NAME = "databricks.partnerplatform.clientConfigsFeatureFlags.enableSqlExecForJdbc"; + private static final String USE_QUERY_FOR_THRIFT_FLAG_NAME = + "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc"; + private final String host; @VisibleForTesting final int port; private final String schema; @@ -1133,7 +1136,8 @@ public boolean enableShowCommandsForGetFunctions() { @Override public boolean useQueryForMetadata() { - return getParameter(DatabricksJdbcUrlParams.USE_QUERY_FOR_METADATA).equals("1"); + return resolveFeatureFlag( + 
DatabricksJdbcUrlParams.USE_QUERY_FOR_METADATA, USE_QUERY_FOR_THRIFT_FLAG_NAME); } @Override @@ -1196,6 +1200,48 @@ private String getParameterIgnoreDefault(DatabricksJdbcUrlParams key) { return this.parameters.getOrDefault(key.getParamName().toLowerCase(), null); } + /** + * Resolves a boolean feature flag with client-side priority over server-side. + * + *

Priority order: + * + * <pre>
  1. Client-side param (explicit user setting in JDBC URL) — honoured unconditionally + *
  2. Server-side feature flag (DBSQL warehouses only) — checked if user didn't set the param + *
  3. Default value from the param definition + * </pre>
+ * + * @param clientParam the JDBC URL parameter (e.g. USE_QUERY_FOR_METADATA) + * @param serverFlagName the server-side SAFE flag name + * @return true if the feature should be enabled + */ + private boolean resolveFeatureFlag(DatabricksJdbcUrlParams clientParam, String serverFlagName) { + // Client-side flag has highest priority + String explicitValue = getParameterIgnoreDefault(clientParam); + if (explicitValue != null) { + return explicitValue.equals("1"); + } + + // For DBSQL (warehouses), check server-side feature flag + if (computeResource instanceof Warehouse) { + try { + if (DatabricksDriverFeatureFlagsContextFactory.getInstance(this) + .isFeatureEnabled(serverFlagName)) { + LOGGER.debug( + "Server-side flag {} is enabled for feature {}", + serverFlagName, + clientParam.getParamName()); + return true; + } + } catch (Exception e) { + LOGGER.debug("Failed to check server-side flag {}: {}", serverFlagName, e.getMessage()); + } + } + + // Default from param definition + return getParameter(clientParam).equals("1"); + } + private String getParameter(DatabricksJdbcUrlParams key, String defaultValue) { return this.parameters.getOrDefault(key.getParamName().toLowerCase(), defaultValue); } From 6eca0bce518b355aab70ac9ebb8fc450a62d2597 Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Tue, 5 May 2026 14:50:02 +0530 Subject: [PATCH 2/8] Add tests for server-side UseQueryForMetadata feature flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 5 tests covering all priority combinations: - Server flag enabled on warehouse → true - Server flag disabled on warehouse → false - Server flag enabled on cluster → ignored (false) - Client explicit=1 overrides server disabled → true - Client explicit=0 overrides server enabled → false Co-authored-by: Isaac Signed-off-by: Gopal Lal --- .../impl/DatabricksConnectionContextTest.java | 84 +++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git 
a/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java b/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java index cc61fad69..4b190421c 100644 --- a/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java +++ b/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java @@ -1488,6 +1488,90 @@ public void testUseQueryForMetadataExplicitFalseOnWarehouse() throws DatabricksS assertFalse(ctx.useQueryForMetadata()); } + @Test + public void testUseQueryForMetadata_serverFlagEnabled_warehouseReturnsTrue() + throws DatabricksSQLException { + // Warehouse URL without explicit UseQueryForMetadata — server flag enabled → true + DatabricksConnectionContext ctx = + (DatabricksConnectionContext) + DatabricksConnectionContext.parse(TestConstants.VALID_URL_1, properties); + + Map flags = new HashMap<>(); + flags.put( + "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc", "true"); + DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(ctx, flags); + + assertTrue(ctx.useQueryForMetadata()); + } + + @Test + public void testUseQueryForMetadata_serverFlagDisabled_warehouseReturnsFalse() + throws DatabricksSQLException { + // Warehouse URL without explicit UseQueryForMetadata — server flag disabled → false + DatabricksConnectionContext ctx = + (DatabricksConnectionContext) + DatabricksConnectionContext.parse(TestConstants.VALID_URL_1, properties); + + Map flags = new HashMap<>(); + flags.put( + "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc", + "false"); + DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(ctx, flags); + + assertFalse(ctx.useQueryForMetadata()); + } + + @Test + public void testUseQueryForMetadata_serverFlagEnabled_clusterIgnored() + throws DatabricksSQLException { + // All-purpose cluster — server flag should be ignored, always false + DatabricksConnectionContext ctx = + 
(DatabricksConnectionContext) + DatabricksConnectionContext.parse(TestConstants.VALID_CLUSTER_URL, properties); + + Map flags = new HashMap<>(); + flags.put( + "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc", "true"); + DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(ctx, flags); + + assertFalse(ctx.useQueryForMetadata()); + } + + @Test + public void testUseQueryForMetadata_clientExplicit1_overridesServerFlagDisabled() + throws DatabricksSQLException { + // Client sets UseQueryForMetadata=1 — should be honoured even if server flag is disabled + DatabricksConnectionContext ctx = + (DatabricksConnectionContext) + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_1 + ";UseQueryForMetadata=1", properties); + + Map flags = new HashMap<>(); + flags.put( + "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc", + "false"); + DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(ctx, flags); + + assertTrue(ctx.useQueryForMetadata()); + } + + @Test + public void testUseQueryForMetadata_clientExplicit0_overridesServerFlagEnabled() + throws DatabricksSQLException { + // Client sets UseQueryForMetadata=0 — should be honoured even if server flag is enabled + DatabricksConnectionContext ctx = + (DatabricksConnectionContext) + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_1 + ";UseQueryForMetadata=0", properties); + + Map flags = new HashMap<>(); + flags.put( + "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc", "true"); + DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(ctx, flags); + + assertFalse(ctx.useQueryForMetadata()); + } + // --------------------------------------------------------------------------- // Client type selection with Thrift-native metadata params // --------------------------------------------------------------------------- From d0732406f2bb203ea422ea862298b38a4f5d0fb5 Mon Sep 17 00:00:00 
2001 From: Gopal Lal Date: Mon, 11 May 2026 11:41:19 +0530 Subject: [PATCH 3/8] Use two-key rollout: require both client default and server flag for warehouses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change resolveFeatureFlag logic to: 1. User explicitly set param → honour it (any compute) 2. No explicit setting + cluster → always false 3. No explicit setting + warehouse → true only when BOTH client default is "1" AND server-side flag is enabled Change UseQueryForMetadata default from "0" to "1" so the server-side flag becomes the sole rollout gate for warehouses. Co-authored-by: Isaac Signed-off-by: Gopal Lal --- .../api/impl/DatabricksConnectionContext.java | 45 ++++++++++++------- .../jdbc/common/DatabricksJdbcUrlParams.java | 2 +- 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java b/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java index 1e3b8742d..159df78f0 100644 --- a/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java +++ b/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java @@ -1214,30 +1214,41 @@ private String getParameterIgnoreDefault(DatabricksJdbcUrlParams key) { * @return true if the feature should be enabled */ private boolean resolveFeatureFlag(DatabricksJdbcUrlParams clientParam, String serverFlagName) { - // Client-side flag has highest priority + // 1. 
User explicitly set the param — honour it regardless of compute type String explicitValue = getParameterIgnoreDefault(clientParam); if (explicitValue != null) { return explicitValue.equals("1"); } - // For DBSQL (warehouses), check server-side feature flag - if (computeResource instanceof Warehouse) { - try { - if (DatabricksDriverFeatureFlagsContextFactory.getInstance(this) - .isFeatureEnabled(serverFlagName)) { - LOGGER.debug( - "Server-side flag {} is enabled for feature {}", - serverFlagName, - clientParam.getParamName()); - return true; - } - } catch (Exception e) { - LOGGER.debug("Failed to check server-side flag {}: {}", serverFlagName, e.getMessage()); - } + // 2. No explicit setting + all-purpose cluster — always false + if (!(computeResource instanceof Warehouse)) { + return false; + } + + // 3. No explicit setting + warehouse — enabled only when BOTH client default + // AND server-side flag agree. This gives a two-key rollout mechanism: + // flip the param default to "1" in the driver AND enable the server flag. 
+ boolean clientDefault = getParameter(clientParam).equals("1"); + boolean serverEnabled = false; + try { + serverEnabled = + DatabricksDriverFeatureFlagsContextFactory.getInstance(this) + .isFeatureEnabled(serverFlagName); + } catch (Exception e) { + LOGGER.debug("Failed to check server-side flag {}: {}", serverFlagName, e.getMessage()); + } + + if (clientDefault && serverEnabled) { + LOGGER.debug( + "Feature {} enabled for warehouse: client default={}, server flag {} ={}", + clientParam.getParamName(), + clientDefault, + serverFlagName, + serverEnabled); + return true; } - // Default from param definition - return getParameter(clientParam).equals("1"); + return false; } private String getParameter(DatabricksJdbcUrlParams key, String defaultValue) { diff --git a/src/main/java/com/databricks/jdbc/common/DatabricksJdbcUrlParams.java b/src/main/java/com/databricks/jdbc/common/DatabricksJdbcUrlParams.java index 518676457..8f67fc0a2 100644 --- a/src/main/java/com/databricks/jdbc/common/DatabricksJdbcUrlParams.java +++ b/src/main/java/com/databricks/jdbc/common/DatabricksJdbcUrlParams.java @@ -173,7 +173,7 @@ public enum DatabricksJdbcUrlParams { USE_QUERY_FOR_METADATA( "UseQueryForMetadata", "Use SQL SHOW commands instead of Thrift RPCs for metadata operations. When enabled, EnableShowCommandForGetFunctions is redundant", - "0"), + "1"), TREAT_METADATA_CATALOG_NAME_AS_PATTERN( "TreatMetadataCatalogNameAsPattern", "Treat catalog names as patterns in Thrift metadata RPCs. 
When disabled (default), wildcard characters in catalog names are escaped", From 0a1b61377d6cecaff57cf902aa6cc60f87809f56 Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Mon, 11 May 2026 11:45:37 +0530 Subject: [PATCH 4/8] Update test comments for two-key rollout logic and add changelog - Update test comments to reflect two-key mechanism (client default + server flag) - Add breaking change entry to NEXT_CHANGELOG.md for UseQueryForMetadata default change Co-authored-by: Isaac Signed-off-by: Gopal Lal --- NEXT_CHANGELOG.md | 1 + .../api/impl/DatabricksConnectionContextTest.java | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index ac81de5d9..e5257a77e 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -6,6 +6,7 @@ ### Updated - `EnableGeoSpatialSupport` no longer requires `EnableComplexDatatypeSupport=1`. Geospatial types (GEOMETRY, GEOGRAPHY) can now be enabled independently of complex type support (ARRAY, MAP, STRUCT). +- **Breaking change:** `UseQueryForMetadata` default changed from `0` to `1`. For DBSQL warehouses, SHOW commands for Thrift metadata operations are now enabled when a server-side feature flag is active. The driver uses a two-key rollout: both the client default (`1`) and the server-side flag must be enabled. Users who explicitly set `UseQueryForMetadata=0` are unaffected — explicit settings always take priority. All-purpose clusters are unaffected (always defaults to native RPCs). 
### Fixed diff --git a/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java b/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java index e804c748c..41e54fc50 100644 --- a/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java +++ b/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java @@ -1456,7 +1456,8 @@ public void testDefaultGetterCoverage() throws DatabricksSQLException { @Test public void testUseQueryForMetadataDefaultFalseForWarehouse() throws DatabricksSQLException { - // Warehouse URL without explicit UseQueryForMetadata — default is false (native RPCs) + // Warehouse without explicit setting — requires both client default AND server flag. + // Client default is "1" but no server flag set → false IDatabricksConnectionContext ctx = DatabricksConnectionContext.parse(TestConstants.VALID_URL_1, properties); assertFalse(ctx.useQueryForMetadata()); @@ -1464,7 +1465,7 @@ public void testUseQueryForMetadataDefaultFalseForWarehouse() throws DatabricksS @Test public void testUseQueryForMetadataDefaultFalseForCluster() throws DatabricksSQLException { - // Cluster URL without explicit UseQueryForMetadata — default is false + // Cluster without explicit setting — always false regardless of defaults IDatabricksConnectionContext ctx = DatabricksConnectionContext.parse(TestConstants.VALID_CLUSTER_URL, properties); assertFalse(ctx.useQueryForMetadata()); @@ -1491,7 +1492,7 @@ public void testUseQueryForMetadataExplicitFalseOnWarehouse() throws DatabricksS @Test public void testUseQueryForMetadata_serverFlagEnabled_warehouseReturnsTrue() throws DatabricksSQLException { - // Warehouse URL without explicit UseQueryForMetadata — server flag enabled → true + // Warehouse without explicit setting — client default "1" + server flag enabled → true DatabricksConnectionContext ctx = (DatabricksConnectionContext) DatabricksConnectionContext.parse(TestConstants.VALID_URL_1, 
properties); @@ -1507,7 +1508,7 @@ public void testUseQueryForMetadata_serverFlagEnabled_warehouseReturnsTrue() @Test public void testUseQueryForMetadata_serverFlagDisabled_warehouseReturnsFalse() throws DatabricksSQLException { - // Warehouse URL without explicit UseQueryForMetadata — server flag disabled → false + // Warehouse without explicit setting — client default "1" but server flag disabled → false DatabricksConnectionContext ctx = (DatabricksConnectionContext) DatabricksConnectionContext.parse(TestConstants.VALID_URL_1, properties); @@ -1524,7 +1525,7 @@ public void testUseQueryForMetadata_serverFlagDisabled_warehouseReturnsFalse() @Test public void testUseQueryForMetadata_serverFlagEnabled_clusterIgnored() throws DatabricksSQLException { - // All-purpose cluster — server flag should be ignored, always false + // All-purpose cluster — always false, server flag and client default both ignored DatabricksConnectionContext ctx = (DatabricksConnectionContext) DatabricksConnectionContext.parse(TestConstants.VALID_CLUSTER_URL, properties); From e106b85f00aad3f0030de1baf9303214e7bde0c6 Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Mon, 11 May 2026 11:52:58 +0530 Subject: [PATCH 5/8] Add BREAKING CHANGES section to changelog for 3.4.1 Document three breaking changes: 1. getTables() % catalog treated as literal, not wildcard 2. getColumnTypeName() returns base DECIMAL without precision/scale 3. DBSQL metadata operations now use SHOW commands (UseQueryForMetadata) Co-authored-by: Isaac Signed-off-by: Gopal Lal --- NEXT_CHANGELOG.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index e5257a77e..9716a8d3b 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -2,11 +2,18 @@ ## [Unreleased] +### BREAKING CHANGES in 3.4.1 + +1. 
**`getTables()`: Percent sign (`%`) in catalog argument is now treated as a literal character, not a wildcard.** Previously returned all tables; now returns zero rows unless a catalog named "%" exists. JDBC spec: catalog is an exact-match parameter, not a pattern. Migration: Pass `null` to search all catalogs. + +2. **`getColumnTypeName()`: DECIMAL columns now return `"DECIMAL"` without precision/scale** (e.g., `"DECIMAL"` not `"DECIMAL(10,2)"`). Use `getPrecision()` and `getScale()` for numeric constraints. JDBC spec: `getColumnTypeName()` returns the base type name only. + +3. **For DBSQL warehouses, metadata operations are now powered by SHOW SQL commands.** SQL Exec API mode already was powered by SHOW commands, now the same is true for Thrift server mode as well. To revert to native Thrift metadata RPCs, set `UseQueryForMetadata` to `0`. + ### Added ### Updated - `EnableGeoSpatialSupport` no longer requires `EnableComplexDatatypeSupport=1`. Geospatial types (GEOMETRY, GEOGRAPHY) can now be enabled independently of complex type support (ARRAY, MAP, STRUCT). -- **Breaking change:** `UseQueryForMetadata` default changed from `0` to `1`. For DBSQL warehouses, SHOW commands for Thrift metadata operations are now enabled when a server-side feature flag is active. The driver uses a two-key rollout: both the client default (`1`) and the server-side flag must be enabled. Users who explicitly set `UseQueryForMetadata=0` are unaffected — explicit settings always take priority. All-purpose clusters are unaffected (always defaults to native RPCs). ### Fixed From db2f07921437452e55c8b4e43dd25a05a2453b41 Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Mon, 11 May 2026 12:22:06 +0530 Subject: [PATCH 6/8] Fix DatabricksSessionTest: clear stale feature flags to prevent test contamination DatabricksDriverFeatureFlagsContextFactory uses a static ConcurrentHashMap that persists across tests. 
When DatabricksConnectionContextTest sets enableUseQueryForThriftJdbc=true, it leaks into DatabricksSessionTest, causing useQueryForMetadata() to return true unexpectedly. Fix: call removeInstance() in setupWarehouse() to clear stale flags. Co-authored-by: Isaac Signed-off-by: Gopal Lal --- .../com/databricks/jdbc/api/impl/DatabricksSessionTest.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/test/java/com/databricks/jdbc/api/impl/DatabricksSessionTest.java b/src/test/java/com/databricks/jdbc/api/impl/DatabricksSessionTest.java index dad4727da..d770dfbc0 100644 --- a/src/test/java/com/databricks/jdbc/api/impl/DatabricksSessionTest.java +++ b/src/test/java/com/databricks/jdbc/api/impl/DatabricksSessionTest.java @@ -13,6 +13,7 @@ import com.databricks.jdbc.api.internal.IDatabricksConnectionContext; import com.databricks.jdbc.common.DatabricksClientType; import com.databricks.jdbc.common.DatabricksJdbcUrlParams; +import com.databricks.jdbc.common.safe.DatabricksDriverFeatureFlagsContextFactory; import com.databricks.jdbc.dbclient.impl.sqlexec.DatabricksMetadataQueryClient; import com.databricks.jdbc.dbclient.impl.sqlexec.DatabricksSdkClient; import com.databricks.jdbc.dbclient.impl.thrift.DatabricksThriftServiceClient; @@ -47,6 +48,8 @@ public class DatabricksSessionTest { static void setupWarehouse(boolean useThrift) throws SQLException { String url = useThrift ? 
WAREHOUSE_JDBC_URL : WAREHOUSE_JDBC_URL_WITH_SEA; connectionContext = DatabricksConnectionContext.parse(url, new Properties()); + // Clear any stale feature flags from other tests to prevent test contamination + DatabricksDriverFeatureFlagsContextFactory.removeInstance(connectionContext); } private void setupCluster() throws SQLException { From 9e42531b8934f28133c18185492f425ccfab3e3f Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Mon, 11 May 2026 13:34:55 +0530 Subject: [PATCH 7/8] Fix test contamination: use setFeatureFlagsContext with empty map removeInstance() didn't clear stale flags because other test classes added multiple active contexts for the same host key, preventing removal. Use setFeatureFlagsContext with an empty map instead to explicitly override any stale flags. Co-authored-by: Isaac Signed-off-by: Gopal Lal --- .../databricks/jdbc/api/impl/DatabricksSessionTest.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/test/java/com/databricks/jdbc/api/impl/DatabricksSessionTest.java b/src/test/java/com/databricks/jdbc/api/impl/DatabricksSessionTest.java index d770dfbc0..5c34a04fe 100644 --- a/src/test/java/com/databricks/jdbc/api/impl/DatabricksSessionTest.java +++ b/src/test/java/com/databricks/jdbc/api/impl/DatabricksSessionTest.java @@ -23,6 +23,7 @@ import com.databricks.jdbc.model.client.thrift.generated.TSessionHandle; import com.databricks.jdbc.telemetry.latency.DatabricksMetricsTimedProcessor; import java.sql.SQLException; +import java.util.HashMap; import java.util.Properties; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -48,8 +49,11 @@ public class DatabricksSessionTest { static void setupWarehouse(boolean useThrift) throws SQLException { String url = useThrift ? 
WAREHOUSE_JDBC_URL : WAREHOUSE_JDBC_URL_WITH_SEA; connectionContext = DatabricksConnectionContext.parse(url, new Properties()); - // Clear any stale feature flags from other tests to prevent test contamination - DatabricksDriverFeatureFlagsContextFactory.removeInstance(connectionContext); + // Override feature flags with empty map to prevent test contamination from + // other test classes (e.g. DatabricksConnectionContextTest) that set flags + // on the shared static DatabricksDriverFeatureFlagsContextFactory. + DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext( + connectionContext, new HashMap<>()); } private void setupCluster() throws SQLException { From 73a5f59ab5bd5bbe137df1efbb120043ede01d9a Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Mon, 11 May 2026 13:37:27 +0530 Subject: [PATCH 8/8] Add test for server-flag-enabled path in DatabricksSessionTest Verifies that when the server-side enableUseQueryForThriftJdbc flag is enabled for a warehouse, the session uses DatabricksMetadataQueryClient (SHOW commands) instead of native Thrift RPCs. 
Co-authored-by: Isaac Signed-off-by: Gopal Lal --- .../jdbc/api/impl/DatabricksSessionTest.java | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/src/test/java/com/databricks/jdbc/api/impl/DatabricksSessionTest.java b/src/test/java/com/databricks/jdbc/api/impl/DatabricksSessionTest.java index 5c34a04fe..1d958a574 100644 --- a/src/test/java/com/databricks/jdbc/api/impl/DatabricksSessionTest.java +++ b/src/test/java/com/databricks/jdbc/api/impl/DatabricksSessionTest.java @@ -24,6 +24,7 @@ import com.databricks.jdbc.telemetry.latency.DatabricksMetricsTimedProcessor; import java.sql.SQLException; import java.util.HashMap; +import java.util.Map; import java.util.Properties; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -335,6 +336,27 @@ public void testUseQueryForMetadataDisabledByDefaultForWarehouse() throws SQLExc "Default UseQueryForMetadata=0: warehouse uses native Thrift RPCs for metadata"); } + @Test + public void testUseQueryForMetadataEnabledViaServerFlag() throws SQLException { + setupWarehouse(true /* useThrift */); + // Simulate server-side flag enabling SHOW commands for this warehouse + Map flags = new HashMap<>(); + flags.put( + "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc", "true"); + DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(connectionContext, flags); + + assertTrue(connectionContext.useQueryForMetadata()); + DatabricksSession session = new DatabricksSession(connectionContext, thriftClient); + assertInstanceOf( + DatabricksMetadataQueryClient.class, + session.getDatabricksMetadataClient(), + "Server flag enabled: warehouse should use SHOW commands for metadata"); + + // Clean up so other tests are not affected + DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext( + connectionContext, new HashMap<>()); + } + @Test public void testUseQueryForMetadataDisabledByDefaultForCluster() throws SQLException { connectionContext 
= DatabricksConnectionContext.parse(VALID_CLUSTER_URL, new Properties());