Skip to content
Merged
8 changes: 8 additions & 0 deletions NEXT_CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,14 @@

## [Unreleased]

### BREAKING CHANGES in 3.4.1

1. **`getTables()`: Percent sign (`%`) in catalog argument is now treated as a literal character, not a wildcard.** Previously returned all tables; now returns zero rows unless a catalog named "%" exists. JDBC spec: catalog is an exact-match parameter, not a pattern. Migration: Pass `null` to search all catalogs.

2. **`getColumnTypeName()`: DECIMAL columns now return `"DECIMAL"` without precision/scale** (e.g., `"DECIMAL"` not `"DECIMAL(10,2)"`). Use `getPrecision()` and `getScale()` for numeric constraints. JDBC spec: `getColumnTypeName()` returns the base type name only.

3. **For DBSQL warehouses, metadata operations are now powered by SHOW SQL commands.** SQL Exec API mode was already powered by SHOW commands; the same is now true for Thrift server mode as well. To revert to native Thrift metadata RPCs, set `UseQueryForMetadata` to `0`.

### Added

### Updated
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,9 @@ public class DatabricksConnectionContext implements IDatabricksConnectionContext
private static final String SQL_EXEC_FLAG_NAME =
"databricks.partnerplatform.clientConfigsFeatureFlags.enableSqlExecForJdbc";

private static final String USE_QUERY_FOR_THRIFT_FLAG_NAME =
"databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc";

private final String host;
@VisibleForTesting final int port;
private final String schema;
Expand Down Expand Up @@ -1131,7 +1134,8 @@ public boolean enableShowCommandsForGetFunctions() {

@Override
public boolean useQueryForMetadata() {
  // Delegates to the shared client/server resolution: an explicit URL param wins,
  // otherwise (warehouses only) the client default and the server-side SAFE flag
  // must both agree.
  return resolveFeatureFlag(
      DatabricksJdbcUrlParams.USE_QUERY_FOR_METADATA, USE_QUERY_FOR_THRIFT_FLAG_NAME);
}

@Override
Expand Down Expand Up @@ -1194,6 +1198,59 @@ private String getParameterIgnoreDefault(DatabricksJdbcUrlParams key) {
return this.parameters.getOrDefault(key.getParamName().toLowerCase(), null);
}

/**
 * Decides whether a boolean feature is on, combining the JDBC URL with the server-side
 * SAFE flag service.
 *
 * <p>Resolution order:
 *
 * <ol>
 *   <li>A value the user put in the connection URL is always honoured, on any compute.
 *   <li>Otherwise, on anything that is not a DBSQL warehouse the feature is off.
 *   <li>Otherwise (warehouse, nothing explicit) the feature is on only when the param's
 *       built-in default is "1" <em>and</em> the server-side flag is enabled — a two-key
 *       rollout: ship a driver whose default is "1", then flip the server flag.
 * </ol>
 *
 * @param clientParam the JDBC URL parameter (e.g. USE_QUERY_FOR_METADATA)
 * @param serverFlagName the server-side SAFE flag name
 * @return true if the feature should be enabled
 */
private boolean resolveFeatureFlag(DatabricksJdbcUrlParams clientParam, String serverFlagName) {
  // Explicit user setting in the URL trumps everything else.
  String userValue = getParameterIgnoreDefault(clientParam);
  if (userValue != null) {
    return userValue.equals("1");
  }

  // Server-driven rollout only applies to DBSQL warehouses; all-purpose clusters stay off.
  if (!(computeResource instanceof Warehouse)) {
    return false;
  }

  boolean defaultOn = getParameter(clientParam).equals("1");

  boolean serverOn;
  try {
    serverOn =
        DatabricksDriverFeatureFlagsContextFactory.getInstance(this)
            .isFeatureEnabled(serverFlagName);
  } catch (Exception e) {
    // Fail safe: if the flag service is unreachable, treat the flag as disabled.
    LOGGER.debug("Failed to check server-side flag {}: {}", serverFlagName, e.getMessage());
    serverOn = false;
  }

  boolean enabled = defaultOn && serverOn;
  if (enabled) {
    LOGGER.debug(
        "Feature {} enabled for warehouse: client default={}, server flag {} ={}",
        clientParam.getParamName(),
        defaultOn,
        serverFlagName,
        serverOn);
  }
  return enabled;
}

// Looks up a connection parameter by its lower-cased name; falls back to the
// caller-supplied default only when the key is absent from the parsed URL params.
private String getParameter(DatabricksJdbcUrlParams key, String defaultValue) {
  return this.parameters.getOrDefault(key.getParamName().toLowerCase(), defaultValue);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,7 @@ public enum DatabricksJdbcUrlParams {
USE_QUERY_FOR_METADATA(
"UseQueryForMetadata",
"Use SQL SHOW commands instead of Thrift RPCs for metadata operations. When enabled, EnableShowCommandForGetFunctions is redundant",
"0"),
"1"),
TREAT_METADATA_CATALOG_NAME_AS_PATTERN(
"TreatMetadataCatalogNameAsPattern",
"Treat catalog names as patterns in Thrift metadata RPCs. When disabled (default), wildcard characters in catalog names are escaped",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1456,15 +1456,16 @@ public void testDefaultGetterCoverage() throws DatabricksSQLException {

@Test
public void testUseQueryForMetadataDefaultFalseForWarehouse() throws DatabricksSQLException {
  // Warehouse URL, no explicit UseQueryForMetadata: the client default ("1") is not
  // sufficient on its own — the server-side flag is unset here, so the result is false.
  IDatabricksConnectionContext context =
      DatabricksConnectionContext.parse(TestConstants.VALID_URL_1, properties);
  assertFalse(context.useQueryForMetadata());
}

@Test
public void testUseQueryForMetadataDefaultFalseForCluster() throws DatabricksSQLException {
// Cluster URL without explicit UseQueryForMetadatadefault is false
// Cluster without explicit settingalways false regardless of defaults
IDatabricksConnectionContext ctx =
DatabricksConnectionContext.parse(TestConstants.VALID_CLUSTER_URL, properties);
assertFalse(ctx.useQueryForMetadata());
Expand All @@ -1488,6 +1489,90 @@ public void testUseQueryForMetadataExplicitFalseOnWarehouse() throws DatabricksS
assertFalse(ctx.useQueryForMetadata());
}

@Test
public void testUseQueryForMetadata_serverFlagEnabled_warehouseReturnsTrue()
    throws DatabricksSQLException {
  // Warehouse, nothing explicit in the URL: client default "1" plus an enabled
  // server flag is the two-key combination that turns the feature on.
  DatabricksConnectionContext context =
      (DatabricksConnectionContext)
          DatabricksConnectionContext.parse(TestConstants.VALID_URL_1, properties);

  Map<String, String> serverFlags = new HashMap<>();
  serverFlags.put(
      "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc", "true");
  DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(context, serverFlags);

  assertTrue(context.useQueryForMetadata());
}

@Test
public void testUseQueryForMetadata_serverFlagDisabled_warehouseReturnsFalse()
    throws DatabricksSQLException {
  // Warehouse, nothing explicit in the URL: an explicitly disabled server flag
  // blocks the feature even though the client default is "1".
  DatabricksConnectionContext context =
      (DatabricksConnectionContext)
          DatabricksConnectionContext.parse(TestConstants.VALID_URL_1, properties);

  Map<String, String> serverFlags = new HashMap<>();
  serverFlags.put(
      "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc",
      "false");
  DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(context, serverFlags);

  assertFalse(context.useQueryForMetadata());
}

@Test
public void testUseQueryForMetadata_serverFlagEnabled_clusterIgnored()
    throws DatabricksSQLException {
  // All-purpose cluster: the feature is unconditionally off — neither the client
  // default nor an enabled server flag can switch it on.
  DatabricksConnectionContext context =
      (DatabricksConnectionContext)
          DatabricksConnectionContext.parse(TestConstants.VALID_CLUSTER_URL, properties);

  Map<String, String> serverFlags = new HashMap<>();
  serverFlags.put(
      "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc", "true");
  DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(context, serverFlags);

  assertFalse(context.useQueryForMetadata());
}

@Test
public void testUseQueryForMetadata_clientExplicit1_overridesServerFlagDisabled()
    throws DatabricksSQLException {
  // UseQueryForMetadata=1 in the URL takes priority: a disabled server flag is ignored.
  DatabricksConnectionContext context =
      (DatabricksConnectionContext)
          DatabricksConnectionContext.parse(
              TestConstants.VALID_URL_1 + ";UseQueryForMetadata=1", properties);

  Map<String, String> serverFlags = new HashMap<>();
  serverFlags.put(
      "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc",
      "false");
  DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(context, serverFlags);

  assertTrue(context.useQueryForMetadata());
}

@Test
public void testUseQueryForMetadata_clientExplicit0_overridesServerFlagEnabled()
    throws DatabricksSQLException {
  // UseQueryForMetadata=0 in the URL takes priority: an enabled server flag is ignored.
  DatabricksConnectionContext context =
      (DatabricksConnectionContext)
          DatabricksConnectionContext.parse(
              TestConstants.VALID_URL_1 + ";UseQueryForMetadata=0", properties);

  Map<String, String> serverFlags = new HashMap<>();
  serverFlags.put(
      "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc", "true");
  DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(context, serverFlags);

  assertFalse(context.useQueryForMetadata());
}

// ---------------------------------------------------------------------------
// Geospatial flag independence from complex datatype flag
// ---------------------------------------------------------------------------
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
import com.databricks.jdbc.api.internal.IDatabricksConnectionContext;
import com.databricks.jdbc.common.DatabricksClientType;
import com.databricks.jdbc.common.DatabricksJdbcUrlParams;
import com.databricks.jdbc.common.safe.DatabricksDriverFeatureFlagsContextFactory;
import com.databricks.jdbc.dbclient.impl.sqlexec.DatabricksMetadataQueryClient;
import com.databricks.jdbc.dbclient.impl.sqlexec.DatabricksSdkClient;
import com.databricks.jdbc.dbclient.impl.thrift.DatabricksThriftServiceClient;
Expand All @@ -22,6 +23,8 @@
import com.databricks.jdbc.model.client.thrift.generated.TSessionHandle;
import com.databricks.jdbc.telemetry.latency.DatabricksMetricsTimedProcessor;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
Expand All @@ -47,6 +50,11 @@ public class DatabricksSessionTest {
static void setupWarehouse(boolean useThrift) throws SQLException {
  final String jdbcUrl;
  if (useThrift) {
    jdbcUrl = WAREHOUSE_JDBC_URL;
  } else {
    jdbcUrl = WAREHOUSE_JDBC_URL_WITH_SEA;
  }
  connectionContext = DatabricksConnectionContext.parse(jdbcUrl, new Properties());
  // Reset the shared static feature-flag context with an empty map so that flags
  // set by other test classes (e.g. DatabricksConnectionContextTest) cannot leak
  // into this one.
  DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(
      connectionContext, new HashMap<>());
}

private void setupCluster() throws SQLException {
Expand Down Expand Up @@ -328,6 +336,27 @@ public void testUseQueryForMetadataDisabledByDefaultForWarehouse() throws SQLExc
"Default UseQueryForMetadata=0: warehouse uses native Thrift RPCs for metadata");
}

@Test
public void testUseQueryForMetadataEnabledViaServerFlag() throws SQLException {
  setupWarehouse(true /* useThrift */);
  // Simulate server-side flag enabling SHOW commands for this warehouse
  Map<String, String> flags = new HashMap<>();
  flags.put(
      "databricks.partnerplatform.clientConfigsFeatureFlags.enableUseQueryForThriftJdbc", "true");
  DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(connectionContext, flags);

  // Bug fix: the cleanup used to run only on the success path. A failing assertion
  // would leave the flag set on the shared static factory and contaminate every
  // later test. try/finally guarantees the reset regardless of outcome.
  try {
    assertTrue(connectionContext.useQueryForMetadata());
    DatabricksSession session = new DatabricksSession(connectionContext, thriftClient);
    assertInstanceOf(
        DatabricksMetadataQueryClient.class,
        session.getDatabricksMetadataClient(),
        "Server flag enabled: warehouse should use SHOW commands for metadata");
  } finally {
    // Clean up so other tests are not affected
    DatabricksDriverFeatureFlagsContextFactory.setFeatureFlagsContext(
        connectionContext, new HashMap<>());
  }
}

@Test
public void testUseQueryForMetadataDisabledByDefaultForCluster() throws SQLException {
connectionContext = DatabricksConnectionContext.parse(VALID_CLUSTER_URL, new Properties());
Expand Down
Loading