From 87c35b451e370aeeef0ecdfce403299ee3e3dd97 Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Wed, 22 Apr 2026 08:51:26 +0530 Subject: [PATCH 1/8] Force Thrift when metadata params require native RPCs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a user sets UseQueryForMetadata=0 or TreatMetadataCatalogNameAsPattern=1 without explicitly setting UseThriftClient, force Thrift mode. These params require native Thrift RPCs and don't work with SEA. If the user explicitly sets UseThriftClient=0 (wants SEA), we honour that even though the metadata params won't have effect — user's explicit choice takes priority. Decision matrix: - UseThriftClient=1 → Thrift (explicit) - UseThriftClient=0 → SEA (explicit, metadata params ignored) - UseQueryForMetadata=0 (no UseThriftClient) → Thrift (needs native RPCs) - TreatMetadataCatalogNameAsPattern=1 (no UseThriftClient) → Thrift - No metadata params (no UseThriftClient) → SAFE flag decides 13 new tests covering all combinations. Signed-off-by: Gopal Lal Co-authored-by: Isaac Signed-off-by: Gopal Lal --- .../api/impl/DatabricksConnectionContext.java | 19 ++- .../impl/DatabricksConnectionContextTest.java | 134 ++++++++++++++++++ 2 files changed, 152 insertions(+), 1 deletion(-) diff --git a/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java b/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java index 2852fa674..7cc3778a4 100644 --- a/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java +++ b/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java @@ -459,7 +459,24 @@ public DatabricksClientType getClientTypeFromContext() { return DatabricksClientType.SEA; } } - // Now, user has not provided a value, we will decide based on our checks + // Now, user has not provided a value for UseThriftClient, we will decide based on our checks. 
+ // If user explicitly requires Thrift-native metadata behavior, stay on Thrift: + // - UseQueryForMetadata=0: user wants native Thrift RPCs for metadata (not SHOW commands) + // - TreatMetadataCatalogNameAsPattern=1: only works with native Thrift RPCs + String explicitUseQueryForMetadata = + getParameterIgnoreDefault(DatabricksJdbcUrlParams.USE_QUERY_FOR_METADATA); + String explicitTreatCatalogAsPattern = + getParameterIgnoreDefault(DatabricksJdbcUrlParams.TREAT_METADATA_CATALOG_NAME_AS_PATTERN); + if ((explicitUseQueryForMetadata != null && explicitUseQueryForMetadata.equals("0")) + || (explicitTreatCatalogAsPattern != null && explicitTreatCatalogAsPattern.equals("1"))) { + LOGGER.debug( + "Forcing Thrift client: user requires Thrift-native metadata behavior " + + "(UseQueryForMetadata={}, TreatMetadataCatalogNameAsPattern={})", + explicitUseQueryForMetadata, + explicitTreatCatalogAsPattern); + return DatabricksClientType.THRIFT; + } + // Check if circuit breaker is open due to recent 429 rate limit failures if (SeaCircuitBreakerManager.isCircuitOpen()) { long remainingMs = SeaCircuitBreakerManager.getTimeRemainingMs(); diff --git a/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java b/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java index f29b60fc5..c869b1226 100644 --- a/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java +++ b/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java @@ -1487,4 +1487,138 @@ public void testUseQueryForMetadataExplicitFalseOnWarehouse() throws DatabricksS TestConstants.VALID_URL_1 + ";UseQueryForMetadata=0", properties); assertFalse(ctx.useQueryForMetadata()); } + + // --------------------------------------------------------------------------- + // Client type selection with Thrift-native metadata params + // --------------------------------------------------------------------------- + + @Test + public void 
testUseQueryForMetadata0_forcesThrift() throws DatabricksSQLException { + // UseQueryForMetadata=0 without UseThriftClient → forces Thrift + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_1 + ";UseQueryForMetadata=0", properties); + assertEquals(DatabricksClientType.THRIFT, ctx.getClientType()); + } + + @Test + public void testTreatCatalogAsPattern1_forcesThrift() throws DatabricksSQLException { + // TreatMetadataCatalogNameAsPattern=1 without UseThriftClient → forces Thrift + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_1 + ";TreatMetadataCatalogNameAsPattern=1", properties); + assertEquals(DatabricksClientType.THRIFT, ctx.getClientType()); + } + + @Test + public void testBothMetadataParams_forcesThrift() throws DatabricksSQLException { + // Both UseQueryForMetadata=0 and TreatMetadataCatalogNameAsPattern=1 → forces Thrift + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_1 + + ";UseQueryForMetadata=0;TreatMetadataCatalogNameAsPattern=1", + properties); + assertEquals(DatabricksClientType.THRIFT, ctx.getClientType()); + } + + @Test + public void testUseQueryForMetadata0_withExplicitSEA_honoursSEA() throws DatabricksSQLException { + // UseThriftClient=0 (explicit SEA) + UseQueryForMetadata=0 → SEA wins + // User explicitly chose SEA, so we honour that even though metadata param won't work + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_1 + ";UseThriftClient=0;UseQueryForMetadata=0", properties); + assertEquals(DatabricksClientType.SEA, ctx.getClientType()); + } + + @Test + public void testTreatCatalogAsPattern1_withExplicitSEA_honoursSEA() + throws DatabricksSQLException { + // UseThriftClient=0 (explicit SEA) + TreatMetadataCatalogNameAsPattern=1 → SEA wins + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_1 + 
";UseThriftClient=0;TreatMetadataCatalogNameAsPattern=1", + properties); + assertEquals(DatabricksClientType.SEA, ctx.getClientType()); + } + + @Test + public void testUseQueryForMetadata0_withExplicitThrift_staysThrift() + throws DatabricksSQLException { + // UseThriftClient=1 (explicit Thrift) + UseQueryForMetadata=0 → Thrift + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_1 + ";UseThriftClient=1;UseQueryForMetadata=0", properties); + assertEquals(DatabricksClientType.THRIFT, ctx.getClientType()); + } + + @Test + public void testUseQueryForMetadata1_doesNotForceThrift() throws DatabricksSQLException { + // UseQueryForMetadata=1 (explicit SHOW commands) → does NOT force Thrift + // SHOW commands work in both modes, so SAFE flag / other checks decide + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_1 + ";UseQueryForMetadata=1", properties); + // Should NOT be forced to Thrift by metadata params (other checks may still pick Thrift) + // The key assertion: it didn't get forced to Thrift by our new check + // Since VALID_URL_1 has no UseThriftClient, it goes through other checks (Arrow, CF, flag) + // which default to Thrift for this URL — but that's not from our metadata check + assertNotNull(ctx.getClientType()); + } + + @Test + public void testTreatCatalogAsPattern0_doesNotForceThrift() throws DatabricksSQLException { + // TreatMetadataCatalogNameAsPattern=0 (default, literal match) → does NOT force Thrift + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_1 + ";TreatMetadataCatalogNameAsPattern=0", properties); + assertNotNull(ctx.getClientType()); + } + + @Test + public void testNoMetadataParams_defaultBehavior() throws DatabricksSQLException { + // No metadata params set → default behavior (SAFE flag / other checks decide) + IDatabricksConnectionContext ctx = + 
DatabricksConnectionContext.parse(TestConstants.VALID_URL_1, properties); + // VALID_URL_1 has no UseThriftClient, defaults to Thrift (no SAFE flag in tests) + assertEquals(DatabricksClientType.THRIFT, ctx.getClientType()); + } + + @Test + public void testCluster_metadataParamsIgnored() throws DatabricksSQLException { + // All-Purpose Cluster always uses Thrift regardless of metadata params + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_CLUSTER_URL + ";UseQueryForMetadata=0", properties); + assertEquals(DatabricksClientType.THRIFT, ctx.getClientType()); + + ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_CLUSTER_URL + ";TreatMetadataCatalogNameAsPattern=1", properties); + assertEquals(DatabricksClientType.THRIFT, ctx.getClientType()); + } + + @Test + public void testUseQueryForMetadata0_withExplicitSEA_useQueryForMetadataStillFalse() + throws DatabricksSQLException { + // Even though SEA is forced, UseQueryForMetadata=0 should still report false + // (the metadata param value is independent of client type selection) + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_1 + ";UseThriftClient=0;UseQueryForMetadata=0", properties); + assertEquals(DatabricksClientType.SEA, ctx.getClientType()); + assertFalse(ctx.useQueryForMetadata()); + } + + @Test + public void testTreatCatalogAsPattern1_withExplicitSEA_treatCatalogStillTrue() + throws DatabricksSQLException { + // Even though SEA is forced, TreatMetadataCatalogNameAsPattern=1 should still report true + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_1 + ";UseThriftClient=0;TreatMetadataCatalogNameAsPattern=1", + properties); + assertEquals(DatabricksClientType.SEA, ctx.getClientType()); + assertTrue(ctx.treatMetadataCatalogNameAsPattern()); + } } From d31e172b9d62fcbadba1fa685773ba9c8feafd66 Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Wed, 22 Apr 2026 
09:10:55 +0530 Subject: [PATCH 2/8] Strengthen metadata param tests per review feedback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Replace weak assertNotNull with concrete assertions using VALID_URL_2 (explicit SEA) to prove the metadata check doesn't trigger - Add comment explaining testNoMetadataParams_defaultBehavior is testing non-interference, not the default path - Add strict-value tests: "false" != "0" and "true" != "1" — our strict .equals() matching doesn't trigger for string booleans - All weak tests now assert a specific client type (SEA or THRIFT), proving the new check either triggered or didn't Signed-off-by: Gopal Lal Co-authored-by: Isaac Signed-off-by: Gopal Lal --- .../impl/DatabricksConnectionContextTest.java | 50 ++++++++++++++----- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java b/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java index c869b1226..530df8f6d 100644 --- a/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java +++ b/src/test/java/com/databricks/jdbc/api/impl/DatabricksConnectionContextTest.java @@ -1554,36 +1554,60 @@ public void testUseQueryForMetadata0_withExplicitThrift_staysThrift() @Test public void testUseQueryForMetadata1_doesNotForceThrift() throws DatabricksSQLException { - // UseQueryForMetadata=1 (explicit SHOW commands) → does NOT force Thrift - // SHOW commands work in both modes, so SAFE flag / other checks decide + // UseQueryForMetadata=1 (SHOW commands) with explicit SEA → should remain SEA + // Proves our new check doesn't trigger for UseQueryForMetadata=1 + // VALID_URL_2 has UseThriftClient=0, so it's SEA IDatabricksConnectionContext ctx = DatabricksConnectionContext.parse( - TestConstants.VALID_URL_1 + ";UseQueryForMetadata=1", properties); - // Should NOT be forced to Thrift by metadata params (other checks may 
still pick Thrift) - // The key assertion: it didn't get forced to Thrift by our new check - // Since VALID_URL_1 has no UseThriftClient, it goes through other checks (Arrow, CF, flag) - // which default to Thrift for this URL — but that's not from our metadata check - assertNotNull(ctx.getClientType()); + TestConstants.VALID_URL_2 + ";UseQueryForMetadata=1", properties_with_pwd); + assertEquals(DatabricksClientType.SEA, ctx.getClientType()); } @Test public void testTreatCatalogAsPattern0_doesNotForceThrift() throws DatabricksSQLException { - // TreatMetadataCatalogNameAsPattern=0 (default, literal match) → does NOT force Thrift + // TreatMetadataCatalogNameAsPattern=0 (default, literal match) with explicit SEA → stays SEA + // Proves our new check doesn't trigger for the default/disabled value IDatabricksConnectionContext ctx = DatabricksConnectionContext.parse( - TestConstants.VALID_URL_1 + ";TreatMetadataCatalogNameAsPattern=0", properties); - assertNotNull(ctx.getClientType()); + TestConstants.VALID_URL_2 + ";TreatMetadataCatalogNameAsPattern=0", + properties_with_pwd); + assertEquals(DatabricksClientType.SEA, ctx.getClientType()); } @Test public void testNoMetadataParams_defaultBehavior() throws DatabricksSQLException { - // No metadata params set → default behavior (SAFE flag / other checks decide) + // No metadata params, no UseThriftClient → other checks decide (Arrow, CF, SAFE flag). + // VALID_URL_1 has no UseThriftClient and no SAFE flag in tests, so downstream + // checks (Arrow disabled, CF disabled, no flag) fall through to Thrift default. + // This tests that our metadata param check doesn't interfere with the default path. 
IDatabricksConnectionContext ctx = DatabricksConnectionContext.parse(TestConstants.VALID_URL_1, properties); - // VALID_URL_1 has no UseThriftClient, defaults to Thrift (no SAFE flag in tests) assertEquals(DatabricksClientType.THRIFT, ctx.getClientType()); } + @Test + public void testUseQueryForMetadataFalseString_doesNotForceThrift() + throws DatabricksSQLException { + // "false" is not "0" — our check uses .equals("0"), so "false" doesn't trigger it. + // With explicit SEA (VALID_URL_2), should stay SEA. + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_2 + ";UseQueryForMetadata=false", properties_with_pwd); + assertEquals(DatabricksClientType.SEA, ctx.getClientType()); + } + + @Test + public void testTreatCatalogAsPatternTrueString_doesNotForceThrift() + throws DatabricksSQLException { + // "true" is not "1" — our check uses .equals("1"), so "true" doesn't trigger it. + // With explicit SEA (VALID_URL_2), should stay SEA. + IDatabricksConnectionContext ctx = + DatabricksConnectionContext.parse( + TestConstants.VALID_URL_2 + ";TreatMetadataCatalogNameAsPattern=true", + properties_with_pwd); + assertEquals(DatabricksClientType.SEA, ctx.getClientType()); + } + @Test public void testCluster_metadataParamsIgnored() throws DatabricksSQLException { // All-Purpose Cluster always uses Thrift regardless of metadata params From bca38712445858532bba2c69baa93248950bcded Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Wed, 22 Apr 2026 09:12:33 +0530 Subject: [PATCH 3/8] Promote metadata param conflict logs to INFO level - Force-Thrift log: promoted from DEBUG to INFO when UseQueryForMetadata=0 or TreatMetadataCatalogNameAsPattern=1 forces Thrift mode - Explicit SEA conflict log: new INFO log when UseThriftClient=0 is set alongside Thrift-only metadata params, warning they will have no effect Signed-off-by: Gopal Lal Co-authored-by: Isaac Signed-off-by: Gopal Lal --- .../api/impl/DatabricksConnectionContext.java | 15 
++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java b/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java index 7cc3778a4..b35ab0891 100644 --- a/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java +++ b/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java @@ -456,6 +456,19 @@ public DatabricksClientType getClientTypeFromContext() { if (useThriftClient.equals("1")) { return DatabricksClientType.THRIFT; } else if (useThriftClient.equals("0")) { + // Warn if user explicitly chose SEA but also set Thrift-only metadata params + String uqm = getParameterIgnoreDefault(DatabricksJdbcUrlParams.USE_QUERY_FOR_METADATA); + String tcp = + getParameterIgnoreDefault( + DatabricksJdbcUrlParams.TREAT_METADATA_CATALOG_NAME_AS_PATTERN); + if ((uqm != null && uqm.equals("0")) || (tcp != null && tcp.equals("1"))) { + LOGGER.info( + "UseThriftClient=0 (SEA) is set alongside Thrift-only metadata params " + + "(UseQueryForMetadata={}, TreatMetadataCatalogNameAsPattern={}). 
" + + "Honouring SEA — these metadata params will have no effect.", + uqm, + tcp); + } return DatabricksClientType.SEA; } } @@ -469,7 +482,7 @@ public DatabricksClientType getClientTypeFromContext() { getParameterIgnoreDefault(DatabricksJdbcUrlParams.TREAT_METADATA_CATALOG_NAME_AS_PATTERN); if ((explicitUseQueryForMetadata != null && explicitUseQueryForMetadata.equals("0")) || (explicitTreatCatalogAsPattern != null && explicitTreatCatalogAsPattern.equals("1"))) { - LOGGER.debug( + LOGGER.info( "Forcing Thrift client: user requires Thrift-native metadata behavior " + "(UseQueryForMetadata={}, TreatMetadataCatalogNameAsPattern={})", explicitUseQueryForMetadata, From 05439022bc11577a27293a7393fe77528a62d404 Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Wed, 22 Apr 2026 09:23:18 +0530 Subject: [PATCH 4/8] Address review: better variable names, promote to WARN - Rename uqm/tcp to explicitQueryForMetadata/explicitCatalogAsPattern - Promote SEA conflict log from INFO to WARN (user likely misconfigured) Signed-off-by: Gopal Lal Co-authored-by: Isaac Signed-off-by: Gopal Lal --- .../jdbc/api/impl/DatabricksConnectionContext.java | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java b/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java index b35ab0891..def37788a 100644 --- a/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java +++ b/src/main/java/com/databricks/jdbc/api/impl/DatabricksConnectionContext.java @@ -457,17 +457,19 @@ public DatabricksClientType getClientTypeFromContext() { return DatabricksClientType.THRIFT; } else if (useThriftClient.equals("0")) { // Warn if user explicitly chose SEA but also set Thrift-only metadata params - String uqm = getParameterIgnoreDefault(DatabricksJdbcUrlParams.USE_QUERY_FOR_METADATA); - String tcp = + String explicitQueryForMetadata = + 
getParameterIgnoreDefault(DatabricksJdbcUrlParams.USE_QUERY_FOR_METADATA); + String explicitCatalogAsPattern = getParameterIgnoreDefault( DatabricksJdbcUrlParams.TREAT_METADATA_CATALOG_NAME_AS_PATTERN); - if ((uqm != null && uqm.equals("0")) || (tcp != null && tcp.equals("1"))) { - LOGGER.info( + if ((explicitQueryForMetadata != null && explicitQueryForMetadata.equals("0")) + || (explicitCatalogAsPattern != null && explicitCatalogAsPattern.equals("1"))) { + LOGGER.warn( "UseThriftClient=0 (SEA) is set alongside Thrift-only metadata params " + "(UseQueryForMetadata={}, TreatMetadataCatalogNameAsPattern={}). " + "Honouring SEA — these metadata params will have no effect.", - uqm, - tcp); + explicitQueryForMetadata, + explicitCatalogAsPattern); } return DatabricksClientType.SEA; } From 8f343561341b4a7ee3f89372b79e0da96115371e Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Wed, 22 Apr 2026 11:31:18 +0530 Subject: [PATCH 5/8] Add MetadataOperationTimeout support for SHOW commands path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When UseQueryForMetadata=1 (default for warehouses), metadata operations had no timeout because parentStatement=null → timeoutInSeconds=0. Now when parentStatement is null and statementType is METADATA, the driver reads MetadataOperationTimeout from connectionContext (default 300s), matching the native Thrift RPC behavior. 
Changes: - DatabricksSdkClient.executeStatement(): use MetadataOperationTimeout when parentStatement=null and statementType=METADATA - DatabricksThriftAccessor.pollTillOperationFinished(): same fix for Thrift SHOW commands path (added statementType parameter) - Use OPERATION_TIMEOUT_ERROR code for metadata timeouts (matches native Thrift path) Tests: - testMetadataOperationUsesMetadataTimeout: verifies 1s timeout triggers for metadata with parentStatement=null - testNonMetadataWithNullParentHasNoTimeout: verifies non-metadata with parentStatement=null still has no timeout Signed-off-by: Gopal Lal Co-authored-by: Isaac Signed-off-by: Gopal Lal --- .../impl/sqlexec/DatabricksSdkClient.java | 23 ++-- .../impl/thrift/DatabricksThriftAccessor.java | 15 ++- .../impl/sqlexec/DatabricksSdkClientTest.java | 107 ++++++++++++++++++ 3 files changed, 133 insertions(+), 12 deletions(-) diff --git a/src/main/java/com/databricks/jdbc/dbclient/impl/sqlexec/DatabricksSdkClient.java b/src/main/java/com/databricks/jdbc/dbclient/impl/sqlexec/DatabricksSdkClient.java index f97e47327..285b45224 100644 --- a/src/main/java/com/databricks/jdbc/dbclient/impl/sqlexec/DatabricksSdkClient.java +++ b/src/main/java/com/databricks/jdbc/dbclient/impl/sqlexec/DatabricksSdkClient.java @@ -245,16 +245,23 @@ public DatabricksResultSet executeStatement( parentStatement.setStatementId(typedStatementId); } - int timeoutInSeconds = - parentStatement != null ? 
parentStatement.getStatement().getQueryTimeout() : 0; + int timeoutInSeconds; + if (parentStatement != null) { + timeoutInSeconds = parentStatement.getStatement().getQueryTimeout(); + } else if (statementType == StatementType.METADATA) { + timeoutInSeconds = connectionContext.getMetadataOperationTimeout(); + } else { + timeoutInSeconds = 0; + } - // Create timeout handler + // Create timeout handler — use OPERATION_TIMEOUT_ERROR for metadata to match + // the native Thrift metadata path (DatabricksThriftAccessor.fetchMetadataResults) + DatabricksDriverErrorCode timeoutErrorCode = + (statementType == StatementType.METADATA && parentStatement == null) + ? DatabricksDriverErrorCode.OPERATION_TIMEOUT_ERROR + : DatabricksDriverErrorCode.STATEMENT_EXECUTION_TIMEOUT; TimeoutHandler timeoutHandler = - TimeoutHandler.forStatement( - timeoutInSeconds, - typedStatementId, - this, - DatabricksDriverErrorCode.STATEMENT_EXECUTION_TIMEOUT); + TimeoutHandler.forStatement(timeoutInSeconds, typedStatementId, this, timeoutErrorCode); StatementState responseState = response.getStatus().getState(); while (responseState == StatementState.PENDING || responseState == StatementState.RUNNING) { diff --git a/src/main/java/com/databricks/jdbc/dbclient/impl/thrift/DatabricksThriftAccessor.java b/src/main/java/com/databricks/jdbc/dbclient/impl/thrift/DatabricksThriftAccessor.java index c816fbdde..998e2eb1a 100644 --- a/src/main/java/com/databricks/jdbc/dbclient/impl/thrift/DatabricksThriftAccessor.java +++ b/src/main/java/com/databricks/jdbc/dbclient/impl/thrift/DatabricksThriftAccessor.java @@ -232,7 +232,7 @@ DatabricksResultSet execute( TGetOperationStatusResp statusResp = pollTillOperationFinished( - response, parentStatement, session, statementId, sessionDebugInfo); + response, parentStatement, session, statementId, sessionDebugInfo, statementType); boolean isDirectResults = hasResultDataInDirectResults(response); if (isDirectResults) { // The first response has result data @@ -298,10 
+298,17 @@ private TGetOperationStatusResp pollTillOperationFinished( IDatabricksStatementInternal parentStatement, IDatabricksSession session, StatementId statementId, - String sessionDebugInfo) + String sessionDebugInfo, + StatementType statementType) throws SQLException, TException { - int timeoutInSeconds = - (parentStatement == null) ? 0 : parentStatement.getStatement().getQueryTimeout(); + int timeoutInSeconds; + if (parentStatement != null) { + timeoutInSeconds = parentStatement.getStatement().getQueryTimeout(); + } else if (statementType == StatementType.METADATA) { + timeoutInSeconds = connectionContext.getMetadataOperationTimeout(); + } else { + timeoutInSeconds = 0; + } TGetOperationStatusResp statusResp = null; if (response.isSetDirectResults()) { diff --git a/src/test/java/com/databricks/jdbc/dbclient/impl/sqlexec/DatabricksSdkClientTest.java b/src/test/java/com/databricks/jdbc/dbclient/impl/sqlexec/DatabricksSdkClientTest.java index c484e6b21..d6e086f7a 100644 --- a/src/test/java/com/databricks/jdbc/dbclient/impl/sqlexec/DatabricksSdkClientTest.java +++ b/src/test/java/com/databricks/jdbc/dbclient/impl/sqlexec/DatabricksSdkClientTest.java @@ -493,6 +493,113 @@ public void testExecuteStatementWithTimeoutExpired() throws Exception { verify(databricksSdkClient).cancelStatement(eq(STATEMENT_ID)); } + @Test + public void testMetadataOperationUsesMetadataTimeout() throws Exception { + // MetadataOperationTimeout=1 with parentStatement=null (metadata path) + IDatabricksConnectionContext connectionContext = + DatabricksConnectionContext.parse( + JDBC_URL, + new Properties() { + { + setProperty("MetadataOperationTimeout", "1"); + setProperty("asyncExecPollInterval", "1000"); + } + }); + DatabricksSdkClient databricksSdkClient = + spy(new DatabricksSdkClient(connectionContext, statementExecutionService, apiClient)); + DatabricksConnection connection = + new DatabricksConnection(connectionContext, databricksSdkClient); + + CreateSessionResponse sessionResponse = 
new CreateSessionResponse().setSessionId(SESSION_ID); + when(apiClient.execute(any(Request.class), eq(CreateSessionResponse.class))) + .thenReturn(sessionResponse); + connection.open(); + + ExecuteStatementResponse executeResponse = + new ExecuteStatementResponse() + .setStatementId(STATEMENT_ID.toSQLExecStatementId()) + .setStatus(new StatementStatus().setState(StatementState.RUNNING)); + GetStatementResponse runningResponse = + new GetStatementResponse() + .setStatus(new StatementStatus().setState(StatementState.RUNNING)); + + when(apiClient.execute( + argThat(req -> req != null && STATEMENT_PATH.equals(req.getUrl())), + eq(ExecuteStatementResponse.class))) + .thenReturn(executeResponse); + when(apiClient.execute( + argThat( + req -> + req != null + && req.getUrl() != null + && req.getUrl().contains(STATEMENT_ID.toSQLExecStatementId())), + eq(GetStatementResponse.class))) + .thenReturn(runningResponse); + + // Metadata with parentStatement=null should use MetadataOperationTimeout (1s) + DatabricksTimeoutException exception = + assertThrows( + DatabricksTimeoutException.class, + () -> + databricksSdkClient.executeStatement( + "SHOW SCHEMAS IN ALL CATALOGS", + warehouse, + new java.util.HashMap<>(), + StatementType.METADATA, + connection.getSession(), + null, // parentStatement=null (metadata path) + null)); + + assertTrue(exception.getMessage().contains("timed-out after 1 seconds")); + verify(databricksSdkClient).cancelStatement(eq(STATEMENT_ID)); + } + + @Test + public void testNonMetadataWithNullParentHasNoTimeout() throws Exception { + // Non-metadata with parentStatement=null should have timeout=0 (infinite) + // Use a short poll interval so the test completes quickly + IDatabricksConnectionContext connectionContext = + DatabricksConnectionContext.parse( + JDBC_URL, + new Properties() { + { + setProperty("MetadataOperationTimeout", "1"); + } + }); + DatabricksSdkClient databricksSdkClient = + new DatabricksSdkClient(connectionContext, 
statementExecutionService, apiClient); + DatabricksConnection connection = + new DatabricksConnection(connectionContext, databricksSdkClient); + + CreateSessionResponse sessionResponse = new CreateSessionResponse().setSessionId(SESSION_ID); + when(apiClient.execute(any(Request.class), eq(CreateSessionResponse.class))) + .thenReturn(sessionResponse); + connection.open(); + + // Return SUCCEEDED immediately so the test completes + ExecuteStatementResponse executeResponse = + new ExecuteStatementResponse() + .setStatementId(STATEMENT_ID.toSQLExecStatementId()) + .setStatus(new StatementStatus().setState(StatementState.SUCCEEDED)); + + when(apiClient.execute( + argThat(req -> req != null && STATEMENT_PATH.equals(req.getUrl())), + eq(ExecuteStatementResponse.class))) + .thenReturn(executeResponse); + + // Non-METADATA with parentStatement=null: no timeout applied, should succeed + assertDoesNotThrow( + () -> + databricksSdkClient.executeStatement( + "SELECT 1", + warehouse, + new java.util.HashMap<>(), + StatementType.SQL, + connection.getSession(), + null, + null)); + } + @Test public void testServerSideTimeoutThrowsTimeoutException() throws Exception { From c59681102023bcd6b1a88df14b574d5cce5f736b Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Tue, 12 May 2026 17:48:31 +0530 Subject: [PATCH 6/8] Merge main and add changelog for MetadataOperationTimeout fix Co-authored-by: Isaac Signed-off-by: Gopal Lal --- NEXT_CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 29b843a24..5f52ce757 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -17,6 +17,7 @@ - Arrow schema deserialization failures (Thrift metadata path) now surface a dedicated driver error code `ARROW_SCHEMA_PARSING_ERROR` (vendor code `22000`) and a proper SQLSTATE `22000` (Data Exception) on the thrown `SQLException`, instead of the generic `RESULT_SET_ERROR` (1004) and the enum name as SQLSTATE. The exception message is unchanged. 
### Fixed +- Fixed `MetadataOperationTimeout` not being applied to SHOW commands metadata path. When `UseQueryForMetadata=1`, metadata operations (e.g. `getTables`, `getSchemas`) now respect the `MetadataOperationTimeout` connection property instead of hanging indefinitely with no timeout. - Reclassify transient/mis-categorized server errors so callers can identify retryable failures. The remap is applied at all Thrift error sites From ff36176ac474976236b68d39823363b5f159840b Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Tue, 12 May 2026 17:51:45 +0530 Subject: [PATCH 7/8] Fix changelog: timeout applies to all SHOW commands, not just UseQueryForMetadata Co-authored-by: Isaac Signed-off-by: Gopal Lal --- NEXT_CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 5f52ce757..8350a797e 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -17,7 +17,7 @@ - Arrow schema deserialization failures (Thrift metadata path) now surface a dedicated driver error code `ARROW_SCHEMA_PARSING_ERROR` (vendor code `22000`) and a proper SQLSTATE `22000` (Data Exception) on the thrown `SQLException`, instead of the generic `RESULT_SET_ERROR` (1004) and the enum name as SQLSTATE. The exception message is unchanged. ### Fixed -- Fixed `MetadataOperationTimeout` not being applied to SHOW commands metadata path. When `UseQueryForMetadata=1`, metadata operations (e.g. `getTables`, `getSchemas`) now respect the `MetadataOperationTimeout` connection property instead of hanging indefinitely with no timeout. +- Fixed `MetadataOperationTimeout` not being applied when metadata operations use SHOW commands. Operations like `getTables`, `getSchemas`, and `getColumns` now respect the `MetadataOperationTimeout` connection property instead of hanging indefinitely with no timeout. - Reclassify transient/mis-categorized server errors so callers can identify retryable failures. 
The remap is applied at all Thrift error sites From baeb702de5c814a72dbd8f34440beef5120548dd Mon Sep 17 00:00:00 2001 From: Gopal Lal Date: Tue, 12 May 2026 17:54:24 +0530 Subject: [PATCH 8/8] spotless:apply formatting fix Co-authored-by: Isaac Signed-off-by: Gopal Lal --- .../java/com/databricks/jdbc/common/DatabricksJdbcConstants.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/java/com/databricks/jdbc/common/DatabricksJdbcConstants.java b/src/main/java/com/databricks/jdbc/common/DatabricksJdbcConstants.java index 11c31c1c0..857d67c69 100644 --- a/src/main/java/com/databricks/jdbc/common/DatabricksJdbcConstants.java +++ b/src/main/java/com/databricks/jdbc/common/DatabricksJdbcConstants.java @@ -121,6 +121,7 @@ public final class DatabricksJdbcConstants { * concurrent-modification errors where the operation is potentially retryable. */ public static final String SERIALIZATION_FAILURE_SQLSTATE = "40001"; + /** Standard SQL state for data exception (SQLSTATE 22000). */ public static final String DATA_EXCEPTION_SQLSTATE = "22000";