diff --git a/Dashboard/Dashboard.csproj b/Dashboard/Dashboard.csproj
index 0f21541..e15a88b 100644
--- a/Dashboard/Dashboard.csproj
+++ b/Dashboard/Dashboard.csproj
@@ -6,10 +6,10 @@
true
PerformanceMonitorDashboard
SQL Server Performance Monitor Dashboard
- 2.1.0
- 2.1.0.0
- 2.1.0.0
- 2.1.0
+ 2.2.0
+ 2.2.0.0
+ 2.2.0.0
+ 2.2.0
Darling Data, LLC
Copyright © 2026 Darling Data, LLC
EDD.ico
diff --git a/Dashboard/Services/DatabaseService.QueryPerformance.cs b/Dashboard/Services/DatabaseService.QueryPerformance.cs
index 6c3c2c8..c91118b 100644
--- a/Dashboard/Services/DatabaseService.QueryPerformance.cs
+++ b/Dashboard/Services/DatabaseService.QueryPerformance.cs
@@ -739,8 +739,8 @@ WITH per_lifetime AS
total_spills = MAX(qs.total_spills),
min_spills = MIN(qs.min_spills),
max_spills = MAX(qs.max_spills),
- query_text = MAX(qs.query_text),
- query_plan_text = MAX(qs.query_plan_text),
+ query_text = CAST(DECOMPRESS(MAX(qs.query_text)) AS nvarchar(max)),
+ query_plan_text = CAST(DECOMPRESS(MAX(qs.query_plan_text)) AS nvarchar(max)),
query_plan_hash = MAX(qs.query_plan_hash),
sql_handle = MAX(qs.sql_handle),
plan_handle = MAX(qs.plan_handle)
@@ -753,7 +753,7 @@ FROM collect.query_stats AS qs
OR (qs.last_execution_time >= @fromDate AND qs.last_execution_time <= @toDate)
OR (qs.creation_time <= @fromDate AND qs.last_execution_time >= @toDate)))
)
- AND qs.query_text NOT LIKE N'WAITFOR%'
+    AND (qs.query_text IS NULL OR CAST(DECOMPRESS(qs.query_text) AS nvarchar(max)) NOT LIKE N'WAITFOR%')
GROUP BY
qs.database_name,
qs.query_hash,
@@ -922,7 +922,7 @@ WITH per_lifetime AS
total_spills = MAX(ps.total_spills),
min_spills = MIN(ps.min_spills),
max_spills = MAX(ps.max_spills),
- query_plan_text = MAX(ps.query_plan_text),
+ query_plan_text = CAST(DECOMPRESS(MAX(ps.query_plan_text)) AS nvarchar(max)),
sql_handle = MAX(ps.sql_handle),
plan_handle = MAX(ps.plan_handle)
FROM collect.procedure_stats AS ps
@@ -1101,7 +1101,7 @@ public async Task> GetQueryStoreDataAsync(int hoursBack = 2
plan_type = MAX(qsd.plan_type),
is_forced_plan = MAX(CONVERT(tinyint, qsd.is_forced_plan)),
compatibility_level = MAX(qsd.compatibility_level),
- query_sql_text = CONVERT(nvarchar(max), MAX(qsd.query_sql_text)),
+ query_sql_text = CAST(DECOMPRESS(MAX(qsd.query_sql_text)) AS nvarchar(max)),
query_plan_hash = CONVERT(nvarchar(20), MAX(qsd.query_plan_hash), 1),
force_failure_count = SUM(qsd.force_failure_count),
last_force_failure_reason_desc = MAX(qsd.last_force_failure_reason_desc),
@@ -1121,7 +1121,7 @@ FROM collect.query_store_data AS qsd
OR (qsd.server_last_execution_time >= @fromDate AND qsd.server_last_execution_time <= @toDate)
OR (qsd.server_first_execution_time <= @fromDate AND qsd.server_last_execution_time >= @toDate)))
)
- AND qsd.query_sql_text NOT LIKE N'WAITFOR%'
+    AND (qsd.query_sql_text IS NULL OR CAST(DECOMPRESS(qsd.query_sql_text) AS nvarchar(max)) NOT LIKE N'WAITFOR%')
GROUP BY
qsd.database_name,
qsd.query_id
@@ -2228,7 +2228,7 @@ ORDER BY
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
SELECT
- qsd.query_plan_text
+ CAST(DECOMPRESS(qsd.query_plan_text) AS nvarchar(max)) AS query_plan_text
FROM collect.query_store_data AS qsd
WHERE qsd.collection_id = @collection_id;";
@@ -2276,7 +2276,7 @@ FROM collect.procedure_stats AS ps
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
SELECT
- qs.query_plan_text
+ CAST(DECOMPRESS(qs.query_plan_text) AS nvarchar(max)) AS query_plan_text
FROM collect.query_stats AS qs
WHERE qs.collection_id = @collection_id;";
diff --git a/Installer/PerformanceMonitorInstaller.csproj b/Installer/PerformanceMonitorInstaller.csproj
index eef02bc..c69063e 100644
--- a/Installer/PerformanceMonitorInstaller.csproj
+++ b/Installer/PerformanceMonitorInstaller.csproj
@@ -20,10 +20,10 @@
PerformanceMonitorInstaller
SQL Server Performance Monitor Installer
- 2.1.0
- 2.1.0.0
- 2.1.0.0
- 2.1.0
+ 2.2.0
+ 2.2.0.0
+ 2.2.0.0
+ 2.2.0
Darling Data, LLC
Copyright © 2026 Darling Data, LLC
Installation utility for SQL Server Performance Monitor - Supports SQL Server 2016-2025
diff --git a/InstallerGui/InstallerGui.csproj b/InstallerGui/InstallerGui.csproj
index 5a1fbd4..237ad82 100644
--- a/InstallerGui/InstallerGui.csproj
+++ b/InstallerGui/InstallerGui.csproj
@@ -8,10 +8,10 @@
PerformanceMonitorInstallerGui
PerformanceMonitorInstallerGui
SQL Server Performance Monitor Installer
- 2.1.0
- 2.1.0.0
- 2.1.0.0
- 2.1.0
+ 2.2.0
+ 2.2.0.0
+ 2.2.0.0
+ 2.2.0
Darling Data, LLC
Copyright © 2026 Darling Data, LLC
EDD.ico
diff --git a/Lite/PerformanceMonitorLite.csproj b/Lite/PerformanceMonitorLite.csproj
index 88ee7eb..a40ad9d 100644
--- a/Lite/PerformanceMonitorLite.csproj
+++ b/Lite/PerformanceMonitorLite.csproj
@@ -7,10 +7,10 @@
PerformanceMonitorLite
PerformanceMonitorLite
SQL Server Performance Monitor Lite
- 2.1.0
- 2.1.0.0
- 2.1.0.0
- 2.1.0
+ 2.2.0
+ 2.2.0.0
+ 2.2.0.0
+ 2.2.0
Darling Data, LLC
Copyright © 2026 Darling Data, LLC
Lightweight SQL Server performance monitoring - no installation required on target servers
diff --git a/install/01_install_database.sql b/install/01_install_database.sql
index db141d7..2d71820 100644
--- a/install/01_install_database.sql
+++ b/install/01_install_database.sql
@@ -274,6 +274,10 @@ BEGIN
DEFAULT 5,
retention_days integer NOT NULL
DEFAULT 30,
+ collect_query bit NOT NULL
+ DEFAULT CONVERT(bit, 'true'),
+ collect_plan bit NOT NULL
+ DEFAULT CONVERT(bit, 'true'),
[description] nvarchar(500) NULL,
created_date datetime2(7) NOT NULL
DEFAULT SYSDATETIME(),
diff --git a/install/02_create_tables.sql b/install/02_create_tables.sql
index 4c2cb61..3c91b9c 100644
--- a/install/02_create_tables.sql
+++ b/install/02_create_tables.sql
@@ -168,10 +168,11 @@ BEGIN
total_worker_time_delta /
NULLIF(sample_interval_seconds, 0) / 1000.
),
- /*Query text and execution plan*/
- query_text nvarchar(MAX) NULL,
- query_plan_text nvarchar(MAX) NULL,
- query_plan xml NULL,
+ /*Query text and execution plan (compressed with COMPRESS/DECOMPRESS)*/
+ query_text varbinary(max) NULL,
+ query_plan_text varbinary(max) NULL,
+ /*Deduplication hash for skipping unchanged rows*/
+ row_hash binary(32) NULL,
CONSTRAINT
PK_query_stats
PRIMARY KEY CLUSTERED
@@ -183,6 +184,34 @@ BEGIN
PRINT 'Created collect.query_stats table';
END;
+/*
+2b. Query Stats Dedup Tracking
+One row per natural key, updated on each collection cycle
+*/
+IF OBJECT_ID(N'collect.query_stats_latest_hash', N'U') IS NULL
+BEGIN
+ CREATE TABLE
+ collect.query_stats_latest_hash
+ (
+ sql_handle varbinary(64) NOT NULL,
+ statement_start_offset integer NOT NULL,
+ statement_end_offset integer NOT NULL,
+ plan_handle varbinary(64) NOT NULL,
+ row_hash binary(32) NOT NULL,
+ last_seen datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ CONSTRAINT
+ PK_query_stats_latest_hash
+ PRIMARY KEY CLUSTERED
+ (sql_handle, statement_start_offset,
+ statement_end_offset, plan_handle)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+
+ PRINT 'Created collect.query_stats_latest_hash table';
+END;
+
/*
3. Memory Pressure
*/
@@ -429,9 +458,10 @@ BEGIN
total_worker_time_delta /
NULLIF(sample_interval_seconds, 0) / 1000.
),
- /*Execution plan*/
- query_plan_text nvarchar(max) NULL,
- query_plan xml NULL,
+ /*Execution plan (compressed with COMPRESS/DECOMPRESS)*/
+ query_plan_text varbinary(max) NULL,
+ /*Deduplication hash for skipping unchanged rows*/
+ row_hash binary(32) NULL,
CONSTRAINT
PK_procedure_stats
PRIMARY KEY CLUSTERED
@@ -443,6 +473,32 @@ BEGIN
PRINT 'Created collect.procedure_stats table';
END;
+/*
+9b. Procedure Stats Dedup Tracking
+One row per natural key, updated on each collection cycle
+*/
+IF OBJECT_ID(N'collect.procedure_stats_latest_hash', N'U') IS NULL
+BEGIN
+ CREATE TABLE
+ collect.procedure_stats_latest_hash
+ (
+ database_name sysname NOT NULL,
+ object_id integer NOT NULL,
+ plan_handle varbinary(64) NOT NULL,
+ row_hash binary(32) NOT NULL,
+ last_seen datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ CONSTRAINT
+ PK_procedure_stats_latest_hash
+ PRIMARY KEY CLUSTERED
+ (database_name, object_id, plan_handle)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+
+ PRINT 'Created collect.procedure_stats_latest_hash table';
+END;
+
/*
10. Currently Executing Query Snapshots
Table is created dynamically by sp_WhoIsActive on first collection
@@ -473,7 +529,7 @@ BEGIN
server_first_execution_time datetime2(7) NOT NULL,
server_last_execution_time datetime2(7) NOT NULL,
module_name nvarchar(261) NULL,
- query_sql_text nvarchar(max) NULL,
+ query_sql_text varbinary(max) NULL,
query_hash binary(8) NULL,
/*Execution count*/
count_executions bigint NOT NULL,
@@ -531,9 +587,11 @@ BEGIN
last_force_failure_reason_desc nvarchar(128) NULL,
plan_forcing_type nvarchar(60) NULL,
compatibility_level smallint NULL,
- query_plan_text nvarchar(max) NULL,
- compilation_metrics xml NULL,
+ query_plan_text varbinary(max) NULL,
+ compilation_metrics varbinary(max) NULL,
query_plan_hash binary(8) NULL,
+ /*Deduplication hash for skipping unchanged rows*/
+ row_hash binary(32) NULL,
CONSTRAINT
PK_query_store_data
PRIMARY KEY CLUSTERED
@@ -545,6 +603,32 @@ BEGIN
PRINT 'Created collect.query_store_data table';
END;
+/*
+11b. Query Store Data Dedup Tracking
+One row per natural key, updated on each collection cycle
+*/
+IF OBJECT_ID(N'collect.query_store_data_latest_hash', N'U') IS NULL
+BEGIN
+ CREATE TABLE
+ collect.query_store_data_latest_hash
+ (
+ database_name sysname NOT NULL,
+ query_id bigint NOT NULL,
+ plan_id bigint NOT NULL,
+ row_hash binary(32) NOT NULL,
+ last_seen datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ CONSTRAINT
+ PK_query_store_data_latest_hash
+ PRIMARY KEY CLUSTERED
+ (database_name, query_id, plan_id)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+
+ PRINT 'Created collect.query_store_data_latest_hash table';
+END;
+
/*
Trace analysis table - stores processed trace file data
*/
diff --git a/install/06_ensure_collection_table.sql b/install/06_ensure_collection_table.sql
index 024d693..bc3b47d 100644
--- a/install/06_ensure_collection_table.sql
+++ b/install/06_ensure_collection_table.sql
@@ -265,10 +265,11 @@ BEGIN
total_worker_time_delta /
NULLIF(sample_interval_seconds, 0) / 1000.
),
- /*Query text and execution plan*/
- query_text nvarchar(max) NULL,
- query_plan_text nvarchar(max) NULL,
- query_plan xml NULL,
+ /*Query text and execution plan (compressed with COMPRESS/DECOMPRESS)*/
+ query_text varbinary(max) NULL,
+ query_plan_text varbinary(max) NULL,
+ /*Deduplication hash for skipping unchanged rows*/
+ row_hash binary(32) NULL,
CONSTRAINT
PK_query_stats
PRIMARY KEY CLUSTERED
@@ -446,9 +447,10 @@ BEGIN
total_worker_time_delta /
NULLIF(sample_interval_seconds, 0) / 1000.
),
- /*Execution plan*/
- query_plan_text nvarchar(max) NULL,
- query_plan xml NULL,
+ /*Execution plan (compressed with COMPRESS/DECOMPRESS)*/
+ query_plan_text varbinary(max) NULL,
+ /*Deduplication hash for skipping unchanged rows*/
+ row_hash binary(32) NULL,
CONSTRAINT
PK_procedure_stats
PRIMARY KEY CLUSTERED
@@ -491,7 +493,7 @@ BEGIN
server_first_execution_time datetime2(7) NOT NULL,
server_last_execution_time datetime2(7) NOT NULL,
module_name nvarchar(261) NULL,
- query_sql_text nvarchar(max) NULL,
+ query_sql_text varbinary(max) NULL,
query_hash binary(8) NULL,
/*Execution count*/
count_executions bigint NOT NULL,
@@ -549,9 +551,11 @@ BEGIN
last_force_failure_reason_desc nvarchar(128) NULL,
plan_forcing_type nvarchar(60) NULL,
compatibility_level smallint NULL,
- query_plan_text nvarchar(max) NULL,
- compilation_metrics xml NULL,
+ query_plan_text varbinary(max) NULL,
+ compilation_metrics varbinary(max) NULL,
query_plan_hash binary(8) NULL,
+ /*Deduplication hash for skipping unchanged rows*/
+ row_hash binary(32) NULL,
CONSTRAINT
PK_query_store_data
PRIMARY KEY CLUSTERED
diff --git a/install/08_collect_query_stats.sql b/install/08_collect_query_stats.sql
index b264867..98ee876 100644
--- a/install/08_collect_query_stats.sql
+++ b/install/08_collect_query_stats.sql
@@ -22,6 +22,8 @@ GO
Query performance collector
Collects query execution statistics from sys.dm_exec_query_stats
Captures min/max values for parameter sensitivity detection
+LOB columns are compressed with COMPRESS() before storage
+Unchanged rows are skipped via row_hash deduplication
*/
IF OBJECT_ID(N'collect.query_stats_collector', N'P') IS NULL
@@ -48,7 +50,9 @@ BEGIN
@last_collection_time datetime2(7),
@cutoff_time datetime2(7),
@frequency_minutes integer,
- @error_message nvarchar(4000);
+ @error_message nvarchar(4000),
+ @collect_query bit = 1,
+ @collect_plan bit = 1;
BEGIN TRY
BEGIN TRANSACTION;
@@ -106,6 +110,15 @@ BEGIN
END;
END;
+ /*
+ Read collection flags for optional query text and plan collection
+ */
+ SELECT
+ @collect_query = cs.collect_query,
+ @collect_plan = cs.collect_plan
+ FROM config.collection_schedule AS cs
+ WHERE cs.collector_name = N'query_stats_collector';
+
/*
First run detection - collect all queries if this is the first execution
*/
@@ -154,12 +167,63 @@ BEGIN
END;
/*
- Collect query statistics directly from DMV
- Only collects queries executed since last collection
- Excludes PerformanceMonitor and system databases (including 32761, 32767)
+ Stage 1: Collect query statistics into temp table
+ Temp table stays nvarchar(max) — COMPRESS happens at INSERT to permanent table
*/
+ CREATE TABLE
+ #query_stats_staging
+ (
+ server_start_time datetime2(7) NOT NULL,
+ database_name sysname NOT NULL,
+ sql_handle varbinary(64) NOT NULL,
+ statement_start_offset integer NOT NULL,
+ statement_end_offset integer NOT NULL,
+ plan_generation_num bigint NOT NULL,
+ plan_handle varbinary(64) NOT NULL,
+ creation_time datetime2(7) NOT NULL,
+ last_execution_time datetime2(7) NOT NULL,
+ execution_count bigint NOT NULL,
+ total_worker_time bigint NOT NULL,
+ min_worker_time bigint NOT NULL,
+ max_worker_time bigint NOT NULL,
+ total_physical_reads bigint NOT NULL,
+ min_physical_reads bigint NOT NULL,
+ max_physical_reads bigint NOT NULL,
+ total_logical_writes bigint NOT NULL,
+ total_logical_reads bigint NOT NULL,
+ total_clr_time bigint NOT NULL,
+ total_elapsed_time bigint NOT NULL,
+ min_elapsed_time bigint NOT NULL,
+ max_elapsed_time bigint NOT NULL,
+ query_hash binary(8) NULL,
+ query_plan_hash binary(8) NULL,
+ total_rows bigint NOT NULL,
+ min_rows bigint NOT NULL,
+ max_rows bigint NOT NULL,
+ statement_sql_handle varbinary(64) NULL,
+ statement_context_id bigint NULL,
+ min_dop smallint NOT NULL,
+ max_dop smallint NOT NULL,
+ min_grant_kb bigint NOT NULL,
+ max_grant_kb bigint NOT NULL,
+ min_used_grant_kb bigint NOT NULL,
+ max_used_grant_kb bigint NOT NULL,
+ min_ideal_grant_kb bigint NOT NULL,
+ max_ideal_grant_kb bigint NOT NULL,
+ min_reserved_threads integer NOT NULL,
+ max_reserved_threads integer NOT NULL,
+ min_used_threads integer NOT NULL,
+ max_used_threads integer NOT NULL,
+ total_spills bigint NOT NULL,
+ min_spills bigint NOT NULL,
+ max_spills bigint NOT NULL,
+ query_text nvarchar(max) NULL,
+ query_plan_text nvarchar(max) NULL,
+ row_hash binary(32) NULL
+ );
+
INSERT INTO
- collect.query_stats
+ #query_stats_staging
(
server_start_time,
database_name,
@@ -255,6 +319,8 @@ BEGIN
max_spills = qs.max_spills,
query_text =
CASE
+ WHEN @collect_query = 0
+ THEN NULL
WHEN qs.statement_start_offset = 0
AND qs.statement_end_offset = -1
THEN st.text
@@ -272,7 +338,12 @@ BEGIN
) / 2 + 1
)
END,
- query_plan_text = tqp.query_plan
+ query_plan_text =
+ CASE
+ WHEN @collect_plan = 1
+ THEN tqp.query_plan
+ ELSE NULL
+ END
FROM sys.dm_exec_query_stats AS qs
OUTER APPLY sys.dm_exec_sql_text(qs.sql_handle) AS st
OUTER APPLY
@@ -284,7 +355,7 @@ BEGIN
) AS tqp
CROSS APPLY
(
- SELECT
+ SELECT
dbid = CONVERT(integer, pa.value)
FROM sys.dm_exec_plan_attributes(qs.plan_handle) AS pa
WHERE pa.attribute = N'dbid'
@@ -301,8 +372,237 @@ BEGIN
AND pa.dbid < 32761 /*exclude contained AG system databases*/
OPTION(RECOMPILE);
+ /*
+ Stage 2: Compute row_hash on staging data
+ Hash of cumulative metric columns — changes when query executes
+ Binary concat: works on SQL 2016+, no CONCAT_WS dependency
+ */
+ UPDATE
+ #query_stats_staging
+ SET
+ row_hash =
+ HASHBYTES
+ (
+ 'SHA2_256',
+ CAST(execution_count AS binary(8)) +
+ CAST(total_worker_time AS binary(8)) +
+ CAST(total_elapsed_time AS binary(8)) +
+ CAST(total_logical_reads AS binary(8)) +
+ CAST(total_physical_reads AS binary(8)) +
+ CAST(total_logical_writes AS binary(8)) +
+ CAST(total_rows AS binary(8)) +
+ CAST(total_spills AS binary(8))
+ );
+
+ /*
+ Ensure tracking table exists
+ */
+ IF OBJECT_ID(N'collect.query_stats_latest_hash', N'U') IS NULL
+ BEGIN
+ CREATE TABLE
+ collect.query_stats_latest_hash
+ (
+ sql_handle varbinary(64) NOT NULL,
+ statement_start_offset integer NOT NULL,
+ statement_end_offset integer NOT NULL,
+ plan_handle varbinary(64) NOT NULL,
+ row_hash binary(32) NOT NULL,
+ last_seen datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ CONSTRAINT
+ PK_query_stats_latest_hash
+ PRIMARY KEY CLUSTERED
+ (sql_handle, statement_start_offset,
+ statement_end_offset, plan_handle)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+ END;
+
+ /*
+ Stage 3: INSERT only changed rows with COMPRESS on LOB columns
+ A row is "changed" if its natural key is new or its hash differs
+ */
+ INSERT INTO
+ collect.query_stats
+ (
+ server_start_time,
+ database_name,
+ sql_handle,
+ statement_start_offset,
+ statement_end_offset,
+ plan_generation_num,
+ plan_handle,
+ creation_time,
+ last_execution_time,
+ execution_count,
+ total_worker_time,
+ min_worker_time,
+ max_worker_time,
+ total_physical_reads,
+ min_physical_reads,
+ max_physical_reads,
+ total_logical_writes,
+ total_logical_reads,
+ total_clr_time,
+ total_elapsed_time,
+ min_elapsed_time,
+ max_elapsed_time,
+ query_hash,
+ query_plan_hash,
+ total_rows,
+ min_rows,
+ max_rows,
+ statement_sql_handle,
+ statement_context_id,
+ min_dop,
+ max_dop,
+ min_grant_kb,
+ max_grant_kb,
+ min_used_grant_kb,
+ max_used_grant_kb,
+ min_ideal_grant_kb,
+ max_ideal_grant_kb,
+ min_reserved_threads,
+ max_reserved_threads,
+ min_used_threads,
+ max_used_threads,
+ total_spills,
+ min_spills,
+ max_spills,
+ query_text,
+ query_plan_text,
+ row_hash
+ )
+ SELECT
+ s.server_start_time,
+ s.database_name,
+ s.sql_handle,
+ s.statement_start_offset,
+ s.statement_end_offset,
+ s.plan_generation_num,
+ s.plan_handle,
+ s.creation_time,
+ s.last_execution_time,
+ s.execution_count,
+ s.total_worker_time,
+ s.min_worker_time,
+ s.max_worker_time,
+ s.total_physical_reads,
+ s.min_physical_reads,
+ s.max_physical_reads,
+ s.total_logical_writes,
+ s.total_logical_reads,
+ s.total_clr_time,
+ s.total_elapsed_time,
+ s.min_elapsed_time,
+ s.max_elapsed_time,
+ s.query_hash,
+ s.query_plan_hash,
+ s.total_rows,
+ s.min_rows,
+ s.max_rows,
+ s.statement_sql_handle,
+ s.statement_context_id,
+ s.min_dop,
+ s.max_dop,
+ s.min_grant_kb,
+ s.max_grant_kb,
+ s.min_used_grant_kb,
+ s.max_used_grant_kb,
+ s.min_ideal_grant_kb,
+ s.max_ideal_grant_kb,
+ s.min_reserved_threads,
+ s.max_reserved_threads,
+ s.min_used_threads,
+ s.max_used_threads,
+ s.total_spills,
+ s.min_spills,
+ s.max_spills,
+ COMPRESS(s.query_text),
+ COMPRESS(s.query_plan_text),
+ s.row_hash
+ FROM #query_stats_staging AS s
+ LEFT JOIN collect.query_stats_latest_hash AS h
+ ON h.sql_handle = s.sql_handle
+ AND h.statement_start_offset = s.statement_start_offset
+ AND h.statement_end_offset = s.statement_end_offset
+ AND h.plan_handle = s.plan_handle
+ AND h.row_hash = s.row_hash
+ WHERE h.sql_handle IS NULL /*no match = new or changed*/
+ OPTION(RECOMPILE);
+
SET @rows_collected = ROWCOUNT_BIG();
+ /*
+ Stage 4: Update tracking table with current hashes
+ */
+ MERGE collect.query_stats_latest_hash AS t
+ USING
+ (
+ SELECT
+ sql_handle,
+ statement_start_offset,
+ statement_end_offset,
+ plan_handle,
+ row_hash
+ FROM
+ (
+ SELECT
+ s2.sql_handle,
+ s2.statement_start_offset,
+ s2.statement_end_offset,
+ s2.plan_handle,
+ s2.row_hash,
+ rn = ROW_NUMBER() OVER
+ (
+ PARTITION BY
+ s2.sql_handle,
+ s2.statement_start_offset,
+ s2.statement_end_offset,
+ s2.plan_handle
+ ORDER BY
+ s2.last_execution_time DESC
+ )
+ FROM #query_stats_staging AS s2
+ ) AS ranked
+ WHERE ranked.rn = 1
+ ) AS s
+ ON t.sql_handle = s.sql_handle
+ AND t.statement_start_offset = s.statement_start_offset
+ AND t.statement_end_offset = s.statement_end_offset
+ AND t.plan_handle = s.plan_handle
+ WHEN MATCHED
+ THEN UPDATE SET
+ t.row_hash = s.row_hash,
+ t.last_seen = SYSDATETIME()
+ WHEN NOT MATCHED
+ THEN INSERT
+ (
+ sql_handle,
+ statement_start_offset,
+ statement_end_offset,
+ plan_handle,
+ row_hash,
+ last_seen
+ )
+ VALUES
+ (
+ s.sql_handle,
+ s.statement_start_offset,
+ s.statement_end_offset,
+ s.plan_handle,
+ s.row_hash,
+ SYSDATETIME()
+ );
+
+    IF @debug = 1
+    BEGIN
+        DECLARE @staging_count nvarchar(20), @collected_count nvarchar(20);
+        SELECT @staging_count = CONVERT(nvarchar(20), COUNT_BIG(*)), @collected_count = CONVERT(nvarchar(20), @rows_collected) FROM #query_stats_staging;
+        RAISERROR(N'Staged %s rows, inserted %s changed rows', 0, 1, @staging_count, @collected_count) WITH NOWAIT;
+    END;
+
/*
Calculate deltas for the newly inserted data
*/
@@ -371,5 +671,5 @@ GO
PRINT 'Query stats collector created successfully';
PRINT 'Collects queries executed since last collection from sys.dm_exec_query_stats';
-PRINT 'Includes min/max values for parameter sensitivity detection';
+PRINT 'LOB columns compressed with COMPRESS(), unchanged rows skipped via row_hash';
GO
diff --git a/install/09_collect_query_store.sql b/install/09_collect_query_store.sql
index 769ccfd..1b5d30b 100644
--- a/install/09_collect_query_store.sql
+++ b/install/09_collect_query_store.sql
@@ -274,7 +274,8 @@ BEGIN
compatibility_level smallint NULL,
query_plan_text nvarchar(max) NULL,
compilation_metrics xml NULL,
- query_plan_hash binary(8) NULL
+ query_plan_hash binary(8) NULL,
+ row_hash binary(32) NULL
);
/*
@@ -665,8 +666,53 @@ BEGIN
INTO @database_name;
END;
+ /*
+ Compute row_hash on staging data
+ Hash of metric columns that change between collection cycles
+ Binary concat: works on SQL 2016+, no CONCAT_WS dependency
+ */
+ UPDATE
+ #query_store_data
+ SET
+ row_hash =
+ HASHBYTES
+ (
+ 'SHA2_256',
+ CAST(count_executions AS binary(8)) +
+ CAST(avg_duration AS binary(8)) +
+ CAST(avg_cpu_time AS binary(8)) +
+ CAST(avg_logical_io_reads AS binary(8)) +
+ CAST(avg_logical_io_writes AS binary(8)) +
+ CAST(avg_physical_io_reads AS binary(8)) +
+ CAST(avg_rowcount AS binary(8))
+ );
+
+ /*
+ Ensure tracking table exists
+ */
+ IF OBJECT_ID(N'collect.query_store_data_latest_hash', N'U') IS NULL
+ BEGIN
+ CREATE TABLE
+ collect.query_store_data_latest_hash
+ (
+ database_name sysname NOT NULL,
+ query_id bigint NOT NULL,
+ plan_id bigint NOT NULL,
+ row_hash binary(32) NOT NULL,
+ last_seen datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ CONSTRAINT
+ PK_query_store_data_latest_hash
+ PRIMARY KEY CLUSTERED
+ (database_name, query_id, plan_id)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+ END;
+
/*
Insert collected data into the permanent table
+ COMPRESS on LOB columns, skip unchanged rows via hash comparison
*/
INSERT INTO
collect.query_store_data
@@ -726,7 +772,8 @@ BEGIN
compatibility_level,
query_plan_text,
compilation_metrics,
- query_plan_hash
+ query_plan_hash,
+ row_hash
)
SELECT
qsd.database_name,
@@ -738,7 +785,7 @@ BEGIN
qsd.server_first_execution_time,
qsd.server_last_execution_time,
qsd.module_name,
- qsd.query_sql_text,
+ COMPRESS(qsd.query_sql_text),
qsd.query_hash,
qsd.count_executions,
qsd.avg_duration,
@@ -782,14 +829,84 @@ BEGIN
qsd.last_force_failure_reason_desc,
qsd.plan_forcing_type,
qsd.compatibility_level,
- qsd.query_plan_text,
- qsd.compilation_metrics,
- qsd.query_plan_hash
+ COMPRESS(qsd.query_plan_text),
+ COMPRESS(CAST(qsd.compilation_metrics AS nvarchar(max))),
+ qsd.query_plan_hash,
+ qsd.row_hash
FROM #query_store_data AS qsd
+ LEFT JOIN collect.query_store_data_latest_hash AS h
+ ON h.database_name = qsd.database_name
+ AND h.query_id = qsd.query_id
+ AND h.plan_id = qsd.plan_id
+ AND h.row_hash = qsd.row_hash
+ WHERE h.database_name IS NULL /*no match = new or changed*/
OPTION(RECOMPILE, KEEPFIXED PLAN);
SET @rows_collected = ROWCOUNT_BIG();
+ /*
+ Update tracking table with current hashes
+ */
+ MERGE collect.query_store_data_latest_hash AS t
+ USING
+ (
+ SELECT
+ database_name,
+ query_id,
+ plan_id,
+ row_hash
+ FROM
+ (
+ SELECT
+ qsd.database_name,
+ qsd.query_id,
+ qsd.plan_id,
+ qsd.row_hash,
+ rn = ROW_NUMBER() OVER
+ (
+ PARTITION BY
+ qsd.database_name,
+ qsd.query_id,
+ qsd.plan_id
+ ORDER BY
+ qsd.utc_last_execution_time DESC
+ )
+ FROM #query_store_data AS qsd
+ ) AS ranked
+ WHERE ranked.rn = 1
+ ) AS s
+ ON t.database_name = s.database_name
+ AND t.query_id = s.query_id
+ AND t.plan_id = s.plan_id
+ WHEN MATCHED
+ THEN UPDATE SET
+ t.row_hash = s.row_hash,
+ t.last_seen = SYSDATETIME()
+ WHEN NOT MATCHED
+ THEN INSERT
+ (
+ database_name,
+ query_id,
+ plan_id,
+ row_hash,
+ last_seen
+ )
+ VALUES
+ (
+ s.database_name,
+ s.query_id,
+ s.plan_id,
+ s.row_hash,
+ SYSDATETIME()
+ );
+
+    IF @debug = 1
+    BEGIN
+        DECLARE @staging_count nvarchar(20), @collected_count nvarchar(20);
+        SELECT @staging_count = CONVERT(nvarchar(20), COUNT_BIG(*)), @collected_count = CONVERT(nvarchar(20), @rows_collected) FROM #query_store_data;
+        RAISERROR(N'Staged %s rows, inserted %s changed rows', 0, 1, @staging_count, @collected_count) WITH NOWAIT;
+    END;
+
/*
Log successful collection
*/
@@ -848,4 +965,5 @@ GO
PRINT 'Query Store collector created successfully';
PRINT 'Collects comprehensive runtime statistics from all Query Store enabled databases';
+PRINT 'LOB columns compressed with COMPRESS(), unchanged rows skipped via row_hash';
GO
diff --git a/install/10_collect_procedure_stats.sql b/install/10_collect_procedure_stats.sql
index 7f8e411..ad36e2f 100644
--- a/install/10_collect_procedure_stats.sql
+++ b/install/10_collect_procedure_stats.sql
@@ -1,4 +1,4 @@
-/*
+/*
Copyright 2026 Darling Data, LLC
https://www.erikdarling.com/
@@ -20,9 +20,10 @@ GO
/*
Procedure, trigger, and function stats collector
-Collects execution statistics from sys.dm_exec_procedure_stats,
+Collects execution statistics from sys.dm_exec_procedure_stats,
sys.dm_exec_trigger_stats, and sys.dm_exec_function_stats
-Includes execution plans for performance analysis
+LOB columns are compressed with COMPRESS() before storage
+Unchanged rows are skipped via row_hash deduplication
*/
IF OBJECT_ID(N'collect.procedure_stats_collector', N'P') IS NULL
@@ -48,7 +49,9 @@ BEGIN
@server_start_time datetime2(7),
@last_collection_time datetime2(7) = NULL,
@frequency_minutes integer = NULL,
- @cutoff_time datetime2(7) = NULL;
+ @cutoff_time datetime2(7) = NULL,
+ @collect_query bit = 1,
+ @collect_plan bit = 1;
BEGIN TRY
BEGIN TRANSACTION;
@@ -106,6 +109,15 @@ BEGIN
END;
END;
+ /*
+ Read collection flags for optional plan collection
+ */
+ SELECT
+ @collect_query = cs.collect_query,
+ @collect_plan = cs.collect_plan
+ FROM config.collection_schedule AS cs
+ WHERE cs.collector_name = N'procedure_stats_collector';
+
/*
First run detection - collect all procedures if this is the first execution
*/
@@ -154,11 +166,48 @@ BEGIN
END;
/*
- Collect procedure, trigger, and function statistics
- Single query with UNION ALL to collect from all three DMVs
+ Stage 1: Collect procedure, trigger, and function statistics into temp table
+ Temp table stays nvarchar(max) — COMPRESS happens at INSERT to permanent table
*/
+ CREATE TABLE
+ #procedure_stats_staging
+ (
+ server_start_time datetime2(7) NOT NULL,
+ object_type nvarchar(20) NOT NULL,
+ database_name sysname NOT NULL,
+ object_id integer NOT NULL,
+ object_name sysname NULL,
+ schema_name sysname NULL,
+ type_desc nvarchar(60) NULL,
+ sql_handle varbinary(64) NOT NULL,
+ plan_handle varbinary(64) NOT NULL,
+ cached_time datetime2(7) NOT NULL,
+ last_execution_time datetime2(7) NOT NULL,
+ execution_count bigint NOT NULL,
+ total_worker_time bigint NOT NULL,
+ min_worker_time bigint NOT NULL,
+ max_worker_time bigint NOT NULL,
+ total_elapsed_time bigint NOT NULL,
+ min_elapsed_time bigint NOT NULL,
+ max_elapsed_time bigint NOT NULL,
+ total_logical_reads bigint NOT NULL,
+ min_logical_reads bigint NOT NULL,
+ max_logical_reads bigint NOT NULL,
+ total_physical_reads bigint NOT NULL,
+ min_physical_reads bigint NOT NULL,
+ max_physical_reads bigint NOT NULL,
+ total_logical_writes bigint NOT NULL,
+ min_logical_writes bigint NOT NULL,
+ max_logical_writes bigint NOT NULL,
+ total_spills bigint NULL,
+ min_spills bigint NULL,
+ max_spills bigint NULL,
+ query_plan_text nvarchar(max) NULL,
+ row_hash binary(32) NULL
+ );
+
INSERT INTO
- collect.procedure_stats
+ #procedure_stats_staging
(
server_start_time,
object_type,
@@ -223,7 +272,12 @@ BEGIN
total_spills = ps.total_spills,
min_spills = ps.min_spills,
max_spills = ps.max_spills,
- query_plan_text = CONVERT(nvarchar(max), tqp.query_plan)
+ query_plan_text =
+ CASE
+ WHEN @collect_plan = 1
+ THEN CONVERT(nvarchar(max), tqp.query_plan)
+ ELSE NULL
+ END
FROM sys.dm_exec_procedure_stats AS ps
OUTER APPLY
sys.dm_exec_text_query_plan
@@ -234,7 +288,7 @@ BEGIN
) AS tqp
OUTER APPLY
(
- SELECT
+ SELECT
dbid = CONVERT(integer, pa.value)
FROM sys.dm_exec_plan_attributes(ps.plan_handle) AS pa
WHERE pa.attribute = N'dbid'
@@ -386,7 +440,12 @@ BEGIN
total_spills = ts.total_spills,
min_spills = ts.min_spills,
max_spills = ts.max_spills,
- query_plan_text = CONVERT(nvarchar(max), tqp.query_plan)
+ query_plan_text =
+ CASE
+ WHEN @collect_plan = 1
+ THEN CONVERT(nvarchar(max), tqp.query_plan)
+ ELSE NULL
+ END
FROM sys.dm_exec_trigger_stats AS ts
CROSS APPLY sys.dm_exec_sql_text(ts.sql_handle) AS st
OUTER APPLY
@@ -446,7 +505,12 @@ BEGIN
total_spills = NULL,
min_spills = NULL,
max_spills = NULL,
- query_plan_text = CONVERT(nvarchar(max), tqp.query_plan)
+ query_plan_text =
+ CASE
+ WHEN @collect_plan = 1
+ THEN CONVERT(nvarchar(max), tqp.query_plan)
+ ELSE NULL
+ END
FROM sys.dm_exec_function_stats AS fs
OUTER APPLY
sys.dm_exec_text_query_plan
@@ -457,7 +521,7 @@ BEGIN
) AS tqp
OUTER APPLY
(
- SELECT
+ SELECT
dbid = CONVERT(integer, pa.value)
FROM sys.dm_exec_plan_attributes(fs.plan_handle) AS pa
WHERE pa.attribute = N'dbid'
@@ -473,9 +537,197 @@ BEGIN
)
AND pa.dbid < 32761 /*exclude contained AG system databases*/
OPTION(RECOMPILE);
-
+
+ /*
+ Stage 2: Compute row_hash on staging data
+ Hash of cumulative metric columns — changes when procedure executes
+ total_spills is nullable (functions don't have spills), use ISNULL
+ */
+ UPDATE
+ #procedure_stats_staging
+ SET
+ row_hash =
+ HASHBYTES
+ (
+ 'SHA2_256',
+ CAST(execution_count AS binary(8)) +
+ CAST(total_worker_time AS binary(8)) +
+ CAST(total_elapsed_time AS binary(8)) +
+ CAST(total_logical_reads AS binary(8)) +
+ CAST(total_physical_reads AS binary(8)) +
+ CAST(total_logical_writes AS binary(8)) +
+ ISNULL(CAST(total_spills AS binary(8)), 0x0000000000000000)
+ );
+
+ /*
+ Ensure tracking table exists
+ */
+ IF OBJECT_ID(N'collect.procedure_stats_latest_hash', N'U') IS NULL
+ BEGIN
+ CREATE TABLE
+ collect.procedure_stats_latest_hash
+ (
+ database_name sysname NOT NULL,
+ object_id integer NOT NULL,
+ plan_handle varbinary(64) NOT NULL,
+ row_hash binary(32) NOT NULL,
+ last_seen datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ CONSTRAINT
+ PK_procedure_stats_latest_hash
+ PRIMARY KEY CLUSTERED
+ (database_name, object_id, plan_handle)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+ END;
+
+ /*
+ Stage 3: INSERT only changed rows with COMPRESS on LOB columns
+ */
+ INSERT INTO
+ collect.procedure_stats
+ (
+ server_start_time,
+ object_type,
+ database_name,
+ object_id,
+ object_name,
+ schema_name,
+ type_desc,
+ sql_handle,
+ plan_handle,
+ cached_time,
+ last_execution_time,
+ execution_count,
+ total_worker_time,
+ min_worker_time,
+ max_worker_time,
+ total_elapsed_time,
+ min_elapsed_time,
+ max_elapsed_time,
+ total_logical_reads,
+ min_logical_reads,
+ max_logical_reads,
+ total_physical_reads,
+ min_physical_reads,
+ max_physical_reads,
+ total_logical_writes,
+ min_logical_writes,
+ max_logical_writes,
+ total_spills,
+ min_spills,
+ max_spills,
+ query_plan_text,
+ row_hash
+ )
+ SELECT
+ s.server_start_time,
+ s.object_type,
+ s.database_name,
+ s.object_id,
+ s.object_name,
+ s.schema_name,
+ s.type_desc,
+ s.sql_handle,
+ s.plan_handle,
+ s.cached_time,
+ s.last_execution_time,
+ s.execution_count,
+ s.total_worker_time,
+ s.min_worker_time,
+ s.max_worker_time,
+ s.total_elapsed_time,
+ s.min_elapsed_time,
+ s.max_elapsed_time,
+ s.total_logical_reads,
+ s.min_logical_reads,
+ s.max_logical_reads,
+ s.total_physical_reads,
+ s.min_physical_reads,
+ s.max_physical_reads,
+ s.total_logical_writes,
+ s.min_logical_writes,
+ s.max_logical_writes,
+ s.total_spills,
+ s.min_spills,
+ s.max_spills,
+ COMPRESS(s.query_plan_text),
+ s.row_hash
+ FROM #procedure_stats_staging AS s
+ LEFT JOIN collect.procedure_stats_latest_hash AS h
+ ON h.database_name = s.database_name
+ AND h.object_id = s.object_id
+ AND h.plan_handle = s.plan_handle
+ AND h.row_hash = s.row_hash
+ WHERE h.database_name IS NULL /*no match = new or changed*/
+ OPTION(RECOMPILE);
+
SET @rows_collected = ROWCOUNT_BIG();
-
+
+ /*
+ Stage 4: Update tracking table with current hashes
+ */
+ MERGE collect.procedure_stats_latest_hash AS t
+ USING
+ (
+ SELECT
+ database_name,
+ object_id,
+ plan_handle,
+ row_hash
+ FROM
+ (
+ SELECT
+ s2.database_name,
+ s2.object_id,
+ s2.plan_handle,
+ s2.row_hash,
+ rn = ROW_NUMBER() OVER
+ (
+ PARTITION BY
+ s2.database_name,
+ s2.object_id,
+ s2.plan_handle
+ ORDER BY
+ s2.last_execution_time DESC
+ )
+ FROM #procedure_stats_staging AS s2
+ ) AS ranked
+ WHERE ranked.rn = 1
+ ) AS s
+ ON t.database_name = s.database_name
+ AND t.object_id = s.object_id
+ AND t.plan_handle = s.plan_handle
+ WHEN MATCHED
+ THEN UPDATE SET
+ t.row_hash = s.row_hash,
+ t.last_seen = SYSDATETIME()
+ WHEN NOT MATCHED
+ THEN INSERT
+ (
+ database_name,
+ object_id,
+ plan_handle,
+ row_hash,
+ last_seen
+ )
+ VALUES
+ (
+ s.database_name,
+ s.object_id,
+ s.plan_handle,
+ s.row_hash,
+ SYSDATETIME()
+ );
+
+ IF @debug = 1
+ BEGIN
+ DECLARE @staging_count bigint;
+ SELECT @staging_count = COUNT_BIG(*) FROM #procedure_stats_staging;
+ RAISERROR(N'Staged %I64d rows, inserted %I64d changed rows', 0, 1, @staging_count, @rows_collected) WITH NOWAIT;
+ END;
+
/*
Calculate deltas for the newly inserted data
*/
@@ -483,7 +735,7 @@ BEGIN
@table_name = N'procedure_stats',
@debug = @debug;
- /*Tie statement sto procedures when possible*/
+ /*Tie statements to procedures when possible*/
UPDATE
qs
SET
@@ -499,7 +751,6 @@ BEGIN
AND qs.object_name IS NULL
OPTION(RECOMPILE);
-
/*
Log successful collection
*/
@@ -518,24 +769,24 @@ BEGIN
@rows_collected,
DATEDIFF(MILLISECOND, @start_time, SYSDATETIME())
);
-
+
IF @debug = 1
BEGIN
RAISERROR(N'Collected %d procedure/trigger/function stats rows', 0, 1, @rows_collected) WITH NOWAIT;
END;
-
+
COMMIT TRANSACTION;
-
+
END TRY
BEGIN CATCH
IF @@TRANCOUNT > 0
BEGIN
ROLLBACK TRANSACTION;
END;
-
+
DECLARE
@error_message nvarchar(4000) = ERROR_MESSAGE();
-
+
/*
Log the error
*/
@@ -554,11 +805,12 @@ BEGIN
DATEDIFF(MILLISECOND, @start_time, SYSDATETIME()),
@error_message
);
-
+
RAISERROR(N'Error in procedure stats collector: %s', 16, 1, @error_message);
END CATCH;
END;
GO
PRINT 'Procedure stats collector created successfully';
+PRINT 'LOB columns compressed with COMPRESS(), unchanged rows skipped via row_hash';
GO
diff --git a/install/46_create_query_plan_views.sql b/install/46_create_query_plan_views.sql
index 29646c4..c6ad5d9 100644
--- a/install/46_create_query_plan_views.sql
+++ b/install/46_create_query_plan_views.sql
@@ -30,12 +30,12 @@ CREATE OR ALTER VIEW
report.query_stats_with_formatted_plans
AS
SELECT
- *,
+ qs.*,
query_plan_formatted =
CASE
- WHEN TRY_CAST(qs.query_plan_text AS xml) IS NOT NULL
- THEN TRY_CAST(qs.query_plan_text AS xml)
- WHEN TRY_CAST(qs.query_plan_text AS xml) IS NULL
+ WHEN TRY_CAST(d.plan_text AS xml) IS NOT NULL
+ THEN TRY_CAST(d.plan_text AS xml)
+ WHEN TRY_CAST(d.plan_text AS xml) IS NULL
THEN
(
SELECT
@@ -44,14 +44,19 @@ SELECT
N'-- This is a huge query plan.' + NCHAR(13) + NCHAR(10) +
N'-- Remove the headers and footers, save it as a .sqlplan file, and re-open it.' + NCHAR(13) + NCHAR(10) +
NCHAR(13) + NCHAR(10) +
- REPLACE(qs.query_plan_text, N'= DATEADD(DAY, -7, SYSDATETIME())
diff --git a/upgrades/2.1.0-to-2.2.0/01_compress_query_stats.sql b/upgrades/2.1.0-to-2.2.0/01_compress_query_stats.sql
new file mode 100644
index 0000000..b64ea27
--- /dev/null
+++ b/upgrades/2.1.0-to-2.2.0/01_compress_query_stats.sql
@@ -0,0 +1,386 @@
+/*
+Copyright 2026 Darling Data, LLC
+https://www.erikdarling.com/
+
+Upgrade from 2.1.0 to 2.2.0
+Migrates collect.query_stats to compressed LOB storage:
+ - query_text nvarchar(max) -> varbinary(max) via COMPRESS()
+ - query_plan_text nvarchar(max) -> varbinary(max) via COMPRESS()
+ - Drops unused query_plan xml column (never populated by collectors)
+ - Adds row_hash binary(32) for deduplication
+*/
+
+SET ANSI_NULLS ON;
+SET ANSI_PADDING ON;
+SET ANSI_WARNINGS ON;
+SET ARITHABORT ON;
+SET CONCAT_NULL_YIELDS_NULL ON;
+SET QUOTED_IDENTIFIER ON;
+SET NUMERIC_ROUNDABORT OFF;
+SET IMPLICIT_TRANSACTIONS OFF;
+SET STATISTICS TIME, IO OFF;
+GO
+
+USE PerformanceMonitor;
+GO
+
+/*
+Skip if already migrated (query_text is already varbinary). NOTE(review): RETURN exits only this batch — the GO-separated migration batch below still runs on re-execution, which would double-COMPRESS and re-move data; guard the TRY batch itself (or SET NOEXEC ON) before relying on this check.
+*/
+IF EXISTS
+(
+ SELECT
+ 1/0
+ FROM sys.columns
+ WHERE object_id = OBJECT_ID(N'collect.query_stats')
+ AND name = N'query_text'
+ AND system_type_id = 165 /*varbinary*/
+)
+BEGIN
+ PRINT 'collect.query_stats already migrated to compressed storage — skipping.';
+ RETURN;
+END;
+GO
+
+/*
+Skip if source table doesn't exist
+*/
+IF OBJECT_ID(N'collect.query_stats', N'U') IS NULL
+BEGIN
+ PRINT 'collect.query_stats does not exist — skipping.';
+ RETURN;
+END;
+GO
+
+PRINT '=== Migrating collect.query_stats to compressed LOB storage ===';
+PRINT '';
+GO
+
+BEGIN TRY
+
+ /*
+ Step 1: Create the _new table with compressed column types
+ */
+ IF OBJECT_ID(N'collect.query_stats_new', N'U') IS NOT NULL
+ BEGIN
+ DROP TABLE collect.query_stats_new;
+ PRINT 'Dropped existing collect.query_stats_new';
+ END;
+
+ CREATE TABLE
+ collect.query_stats_new
+ (
+ collection_id bigint IDENTITY NOT NULL,
+ collection_time datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ server_start_time datetime2(7) NOT NULL,
+ object_type nvarchar(20) NOT NULL
+ DEFAULT N'STATEMENT',
+ database_name sysname NOT NULL,
+ object_name sysname NULL,
+ schema_name sysname NULL,
+ sql_handle varbinary(64) NOT NULL,
+ statement_start_offset integer NOT NULL,
+ statement_end_offset integer NOT NULL,
+ plan_generation_num bigint NOT NULL,
+ plan_handle varbinary(64) NOT NULL,
+ creation_time datetime2(7) NOT NULL,
+ last_execution_time datetime2(7) NOT NULL,
+ /*Raw cumulative values*/
+ execution_count bigint NOT NULL,
+ total_worker_time bigint NOT NULL,
+ min_worker_time bigint NOT NULL,
+ max_worker_time bigint NOT NULL,
+ total_physical_reads bigint NOT NULL,
+ min_physical_reads bigint NOT NULL,
+ max_physical_reads bigint NOT NULL,
+ total_logical_writes bigint NOT NULL,
+ total_logical_reads bigint NOT NULL,
+ total_clr_time bigint NOT NULL,
+ total_elapsed_time bigint NOT NULL,
+ min_elapsed_time bigint NOT NULL,
+ max_elapsed_time bigint NOT NULL,
+ query_hash binary(8) NULL,
+ query_plan_hash binary(8) NULL,
+ total_rows bigint NOT NULL,
+ min_rows bigint NOT NULL,
+ max_rows bigint NOT NULL,
+ statement_sql_handle varbinary(64) NULL,
+ statement_context_id bigint NULL,
+ min_dop smallint NOT NULL,
+ max_dop smallint NOT NULL,
+ min_grant_kb bigint NOT NULL,
+ max_grant_kb bigint NOT NULL,
+ min_used_grant_kb bigint NOT NULL,
+ max_used_grant_kb bigint NOT NULL,
+ min_ideal_grant_kb bigint NOT NULL,
+ max_ideal_grant_kb bigint NOT NULL,
+ min_reserved_threads integer NOT NULL,
+ max_reserved_threads integer NOT NULL,
+ min_used_threads integer NOT NULL,
+ max_used_threads integer NOT NULL,
+ total_spills bigint NOT NULL,
+ min_spills bigint NOT NULL,
+ max_spills bigint NOT NULL,
+ /*Delta calculations*/
+ execution_count_delta bigint NULL,
+ total_worker_time_delta bigint NULL,
+ total_elapsed_time_delta bigint NULL,
+ total_logical_reads_delta bigint NULL,
+ total_physical_reads_delta bigint NULL,
+ total_logical_writes_delta bigint NULL,
+ sample_interval_seconds integer NULL,
+ /*Analysis helpers - computed columns*/
+ avg_rows AS
+ (
+ total_rows /
+ NULLIF(execution_count, 0)
+ ),
+ avg_worker_time_ms AS
+ (
+ total_worker_time /
+ NULLIF(execution_count, 0) / 1000.
+ ),
+ avg_elapsed_time_ms AS
+ (
+ total_elapsed_time /
+ NULLIF(execution_count, 0) / 1000.
+ ),
+ avg_physical_reads AS
+ (
+ total_physical_reads /
+ NULLIF(execution_count, 0)
+ ),
+ worker_time_per_second AS
+ (
+ total_worker_time_delta /
+ NULLIF(sample_interval_seconds, 0) / 1000.
+ ),
+ /*Query text and execution plan (compressed with COMPRESS/DECOMPRESS)*/
+ query_text varbinary(max) NULL,
+ query_plan_text varbinary(max) NULL,
+ /*Deduplication hash for skipping unchanged rows*/
+ row_hash binary(32) NULL,
+ CONSTRAINT
+ PK_query_stats_new
+ PRIMARY KEY CLUSTERED
+ (collection_time, collection_id)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+
+ PRINT 'Created collect.query_stats_new';
+
+ /*
+ Step 2: Reseed IDENTITY to continue from the old table
+ */
+ DECLARE
+ @max_id bigint;
+
+ SELECT
+ @max_id = ISNULL(MAX(collection_id), 0)
+ FROM collect.query_stats;
+
+ DBCC CHECKIDENT(N'collect.query_stats_new', RESEED, @max_id);
+
+ PRINT 'Reseeded IDENTITY to ' + CAST(@max_id AS varchar(20));
+
+ /*
+ Step 3: Migrate data in batches with COMPRESS on LOB columns
+ Omits query_plan xml (never populated, dropping it)
+ Omits computed columns (avg_rows, avg_worker_time_ms, avg_elapsed_time_ms,
+ avg_physical_reads, worker_time_per_second) — can't appear in OUTPUT
+ */
+ DECLARE
+ @batch_size integer = 10000,
+ @rows_moved bigint = 0,
+ @batch_rows integer = 1;
+
+ PRINT '';
+ PRINT 'Migrating data in batches of ' + CAST(@batch_size AS varchar(10)) + '...';
+
+ SET IDENTITY_INSERT collect.query_stats_new ON;
+
+ WHILE @batch_rows > 0
+ BEGIN
+ DELETE TOP (@batch_size)
+ FROM collect.query_stats
+ OUTPUT
+ deleted.collection_id,
+ deleted.collection_time,
+ deleted.server_start_time,
+ deleted.object_type,
+ deleted.database_name,
+ deleted.object_name,
+ deleted.schema_name,
+ deleted.sql_handle,
+ deleted.statement_start_offset,
+ deleted.statement_end_offset,
+ deleted.plan_generation_num,
+ deleted.plan_handle,
+ deleted.creation_time,
+ deleted.last_execution_time,
+ deleted.execution_count,
+ deleted.total_worker_time,
+ deleted.min_worker_time,
+ deleted.max_worker_time,
+ deleted.total_physical_reads,
+ deleted.min_physical_reads,
+ deleted.max_physical_reads,
+ deleted.total_logical_writes,
+ deleted.total_logical_reads,
+ deleted.total_clr_time,
+ deleted.total_elapsed_time,
+ deleted.min_elapsed_time,
+ deleted.max_elapsed_time,
+ deleted.query_hash,
+ deleted.query_plan_hash,
+ deleted.total_rows,
+ deleted.min_rows,
+ deleted.max_rows,
+ deleted.statement_sql_handle,
+ deleted.statement_context_id,
+ deleted.min_dop,
+ deleted.max_dop,
+ deleted.min_grant_kb,
+ deleted.max_grant_kb,
+ deleted.min_used_grant_kb,
+ deleted.max_used_grant_kb,
+ deleted.min_ideal_grant_kb,
+ deleted.max_ideal_grant_kb,
+ deleted.min_reserved_threads,
+ deleted.max_reserved_threads,
+ deleted.min_used_threads,
+ deleted.max_used_threads,
+ deleted.total_spills,
+ deleted.min_spills,
+ deleted.max_spills,
+ deleted.execution_count_delta,
+ deleted.total_worker_time_delta,
+ deleted.total_elapsed_time_delta,
+ deleted.total_logical_reads_delta,
+ deleted.total_physical_reads_delta,
+ deleted.total_logical_writes_delta,
+ deleted.sample_interval_seconds,
+ COMPRESS(deleted.query_text),
+ COMPRESS(deleted.query_plan_text)
+ INTO collect.query_stats_new
+ (
+ collection_id,
+ collection_time,
+ server_start_time,
+ object_type,
+ database_name,
+ object_name,
+ schema_name,
+ sql_handle,
+ statement_start_offset,
+ statement_end_offset,
+ plan_generation_num,
+ plan_handle,
+ creation_time,
+ last_execution_time,
+ execution_count,
+ total_worker_time,
+ min_worker_time,
+ max_worker_time,
+ total_physical_reads,
+ min_physical_reads,
+ max_physical_reads,
+ total_logical_writes,
+ total_logical_reads,
+ total_clr_time,
+ total_elapsed_time,
+ min_elapsed_time,
+ max_elapsed_time,
+ query_hash,
+ query_plan_hash,
+ total_rows,
+ min_rows,
+ max_rows,
+ statement_sql_handle,
+ statement_context_id,
+ min_dop,
+ max_dop,
+ min_grant_kb,
+ max_grant_kb,
+ min_used_grant_kb,
+ max_used_grant_kb,
+ min_ideal_grant_kb,
+ max_ideal_grant_kb,
+ min_reserved_threads,
+ max_reserved_threads,
+ min_used_threads,
+ max_used_threads,
+ total_spills,
+ min_spills,
+ max_spills,
+ execution_count_delta,
+ total_worker_time_delta,
+ total_elapsed_time_delta,
+ total_logical_reads_delta,
+ total_physical_reads_delta,
+ total_logical_writes_delta,
+ sample_interval_seconds,
+ query_text,
+ query_plan_text
+ );
+
+ SET @batch_rows = @@ROWCOUNT;
+ SET @rows_moved += @batch_rows;
+
+ IF @batch_rows > 0
+ BEGIN
+ RAISERROR(N' Migrated %I64d rows so far...', 0, 1, @rows_moved) WITH NOWAIT;
+ END;
+ END;
+
+ SET IDENTITY_INSERT collect.query_stats_new OFF;
+
+ PRINT '';
+ PRINT 'Migration complete: ' + CAST(@rows_moved AS varchar(20)) + ' rows moved';
+
+ /*
+ Step 4: Rename old -> _old, new -> original
+ */
+ EXEC sp_rename
+ N'collect.query_stats',
+ N'query_stats_old',
+ N'OBJECT';
+
+ /* Rename old table's PK first to free the name */
+ EXEC sp_rename
+ N'collect.query_stats_old.PK_query_stats',
+ N'PK_query_stats_old',
+ N'INDEX';
+
+ EXEC sp_rename
+ N'collect.query_stats_new',
+ N'query_stats',
+ N'OBJECT';
+
+ EXEC sp_rename
+ N'collect.query_stats.PK_query_stats_new',
+ N'PK_query_stats',
+ N'INDEX';
+
+ PRINT '';
+ PRINT 'Renamed tables: query_stats -> query_stats_old, query_stats_new -> query_stats';
+ PRINT '';
+ PRINT '=== collect.query_stats migration complete ===';
+ PRINT '';
+ PRINT 'The old table is preserved as collect.query_stats_old.';
+ PRINT 'After verifying the migration, you can drop it:';
+ PRINT ' DROP TABLE IF EXISTS collect.query_stats_old;';
+
+END TRY
+BEGIN CATCH
+ PRINT '';
+ PRINT '*** ERROR migrating collect.query_stats ***';
+ PRINT 'Error ' + CAST(ERROR_NUMBER() AS varchar(10)) + ': ' + ERROR_MESSAGE();
+ PRINT '';
+ PRINT 'The original table has not been renamed.';
+ PRINT 'If collect.query_stats_new exists, it contains partial data.';
+ PRINT 'Review and resolve the error, then re-run this script.';
+END CATCH;
+GO
diff --git a/upgrades/2.1.0-to-2.2.0/02_compress_query_store_data.sql b/upgrades/2.1.0-to-2.2.0/02_compress_query_store_data.sql
new file mode 100644
index 0000000..71dc9f1
--- /dev/null
+++ b/upgrades/2.1.0-to-2.2.0/02_compress_query_store_data.sql
@@ -0,0 +1,368 @@
+/*
+Copyright 2026 Darling Data, LLC
+https://www.erikdarling.com/
+
+Upgrade from 2.1.0 to 2.2.0
+Migrates collect.query_store_data to compressed LOB storage:
+ - query_sql_text nvarchar(max) -> varbinary(max) via COMPRESS()
+ - query_plan_text nvarchar(max) -> varbinary(max) via COMPRESS()
+ - compilation_metrics xml -> varbinary(max) via COMPRESS(CAST(... AS nvarchar(max)))
+ - Adds row_hash binary(32) for deduplication
+*/
+
+SET ANSI_NULLS ON;
+SET ANSI_PADDING ON;
+SET ANSI_WARNINGS ON;
+SET ARITHABORT ON;
+SET CONCAT_NULL_YIELDS_NULL ON;
+SET QUOTED_IDENTIFIER ON;
+SET NUMERIC_ROUNDABORT OFF;
+SET IMPLICIT_TRANSACTIONS OFF;
+SET STATISTICS TIME, IO OFF;
+GO
+
+USE PerformanceMonitor;
+GO
+
+/*
+Skip if already migrated (query_sql_text is already varbinary). NOTE(review): RETURN exits only this batch — the GO-separated migration batch below still runs on re-execution, which would double-COMPRESS and re-move data; guard the TRY batch itself (or SET NOEXEC ON) before relying on this check.
+*/
+IF EXISTS
+(
+ SELECT
+ 1/0
+ FROM sys.columns
+ WHERE object_id = OBJECT_ID(N'collect.query_store_data')
+ AND name = N'query_sql_text'
+ AND system_type_id = 165 /*varbinary*/
+)
+BEGIN
+ PRINT 'collect.query_store_data already migrated to compressed storage — skipping.';
+ RETURN;
+END;
+GO
+
+/*
+Skip if source table doesn't exist
+*/
+IF OBJECT_ID(N'collect.query_store_data', N'U') IS NULL
+BEGIN
+ PRINT 'collect.query_store_data does not exist — skipping.';
+ RETURN;
+END;
+GO
+
+PRINT '=== Migrating collect.query_store_data to compressed LOB storage ===';
+PRINT '';
+GO
+
+BEGIN TRY
+
+ /*
+ Step 1: Create the _new table with compressed column types
+ */
+ IF OBJECT_ID(N'collect.query_store_data_new', N'U') IS NOT NULL
+ BEGIN
+ DROP TABLE collect.query_store_data_new;
+ PRINT 'Dropped existing collect.query_store_data_new';
+ END;
+
+ CREATE TABLE
+ collect.query_store_data_new
+ (
+ collection_id bigint IDENTITY NOT NULL,
+ collection_time datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ database_name sysname NOT NULL,
+ query_id bigint NOT NULL,
+ plan_id bigint NOT NULL,
+ execution_type_desc nvarchar(60) NULL,
+ utc_first_execution_time datetimeoffset(7) NOT NULL,
+ utc_last_execution_time datetimeoffset(7) NOT NULL,
+ server_first_execution_time datetime2(7) NOT NULL,
+ server_last_execution_time datetime2(7) NOT NULL,
+ module_name nvarchar(261) NULL,
+ query_sql_text varbinary(max) NULL,
+ query_hash binary(8) NULL,
+ /*Execution count*/
+ count_executions bigint NOT NULL,
+ /*Duration metrics (microseconds)*/
+ avg_duration bigint NOT NULL,
+ min_duration bigint NOT NULL,
+ max_duration bigint NOT NULL,
+ /*CPU time metrics (microseconds)*/
+ avg_cpu_time bigint NOT NULL,
+ min_cpu_time bigint NOT NULL,
+ max_cpu_time bigint NOT NULL,
+ /*Logical IO reads*/
+ avg_logical_io_reads bigint NOT NULL,
+ min_logical_io_reads bigint NOT NULL,
+ max_logical_io_reads bigint NOT NULL,
+ /*Logical IO writes*/
+ avg_logical_io_writes bigint NOT NULL,
+ min_logical_io_writes bigint NOT NULL,
+ max_logical_io_writes bigint NOT NULL,
+ /*Physical IO reads*/
+ avg_physical_io_reads bigint NOT NULL,
+ min_physical_io_reads bigint NOT NULL,
+ max_physical_io_reads bigint NOT NULL,
+ /*Number of physical IO reads - NULL on SQL 2016*/
+ avg_num_physical_io_reads bigint NULL,
+ min_num_physical_io_reads bigint NULL,
+ max_num_physical_io_reads bigint NULL,
+ /*CLR time (microseconds)*/
+ avg_clr_time bigint NOT NULL,
+ min_clr_time bigint NOT NULL,
+ max_clr_time bigint NOT NULL,
+ /*DOP (degree of parallelism)*/
+ min_dop bigint NOT NULL,
+ max_dop bigint NOT NULL,
+ /*Memory grant (8KB pages)*/
+ avg_query_max_used_memory bigint NOT NULL,
+ min_query_max_used_memory bigint NOT NULL,
+ max_query_max_used_memory bigint NOT NULL,
+ /*Row count*/
+ avg_rowcount bigint NOT NULL,
+ min_rowcount bigint NOT NULL,
+ max_rowcount bigint NOT NULL,
+ /*Log bytes used*/
+ avg_log_bytes_used bigint NULL,
+ min_log_bytes_used bigint NULL,
+ max_log_bytes_used bigint NULL,
+ /*Tempdb space used (8KB pages)*/
+ avg_tempdb_space_used bigint NULL,
+ min_tempdb_space_used bigint NULL,
+ max_tempdb_space_used bigint NULL,
+ /*Plan information*/
+ plan_type nvarchar(60) NULL,
+ is_forced_plan bit NOT NULL,
+ force_failure_count bigint NULL,
+ last_force_failure_reason_desc nvarchar(128) NULL,
+ plan_forcing_type nvarchar(60) NULL,
+ compatibility_level smallint NULL,
+ query_plan_text varbinary(max) NULL,
+ compilation_metrics varbinary(max) NULL,
+ query_plan_hash binary(8) NULL,
+ /*Deduplication hash for skipping unchanged rows*/
+ row_hash binary(32) NULL,
+ CONSTRAINT
+ PK_query_store_data_new
+ PRIMARY KEY CLUSTERED
+ (collection_time, collection_id)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+
+ PRINT 'Created collect.query_store_data_new';
+
+ /*
+ Step 2: Reseed IDENTITY to continue from the old table
+ */
+ DECLARE
+ @max_id bigint;
+
+ SELECT
+ @max_id = ISNULL(MAX(collection_id), 0)
+ FROM collect.query_store_data;
+
+ DBCC CHECKIDENT(N'collect.query_store_data_new', RESEED, @max_id);
+
+ PRINT 'Reseeded IDENTITY to ' + CAST(@max_id AS varchar(20));
+
+ /*
+ Step 3: Migrate data in batches with COMPRESS on LOB columns
+ compilation_metrics is xml, so CAST to nvarchar(max) before COMPRESS
+ */
+ DECLARE
+ @batch_size integer = 10000,
+ @rows_moved bigint = 0,
+ @batch_rows integer = 1;
+
+ PRINT '';
+ PRINT 'Migrating data in batches of ' + CAST(@batch_size AS varchar(10)) + '...';
+
+ SET IDENTITY_INSERT collect.query_store_data_new ON;
+
+ WHILE @batch_rows > 0
+ BEGIN
+ DELETE TOP (@batch_size)
+ FROM collect.query_store_data
+ OUTPUT
+ deleted.collection_id,
+ deleted.collection_time,
+ deleted.database_name,
+ deleted.query_id,
+ deleted.plan_id,
+ deleted.execution_type_desc,
+ deleted.utc_first_execution_time,
+ deleted.utc_last_execution_time,
+ deleted.server_first_execution_time,
+ deleted.server_last_execution_time,
+ deleted.module_name,
+ COMPRESS(deleted.query_sql_text),
+ deleted.query_hash,
+ deleted.count_executions,
+ deleted.avg_duration,
+ deleted.min_duration,
+ deleted.max_duration,
+ deleted.avg_cpu_time,
+ deleted.min_cpu_time,
+ deleted.max_cpu_time,
+ deleted.avg_logical_io_reads,
+ deleted.min_logical_io_reads,
+ deleted.max_logical_io_reads,
+ deleted.avg_logical_io_writes,
+ deleted.min_logical_io_writes,
+ deleted.max_logical_io_writes,
+ deleted.avg_physical_io_reads,
+ deleted.min_physical_io_reads,
+ deleted.max_physical_io_reads,
+ deleted.avg_num_physical_io_reads,
+ deleted.min_num_physical_io_reads,
+ deleted.max_num_physical_io_reads,
+ deleted.avg_clr_time,
+ deleted.min_clr_time,
+ deleted.max_clr_time,
+ deleted.min_dop,
+ deleted.max_dop,
+ deleted.avg_query_max_used_memory,
+ deleted.min_query_max_used_memory,
+ deleted.max_query_max_used_memory,
+ deleted.avg_rowcount,
+ deleted.min_rowcount,
+ deleted.max_rowcount,
+ deleted.avg_log_bytes_used,
+ deleted.min_log_bytes_used,
+ deleted.max_log_bytes_used,
+ deleted.avg_tempdb_space_used,
+ deleted.min_tempdb_space_used,
+ deleted.max_tempdb_space_used,
+ deleted.plan_type,
+ deleted.is_forced_plan,
+ deleted.force_failure_count,
+ deleted.last_force_failure_reason_desc,
+ deleted.plan_forcing_type,
+ deleted.compatibility_level,
+ COMPRESS(deleted.query_plan_text),
+ COMPRESS(CAST(deleted.compilation_metrics AS nvarchar(max))),
+ deleted.query_plan_hash
+ INTO collect.query_store_data_new
+ (
+ collection_id,
+ collection_time,
+ database_name,
+ query_id,
+ plan_id,
+ execution_type_desc,
+ utc_first_execution_time,
+ utc_last_execution_time,
+ server_first_execution_time,
+ server_last_execution_time,
+ module_name,
+ query_sql_text,
+ query_hash,
+ count_executions,
+ avg_duration,
+ min_duration,
+ max_duration,
+ avg_cpu_time,
+ min_cpu_time,
+ max_cpu_time,
+ avg_logical_io_reads,
+ min_logical_io_reads,
+ max_logical_io_reads,
+ avg_logical_io_writes,
+ min_logical_io_writes,
+ max_logical_io_writes,
+ avg_physical_io_reads,
+ min_physical_io_reads,
+ max_physical_io_reads,
+ avg_num_physical_io_reads,
+ min_num_physical_io_reads,
+ max_num_physical_io_reads,
+ avg_clr_time,
+ min_clr_time,
+ max_clr_time,
+ min_dop,
+ max_dop,
+ avg_query_max_used_memory,
+ min_query_max_used_memory,
+ max_query_max_used_memory,
+ avg_rowcount,
+ min_rowcount,
+ max_rowcount,
+ avg_log_bytes_used,
+ min_log_bytes_used,
+ max_log_bytes_used,
+ avg_tempdb_space_used,
+ min_tempdb_space_used,
+ max_tempdb_space_used,
+ plan_type,
+ is_forced_plan,
+ force_failure_count,
+ last_force_failure_reason_desc,
+ plan_forcing_type,
+ compatibility_level,
+ query_plan_text,
+ compilation_metrics,
+ query_plan_hash
+ );
+
+ SET @batch_rows = @@ROWCOUNT;
+ SET @rows_moved += @batch_rows;
+
+ IF @batch_rows > 0
+ BEGIN
+ RAISERROR(N' Migrated %I64d rows so far...', 0, 1, @rows_moved) WITH NOWAIT;
+ END;
+ END;
+
+ SET IDENTITY_INSERT collect.query_store_data_new OFF;
+
+ PRINT '';
+ PRINT 'Migration complete: ' + CAST(@rows_moved AS varchar(20)) + ' rows moved';
+
+ /*
+ Step 4: Rename old -> _old, new -> original
+ */
+ EXEC sp_rename
+ N'collect.query_store_data',
+ N'query_store_data_old',
+ N'OBJECT';
+
+ /* Rename old table's PK first to free the name */
+ EXEC sp_rename
+ N'collect.query_store_data_old.PK_query_store_data',
+ N'PK_query_store_data_old',
+ N'INDEX';
+
+ EXEC sp_rename
+ N'collect.query_store_data_new',
+ N'query_store_data',
+ N'OBJECT';
+
+ EXEC sp_rename
+ N'collect.query_store_data.PK_query_store_data_new',
+ N'PK_query_store_data',
+ N'INDEX';
+
+ PRINT '';
+ PRINT 'Renamed tables: query_store_data -> query_store_data_old, query_store_data_new -> query_store_data';
+ PRINT '';
+ PRINT '=== collect.query_store_data migration complete ===';
+ PRINT '';
+ PRINT 'The old table is preserved as collect.query_store_data_old.';
+ PRINT 'After verifying the migration, you can drop it:';
+ PRINT ' DROP TABLE IF EXISTS collect.query_store_data_old;';
+
+END TRY
+BEGIN CATCH
+ PRINT '';
+ PRINT '*** ERROR migrating collect.query_store_data ***';
+ PRINT 'Error ' + CAST(ERROR_NUMBER() AS varchar(10)) + ': ' + ERROR_MESSAGE();
+ PRINT '';
+ PRINT 'The original table has not been renamed.';
+ PRINT 'If collect.query_store_data_new exists, it contains partial data.';
+ PRINT 'Review and resolve the error, then re-run this script.';
+END CATCH;
+GO
diff --git a/upgrades/2.1.0-to-2.2.0/03_compress_procedure_stats.sql b/upgrades/2.1.0-to-2.2.0/03_compress_procedure_stats.sql
new file mode 100644
index 0000000..dc672aa
--- /dev/null
+++ b/upgrades/2.1.0-to-2.2.0/03_compress_procedure_stats.sql
@@ -0,0 +1,325 @@
+/*
+Copyright 2026 Darling Data, LLC
+https://www.erikdarling.com/
+
+Upgrade from 2.1.0 to 2.2.0
+Migrates collect.procedure_stats to compressed LOB storage:
+ - query_plan_text nvarchar(max) -> varbinary(max) via COMPRESS()
+ - Drops unused query_plan xml column (never populated by collectors)
+ - Adds row_hash binary(32) for deduplication
+*/
+
+SET ANSI_NULLS ON;
+SET ANSI_PADDING ON;
+SET ANSI_WARNINGS ON;
+SET ARITHABORT ON;
+SET CONCAT_NULL_YIELDS_NULL ON;
+SET QUOTED_IDENTIFIER ON;
+SET NUMERIC_ROUNDABORT OFF;
+SET IMPLICIT_TRANSACTIONS OFF;
+SET STATISTICS TIME, IO OFF;
+GO
+
+USE PerformanceMonitor;
+GO
+
+/*
+Skip if already migrated (query_plan_text is already varbinary). NOTE(review): RETURN exits only this batch — the GO-separated migration batch below still runs on re-execution, which would double-COMPRESS and re-move data; guard the TRY batch itself (or SET NOEXEC ON) before relying on this check.
+*/
+IF EXISTS
+(
+ SELECT
+ 1/0
+ FROM sys.columns
+ WHERE object_id = OBJECT_ID(N'collect.procedure_stats')
+ AND name = N'query_plan_text'
+ AND system_type_id = 165 /*varbinary*/
+)
+BEGIN
+ PRINT 'collect.procedure_stats already migrated to compressed storage — skipping.';
+ RETURN;
+END;
+GO
+
+/*
+Skip if source table doesn't exist
+*/
+IF OBJECT_ID(N'collect.procedure_stats', N'U') IS NULL
+BEGIN
+ PRINT 'collect.procedure_stats does not exist — skipping.';
+ RETURN;
+END;
+GO
+
+PRINT '=== Migrating collect.procedure_stats to compressed LOB storage ===';
+PRINT '';
+GO
+
+BEGIN TRY
+
+ /*
+ Step 1: Create the _new table with compressed column types
+ */
+ IF OBJECT_ID(N'collect.procedure_stats_new', N'U') IS NOT NULL
+ BEGIN
+ DROP TABLE collect.procedure_stats_new;
+ PRINT 'Dropped existing collect.procedure_stats_new';
+ END;
+
+ CREATE TABLE
+ collect.procedure_stats_new
+ (
+ collection_id bigint IDENTITY NOT NULL,
+ collection_time datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ server_start_time datetime2(7) NOT NULL,
+ object_type nvarchar(20) NOT NULL,
+ database_name sysname NOT NULL,
+ object_id integer NOT NULL,
+ object_name sysname NULL,
+ schema_name sysname NULL,
+ type_desc nvarchar(60) NULL,
+ sql_handle varbinary(64) NOT NULL,
+ plan_handle varbinary(64) NOT NULL,
+ cached_time datetime2(7) NOT NULL,
+ last_execution_time datetime2(7) NOT NULL,
+ /*Raw cumulative values*/
+ execution_count bigint NOT NULL,
+ total_worker_time bigint NOT NULL,
+ min_worker_time bigint NOT NULL,
+ max_worker_time bigint NOT NULL,
+ total_elapsed_time bigint NOT NULL,
+ min_elapsed_time bigint NOT NULL,
+ max_elapsed_time bigint NOT NULL,
+ total_logical_reads bigint NOT NULL,
+ min_logical_reads bigint NOT NULL,
+ max_logical_reads bigint NOT NULL,
+ total_physical_reads bigint NOT NULL,
+ min_physical_reads bigint NOT NULL,
+ max_physical_reads bigint NOT NULL,
+ total_logical_writes bigint NOT NULL,
+ min_logical_writes bigint NOT NULL,
+ max_logical_writes bigint NOT NULL,
+ total_spills bigint NULL,
+ min_spills bigint NULL,
+ max_spills bigint NULL,
+ /*Delta calculations*/
+ execution_count_delta bigint NULL,
+ total_worker_time_delta bigint NULL,
+ total_elapsed_time_delta bigint NULL,
+ total_logical_reads_delta bigint NULL,
+ total_physical_reads_delta bigint NULL,
+ total_logical_writes_delta bigint NULL,
+ sample_interval_seconds integer NULL,
+ /*Analysis helpers - computed columns*/
+ avg_worker_time_ms AS
+ (
+ total_worker_time /
+ NULLIF(execution_count, 0) / 1000.
+ ),
+ avg_elapsed_time_ms AS
+ (
+ total_elapsed_time /
+ NULLIF(execution_count, 0) / 1000.
+ ),
+ avg_physical_reads AS
+ (
+ total_physical_reads /
+ NULLIF(execution_count, 0)
+ ),
+ worker_time_per_second AS
+ (
+ total_worker_time_delta /
+ NULLIF(sample_interval_seconds, 0) / 1000.
+ ),
+ /*Execution plan (compressed with COMPRESS/DECOMPRESS)*/
+ query_plan_text varbinary(max) NULL,
+ /*Deduplication hash for skipping unchanged rows*/
+ row_hash binary(32) NULL,
+ CONSTRAINT
+ PK_procedure_stats_new
+ PRIMARY KEY CLUSTERED
+ (collection_time, collection_id)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+
+ PRINT 'Created collect.procedure_stats_new';
+
+ /*
+ Step 2: Reseed IDENTITY to continue from the old table
+ */
+ DECLARE
+ @max_id bigint;
+
+ SELECT
+ @max_id = ISNULL(MAX(collection_id), 0)
+ FROM collect.procedure_stats;
+
+ DBCC CHECKIDENT(N'collect.procedure_stats_new', RESEED, @max_id);
+
+ PRINT 'Reseeded IDENTITY to ' + CAST(@max_id AS varchar(20));
+
+ /*
+ Step 3: Migrate data in batches with COMPRESS on LOB columns
+ Omits query_plan xml (never populated, dropping it)
+ Omits computed columns (avg_worker_time_ms, avg_elapsed_time_ms,
+ avg_physical_reads, worker_time_per_second) — can't appear in OUTPUT
+ */
+ DECLARE
+ @batch_size integer = 10000,
+ @rows_moved bigint = 0,
+ @batch_rows integer = 1;
+
+ PRINT '';
+ PRINT 'Migrating data in batches of ' + CAST(@batch_size AS varchar(10)) + '...';
+
+ SET IDENTITY_INSERT collect.procedure_stats_new ON;
+
+ WHILE @batch_rows > 0
+ BEGIN
+ DELETE TOP (@batch_size)
+ FROM collect.procedure_stats
+ OUTPUT
+ deleted.collection_id,
+ deleted.collection_time,
+ deleted.server_start_time,
+ deleted.object_type,
+ deleted.database_name,
+ deleted.object_id,
+ deleted.object_name,
+ deleted.schema_name,
+ deleted.type_desc,
+ deleted.sql_handle,
+ deleted.plan_handle,
+ deleted.cached_time,
+ deleted.last_execution_time,
+ deleted.execution_count,
+ deleted.total_worker_time,
+ deleted.min_worker_time,
+ deleted.max_worker_time,
+ deleted.total_elapsed_time,
+ deleted.min_elapsed_time,
+ deleted.max_elapsed_time,
+ deleted.total_logical_reads,
+ deleted.min_logical_reads,
+ deleted.max_logical_reads,
+ deleted.total_physical_reads,
+ deleted.min_physical_reads,
+ deleted.max_physical_reads,
+ deleted.total_logical_writes,
+ deleted.min_logical_writes,
+ deleted.max_logical_writes,
+ deleted.total_spills,
+ deleted.min_spills,
+ deleted.max_spills,
+ deleted.execution_count_delta,
+ deleted.total_worker_time_delta,
+ deleted.total_elapsed_time_delta,
+ deleted.total_logical_reads_delta,
+ deleted.total_physical_reads_delta,
+ deleted.total_logical_writes_delta,
+ deleted.sample_interval_seconds,
+ COMPRESS(deleted.query_plan_text)
+ INTO collect.procedure_stats_new
+ (
+ collection_id,
+ collection_time,
+ server_start_time,
+ object_type,
+ database_name,
+ object_id,
+ object_name,
+ schema_name,
+ type_desc,
+ sql_handle,
+ plan_handle,
+ cached_time,
+ last_execution_time,
+ execution_count,
+ total_worker_time,
+ min_worker_time,
+ max_worker_time,
+ total_elapsed_time,
+ min_elapsed_time,
+ max_elapsed_time,
+ total_logical_reads,
+ min_logical_reads,
+ max_logical_reads,
+ total_physical_reads,
+ min_physical_reads,
+ max_physical_reads,
+ total_logical_writes,
+ min_logical_writes,
+ max_logical_writes,
+ total_spills,
+ min_spills,
+ max_spills,
+ execution_count_delta,
+ total_worker_time_delta,
+ total_elapsed_time_delta,
+ total_logical_reads_delta,
+ total_physical_reads_delta,
+ total_logical_writes_delta,
+ sample_interval_seconds,
+ query_plan_text
+ );
+
+ SET @batch_rows = @@ROWCOUNT;
+ SET @rows_moved += @batch_rows;
+
+ IF @batch_rows > 0
+ BEGIN
+ RAISERROR(N' Migrated %I64d rows so far...', 0, 1, @rows_moved) WITH NOWAIT;
+ END;
+ END;
+
+ SET IDENTITY_INSERT collect.procedure_stats_new OFF;
+
+ PRINT '';
+ PRINT 'Migration complete: ' + CAST(@rows_moved AS varchar(20)) + ' rows moved';
+
+ /*
+ Step 4: Rename old -> _old, new -> original
+ */
+ EXEC sp_rename
+ N'collect.procedure_stats',
+ N'procedure_stats_old',
+ N'OBJECT';
+
+ /* Rename old table's PK first to free the name */
+ EXEC sp_rename
+ N'collect.procedure_stats_old.PK_procedure_stats',
+ N'PK_procedure_stats_old',
+ N'INDEX';
+
+ EXEC sp_rename
+ N'collect.procedure_stats_new',
+ N'procedure_stats',
+ N'OBJECT';
+
+ EXEC sp_rename
+ N'collect.procedure_stats.PK_procedure_stats_new',
+ N'PK_procedure_stats',
+ N'INDEX';
+
+ PRINT '';
+ PRINT 'Renamed tables: procedure_stats -> procedure_stats_old, procedure_stats_new -> procedure_stats';
+ PRINT '';
+ PRINT '=== collect.procedure_stats migration complete ===';
+ PRINT '';
+ PRINT 'The old table is preserved as collect.procedure_stats_old.';
+ PRINT 'After verifying the migration, you can drop it:';
+ PRINT ' DROP TABLE IF EXISTS collect.procedure_stats_old;';
+
+END TRY
+BEGIN CATCH
+ PRINT '';
+ PRINT '*** ERROR migrating collect.procedure_stats ***';
+ PRINT 'Error ' + CAST(ERROR_NUMBER() AS varchar(10)) + ': ' + ERROR_MESSAGE();
+ PRINT '';
+ PRINT 'The original table has not been renamed.';
+ PRINT 'If collect.procedure_stats_new exists, it contains partial data.';
+ PRINT 'Review and resolve the error, then re-run this script.';
+END CATCH;
+GO
diff --git a/upgrades/2.1.0-to-2.2.0/04_create_tracking_tables.sql b/upgrades/2.1.0-to-2.2.0/04_create_tracking_tables.sql
new file mode 100644
index 0000000..dae8c83
--- /dev/null
+++ b/upgrades/2.1.0-to-2.2.0/04_create_tracking_tables.sql
@@ -0,0 +1,106 @@
+/*
+Copyright 2026 Darling Data, LLC
+https://www.erikdarling.com/
+
+Upgrade from 2.1.0 to 2.2.0
+Creates deduplication tracking tables for the three compressed collectors.
+Each table holds one row per natural key with the latest row_hash,
+allowing collectors to skip unchanged rows without scanning full history.
+*/
+
+SET ANSI_NULLS ON;
+SET ANSI_PADDING ON;
+SET ANSI_WARNINGS ON;
+SET ARITHABORT ON;
+SET CONCAT_NULL_YIELDS_NULL ON;
+SET QUOTED_IDENTIFIER ON;
+SET NUMERIC_ROUNDABORT OFF;
+SET IMPLICIT_TRANSACTIONS OFF;
+SET STATISTICS TIME, IO OFF;
+GO
+
+USE PerformanceMonitor;
+GO
+
+/*
+Idempotent create: one row per plan-level natural key
+(sql_handle + statement offsets + plan_handle) holding the latest
+row_hash, so the query_stats collector can skip unchanged rows
+without scanning full history.
+*/
+IF OBJECT_ID(N'collect.query_stats_latest_hash', N'U') IS NULL
+BEGIN
+    CREATE TABLE
+        collect.query_stats_latest_hash
+    (
+        sql_handle varbinary(64) NOT NULL,
+        statement_start_offset integer NOT NULL,
+        statement_end_offset integer NOT NULL,
+        plan_handle varbinary(64) NOT NULL,
+        row_hash binary(32) NOT NULL, /* SHA2_256 of the tracked columns */
+        last_seen datetime2(7) NOT NULL
+            DEFAULT SYSDATETIME(),
+        CONSTRAINT
+            PK_query_stats_latest_hash
+        PRIMARY KEY CLUSTERED
+            (sql_handle, statement_start_offset,
+             statement_end_offset, plan_handle)
+        WITH
+            (DATA_COMPRESSION = PAGE)
+    );
+
+    /* N prefix: messages previously used varchar literals with a
+       non-ASCII em dash, which mangles on non-Latin code pages. */
+    PRINT N'Created collect.query_stats_latest_hash';
+END;
+ELSE
+BEGIN
+    PRINT N'collect.query_stats_latest_hash already exists - skipping.';
+END;
+GO
+
+/*
+Idempotent create: one row per procedure natural key
+(database_name + object_id + plan_handle) holding the latest
+row_hash, so the procedure_stats collector can skip unchanged
+rows without scanning full history.
+*/
+IF OBJECT_ID(N'collect.procedure_stats_latest_hash', N'U') IS NULL
+BEGIN
+    CREATE TABLE
+        collect.procedure_stats_latest_hash
+    (
+        database_name sysname NOT NULL,
+        object_id integer NOT NULL,
+        plan_handle varbinary(64) NOT NULL,
+        row_hash binary(32) NOT NULL, /* SHA2_256 of the tracked columns */
+        last_seen datetime2(7) NOT NULL
+            DEFAULT SYSDATETIME(),
+        CONSTRAINT
+            PK_procedure_stats_latest_hash
+        PRIMARY KEY CLUSTERED
+            (database_name, object_id, plan_handle)
+        WITH
+            (DATA_COMPRESSION = PAGE)
+    );
+
+    /* N prefix: messages previously used varchar literals with a
+       non-ASCII em dash, which mangles on non-Latin code pages. */
+    PRINT N'Created collect.procedure_stats_latest_hash';
+END;
+ELSE
+BEGIN
+    PRINT N'collect.procedure_stats_latest_hash already exists - skipping.';
+END;
+GO
+
+/*
+Idempotent create: one row per Query Store natural key
+(database_name + query_id + plan_id) holding the latest row_hash,
+so the query_store collector can skip unchanged rows without
+scanning full history.
+*/
+IF OBJECT_ID(N'collect.query_store_data_latest_hash', N'U') IS NULL
+BEGIN
+    CREATE TABLE
+        collect.query_store_data_latest_hash
+    (
+        database_name sysname NOT NULL,
+        query_id bigint NOT NULL,
+        plan_id bigint NOT NULL,
+        row_hash binary(32) NOT NULL, /* SHA2_256 of the tracked columns */
+        last_seen datetime2(7) NOT NULL
+            DEFAULT SYSDATETIME(),
+        CONSTRAINT
+            PK_query_store_data_latest_hash
+        PRIMARY KEY CLUSTERED
+            (database_name, query_id, plan_id)
+        WITH
+            (DATA_COMPRESSION = PAGE)
+    );
+
+    /* N prefix: messages previously used varchar literals with a
+       non-ASCII em dash, which mangles on non-Latin code pages. */
+    PRINT N'Created collect.query_store_data_latest_hash';
+END;
+ELSE
+BEGIN
+    PRINT N'collect.query_store_data_latest_hash already exists - skipping.';
+END;
+GO
diff --git a/upgrades/2.1.0-to-2.2.0/upgrade.txt b/upgrades/2.1.0-to-2.2.0/upgrade.txt
new file mode 100644
index 0000000..22e6d63
--- /dev/null
+++ b/upgrades/2.1.0-to-2.2.0/upgrade.txt
@@ -0,0 +1,4 @@
+01_compress_query_stats.sql
+02_compress_query_store_data.sql
+03_compress_procedure_stats.sql
+04_create_tracking_tables.sql