diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
new file mode 100644
index 0000000..dc1c917
--- /dev/null
+++ b/.github/workflows/nightly.yml
@@ -0,0 +1,148 @@
+name: Nightly Build
+
+on:
+ schedule:
+ # 6:00 AM UTC (1:00 AM EST / 2:00 AM EDT)
+ - cron: '0 6 * * *'
+ workflow_dispatch: # manual trigger
+
+permissions:
+ contents: write
+
+jobs:
+ check:
+ runs-on: ubuntu-latest
+ outputs:
+ has_changes: ${{ steps.check.outputs.has_changes }}
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ ref: dev
+ fetch-depth: 0
+
+ - name: Check for new commits in last 24 hours
+ id: check
+ run: |
+ RECENT=$(git log --since="24 hours ago" --oneline | head -1)
+ if [ -n "$RECENT" ]; then
+ echo "has_changes=true" >> $GITHUB_OUTPUT
+ echo "New commits found — building nightly"
+ else
+ echo "has_changes=false" >> $GITHUB_OUTPUT
+ echo "No new commits — skipping nightly build"
+ fi
+
+ build:
+ needs: check
+ if: needs.check.outputs.has_changes == 'true' || github.event_name == 'workflow_dispatch'
+ runs-on: windows-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ ref: dev
+
+ - name: Setup .NET 8.0
+ uses: actions/setup-dotnet@v4
+ with:
+ dotnet-version: 8.0.x
+
+ - name: Set nightly version
+ id: version
+ shell: pwsh
+ run: |
+ $base = ([xml](Get-Content Dashboard/Dashboard.csproj)).Project.PropertyGroup.Version | Where-Object { $_ }
+ $date = Get-Date -Format "yyyyMMdd"
+ $nightly = "$base-nightly.$date"
+ echo "VERSION=$nightly" >> $env:GITHUB_OUTPUT
+ echo "Nightly version: $nightly"
+
+ - name: Restore dependencies
+ run: |
+ dotnet restore Dashboard/Dashboard.csproj
+ dotnet restore Lite/PerformanceMonitorLite.csproj
+ dotnet restore Installer/PerformanceMonitorInstaller.csproj
+ dotnet restore InstallerGui/InstallerGui.csproj
+ dotnet restore Lite.Tests/Lite.Tests.csproj
+
+ - name: Run tests
+ run: dotnet test Lite.Tests/Lite.Tests.csproj -c Release --verbosity normal
+
+ - name: Publish Dashboard
+ run: dotnet publish Dashboard/Dashboard.csproj -c Release -o publish/Dashboard
+
+ - name: Publish Lite
+ run: dotnet publish Lite/PerformanceMonitorLite.csproj -c Release -o publish/Lite
+
+ - name: Publish CLI Installer
+ run: dotnet publish Installer/PerformanceMonitorInstaller.csproj -c Release
+
+ - name: Publish GUI Installer
+ run: dotnet publish InstallerGui/InstallerGui.csproj -c Release
+
+ - name: Package artifacts
+ shell: pwsh
+ run: |
+ $version = "${{ steps.version.outputs.VERSION }}"
+ New-Item -ItemType Directory -Force -Path releases
+
+ Compress-Archive -Path 'publish/Dashboard/*' -DestinationPath "releases/PerformanceMonitorDashboard-$version.zip" -Force
+ Compress-Archive -Path 'publish/Lite/*' -DestinationPath "releases/PerformanceMonitorLite-$version.zip" -Force
+
+ $instDir = 'publish/Installer'
+ New-Item -ItemType Directory -Force -Path $instDir
+ New-Item -ItemType Directory -Force -Path "$instDir/install"
+ New-Item -ItemType Directory -Force -Path "$instDir/upgrades"
+
+ Copy-Item 'Installer/bin/Release/net8.0/win-x64/publish/PerformanceMonitorInstaller.exe' $instDir
+ Copy-Item 'InstallerGui/bin/Release/net8.0-windows/win-x64/publish/PerformanceMonitorInstallerGui.exe' $instDir -ErrorAction SilentlyContinue
+ Copy-Item 'install/*.sql' "$instDir/install/"
+ if (Test-Path 'install/templates') { Copy-Item 'install/templates' "$instDir/install/templates" -Recurse -ErrorAction SilentlyContinue }
+ if (Test-Path 'upgrades') { Copy-Item 'upgrades/*' "$instDir/upgrades/" -Recurse -ErrorAction SilentlyContinue }
+ if (Test-Path 'README.md') { Copy-Item 'README.md' $instDir }
+ if (Test-Path 'LICENSE') { Copy-Item 'LICENSE' $instDir }
+ if (Test-Path 'THIRD_PARTY_NOTICES.md') { Copy-Item 'THIRD_PARTY_NOTICES.md' $instDir }
+
+ Compress-Archive -Path "$instDir/*" -DestinationPath "releases/PerformanceMonitorInstaller-$version.zip" -Force
+
+ - name: Generate checksums
+ shell: pwsh
+ run: |
+ $checksums = Get-ChildItem releases/*.zip | ForEach-Object {
+ $hash = (Get-FileHash $_.FullName -Algorithm SHA256).Hash.ToLower()
+ "$hash $($_.Name)"
+ }
+ $checksums | Out-File -FilePath releases/SHA256SUMS.txt -Encoding utf8
+ Write-Host "Checksums:"
+ $checksums | ForEach-Object { Write-Host $_ }
+
+ - name: Delete previous nightly release
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: gh release delete nightly --yes --cleanup-tag 2>$null; exit 0
+ shell: pwsh
+
+ - name: Create nightly release
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ shell: pwsh
+ run: |
+ $version = "${{ steps.version.outputs.VERSION }}"
+ $sha = git rev-parse --short HEAD
+ $body = @"
+ Automated nightly build from ``dev`` branch.
+
+ **Version:** ``$version``
+ **Commit:** ``$sha``
+ **Built:** $(Get-Date -Format "yyyy-MM-dd HH:mm UTC")
+
+ > These builds include the latest changes and may be unstable.
+ > For production use, download the [latest stable release](https://github.com/erikdarlingdata/PerformanceMonitor/releases/latest).
+ "@
+
+ gh release create nightly `
+ --target dev `
+ --title "Nightly Build ($version)" `
+ --notes $body `
+ --prerelease `
+ releases/*.zip releases/SHA256SUMS.txt
diff --git a/Dashboard/Controls/MemoryContent.xaml.cs b/Dashboard/Controls/MemoryContent.xaml.cs
index f78b0ab..f381af5 100644
--- a/Dashboard/Controls/MemoryContent.xaml.cs
+++ b/Dashboard/Controls/MemoryContent.xaml.cs
@@ -82,6 +82,14 @@ public MemoryContent()
SetupChartContextMenus();
Loaded += OnLoaded;
+ // Apply dark theme immediately so charts don't flash white before data loads
+ TabHelpers.ApplyDarkModeToChart(MemoryStatsOverviewChart);
+ TabHelpers.ApplyDarkModeToChart(MemoryGrantSizingChart);
+ TabHelpers.ApplyDarkModeToChart(MemoryGrantActivityChart);
+ TabHelpers.ApplyDarkModeToChart(MemoryClerksChart);
+ TabHelpers.ApplyDarkModeToChart(PlanCacheChart);
+ TabHelpers.ApplyDarkModeToChart(MemoryPressureEventsChart);
+
_memoryStatsOverviewHover = new Helpers.ChartHoverHelper(MemoryStatsOverviewChart, "MB");
_memoryGrantSizingHover = new Helpers.ChartHoverHelper(MemoryGrantSizingChart, "MB");
_memoryGrantActivityHover = new Helpers.ChartHoverHelper(MemoryGrantActivityChart, "count");
diff --git a/Dashboard/Controls/PlanViewerControl.xaml b/Dashboard/Controls/PlanViewerControl.xaml
index c5e0608..a3610c9 100644
--- a/Dashboard/Controls/PlanViewerControl.xaml
+++ b/Dashboard/Controls/PlanViewerControl.xaml
@@ -27,13 +27,10 @@
-
-
-
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+ PreviewMouseWheel="PlanScrollViewer_PreviewMouseWheel"
+ PreviewMouseLeftButtonDown="PlanScrollViewer_PreviewMouseLeftButtonDown"
+ PreviewMouseMove="PlanScrollViewer_PreviewMouseMove"
+ PreviewMouseLeftButtonUp="PlanScrollViewer_PreviewMouseLeftButtonUp">
-
+
-
-
+
-
diff --git a/Dashboard/Controls/PlanViewerControl.xaml.cs b/Dashboard/Controls/PlanViewerControl.xaml.cs
index 4e1f6e7..fc26e47 100644
--- a/Dashboard/Controls/PlanViewerControl.xaml.cs
+++ b/Dashboard/Controls/PlanViewerControl.xaml.cs
@@ -45,6 +45,12 @@ public partial class PlanViewerControl : UserControl
// Current property section for collapsible groups
private StackPanel? _currentPropertySection;
+ // Canvas panning
+ private bool _isPanning;
+ private Point _panStart;
+ private double _panStartOffsetX;
+ private double _panStartOffsetY;
+
public PlanViewerControl()
{
InitializeComponent();
@@ -81,33 +87,18 @@ public void LoadPlan(string planXml, string label, string? queryText = null)
EmptyState.Visibility = Visibility.Collapsed;
PlanScrollViewer.Visibility = Visibility.Visible;
- // Populate statement selector
+ // Populate statement grid for multi-statement plans
if (allStatements.Count > 1)
{
- StatementSelector.Items.Clear();
- for (int i = 0; i < allStatements.Count; i++)
- {
- var s = allStatements[i];
- var text = s.StatementText.Length > 80
- ? s.StatementText[..80] + "..."
- : s.StatementText;
- if (string.IsNullOrWhiteSpace(text))
- text = $"Statement {i + 1}";
- StatementSelector.Items.Add(new ComboBoxItem
- {
- Content = $"[{s.StatementSubTreeCost:F4}] {text}",
- Tag = i
- });
- }
- StatementSelector.SelectedIndex = 0;
- StatementLabel.Visibility = Visibility.Visible;
- StatementSelector.Visibility = Visibility.Visible;
+ PopulateStatementsGrid(allStatements);
+ ShowStatementsPanel();
CostText.Visibility = Visibility.Visible;
+ // Auto-select first statement to render it
+ if (StatementsGrid.Items.Count > 0)
+ StatementsGrid.SelectedIndex = 0;
}
else
{
- StatementLabel.Visibility = Visibility.Collapsed;
- StatementSelector.Visibility = Visibility.Collapsed;
CostText.Visibility = Visibility.Collapsed;
RenderStatement(allStatements[0]);
}
@@ -123,8 +114,7 @@ public void Clear()
PlanScrollViewer.Visibility = Visibility.Collapsed;
MissingIndexBanner.Visibility = Visibility.Collapsed;
WarningsBanner.Visibility = Visibility.Collapsed;
- StatementLabel.Visibility = Visibility.Collapsed;
- StatementSelector.Visibility = Visibility.Collapsed;
+ CloseStatementsPanel();
CostText.Text = "";
CostText.Visibility = Visibility.Collapsed;
ClosePropertiesPanel();
@@ -698,17 +688,17 @@ private void ShowPropertiesPanel(PlanNode node)
// Timing
if (node.ActualElapsedMs > 0 || node.ActualCPUMs > 0
- || node.UdfCpuTimeUs > 0 || node.UdfElapsedTimeUs > 0)
+ || node.UdfCpuTimeMs > 0 || node.UdfElapsedTimeMs > 0)
{
AddPropertySection("Actual Timing");
if (node.ActualElapsedMs > 0)
AddPropertyRow("Elapsed Time", $"{node.ActualElapsedMs:N0} ms");
if (node.ActualCPUMs > 0)
AddPropertyRow("CPU Time", $"{node.ActualCPUMs:N0} ms");
- if (node.UdfElapsedTimeUs > 0)
- AddPropertyRow("UDF Elapsed", $"{node.UdfElapsedTimeUs:N0} us");
- if (node.UdfCpuTimeUs > 0)
- AddPropertyRow("UDF CPU", $"{node.UdfCpuTimeUs:N0} us");
+ if (node.UdfElapsedTimeMs > 0)
+ AddPropertyRow("UDF Elapsed", $"{node.UdfElapsedTimeMs:N0} ms");
+ if (node.UdfCpuTimeMs > 0)
+ AddPropertyRow("UDF CPU", $"{node.UdfCpuTimeMs:N0} ms");
}
// I/O
@@ -1467,19 +1457,242 @@ private void SavePlan_Click(object sender, RoutedEventArgs e)
}
}
- private void StatementSelector_Changed(object sender, SelectionChangedEventArgs e)
+ private void PopulateStatementsGrid(List statements)
{
- if (StatementSelector.SelectedItem is ComboBoxItem item && item.Tag is int index)
+ StatementsHeader.Text = $"Statements ({statements.Count})";
+
+ var hasActualTimes = statements.Any(s => s.QueryTimeStats != null &&
+ (s.QueryTimeStats.CpuTimeMs > 0 || s.QueryTimeStats.ElapsedTimeMs > 0));
+ var hasUdf = statements.Any(s => s.QueryUdfElapsedTimeMs > 0);
+
+ // Build columns
+ StatementsGrid.Columns.Clear();
+
+ StatementsGrid.Columns.Add(new DataGridTextColumn
{
- var allStatements = _currentPlan?.Batches
- .SelectMany(b => b.Statements)
- .Where(s => s.RootNode != null)
- .ToList();
+ Header = "#",
+ Binding = new System.Windows.Data.Binding("Index"),
+ Width = new DataGridLength(40),
+ IsReadOnly = true
+ });
- if (allStatements != null && index >= 0 && index < allStatements.Count)
- RenderStatement(allStatements[index]);
+ StatementsGrid.Columns.Add(new DataGridTextColumn
+ {
+ Header = "Query",
+ Binding = new System.Windows.Data.Binding("QueryText"),
+ Width = new DataGridLength(1, DataGridLengthUnitType.Star),
+ IsReadOnly = true
+ });
+
+ if (hasActualTimes)
+ {
+ StatementsGrid.Columns.Add(new DataGridTextColumn
+ {
+ Header = "CPU",
+ Binding = new System.Windows.Data.Binding("CpuDisplay"),
+ Width = new DataGridLength(70),
+ IsReadOnly = true,
+ SortMemberPath = "CpuMs"
+ });
+ StatementsGrid.Columns.Add(new DataGridTextColumn
+ {
+ Header = "Elapsed",
+ Binding = new System.Windows.Data.Binding("ElapsedDisplay"),
+ Width = new DataGridLength(70),
+ IsReadOnly = true,
+ SortMemberPath = "ElapsedMs"
+ });
}
+
+ if (hasUdf)
+ {
+ StatementsGrid.Columns.Add(new DataGridTextColumn
+ {
+ Header = "UDF",
+ Binding = new System.Windows.Data.Binding("UdfDisplay"),
+ Width = new DataGridLength(70),
+ IsReadOnly = true,
+ SortMemberPath = "UdfMs"
+ });
+ }
+
+ if (!hasActualTimes)
+ {
+ StatementsGrid.Columns.Add(new DataGridTextColumn
+ {
+ Header = "Est. Cost",
+ Binding = new System.Windows.Data.Binding("CostDisplay"),
+ Width = new DataGridLength(80),
+ IsReadOnly = true,
+ SortMemberPath = "EstCost"
+ });
+ }
+
+ StatementsGrid.Columns.Add(new DataGridTextColumn
+ {
+ Header = "\u26A0 Crit",
+ Binding = new System.Windows.Data.Binding("Critical"),
+ Width = new DataGridLength(55),
+ IsReadOnly = true
+ });
+
+ StatementsGrid.Columns.Add(new DataGridTextColumn
+ {
+ Header = "\u26A0 Warn",
+ Binding = new System.Windows.Data.Binding("Warnings"),
+ Width = new DataGridLength(60),
+ IsReadOnly = true
+ });
+
+ // Build rows
+ var rows = new List();
+ for (int i = 0; i < statements.Count; i++)
+ {
+ var stmt = statements[i];
+ var allWarnings = stmt.PlanWarnings.ToList();
+ if (stmt.RootNode != null)
+ CollectWarnings(stmt.RootNode, allWarnings);
+
+ var text = stmt.StatementText;
+ if (string.IsNullOrWhiteSpace(text))
+ text = $"Statement {i + 1}";
+ if (text.Length > 120)
+ text = text[..120] + "...";
+
+ rows.Add(new StatementRow
+ {
+ Index = i + 1,
+ QueryText = text,
+ CpuMs = stmt.QueryTimeStats?.CpuTimeMs ?? 0,
+ ElapsedMs = stmt.QueryTimeStats?.ElapsedTimeMs ?? 0,
+ UdfMs = stmt.QueryUdfElapsedTimeMs,
+ EstCost = stmt.StatementSubTreeCost,
+ Critical = allWarnings.Count(w => w.Severity == PlanWarningSeverity.Critical),
+ Warnings = allWarnings.Count(w => w.Severity == PlanWarningSeverity.Warning),
+ Statement = stmt
+ });
+ }
+
+ StatementsGrid.ItemsSource = rows;
+ }
+
+ private void StatementsGrid_SelectionChanged(object sender, SelectionChangedEventArgs e)
+ {
+ if (StatementsGrid.SelectedItem is StatementRow row)
+ RenderStatement(row.Statement);
+ }
+
+ private void ToggleStatements_Click(object sender, RoutedEventArgs e)
+ {
+ if (StatementsPanel.Visibility == Visibility.Visible)
+ CloseStatementsPanel();
+ else
+ ShowStatementsPanel();
+ }
+
+ private void CloseStatements_Click(object sender, RoutedEventArgs e)
+ {
+ CloseStatementsPanel();
+ }
+
+ private void ShowStatementsPanel()
+ {
+ StatementsColumn.Width = new GridLength(450);
+ StatementsSplitterColumn.Width = new GridLength(5);
+ StatementsSplitter.Visibility = Visibility.Visible;
+ StatementsPanel.Visibility = Visibility.Visible;
+ StatementsButton.Visibility = Visibility.Visible;
+ StatementsButtonSeparator.Visibility = Visibility.Visible;
+ }
+
+ private void CloseStatementsPanel()
+ {
+ StatementsPanel.Visibility = Visibility.Collapsed;
+ StatementsSplitter.Visibility = Visibility.Collapsed;
+ StatementsColumn.Width = new GridLength(0);
+ StatementsSplitterColumn.Width = new GridLength(0);
+ }
+
+ #endregion
+
+ #region Canvas Panning
+
+ private void PlanScrollViewer_PreviewMouseLeftButtonDown(object sender, MouseButtonEventArgs e)
+ {
+ // Don't pan if clicking on a node
+ if (IsNodeAtPoint(e))
+ return;
+
+ _isPanning = true;
+ _panStart = e.GetPosition(PlanScrollViewer);
+ _panStartOffsetX = PlanScrollViewer.HorizontalOffset;
+ _panStartOffsetY = PlanScrollViewer.VerticalOffset;
+ PlanScrollViewer.Cursor = Cursors.SizeAll;
+ PlanScrollViewer.CaptureMouse();
+ e.Handled = true;
+ }
+
+ private void PlanScrollViewer_PreviewMouseMove(object sender, MouseEventArgs e)
+ {
+ if (!_isPanning) return;
+
+ var current = e.GetPosition(PlanScrollViewer);
+ var dx = current.X - _panStart.X;
+ var dy = current.Y - _panStart.Y;
+
+ PlanScrollViewer.ScrollToHorizontalOffset(Math.Max(0, _panStartOffsetX - dx));
+ PlanScrollViewer.ScrollToVerticalOffset(Math.Max(0, _panStartOffsetY - dy));
+ e.Handled = true;
+ }
+
+ private void PlanScrollViewer_PreviewMouseLeftButtonUp(object sender, MouseButtonEventArgs e)
+ {
+ if (!_isPanning) return;
+ _isPanning = false;
+ PlanScrollViewer.Cursor = Cursors.Arrow;
+ PlanScrollViewer.ReleaseMouseCapture();
+ e.Handled = true;
+ }
+
+ /// Check if the mouse event originated from a node Border (has PlanNode in Tag).
+ private static bool IsNodeAtPoint(MouseButtonEventArgs e)
+ {
+ var source = e.OriginalSource as DependencyObject;
+ while (source != null)
+ {
+ if (source is Border b && b.Tag is PlanNode)
+ return true;
+ source = VisualTreeHelper.GetParent(source);
+ }
+ return false;
}
#endregion
}
+
+/// Data model for the statement DataGrid rows.
+public class StatementRow
+{
+ public int Index { get; set; }
+ public string QueryText { get; set; } = "";
+ public long CpuMs { get; set; }
+ public long ElapsedMs { get; set; }
+ public long UdfMs { get; set; }
+ public double EstCost { get; set; }
+ public int Critical { get; set; }
+ public int Warnings { get; set; }
+ public PlanStatement Statement { get; set; } = null!;
+
+ // Display helpers — grid binds to these, sorting uses the raw properties via SortMemberPath
+ public string CpuDisplay => FormatDuration(CpuMs);
+ public string ElapsedDisplay => FormatDuration(ElapsedMs);
+ public string UdfDisplay => UdfMs > 0 ? FormatDuration(UdfMs) : "";
+ public string CostDisplay => EstCost > 0 ? $"{EstCost:F2}" : "";
+
+ private static string FormatDuration(long ms)
+ {
+ if (ms < 1000) return $"{ms}ms";
+ if (ms < 60_000) return $"{ms / 1000.0:F1}s";
+ return $"{ms / 60_000}m {(ms % 60_000) / 1000}s";
+ }
+}
diff --git a/Dashboard/Controls/ResourceMetricsContent.xaml.cs b/Dashboard/Controls/ResourceMetricsContent.xaml.cs
index 9c8c889..aa3def9 100644
--- a/Dashboard/Controls/ResourceMetricsContent.xaml.cs
+++ b/Dashboard/Controls/ResourceMetricsContent.xaml.cs
@@ -109,6 +109,23 @@ public ResourceMetricsContent()
SetupChartContextMenus();
Loaded += OnLoaded;
+ // Apply dark theme immediately so charts don't flash white before data loads
+ TabHelpers.ApplyDarkModeToChart(LatchStatsChart);
+ TabHelpers.ApplyDarkModeToChart(SpinlockStatsChart);
+ TabHelpers.ApplyDarkModeToChart(TempdbStatsChart);
+ TabHelpers.ApplyDarkModeToChart(TempDbLatencyChart);
+ TabHelpers.ApplyDarkModeToChart(SessionStatsChart);
+ TabHelpers.ApplyDarkModeToChart(UserDbReadLatencyChart);
+ TabHelpers.ApplyDarkModeToChart(UserDbWriteLatencyChart);
+ TabHelpers.ApplyDarkModeToChart(FileIoReadThroughputChart);
+ TabHelpers.ApplyDarkModeToChart(FileIoWriteThroughputChart);
+ TabHelpers.ApplyDarkModeToChart(PerfmonCountersChart);
+ TabHelpers.ApplyDarkModeToChart(WaitStatsDetailChart);
+ TabHelpers.ApplyDarkModeToChart(ServerUtilTrendsCpuChart);
+ TabHelpers.ApplyDarkModeToChart(ServerUtilTrendsTempdbChart);
+ TabHelpers.ApplyDarkModeToChart(ServerUtilTrendsMemoryChart);
+ TabHelpers.ApplyDarkModeToChart(ServerUtilTrendsPerfmonChart);
+
_sessionStatsHover = new Helpers.ChartHoverHelper(SessionStatsChart, "sessions");
_latchStatsHover = new Helpers.ChartHoverHelper(LatchStatsChart, "ms/sec");
_spinlockStatsHover = new Helpers.ChartHoverHelper(SpinlockStatsChart, "collisions/sec");
diff --git a/Dashboard/Controls/SystemEventsContent.xaml.cs b/Dashboard/Controls/SystemEventsContent.xaml.cs
index 4778838..d4dd73a 100644
--- a/Dashboard/Controls/SystemEventsContent.xaml.cs
+++ b/Dashboard/Controls/SystemEventsContent.xaml.cs
@@ -130,6 +130,27 @@ public SystemEventsContent()
Loaded += OnLoaded;
Unloaded += OnUnloaded;
+ // Apply dark theme immediately so charts don't flash white before data loads
+ TabHelpers.ApplyDarkModeToChart(BadPagesChart);
+ TabHelpers.ApplyDarkModeToChart(DumpRequestsChart);
+ TabHelpers.ApplyDarkModeToChart(AccessViolationsChart);
+ TabHelpers.ApplyDarkModeToChart(WriteAccessViolationsChart);
+ TabHelpers.ApplyDarkModeToChart(NonYieldingTasksChart);
+ TabHelpers.ApplyDarkModeToChart(LatchWarningsChart);
+ TabHelpers.ApplyDarkModeToChart(SickSpinlocksChart);
+ TabHelpers.ApplyDarkModeToChart(CpuComparisonChart);
+ TabHelpers.ApplyDarkModeToChart(SevereErrorsChart);
+ TabHelpers.ApplyDarkModeToChart(IOIssuesChart);
+ TabHelpers.ApplyDarkModeToChart(LongestPendingIOChart);
+ TabHelpers.ApplyDarkModeToChart(SchedulerIssuesChart);
+ TabHelpers.ApplyDarkModeToChart(MemoryConditionsChart);
+ TabHelpers.ApplyDarkModeToChart(CPUTasksChart);
+ TabHelpers.ApplyDarkModeToChart(MemoryBrokerChart);
+ TabHelpers.ApplyDarkModeToChart(MemoryBrokerRatioChart);
+ TabHelpers.ApplyDarkModeToChart(MemoryNodeOOMChart);
+ TabHelpers.ApplyDarkModeToChart(MemoryNodeOOMUtilChart);
+ TabHelpers.ApplyDarkModeToChart(MemoryNodeOOMMemoryChart);
+
_badPagesHover = new Helpers.ChartHoverHelper(BadPagesChart, "events");
_dumpRequestsHover = new Helpers.ChartHoverHelper(DumpRequestsChart, "events");
_accessViolationsHover = new Helpers.ChartHoverHelper(AccessViolationsChart, "events");
diff --git a/Dashboard/Helpers/ChartHoverHelper.cs b/Dashboard/Helpers/ChartHoverHelper.cs
index dd0e712..6318040 100644
--- a/Dashboard/Helpers/ChartHoverHelper.cs
+++ b/Dashboard/Helpers/ChartHoverHelper.cs
@@ -71,9 +71,10 @@ private void OnMouseMove(object sender, MouseEventArgs e)
try
{
var pos = e.GetPosition(_chart);
+ var dpi = VisualTreeHelper.GetDpi(_chart);
var pixel = new ScottPlot.Pixel(
- (float)(pos.X * _chart.DisplayScale),
- (float)(pos.Y * _chart.DisplayScale));
+ (float)(pos.X * dpi.DpiScaleX),
+ (float)(pos.Y * dpi.DpiScaleY));
var mouseCoords = _chart.Plot.GetCoordinates(pixel);
/* Use X-axis (time) proximity as the primary filter, Y-axis distance
diff --git a/Dashboard/Models/PlanModels.cs b/Dashboard/Models/PlanModels.cs
index c6a6f87..6ba0390 100644
--- a/Dashboard/Models/PlanModels.cs
+++ b/Dashboard/Models/PlanModels.cs
@@ -272,8 +272,8 @@ public class PlanNode
public string? ActionColumn { get; set; }
public long ActualSegmentReads { get; set; }
public long ActualSegmentSkips { get; set; }
- public long UdfCpuTimeUs { get; set; }
- public long UdfElapsedTimeUs { get; set; }
+ public long UdfCpuTimeMs { get; set; }
+ public long UdfElapsedTimeMs { get; set; }
// XSD gap: RelOp-level metadata
public bool GroupExecuted { get; set; }
diff --git a/Dashboard/Services/PlanAnalyzer.cs b/Dashboard/Services/PlanAnalyzer.cs
index 89a6568..f49abb3 100644
--- a/Dashboard/Services/PlanAnalyzer.cs
+++ b/Dashboard/Services/PlanAnalyzer.cs
@@ -1,6 +1,7 @@
using System;
using System.Collections.Generic;
using System.Linq;
+using System.Text.RegularExpressions;
using PerformanceMonitorDashboard.Models;
namespace PerformanceMonitorDashboard.Services;
@@ -11,6 +12,23 @@ namespace PerformanceMonitorDashboard.Services;
///
public static class PlanAnalyzer
{
+ private static readonly Regex FunctionInPredicateRegex = new(
+ @"\b(CONVERT_IMPLICIT|CONVERT|CAST|isnull|coalesce|datepart|datediff|dateadd|year|month|day|upper|lower|ltrim|rtrim|trim|substring|left|right|charindex|replace|len|datalength|abs|floor|ceiling|round|reverse|stuff|format)\s*\(",
+ RegexOptions.IgnoreCase | RegexOptions.Compiled);
+
+ private static readonly Regex LeadingWildcardLikeRegex = new(
+ @"\blike\b[^'""]*?N?'%",
+ RegexOptions.IgnoreCase | RegexOptions.Compiled);
+
+ private static readonly Regex CaseInPredicateRegex = new(
+ @"\bCASE\s+(WHEN\b|$)",
+ RegexOptions.IgnoreCase | RegexOptions.Compiled);
+
+ // Matches CTE definitions: WITH name AS ( or , name AS (
+ private static readonly Regex CteDefinitionRegex = new(
+ @"(?:\bWITH\s+|\,\s*)(\w+)\s+AS\s*\(",
+ RegexOptions.IgnoreCase | RegexOptions.Compiled);
+
public static void Analyze(ParsedPlan plan)
{
foreach (var batch in plan.Batches)
@@ -20,7 +38,7 @@ public static void Analyze(ParsedPlan plan)
AnalyzeStatement(stmt);
if (stmt.RootNode != null)
- AnalyzeNodeTree(stmt.RootNode);
+ AnalyzeNodeTree(stmt.RootNode, stmt);
}
}
}
@@ -57,7 +75,7 @@ private static void AnalyzeStatement(PlanStatement stmt)
if (grant.GrantedMemoryKB > 0 && grant.MaxUsedMemoryKB > 0)
{
var wasteRatio = (double)grant.GrantedMemoryKB / grant.MaxUsedMemoryKB;
- if (wasteRatio >= 10 && grant.GrantedMemoryKB > 1024)
+ if (wasteRatio >= 10 && grant.GrantedMemoryKB >= 1048576)
{
stmt.PlanWarnings.Add(new PlanWarning
{
@@ -78,18 +96,102 @@ private static void AnalyzeStatement(PlanStatement stmt)
Severity = grant.GrantWaitTimeMs >= 5000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
});
}
+
+ // Large memory grant with sort/hash guidance
+ if (grant.GrantedMemoryKB >= 1048576 && stmt.RootNode != null)
+ {
+ var consumers = new List();
+ FindMemoryConsumers(stmt.RootNode, consumers);
+
+ var grantMB = grant.GrantedMemoryKB / 1024.0;
+ var guidance = consumers.Count > 0
+ ? $" Memory consumers: {string.Join(", ", consumers)}. Check whether these operators are processing more rows than necessary."
+ : "";
+
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "Large Memory Grant",
+ Message = $"Query granted {grantMB:F0} MB of memory.{guidance}",
+ Severity = grantMB >= 4096 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
+ });
+ }
+ }
+
+ // Rule 18: Compile memory exceeded (early abort)
+ if (stmt.StatementOptmEarlyAbortReason == "MemoryLimitExceeded")
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "Compile Memory Exceeded",
+ Message = "Optimization was aborted early because the compile memory limit was exceeded. The plan may be suboptimal. Simplify the query or break it into smaller parts.",
+ Severity = PlanWarningSeverity.Critical
+ });
+ }
+
+ // Rule 19: High compile CPU
+ if (stmt.CompileCPUMs >= 1000)
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "High Compile CPU",
+ Message = $"Query took {stmt.CompileCPUMs:N0}ms of CPU to compile. Complex queries with many joins or subqueries can cause excessive compile time.",
+ Severity = stmt.CompileCPUMs >= 5000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
+ });
+ }
+
+ // Rule 4 (statement-level): UDF execution timing from QueryTimeStats
+ // Some plans report UDF timing only at the statement level, not per-node.
+ if (stmt.QueryUdfCpuTimeMs > 0 || stmt.QueryUdfElapsedTimeMs > 0)
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "UDF Execution",
+ Message = $"Scalar UDF executing in this statement. UDF elapsed: {stmt.QueryUdfElapsedTimeMs:N0}ms, UDF CPU: {stmt.QueryUdfCpuTimeMs:N0}ms",
+ Severity = stmt.QueryUdfElapsedTimeMs >= 1000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
+ });
+ }
+
+ // Rule 20: Local variables without RECOMPILE
+ // Parameters with no CompiledValue are likely local variables — the optimizer
+ // cannot sniff their values and uses density-based ("unknown") estimates.
+ if (stmt.Parameters.Count > 0)
+ {
+ var unsniffedParams = stmt.Parameters
+ .Where(p => string.IsNullOrEmpty(p.CompiledValue))
+ .ToList();
+
+ if (unsniffedParams.Count > 0)
+ {
+ var hasRecompile = stmt.StatementText.Contains("RECOMPILE", StringComparison.OrdinalIgnoreCase);
+ if (!hasRecompile)
+ {
+ var names = string.Join(", ", unsniffedParams.Select(p => p.Name));
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "Local Variables",
+ Message = $"Parameters without compiled values detected: {names}. These are likely local variables, which cause the optimizer to use density-based (\"unknown\") estimates. Consider using OPTION (RECOMPILE) or rewriting with parameters.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ }
+ }
+
+ // Rule 21: CTE referenced multiple times
+ if (!string.IsNullOrEmpty(stmt.StatementText))
+ {
+ DetectMultiReferenceCte(stmt);
}
}
- private static void AnalyzeNodeTree(PlanNode node)
+ private static void AnalyzeNodeTree(PlanNode node, PlanStatement stmt)
{
- AnalyzeNode(node);
+ AnalyzeNode(node, stmt);
foreach (var child in node.Children)
- AnalyzeNodeTree(child);
+ AnalyzeNodeTree(child, stmt);
}
- private static void AnalyzeNode(PlanNode node)
+ private static void AnalyzeNode(PlanNode node, PlanStatement stmt)
{
// Rule 1: Filter operators — rows survived the tree just to be discarded
if (node.PhysicalOp == "Filter" && !string.IsNullOrEmpty(node.Predicate))
@@ -119,33 +221,43 @@ private static void AnalyzeNode(PlanNode node)
}
// Rule 4: UDF timing — any node spending time in UDFs (actual plans)
- if (node.UdfCpuTimeUs > 0 || node.UdfElapsedTimeUs > 0)
+ if (node.UdfCpuTimeMs > 0 || node.UdfElapsedTimeMs > 0)
{
- var cpuMs = node.UdfCpuTimeUs / 1000.0;
- var elapsedMs = node.UdfElapsedTimeUs / 1000.0;
node.Warnings.Add(new PlanWarning
{
WarningType = "UDF Execution",
- Message = $"Scalar UDF executing on this operator. UDF elapsed: {elapsedMs:F1}ms, UDF CPU: {cpuMs:F1}ms",
- Severity = elapsedMs >= 1000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
+ Message = $"Scalar UDF executing on this operator. UDF elapsed: {node.UdfElapsedTimeMs:N0}ms, UDF CPU: {node.UdfCpuTimeMs:N0}ms",
+ Severity = node.UdfElapsedTimeMs >= 1000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
});
}
// Rule 5: Large estimate vs actual row gaps (actual plans only)
if (node.HasActualStats && node.EstimateRows > 0)
{
- var ratio = node.ActualRows / node.EstimateRows;
- if (ratio >= 10.0 || ratio <= 0.1)
+ if (node.ActualRows == 0)
{
- var direction = ratio >= 10.0 ? "underestimated" : "overestimated";
- var factor = ratio >= 10.0 ? ratio : 1.0 / ratio;
node.Warnings.Add(new PlanWarning
{
WarningType = "Row Estimate Mismatch",
- Message = $"Estimated {node.EstimateRows:N0} rows, actual {node.ActualRows:N0} ({factor:F0}x {direction}). May cause poor plan choices.",
- Severity = factor >= 100 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
+ Message = $"Estimated {node.EstimateRows:N0} rows, actual 0 rows returned. May cause poor plan choices.",
+ Severity = node.EstimateRows >= 100 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
});
}
+ else
+ {
+ var ratio = node.ActualRows / node.EstimateRows;
+ if (ratio >= 10.0 || ratio <= 0.1)
+ {
+ var direction = ratio >= 10.0 ? "underestimated" : "overestimated";
+ var factor = ratio >= 10.0 ? ratio : 1.0 / ratio;
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Row Estimate Mismatch",
+ Message = $"Estimated {node.EstimateRows:N0} rows, actual {node.ActualRows:N0} ({factor:F0}x {direction}). May cause poor plan choices.",
+ Severity = factor >= 100 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
+ });
+ }
+ }
}
// Rule 6: Scalar UDF references (works on estimated plans too)
@@ -160,22 +272,40 @@ private static void AnalyzeNode(PlanNode node)
});
}
- // Rule 7: Spill detection — promote severity for large spills
+ // Rule 7: Spill detection — calculate operator time and set severity
+ // based on what percentage of statement elapsed time the spill accounts for
foreach (var w in node.Warnings.ToList())
{
- if (w.SpillDetails != null && w.SpillDetails.WritesToTempDb > 1000)
- w.Severity = PlanWarningSeverity.Critical;
+ if (w.SpillDetails != null && node.ActualElapsedMs > 0)
+ {
+ var operatorMs = GetOperatorOwnElapsedMs(node);
+ var stmtMs = stmt.QueryTimeStats?.ElapsedTimeMs ?? 0;
+
+ if (stmtMs > 0)
+ {
+ var pct = (double)operatorMs / stmtMs;
+ w.Message += $" Operator time: {operatorMs:N0}ms ({pct:P0} of statement).";
+
+ if (pct >= 0.5)
+ w.Severity = PlanWarningSeverity.Critical;
+ else if (pct >= 0.1)
+ w.Severity = PlanWarningSeverity.Warning;
+ }
+ }
}
// Rule 8: Parallel thread skew (actual plans with per-thread stats)
+ // Only warn when there are enough rows to meaningfully distribute across threads
if (node.PerThreadStats.Count > 1)
{
var totalRows = node.PerThreadStats.Sum(t => t.ActualRows);
- if (totalRows > 0)
+ var minRowsForSkew = node.PerThreadStats.Count * 1000;
+ if (totalRows >= minRowsForSkew)
{
var maxThread = node.PerThreadStats.OrderByDescending(t => t.ActualRows).First();
var skewRatio = (double)maxThread.ActualRows / totalRows;
- if (skewRatio >= 0.9 && node.PerThreadStats.Count >= 4)
+ var skewThreshold = node.PerThreadStats.Count == 2 ? 0.75 : 0.50;
+ if (skewRatio >= skewThreshold)
{
node.Warnings.Add(new PlanWarning
{
@@ -198,10 +328,20 @@ private static void AnalyzeNode(PlanNode node)
});
}
- // Rule 11: Scan with residual predicate (not spools)
- if (node.PhysicalOp.Contains("Scan", StringComparison.OrdinalIgnoreCase) &&
- !node.PhysicalOp.Contains("Spool", StringComparison.OrdinalIgnoreCase) &&
- !string.IsNullOrEmpty(node.Predicate))
+ // Rule 12: Non-SARGable predicate on scan
+ var nonSargableReason = DetectNonSargablePredicate(node);
+ if (nonSargableReason != null)
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Non-SARGable Predicate",
+ Message = $"{nonSargableReason} prevents index seek, forcing a scan. Fix the predicate or add a computed column with an index. Predicate: {Truncate(node.Predicate!, 200)}",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+
+ // Rule 11: Scan with residual predicate (skip if non-SARGable already flagged)
+ if (nonSargableReason == null && IsRowstoreScan(node) && !string.IsNullOrEmpty(node.Predicate))
{
node.Warnings.Add(new PlanWarning
{
@@ -210,6 +350,350 @@ private static void AnalyzeNode(PlanNode node)
Severity = PlanWarningSeverity.Warning
});
}
+
+ // Rule 13: Mismatched data types (GetRangeWithMismatchedTypes / GetRangeThroughConvert)
+ if (node.PhysicalOp == "Compute Scalar" && !string.IsNullOrEmpty(node.DefinedValues))
+ {
+ var hasMismatch = node.DefinedValues.Contains("GetRangeWithMismatchedTypes", StringComparison.OrdinalIgnoreCase);
+ var hasConvert = node.DefinedValues.Contains("GetRangeThroughConvert", StringComparison.OrdinalIgnoreCase);
+
+ if (hasMismatch || hasConvert)
+ {
+ var reason = hasMismatch
+ ? "Implicit conversion due to mismatched data types. The column type does not match the parameter or literal type, forcing SQL Server to convert values at runtime. Fix the parameter type to match the column."
+ : "Implicit conversion through CONVERT/CAST on a column. SQL Server must convert values at runtime, which can prevent index seeks. Remove the conversion or add a computed column.";
+
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Data Type Mismatch",
+ Message = reason,
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ }
+
+ // Rule 14: Lazy Table Spool unfavorable rebind/rewind ratio
+ // Rebinds = cache misses (child re-executes), rewinds = cache hits (reuse cached result)
+ if (node.LogicalOp == "Lazy Spool")
+ {
+ var rebinds = node.HasActualStats ? (double)node.ActualRebinds : node.EstimateRebinds;
+ var rewinds = node.HasActualStats ? (double)node.ActualRewinds : node.EstimateRewinds;
+ var source = node.HasActualStats ? "actual" : "estimated";
+
+ if (rebinds > 100 && rewinds < rebinds * 5)
+ {
+ var severity = rewinds < rebinds
+ ? PlanWarningSeverity.Critical
+ : PlanWarningSeverity.Warning;
+
+ var ratio = rewinds > 0
+ ? $"{rewinds / rebinds:F1}x rewinds (cache hits) per rebind (cache miss)"
+ : "no rewinds (cache hits) at all";
+
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Lazy Spool Ineffective",
+ Message = $"Lazy spool has low cache hit ratio ({source}): {rebinds:N0} rebinds, {rewinds:N0} rewinds — {ratio}. The spool cache is not earning its overhead.",
+ Severity = severity
+ });
+ }
+ }
+
+ // Rule 15: Join OR clause (Concatenation + Constant Scan under join/Merge Interval)
+ // Best signal: Merge Interval → TopN Sort → Concatenation → Constant Scans
+ // Also fires under a join ancestor (broader catch)
+ if (node.PhysicalOp == "Concatenation")
+ {
+ var constantScanBranches = node.Children
+ .Count(c => c.PhysicalOp == "Constant Scan" ||
+ c.Children.Any(gc => gc.PhysicalOp == "Constant Scan"));
+
+ if (constantScanBranches >= 2 && (HasMergeIntervalAncestor(node) || HasJoinAncestor(node)))
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Join OR Clause",
+ Message = $"OR clause expansion in a join predicate. SQL Server rewrote the OR as {constantScanBranches} separate branches (Concatenation of Constant Scans), each evaluated independently. This pattern often causes excessive inner-side executions.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ }
+
+ // Rule 16: Nested Loops high inner-side execution count
+ if (node.PhysicalOp == "Nested Loops" &&
+ node.LogicalOp.Contains("Join", StringComparison.OrdinalIgnoreCase) &&
+ node.Children.Count >= 2)
+ {
+ var innerChild = node.Children[1];
+
+ if (innerChild.HasActualStats && innerChild.ActualExecutions > 1000)
+ {
+ var dop = stmt.DegreeOfParallelism > 0 ? stmt.DegreeOfParallelism : 1;
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Nested Loops High Executions",
+ Message = $"Nested Loops inner side executed {innerChild.ActualExecutions:N0} times (DOP {dop}). A Hash Join or Merge Join may be more efficient for this row count.",
+ Severity = innerChild.ActualExecutions > 100000
+ ? PlanWarningSeverity.Critical
+ : PlanWarningSeverity.Warning
+ });
+ }
+ else if (!innerChild.HasActualStats && innerChild.EstimateRebinds > 1000)
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Nested Loops High Executions",
+ Message = $"Nested Loops inner side estimated to execute {innerChild.EstimateRebinds + 1:N0} times. A Hash Join or Merge Join may be more efficient for this row count.",
+ Severity = innerChild.EstimateRebinds > 100000
+ ? PlanWarningSeverity.Critical
+ : PlanWarningSeverity.Warning
+ });
+ }
+ }
+
+ // Rule 17: Many-to-many Merge Join
+ if (node.ManyToMany && node.PhysicalOp.Contains("Merge", StringComparison.OrdinalIgnoreCase))
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Many-to-Many Merge Join",
+ Message = "Many-to-many Merge Join requires a worktable to handle duplicate values. This can be expensive with large numbers of duplicates.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+
+ // Rule 22: Table variables (Object name starts with @)
+ if (!string.IsNullOrEmpty(node.ObjectName) &&
+ node.ObjectName.Contains("@"))
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Table Variable",
+ Message = "Table variable detected. Table variables have no statistics, so the optimizer always estimates 1 row regardless of actual cardinality. Consider using a temp table (#table) for better estimates.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+
+ // Rule 23: Table-valued functions
+ if (node.LogicalOp == "Table-valued function")
+ {
+ var funcName = node.ObjectName ?? node.PhysicalOp;
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Table-Valued Function",
+ Message = $"Table-valued function: {funcName}. Multi-statement TVFs have no statistics and a fixed estimate of 1 row (pre-2017) or 100 rows (2017+). Consider inlining the logic or using an inline TVF.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+
+ // Rule 24: Top above a scan on the inner side of Nested Loops
+ // This pattern means the scan executes once per outer row, and the Top
+ // limits each iteration — but with no supporting index the scan is a
+ // linear search repeated potentially millions of times.
+ if (node.PhysicalOp == "Nested Loops" && node.Children.Count >= 2)
+ {
+ var inner = node.Children[1];
+
+ // Walk through pass-through operators to find Top
+ while (inner.PhysicalOp == "Compute Scalar" && inner.Children.Count > 0)
+ inner = inner.Children[0];
+
+ if (inner.PhysicalOp == "Top" && inner.Children.Count > 0)
+ {
+ // Walk through pass-through operators below the Top to find the scan
+ var scanCandidate = inner.Children[0];
+ while (scanCandidate.PhysicalOp == "Compute Scalar" && scanCandidate.Children.Count > 0)
+ scanCandidate = scanCandidate.Children[0];
+
+ if (IsRowstoreScan(scanCandidate))
+ {
+ var predInfo = !string.IsNullOrEmpty(scanCandidate.Predicate)
+ ? " The scan has a residual predicate, so it may read many rows before the Top is satisfied."
+ : "";
+ inner.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Top Above Scan",
+ Message = $"Top operator reads from {scanCandidate.PhysicalOp} (Node {scanCandidate.NodeId}) on the inner side of Nested Loops (Node {node.NodeId}).{predInfo} An index supporting the filter and ordering may convert this to a seek.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ }
+ }
+ }
+
+ /// <summary>
+ /// Returns true for rowstore scan operators (Index Scan, Clustered Index Scan,
+ /// Table Scan). Excludes columnstore scans, spools, and constant scans.
+ /// </summary>
+ private static bool IsRowstoreScan(PlanNode node)
+ {
+ return node.PhysicalOp.Contains("Scan", StringComparison.OrdinalIgnoreCase) &&
+ !node.PhysicalOp.Contains("Spool", StringComparison.OrdinalIgnoreCase) &&
+ !node.PhysicalOp.Contains("Constant", StringComparison.OrdinalIgnoreCase) &&
+ !node.PhysicalOp.Contains("Columnstore", StringComparison.OrdinalIgnoreCase);
+ }
+
+ /// <summary>
+ /// Detects non-SARGable patterns in scan predicates.
+ /// Returns a description of the issue, or null if the predicate is fine.
+ /// </summary>
+ private static string? DetectNonSargablePredicate(PlanNode node)
+ {
+ if (string.IsNullOrEmpty(node.Predicate))
+ return null;
+
+ // Only check rowstore scan operators — columnstore is designed to be scanned
+ if (!IsRowstoreScan(node))
+ return null;
+
+ var predicate = node.Predicate;
+
+ // CASE expression in predicate — check first because CASE bodies
+ // often contain CONVERT_IMPLICIT that isn't the root cause
+ if (CaseInPredicateRegex.IsMatch(predicate))
+ return "CASE expression in predicate";
+
+ // CONVERT_IMPLICIT — most common non-SARGable pattern
+ if (predicate.Contains("CONVERT_IMPLICIT", StringComparison.OrdinalIgnoreCase))
+ return "Implicit conversion (CONVERT_IMPLICIT)";
+
+ // ISNULL / COALESCE wrapping column
+ if (Regex.IsMatch(predicate, @"\b(isnull|coalesce)\s*\(", RegexOptions.IgnoreCase))
+ return "ISNULL/COALESCE wrapping column";
+
+ // Common function calls on columns
+ var funcMatch = FunctionInPredicateRegex.Match(predicate);
+ if (funcMatch.Success)
+ {
+ var funcName = funcMatch.Groups[1].Value.ToUpperInvariant();
+ if (funcName != "CONVERT_IMPLICIT")
+ return $"Function call ({funcName}) on column";
+ }
+
+ // Leading wildcard LIKE
+ if (LeadingWildcardLikeRegex.IsMatch(predicate))
+ return "Leading wildcard LIKE pattern";
+
+ return null;
+ }
+
+ /// <summary>
+ /// Detects CTEs that are referenced more than once in the statement text.
+ /// Each reference re-executes the CTE since SQL Server does not materialize them.
+ /// </summary>
+ private static void DetectMultiReferenceCte(PlanStatement stmt)
+ {
+ var text = stmt.StatementText;
+ var cteMatches = CteDefinitionRegex.Matches(text);
+ if (cteMatches.Count == 0)
+ return;
+
+ foreach (Match match in cteMatches)
+ {
+ var cteName = match.Groups[1].Value;
+ if (string.IsNullOrEmpty(cteName))
+ continue;
+
+ // Count references as FROM/JOIN targets after the CTE definition
+ var refPattern = new Regex(
+ $@"\b(FROM|JOIN)\s+{Regex.Escape(cteName)}\b",
+ RegexOptions.IgnoreCase);
+ var refCount = refPattern.Matches(text).Count;
+
+ if (refCount > 1)
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "CTE Multiple References",
+ Message = $"CTE \"{cteName}\" is referenced {refCount} times. SQL Server does not materialize CTEs — each reference re-executes the entire CTE query. Consider materializing into a temp table.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ }
+ }
+
+ /// <summary>
+ /// Checks whether a node has a Merge Interval ancestor (OR expansion pattern).
+ /// </summary>
+ private static bool HasMergeIntervalAncestor(PlanNode node)
+ {
+ var ancestor = node.Parent;
+ while (ancestor != null)
+ {
+ if (ancestor.PhysicalOp == "Merge Interval")
+ return true;
+ ancestor = ancestor.Parent;
+ }
+ return false;
+ }
+
+ /// <summary>
+ /// Checks whether a node has any join ancestor.
+ /// </summary>
+ private static bool HasJoinAncestor(PlanNode node)
+ {
+ var ancestor = node.Parent;
+ while (ancestor != null)
+ {
+ if (ancestor.LogicalOp != null &&
+ ancestor.LogicalOp.Contains("Join", StringComparison.OrdinalIgnoreCase))
+ return true;
+ ancestor = ancestor.Parent;
+ }
+ return false;
+ }
+
+ /// <summary>
+ /// Finds Sort and Hash Match operators in the tree that consume memory.
+ /// </summary>
+ private static void FindMemoryConsumers(PlanNode node, List<string> consumers)
+ {
+ if (node.PhysicalOp.Contains("Sort", StringComparison.OrdinalIgnoreCase) &&
+ !node.PhysicalOp.Contains("Spool", StringComparison.OrdinalIgnoreCase))
+ {
+ var rows = node.HasActualStats
+ ? $"{node.ActualRows:N0} actual rows"
+ : $"{node.EstimateRows:N0} estimated rows";
+ consumers.Add($"Sort (Node {node.NodeId}, {rows})");
+ }
+ else if (node.PhysicalOp.Contains("Hash", StringComparison.OrdinalIgnoreCase))
+ {
+ var rows = node.HasActualStats
+ ? $"{node.ActualRows:N0} actual rows"
+ : $"{node.EstimateRows:N0} estimated rows";
+ consumers.Add($"Hash Match (Node {node.NodeId}, {rows})");
+ }
+
+ foreach (var child in node.Children)
+ FindMemoryConsumers(child, consumers);
+ }
+
+ /// <summary>
+ /// Calculates an operator's own elapsed time by subtracting child time.
+ /// In batch mode, operator times are self-contained. In row mode, times are
+ /// cumulative (include children), so we subtract the dominant child's time.
+ /// Parallelism (exchange) operators are skipped because they have timing bugs.
+ /// </summary>
+ private static long GetOperatorOwnElapsedMs(PlanNode node)
+ {
+ if (node.ActualExecutionMode == "Batch")
+ return node.ActualElapsedMs;
+
+ // Row mode: subtract the dominant child's elapsed time
+ var maxChildElapsed = 0L;
+ foreach (var child in node.Children)
+ {
+ var childElapsed = child.ActualElapsedMs;
+
+ // Exchange operators have timing bugs — skip to their child
+ if (child.PhysicalOp == "Parallelism" && child.Children.Count > 0)
+ childElapsed = child.Children.Max(c => c.ActualElapsedMs);
+
+ if (childElapsed > maxChildElapsed)
+ maxChildElapsed = childElapsed;
+ }
+
+ return Math.Max(0, node.ActualElapsedMs - maxChildElapsed);
}
private static string Truncate(string value, int maxLength)
diff --git a/Dashboard/Services/ShowPlanParser.cs b/Dashboard/Services/ShowPlanParser.cs
index 6dfabd3..f367419 100644
--- a/Dashboard/Services/ShowPlanParser.cs
+++ b/Dashboard/Services/ShowPlanParser.cs
@@ -1179,8 +1179,8 @@ private static PlanNode ParseRelOp(XElement relOpEl)
node.ActualExecutionMode = actualExecMode;
node.ActualSegmentReads = totalSegmentReads;
node.ActualSegmentSkips = totalSegmentSkips;
- node.UdfCpuTimeUs = totalUdfCpu;
- node.UdfElapsedTimeUs = maxUdfElapsed;
+ node.UdfCpuTimeMs = totalUdfCpu;
+ node.UdfElapsedTimeMs = maxUdfElapsed;
// Store per-thread data for parallel skew analysis
foreach (var thread in runtimeEl.Elements(Ns + "RunTimeCountersPerThread"))
diff --git a/Installer/Program.cs b/Installer/Program.cs
index a47ed83..f00279b 100644
--- a/Installer/Program.cs
+++ b/Installer/Program.cs
@@ -95,6 +95,7 @@ static async Task Main(string[] args)
Console.WriteLine(" -h, --help Show this help message");
Console.WriteLine(" --reinstall Drop existing database and perform clean install");
Console.WriteLine(" --reset-schedule Reset collection schedule to recommended defaults");
+ Console.WriteLine(" --preserve-jobs Keep existing SQL Agent jobs (owner, schedule, notifications)");
Console.WriteLine(" --encrypt=<mode> Connection encryption: mandatory (default), optional, strict");
Console.WriteLine(" --trust-cert Trust server certificate without validation");
Console.WriteLine();
@@ -115,6 +116,7 @@ static async Task Main(string[] args)
bool automatedMode = args.Length > 0;
bool reinstallMode = args.Any(a => a.Equals("--reinstall", StringComparison.OrdinalIgnoreCase));
bool resetSchedule = args.Any(a => a.Equals("--reset-schedule", StringComparison.OrdinalIgnoreCase));
+ bool preserveJobs = args.Any(a => a.Equals("--preserve-jobs", StringComparison.OrdinalIgnoreCase));
bool trustCert = args.Any(a => a.Equals("--trust-cert", StringComparison.OrdinalIgnoreCase));
/*Parse encryption option (default: Mandatory)*/
@@ -135,6 +137,7 @@ static async Task Main(string[] args)
var filteredArgs = args
.Where(a => !a.Equals("--reinstall", StringComparison.OrdinalIgnoreCase))
.Where(a => !a.Equals("--reset-schedule", StringComparison.OrdinalIgnoreCase))
+ .Where(a => !a.Equals("--preserve-jobs", StringComparison.OrdinalIgnoreCase))
.Where(a => !a.Equals("--trust-cert", StringComparison.OrdinalIgnoreCase))
.Where(a => !a.StartsWith("--encrypt=", StringComparison.OrdinalIgnoreCase))
.ToArray();
@@ -653,6 +656,16 @@ INSERT...WHERE NOT EXISTS re-populates with current recommended values
Console.Write("(resetting schedule) ");
}
+ /*
+ Preserve existing SQL Agent jobs if requested — flip the T-SQL
+ variable so existing jobs are left untouched during upgrade
+ */
+ if (preserveJobs && fileName.StartsWith("45_", StringComparison.Ordinal))
+ {
+ sqlContent = sqlContent.Replace("@preserve_jobs bit = 0", "@preserve_jobs bit = 1");
+ Console.Write("(preserving existing jobs) ");
+ }
+
/*
Remove SQLCMD directives (:r includes) as we're executing files directly
*/
diff --git a/InstallerGui/MainWindow.xaml b/InstallerGui/MainWindow.xaml
index 61b4046..76e650d 100644
--- a/InstallerGui/MainWindow.xaml
+++ b/InstallerGui/MainWindow.xaml
@@ -186,6 +186,12 @@
Margin="0,0,0,10"
Foreground="{DynamicResource ForegroundBrush}"/>
+
+
+
{
diff --git a/InstallerGui/Services/InstallationService.cs b/InstallerGui/Services/InstallationService.cs
index 512a6ab..dd6705b 100644
--- a/InstallerGui/Services/InstallationService.cs
+++ b/InstallerGui/Services/InstallationService.cs
@@ -325,6 +325,7 @@ public static async Task ExecuteInstallationAsync(
List<string> sqlFiles,
bool cleanInstall,
bool resetSchedule = false,
+ bool preserveJobs = false,
IProgress<InstallationProgress>? progress = null,
Func? preValidationAction = null,
CancellationToken cancellationToken = default)
@@ -422,6 +423,17 @@ Execute SQL files
});
}
+ /*Preserve existing SQL Agent jobs if requested*/
+ if (preserveJobs && fileName.StartsWith("45_", StringComparison.Ordinal))
+ {
+ sqlContent = sqlContent.Replace("@preserve_jobs bit = 0", "@preserve_jobs bit = 1");
+ progress?.Report(new InstallationProgress
+ {
+ Message = "Preserving existing SQL Agent jobs...",
+ Status = "Info"
+ });
+ }
+
/*Remove SQLCMD directives*/
sqlContent = SqlCmdDirectivePattern.Replace(sqlContent, "");
diff --git a/Lite/Controls/PlanViewerControl.xaml.cs b/Lite/Controls/PlanViewerControl.xaml.cs
index 9b17ad5..69e639d 100644
--- a/Lite/Controls/PlanViewerControl.xaml.cs
+++ b/Lite/Controls/PlanViewerControl.xaml.cs
@@ -698,17 +698,17 @@ private void ShowPropertiesPanel(PlanNode node)
// Timing
if (node.ActualElapsedMs > 0 || node.ActualCPUMs > 0
- || node.UdfCpuTimeUs > 0 || node.UdfElapsedTimeUs > 0)
+ || node.UdfCpuTimeMs > 0 || node.UdfElapsedTimeMs > 0)
{
AddPropertySection("Actual Timing");
if (node.ActualElapsedMs > 0)
AddPropertyRow("Elapsed Time", $"{node.ActualElapsedMs:N0} ms");
if (node.ActualCPUMs > 0)
AddPropertyRow("CPU Time", $"{node.ActualCPUMs:N0} ms");
- if (node.UdfElapsedTimeUs > 0)
- AddPropertyRow("UDF Elapsed", $"{node.UdfElapsedTimeUs:N0} us");
- if (node.UdfCpuTimeUs > 0)
- AddPropertyRow("UDF CPU", $"{node.UdfCpuTimeUs:N0} us");
+ if (node.UdfElapsedTimeMs > 0)
+ AddPropertyRow("UDF Elapsed", $"{node.UdfElapsedTimeMs:N0} ms");
+ if (node.UdfCpuTimeMs > 0)
+ AddPropertyRow("UDF CPU", $"{node.UdfCpuTimeMs:N0} ms");
}
// I/O
diff --git a/Lite/Controls/ServerTab.xaml.cs b/Lite/Controls/ServerTab.xaml.cs
index ea70408..800d2a8 100644
--- a/Lite/Controls/ServerTab.xaml.cs
+++ b/Lite/Controls/ServerTab.xaml.cs
@@ -158,6 +158,31 @@ public ServerTab(ServerConnection server, DuckDbInitializer duckDb, CredentialSe
grid.CopyingRowClipboardContent += Helpers.DataGridClipboardBehavior.FixHeaderCopy;
}
+ /* Apply dark theme immediately so charts don't flash white before data loads */
+ ApplyDarkTheme(WaitStatsChart);
+ ApplyDarkTheme(QueryDurationTrendChart);
+ ApplyDarkTheme(ProcDurationTrendChart);
+ ApplyDarkTheme(QueryStoreDurationTrendChart);
+ ApplyDarkTheme(ExecutionCountTrendChart);
+ ApplyDarkTheme(CpuChart);
+ ApplyDarkTheme(MemoryChart);
+ ApplyDarkTheme(MemoryClerksChart);
+ ApplyDarkTheme(MemoryGrantSizingChart);
+ ApplyDarkTheme(MemoryGrantActivityChart);
+ ApplyDarkTheme(FileIoReadChart);
+ ApplyDarkTheme(FileIoWriteChart);
+ ApplyDarkTheme(FileIoReadThroughputChart);
+ ApplyDarkTheme(FileIoWriteThroughputChart);
+ ApplyDarkTheme(TempDbChart);
+ ApplyDarkTheme(TempDbFileIoChart);
+ ApplyDarkTheme(LockWaitTrendChart);
+ ApplyDarkTheme(BlockingTrendChart);
+ ApplyDarkTheme(DeadlockTrendChart);
+ ApplyDarkTheme(CurrentWaitsDurationChart);
+ ApplyDarkTheme(CurrentWaitsBlockedChart);
+ ApplyDarkTheme(PerfmonChart);
+ ApplyDarkTheme(CollectorDurationChart);
+
/* Chart hover tooltips */
_waitStatsHover = new Helpers.ChartHoverHelper(WaitStatsChart, "ms/sec");
_perfmonHover = new Helpers.ChartHoverHelper(PerfmonChart, "");
diff --git a/Lite/Helpers/ChartHoverHelper.cs b/Lite/Helpers/ChartHoverHelper.cs
index 71a8fbb..09e6410 100644
--- a/Lite/Helpers/ChartHoverHelper.cs
+++ b/Lite/Helpers/ChartHoverHelper.cs
@@ -68,9 +68,10 @@ private void OnMouseMove(object sender, MouseEventArgs e)
_lastUpdate = now;
var pos = e.GetPosition(_chart);
+ var dpi = VisualTreeHelper.GetDpi(_chart);
var pixel = new ScottPlot.Pixel(
- (float)(pos.X * _chart.DisplayScale),
- (float)(pos.Y * _chart.DisplayScale));
+ (float)(pos.X * dpi.DpiScaleX),
+ (float)(pos.Y * dpi.DpiScaleY));
var mouseCoords = _chart.Plot.GetCoordinates(pixel);
double bestDistance = double.MaxValue;
diff --git a/Lite/Models/PlanModels.cs b/Lite/Models/PlanModels.cs
index 2da98c8..323b011 100644
--- a/Lite/Models/PlanModels.cs
+++ b/Lite/Models/PlanModels.cs
@@ -272,8 +272,8 @@ public class PlanNode
public string? ActionColumn { get; set; }
public long ActualSegmentReads { get; set; }
public long ActualSegmentSkips { get; set; }
- public long UdfCpuTimeUs { get; set; }
- public long UdfElapsedTimeUs { get; set; }
+ public long UdfCpuTimeMs { get; set; }
+ public long UdfElapsedTimeMs { get; set; }
// XSD gap: RelOp-level metadata
public bool GroupExecuted { get; set; }
diff --git a/Lite/Services/PlanAnalyzer.cs b/Lite/Services/PlanAnalyzer.cs
index 603cc83..f87f5be 100644
--- a/Lite/Services/PlanAnalyzer.cs
+++ b/Lite/Services/PlanAnalyzer.cs
@@ -1,6 +1,7 @@
using System;
using System.Collections.Generic;
using System.Linq;
+using System.Text.RegularExpressions;
using PerformanceMonitorLite.Models;
namespace PerformanceMonitorLite.Services;
@@ -11,6 +12,23 @@ namespace PerformanceMonitorLite.Services;
///
public static class PlanAnalyzer
{
+ private static readonly Regex FunctionInPredicateRegex = new(
+ @"\b(CONVERT_IMPLICIT|CONVERT|CAST|isnull|coalesce|datepart|datediff|dateadd|year|month|day|upper|lower|ltrim|rtrim|trim|substring|left|right|charindex|replace|len|datalength|abs|floor|ceiling|round|reverse|stuff|format)\s*\(",
+ RegexOptions.IgnoreCase | RegexOptions.Compiled);
+
+ private static readonly Regex LeadingWildcardLikeRegex = new(
+ @"\blike\b[^'""]*?N?'%",
+ RegexOptions.IgnoreCase | RegexOptions.Compiled);
+
+ private static readonly Regex CaseInPredicateRegex = new(
+ @"\bCASE\s+(WHEN\b|$)",
+ RegexOptions.IgnoreCase | RegexOptions.Compiled);
+
+ // Matches CTE definitions: WITH name AS ( or , name AS (
+ private static readonly Regex CteDefinitionRegex = new(
+ @"(?:\bWITH\s+|\,\s*)(\w+)\s+AS\s*\(",
+ RegexOptions.IgnoreCase | RegexOptions.Compiled);
+
public static void Analyze(ParsedPlan plan)
{
foreach (var batch in plan.Batches)
@@ -20,7 +38,7 @@ public static void Analyze(ParsedPlan plan)
AnalyzeStatement(stmt);
if (stmt.RootNode != null)
- AnalyzeNodeTree(stmt.RootNode);
+ AnalyzeNodeTree(stmt.RootNode, stmt);
}
}
}
@@ -57,7 +75,7 @@ private static void AnalyzeStatement(PlanStatement stmt)
if (grant.GrantedMemoryKB > 0 && grant.MaxUsedMemoryKB > 0)
{
var wasteRatio = (double)grant.GrantedMemoryKB / grant.MaxUsedMemoryKB;
- if (wasteRatio >= 10 && grant.GrantedMemoryKB > 1024)
+ if (wasteRatio >= 10 && grant.GrantedMemoryKB >= 1048576)
{
stmt.PlanWarnings.Add(new PlanWarning
{
@@ -78,18 +96,102 @@ private static void AnalyzeStatement(PlanStatement stmt)
Severity = grant.GrantWaitTimeMs >= 5000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
});
}
+
+ // Large memory grant with sort/hash guidance
+ if (grant.GrantedMemoryKB >= 1048576 && stmt.RootNode != null)
+ {
+ var consumers = new List();
+ FindMemoryConsumers(stmt.RootNode, consumers);
+
+ var grantMB = grant.GrantedMemoryKB / 1024.0;
+ var guidance = consumers.Count > 0
+ ? $" Memory consumers: {string.Join(", ", consumers)}. Check whether these operators are processing more rows than necessary."
+ : "";
+
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "Large Memory Grant",
+ Message = $"Query granted {grantMB:F0} MB of memory.{guidance}",
+ Severity = grantMB >= 4096 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
+ });
+ }
+ }
+
+ // Rule 18: Compile memory exceeded (early abort)
+ if (stmt.StatementOptmEarlyAbortReason == "MemoryLimitExceeded")
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "Compile Memory Exceeded",
+ Message = "Optimization was aborted early because the compile memory limit was exceeded. The plan may be suboptimal. Simplify the query or break it into smaller parts.",
+ Severity = PlanWarningSeverity.Critical
+ });
+ }
+
+ // Rule 19: High compile CPU
+ if (stmt.CompileCPUMs >= 1000)
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "High Compile CPU",
+ Message = $"Query took {stmt.CompileCPUMs:N0}ms of CPU to compile. Complex queries with many joins or subqueries can cause excessive compile time.",
+ Severity = stmt.CompileCPUMs >= 5000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
+ });
+ }
+
+ // Rule 4 (statement-level): UDF execution timing from QueryTimeStats
+ // Some plans report UDF timing only at the statement level, not per-node.
+ if (stmt.QueryUdfCpuTimeMs > 0 || stmt.QueryUdfElapsedTimeMs > 0)
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "UDF Execution",
+ Message = $"Scalar UDF executing in this statement. UDF elapsed: {stmt.QueryUdfElapsedTimeMs:N0}ms, UDF CPU: {stmt.QueryUdfCpuTimeMs:N0}ms",
+ Severity = stmt.QueryUdfElapsedTimeMs >= 1000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
+ });
+ }
+
+ // Rule 20: Local variables without RECOMPILE
+ // Parameters with no CompiledValue are likely local variables — the optimizer
+ // cannot sniff their values and uses density-based ("unknown") estimates.
+ if (stmt.Parameters.Count > 0)
+ {
+ var unsniffedParams = stmt.Parameters
+ .Where(p => string.IsNullOrEmpty(p.CompiledValue))
+ .ToList();
+
+ if (unsniffedParams.Count > 0)
+ {
+ var hasRecompile = stmt.StatementText.Contains("RECOMPILE", StringComparison.OrdinalIgnoreCase);
+ if (!hasRecompile)
+ {
+ var names = string.Join(", ", unsniffedParams.Select(p => p.Name));
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "Local Variables",
+ Message = $"Parameters without compiled values detected: {names}. These are likely local variables, which cause the optimizer to use density-based (\"unknown\") estimates. Consider using OPTION (RECOMPILE) or rewriting with parameters.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ }
+ }
+
+ // Rule 21: CTE referenced multiple times
+ if (!string.IsNullOrEmpty(stmt.StatementText))
+ {
+ DetectMultiReferenceCte(stmt);
}
}
- private static void AnalyzeNodeTree(PlanNode node)
+ private static void AnalyzeNodeTree(PlanNode node, PlanStatement stmt)
{
- AnalyzeNode(node);
+ AnalyzeNode(node, stmt);
foreach (var child in node.Children)
- AnalyzeNodeTree(child);
+ AnalyzeNodeTree(child, stmt);
}
- private static void AnalyzeNode(PlanNode node)
+ private static void AnalyzeNode(PlanNode node, PlanStatement stmt)
{
// Rule 1: Filter operators — rows survived the tree just to be discarded
if (node.PhysicalOp == "Filter" && !string.IsNullOrEmpty(node.Predicate))
@@ -119,33 +221,43 @@ private static void AnalyzeNode(PlanNode node)
}
// Rule 4: UDF timing — any node spending time in UDFs (actual plans)
- if (node.UdfCpuTimeUs > 0 || node.UdfElapsedTimeUs > 0)
+ if (node.UdfCpuTimeMs > 0 || node.UdfElapsedTimeMs > 0)
{
- var cpuMs = node.UdfCpuTimeUs / 1000.0;
- var elapsedMs = node.UdfElapsedTimeUs / 1000.0;
node.Warnings.Add(new PlanWarning
{
WarningType = "UDF Execution",
- Message = $"Scalar UDF executing on this operator. UDF elapsed: {elapsedMs:F1}ms, UDF CPU: {cpuMs:F1}ms",
- Severity = elapsedMs >= 1000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
+ Message = $"Scalar UDF executing on this operator. UDF elapsed: {node.UdfElapsedTimeMs:N0}ms, UDF CPU: {node.UdfCpuTimeMs:N0}ms",
+ Severity = node.UdfElapsedTimeMs >= 1000 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
});
}
// Rule 5: Large estimate vs actual row gaps (actual plans only)
if (node.HasActualStats && node.EstimateRows > 0)
{
- var ratio = node.ActualRows / node.EstimateRows;
- if (ratio >= 10.0 || ratio <= 0.1)
+ if (node.ActualRows == 0)
{
- var direction = ratio >= 10.0 ? "underestimated" : "overestimated";
- var factor = ratio >= 10.0 ? ratio : 1.0 / ratio;
node.Warnings.Add(new PlanWarning
{
WarningType = "Row Estimate Mismatch",
- Message = $"Estimated {node.EstimateRows:N0} rows, actual {node.ActualRows:N0} ({factor:F0}x {direction}). May cause poor plan choices.",
- Severity = factor >= 100 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
+ Message = $"Estimated {node.EstimateRows:N0} rows, actual 0 rows returned. May cause poor plan choices.",
+ Severity = node.EstimateRows >= 100 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
});
}
+ else
+ {
+ var ratio = node.ActualRows / node.EstimateRows;
+ if (ratio >= 10.0 || ratio <= 0.1)
+ {
+ var direction = ratio >= 10.0 ? "underestimated" : "overestimated";
+ var factor = ratio >= 10.0 ? ratio : 1.0 / ratio;
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Row Estimate Mismatch",
+ Message = $"Estimated {node.EstimateRows:N0} rows, actual {node.ActualRows:N0} ({factor:F0}x {direction}). May cause poor plan choices.",
+ Severity = factor >= 100 ? PlanWarningSeverity.Critical : PlanWarningSeverity.Warning
+ });
+ }
+ }
}
// Rule 6: Scalar UDF references (works on estimated plans too)
@@ -160,22 +272,40 @@ private static void AnalyzeNode(PlanNode node)
});
}
- // Rule 7: Spill detection — promote severity for large spills
+ // Rule 7: Spill detection — calculate operator time and set severity
+ // based on what percentage of statement elapsed time the spill accounts for
foreach (var w in node.Warnings.ToList())
{
- if (w.SpillDetails != null && w.SpillDetails.WritesToTempDb > 1000)
- w.Severity = PlanWarningSeverity.Critical;
+ if (w.SpillDetails != null && node.ActualElapsedMs > 0)
+ {
+ var operatorMs = GetOperatorOwnElapsedMs(node);
+ var stmtMs = stmt.QueryTimeStats?.ElapsedTimeMs ?? 0;
+
+ if (stmtMs > 0)
+ {
+ var pct = (double)operatorMs / stmtMs;
+ w.Message += $" Operator time: {operatorMs:N0}ms ({pct:P0} of statement).";
+
+ if (pct >= 0.5)
+ w.Severity = PlanWarningSeverity.Critical;
+ else if (pct >= 0.1)
+ w.Severity = PlanWarningSeverity.Warning;
+ }
+ }
}
// Rule 8: Parallel thread skew (actual plans with per-thread stats)
+ // Only warn when there are enough rows to meaningfully distribute across threads
if (node.PerThreadStats.Count > 1)
{
var totalRows = node.PerThreadStats.Sum(t => t.ActualRows);
- if (totalRows > 0)
+ var minRowsForSkew = node.PerThreadStats.Count * 1000;
+ if (totalRows >= minRowsForSkew)
{
var maxThread = node.PerThreadStats.OrderByDescending(t => t.ActualRows).First();
var skewRatio = (double)maxThread.ActualRows / totalRows;
- if (skewRatio >= 0.9 && node.PerThreadStats.Count >= 4)
+ var skewThreshold = node.PerThreadStats.Count == 2 ? 0.75 : 0.50;
+ if (skewRatio >= skewThreshold)
{
node.Warnings.Add(new PlanWarning
{
@@ -198,10 +328,20 @@ private static void AnalyzeNode(PlanNode node)
});
}
- // Rule 11: Scan with residual predicate (not spools)
- if (node.PhysicalOp.Contains("Scan", StringComparison.OrdinalIgnoreCase) &&
- !node.PhysicalOp.Contains("Spool", StringComparison.OrdinalIgnoreCase) &&
- !string.IsNullOrEmpty(node.Predicate))
+ // Rule 12: Non-SARGable predicate on scan
+ var nonSargableReason = DetectNonSargablePredicate(node);
+ if (nonSargableReason != null)
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Non-SARGable Predicate",
+ Message = $"{nonSargableReason} prevents index seek, forcing a scan. Fix the predicate or add a computed column with an index. Predicate: {Truncate(node.Predicate!, 200)}",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+
+ // Rule 11: Scan with residual predicate (skip if non-SARGable already flagged)
+ if (nonSargableReason == null && IsRowstoreScan(node) && !string.IsNullOrEmpty(node.Predicate))
{
node.Warnings.Add(new PlanWarning
{
@@ -210,6 +350,350 @@ private static void AnalyzeNode(PlanNode node)
Severity = PlanWarningSeverity.Warning
});
}
+
+ // Rule 13: Mismatched data types (GetRangeWithMismatchedTypes / GetRangeThroughConvert)
+ if (node.PhysicalOp == "Compute Scalar" && !string.IsNullOrEmpty(node.DefinedValues))
+ {
+ var hasMismatch = node.DefinedValues.Contains("GetRangeWithMismatchedTypes", StringComparison.OrdinalIgnoreCase);
+ var hasConvert = node.DefinedValues.Contains("GetRangeThroughConvert", StringComparison.OrdinalIgnoreCase);
+
+ if (hasMismatch || hasConvert)
+ {
+ var reason = hasMismatch
+ ? "Implicit conversion due to mismatched data types. The column type does not match the parameter or literal type, forcing SQL Server to convert values at runtime. Fix the parameter type to match the column."
+ : "Implicit conversion through CONVERT/CAST on a column. SQL Server must convert values at runtime, which can prevent index seeks. Remove the conversion or add a computed column.";
+
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Data Type Mismatch",
+ Message = reason,
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ }
+
+ // Rule 14: Lazy Table Spool unfavorable rebind/rewind ratio
+ // Rebinds = cache misses (child re-executes), rewinds = cache hits (reuse cached result)
+ if (node.LogicalOp == "Lazy Spool")
+ {
+ var rebinds = node.HasActualStats ? (double)node.ActualRebinds : node.EstimateRebinds;
+ var rewinds = node.HasActualStats ? (double)node.ActualRewinds : node.EstimateRewinds;
+ var source = node.HasActualStats ? "actual" : "estimated";
+
+ if (rebinds > 100 && rewinds < rebinds * 5)
+ {
+ var severity = rewinds < rebinds
+ ? PlanWarningSeverity.Critical
+ : PlanWarningSeverity.Warning;
+
+ var ratio = rewinds > 0
+ ? $"{rewinds / rebinds:F1}x rewinds (cache hits) per rebind (cache miss)"
+ : "no rewinds (cache hits) at all";
+
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Lazy Spool Ineffective",
+ Message = $"Lazy spool has low cache hit ratio ({source}): {rebinds:N0} rebinds, {rewinds:N0} rewinds — {ratio}. The spool cache is not earning its overhead.",
+ Severity = severity
+ });
+ }
+ }
+
+ // Rule 15: Join OR clause (Concatenation + Constant Scan under join/Merge Interval)
+ // Best signal: Merge Interval → TopN Sort → Concatenation → Constant Scans
+ // Also fires under a join ancestor (broader catch)
+ if (node.PhysicalOp == "Concatenation")
+ {
+ var constantScanBranches = node.Children
+ .Count(c => c.PhysicalOp == "Constant Scan" ||
+ c.Children.Any(gc => gc.PhysicalOp == "Constant Scan"));
+
+ if (constantScanBranches >= 2 && (HasMergeIntervalAncestor(node) || HasJoinAncestor(node)))
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Join OR Clause",
+ Message = $"OR clause expansion in a join predicate. SQL Server rewrote the OR as {constantScanBranches} separate branches (Concatenation of Constant Scans), each evaluated independently. This pattern often causes excessive inner-side executions.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ }
+
+ // Rule 16: Nested Loops high inner-side execution count
+ if (node.PhysicalOp == "Nested Loops" &&
+ node.LogicalOp.Contains("Join", StringComparison.OrdinalIgnoreCase) &&
+ node.Children.Count >= 2)
+ {
+ var innerChild = node.Children[1];
+
+ if (innerChild.HasActualStats && innerChild.ActualExecutions > 1000)
+ {
+ var dop = stmt.DegreeOfParallelism > 0 ? stmt.DegreeOfParallelism : 1;
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Nested Loops High Executions",
+ Message = $"Nested Loops inner side executed {innerChild.ActualExecutions:N0} times (DOP {dop}). A Hash Join or Merge Join may be more efficient for this row count.",
+ Severity = innerChild.ActualExecutions > 100000
+ ? PlanWarningSeverity.Critical
+ : PlanWarningSeverity.Warning
+ });
+ }
+ else if (!innerChild.HasActualStats && innerChild.EstimateRebinds > 1000)
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Nested Loops High Executions",
+ Message = $"Nested Loops inner side estimated to execute {innerChild.EstimateRebinds + 1:N0} times. A Hash Join or Merge Join may be more efficient for this row count.",
+ Severity = innerChild.EstimateRebinds > 100000
+ ? PlanWarningSeverity.Critical
+ : PlanWarningSeverity.Warning
+ });
+ }
+ }
+
+ // Rule 17: Many-to-many Merge Join
+ if (node.ManyToMany && node.PhysicalOp.Contains("Merge", StringComparison.OrdinalIgnoreCase))
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Many-to-Many Merge Join",
+ Message = "Many-to-many Merge Join requires a worktable to handle duplicate values. This can be expensive with large numbers of duplicates.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+
+ // Rule 22: Table variables (Object name starts with @)
+ if (!string.IsNullOrEmpty(node.ObjectName) &&
+ node.ObjectName.Contains("@"))
+ {
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Table Variable",
+ Message = "Table variable detected. Table variables have no statistics, so the optimizer always estimates 1 row regardless of actual cardinality. Consider using a temp table (#table) for better estimates.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+
+ // Rule 23: Table-valued functions
+ if (node.LogicalOp == "Table-valued function")
+ {
+ var funcName = node.ObjectName ?? node.PhysicalOp;
+ node.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Table-Valued Function",
+ Message = $"Table-valued function: {funcName}. Multi-statement TVFs have no statistics and a fixed estimate of 1 row (pre-2017) or 100 rows (2017+). Consider inlining the logic or using an inline TVF.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+
+ // Rule 24: Top above a scan on the inner side of Nested Loops
+ // This pattern means the scan executes once per outer row, and the Top
+ // limits each iteration — but with no supporting index the scan is a
+ // linear search repeated potentially millions of times.
+ if (node.PhysicalOp == "Nested Loops" && node.Children.Count >= 2)
+ {
+ var inner = node.Children[1];
+
+ // Walk through pass-through operators to find Top
+ while (inner.PhysicalOp == "Compute Scalar" && inner.Children.Count > 0)
+ inner = inner.Children[0];
+
+ if (inner.PhysicalOp == "Top" && inner.Children.Count > 0)
+ {
+ // Walk through pass-through operators below the Top to find the scan
+ var scanCandidate = inner.Children[0];
+ while (scanCandidate.PhysicalOp == "Compute Scalar" && scanCandidate.Children.Count > 0)
+ scanCandidate = scanCandidate.Children[0];
+
+ if (IsRowstoreScan(scanCandidate))
+ {
+ var predInfo = !string.IsNullOrEmpty(scanCandidate.Predicate)
+ ? " The scan has a residual predicate, so it may read many rows before the Top is satisfied."
+ : "";
+ inner.Warnings.Add(new PlanWarning
+ {
+ WarningType = "Top Above Scan",
+ Message = $"Top operator reads from {scanCandidate.PhysicalOp} (Node {scanCandidate.NodeId}) on the inner side of Nested Loops (Node {node.NodeId}).{predInfo} An index supporting the filter and ordering may convert this to a seek.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ }
+ }
+ }
+
+ /// <summary>
+ /// Returns true for rowstore scan operators (Index Scan, Clustered Index Scan,
+ /// Table Scan). Excludes columnstore scans, spools, and constant scans.
+ /// </summary>
+ private static bool IsRowstoreScan(PlanNode node)
+ {
+ return node.PhysicalOp.Contains("Scan", StringComparison.OrdinalIgnoreCase) &&
+ !node.PhysicalOp.Contains("Spool", StringComparison.OrdinalIgnoreCase) &&
+ !node.PhysicalOp.Contains("Constant", StringComparison.OrdinalIgnoreCase) &&
+ !node.PhysicalOp.Contains("Columnstore", StringComparison.OrdinalIgnoreCase);
+ }
+
+ /// <summary>
+ /// Detects non-SARGable patterns in scan predicates.
+ /// Returns a description of the issue, or null if the predicate is fine.
+ /// </summary>
+ private static string? DetectNonSargablePredicate(PlanNode node)
+ {
+ if (string.IsNullOrEmpty(node.Predicate))
+ return null;
+
+ // Only check rowstore scan operators — columnstore is designed to be scanned
+ if (!IsRowstoreScan(node))
+ return null;
+
+ var predicate = node.Predicate;
+
+ // CASE expression in predicate — check first because CASE bodies
+ // often contain CONVERT_IMPLICIT that isn't the root cause
+ if (CaseInPredicateRegex.IsMatch(predicate))
+ return "CASE expression in predicate";
+
+ // CONVERT_IMPLICIT — most common non-SARGable pattern
+ if (predicate.Contains("CONVERT_IMPLICIT", StringComparison.OrdinalIgnoreCase))
+ return "Implicit conversion (CONVERT_IMPLICIT)";
+
+ // ISNULL / COALESCE wrapping column
+ if (Regex.IsMatch(predicate, @"\b(isnull|coalesce)\s*\(", RegexOptions.IgnoreCase))
+ return "ISNULL/COALESCE wrapping column";
+
+ // Common function calls on columns
+ var funcMatch = FunctionInPredicateRegex.Match(predicate);
+ if (funcMatch.Success)
+ {
+ var funcName = funcMatch.Groups[1].Value.ToUpperInvariant();
+ if (funcName != "CONVERT_IMPLICIT")
+ return $"Function call ({funcName}) on column";
+ }
+
+ // Leading wildcard LIKE
+ if (LeadingWildcardLikeRegex.IsMatch(predicate))
+ return "Leading wildcard LIKE pattern";
+
+ return null;
+ }
+
+ /// <summary>
+ /// Detects CTEs that are referenced more than once in the statement text.
+ /// Each reference re-executes the CTE since SQL Server does not materialize them.
+ /// </summary>
+ private static void DetectMultiReferenceCte(PlanStatement stmt)
+ {
+ var text = stmt.StatementText;
+ var cteMatches = CteDefinitionRegex.Matches(text);
+ if (cteMatches.Count == 0)
+ return;
+
+ foreach (Match match in cteMatches)
+ {
+ var cteName = match.Groups[1].Value;
+ if (string.IsNullOrEmpty(cteName))
+ continue;
+
+ // Count references as FROM/JOIN targets after the CTE definition
+ var refPattern = new Regex(
+ $@"\b(FROM|JOIN)\s+{Regex.Escape(cteName)}\b",
+ RegexOptions.IgnoreCase);
+ var refCount = refPattern.Matches(text).Count;
+
+ if (refCount > 1)
+ {
+ stmt.PlanWarnings.Add(new PlanWarning
+ {
+ WarningType = "CTE Multiple References",
+ Message = $"CTE \"{cteName}\" is referenced {refCount} times. SQL Server does not materialize CTEs — each reference re-executes the entire CTE query. Consider materializing into a temp table.",
+ Severity = PlanWarningSeverity.Warning
+ });
+ }
+ }
+ }
+
+ /// <summary>
+ /// Checks whether a node has a Merge Interval ancestor (OR expansion pattern).
+ /// </summary>
+ private static bool HasMergeIntervalAncestor(PlanNode node)
+ {
+ var ancestor = node.Parent;
+ while (ancestor != null)
+ {
+ if (ancestor.PhysicalOp == "Merge Interval")
+ return true;
+ ancestor = ancestor.Parent;
+ }
+ return false;
+ }
+
+ /// <summary>
+ /// Checks whether a node has any join ancestor.
+ /// </summary>
+ private static bool HasJoinAncestor(PlanNode node)
+ {
+ var ancestor = node.Parent;
+ while (ancestor != null)
+ {
+ if (ancestor.LogicalOp != null &&
+ ancestor.LogicalOp.Contains("Join", StringComparison.OrdinalIgnoreCase))
+ return true;
+ ancestor = ancestor.Parent;
+ }
+ return false;
+ }
+
+ /// <summary>
+ /// Finds Sort and Hash Match operators in the tree that consume memory.
+ /// </summary>
+ private static void FindMemoryConsumers(PlanNode node, List<string> consumers)
+ {
+ if (node.PhysicalOp.Contains("Sort", StringComparison.OrdinalIgnoreCase) &&
+ !node.PhysicalOp.Contains("Spool", StringComparison.OrdinalIgnoreCase))
+ {
+ var rows = node.HasActualStats
+ ? $"{node.ActualRows:N0} actual rows"
+ : $"{node.EstimateRows:N0} estimated rows";
+ consumers.Add($"Sort (Node {node.NodeId}, {rows})");
+ }
+ else if (node.PhysicalOp.Contains("Hash", StringComparison.OrdinalIgnoreCase))
+ {
+ var rows = node.HasActualStats
+ ? $"{node.ActualRows:N0} actual rows"
+ : $"{node.EstimateRows:N0} estimated rows";
+ consumers.Add($"Hash Match (Node {node.NodeId}, {rows})");
+ }
+
+ foreach (var child in node.Children)
+ FindMemoryConsumers(child, consumers);
+ }
+
+ /// <summary>
+ /// Calculates an operator's own elapsed time by subtracting child time.
+ /// In batch mode, operator times are self-contained. In row mode, times are
+ /// cumulative (include children), so we subtract the dominant child's time.
+ /// Parallelism (exchange) operators are skipped because they have timing bugs.
+ /// </summary>
+ private static long GetOperatorOwnElapsedMs(PlanNode node)
+ {
+ if (node.ActualExecutionMode == "Batch")
+ return node.ActualElapsedMs;
+
+ // Row mode: subtract the dominant child's elapsed time
+ var maxChildElapsed = 0L;
+ foreach (var child in node.Children)
+ {
+ var childElapsed = child.ActualElapsedMs;
+
+ // Exchange operators have timing bugs — skip to their child
+ if (child.PhysicalOp == "Parallelism" && child.Children.Count > 0)
+ childElapsed = child.Children.Max(c => c.ActualElapsedMs);
+
+ if (childElapsed > maxChildElapsed)
+ maxChildElapsed = childElapsed;
+ }
+
+ return Math.Max(0, node.ActualElapsedMs - maxChildElapsed);
}
private static string Truncate(string value, int maxLength)
diff --git a/Lite/Services/RemoteCollectorService.QueryStore.cs b/Lite/Services/RemoteCollectorService.QueryStore.cs
index 737dd7f..53f8162 100644
--- a/Lite/Services/RemoteCollectorService.QueryStore.cs
+++ b/Lite/Services/RemoteCollectorService.QueryStore.cs
@@ -48,6 +48,23 @@ WHERE d.database_id > 4
AND d.database_id < 32761
AND d.state_desc = N'ONLINE'
AND d.name <> N'PerformanceMonitor'
+ AND d.database_id NOT IN
+ (
+ SELECT
+ d2.database_id
+ FROM sys.databases AS d2
+ JOIN sys.availability_replicas AS r
+ ON d2.replica_id = r.replica_id
+ WHERE NOT EXISTS
+ (
+ SELECT
+ 1/0
+ FROM sys.dm_hadr_availability_group_states AS s
+ WHERE s.primary_replica = r.replica_server_name
+ )
+ AND r.secondary_role_allow_connections_desc = N'READ_ONLY'
+ AND r.replica_server_name = @@SERVERNAME
+ )
OPTION(RECOMPILE);
OPEN db_check;
diff --git a/Lite/Services/RemoteCollectorService.ServerConfig.cs b/Lite/Services/RemoteCollectorService.ServerConfig.cs
index 9adad48..b0f325f 100644
--- a/Lite/Services/RemoteCollectorService.ServerConfig.cs
+++ b/Lite/Services/RemoteCollectorService.ServerConfig.cs
@@ -336,6 +336,23 @@ FROM sys.databases AS d
AND d.database_id < 32761
AND d.name <> N'PerformanceMonitor'
AND d.state_desc = N'ONLINE'
+AND d.database_id NOT IN
+ (
+ SELECT
+ d2.database_id
+ FROM sys.databases AS d2
+ JOIN sys.availability_replicas AS r
+ ON d2.replica_id = r.replica_id
+ WHERE NOT EXISTS
+ (
+ SELECT
+ 1/0
+ FROM sys.dm_hadr_availability_group_states AS s
+ WHERE s.primary_replica = r.replica_server_name
+ )
+ AND r.secondary_role_allow_connections_desc = N'READ_ONLY'
+ AND r.replica_server_name = @@SERVERNAME
+ )
ORDER BY d.name
OPTION(RECOMPILE);";
diff --git a/Lite/Services/ShowPlanParser.cs b/Lite/Services/ShowPlanParser.cs
index 7cea60e..02221b2 100644
--- a/Lite/Services/ShowPlanParser.cs
+++ b/Lite/Services/ShowPlanParser.cs
@@ -1179,8 +1179,8 @@ private static PlanNode ParseRelOp(XElement relOpEl)
node.ActualExecutionMode = actualExecMode;
node.ActualSegmentReads = totalSegmentReads;
node.ActualSegmentSkips = totalSegmentSkips;
- node.UdfCpuTimeUs = totalUdfCpu;
- node.UdfElapsedTimeUs = maxUdfElapsed;
+ node.UdfCpuTimeMs = totalUdfCpu;
+ node.UdfElapsedTimeMs = maxUdfElapsed;
// Store per-thread data for parallel skew analysis
foreach (var thread in runtimeEl.Elements(Ns + "RunTimeCountersPerThread"))
diff --git a/README.md b/README.md
index 9e37523..7318ae9 100644
--- a/README.md
+++ b/README.md
@@ -146,6 +146,7 @@ A GUI installer (`PerformanceMonitorInstallerGui.exe`) is also included in the r
| `USERNAME PASSWORD` | SQL Authentication credentials (positional, optional) |
| `--reinstall` | Drop existing database and perform clean install |
| `--reset-schedule` | Reset collection schedule to recommended defaults during upgrade |
+| `--preserve-jobs` | Keep existing SQL Agent jobs unchanged (owner, schedule, notifications) |
| `--encrypt=optional\|mandatory\|strict` | Connection encryption level (default: mandatory) |
| `--trust-cert` | Trust server certificate without validation (default: require valid cert) |
diff --git a/install/01_install_database.sql b/install/01_install_database.sql
index db141d7..a435add 100644
--- a/install/01_install_database.sql
+++ b/install/01_install_database.sql
@@ -274,6 +274,10 @@ BEGIN
DEFAULT 5,
retention_days integer NOT NULL
DEFAULT 30,
+ collect_query bit NOT NULL
+ DEFAULT CONVERT(bit, 'true'),
+ collect_plan bit NOT NULL
+ DEFAULT CONVERT(bit, 'true'),
[description] nvarchar(500) NULL,
created_date datetime2(7) NOT NULL
DEFAULT SYSDATETIME(),
@@ -317,6 +321,47 @@ BEGIN
PRINT 'Created config.collection_schedule table';
END;
+/*
+Add collect_query and collect_plan columns for existing installations
+Controls whether collectors store query text and execution plans
+Both default to enabled (1) for backwards compatibility
+*/
+IF NOT EXISTS
+(
+ SELECT
+ 1/0
+ FROM sys.columns
+ WHERE object_id = OBJECT_ID(N'config.collection_schedule')
+ AND name = N'collect_query'
+)
+BEGIN
+ ALTER TABLE
+ config.collection_schedule
+ ADD collect_query bit NOT NULL
+ CONSTRAINT DF_collection_schedule_collect_query
+ DEFAULT CONVERT(bit, 'true');
+
+ PRINT 'Added collect_query column to config.collection_schedule';
+END;
+
+IF NOT EXISTS
+(
+ SELECT
+ 1/0
+ FROM sys.columns
+ WHERE object_id = OBJECT_ID(N'config.collection_schedule')
+ AND name = N'collect_plan'
+)
+BEGIN
+ ALTER TABLE
+ config.collection_schedule
+ ADD collect_plan bit NOT NULL
+ CONSTRAINT DF_collection_schedule_collect_plan
+ DEFAULT CONVERT(bit, 'true');
+
+ PRINT 'Added collect_plan column to config.collection_schedule';
+END;
+
/*
Critical issues table
Logs significant performance problems detected by collectors and analysis procedures
diff --git a/install/08_collect_query_stats.sql b/install/08_collect_query_stats.sql
index df0163c..0d62c24 100644
--- a/install/08_collect_query_stats.sql
+++ b/install/08_collect_query_stats.sql
@@ -107,16 +107,16 @@ BEGIN
END;
/*
- First run detection - collect all queries if this is the first execution
+ First run detection - collect last 1 hour of queries if this is the first execution
*/
IF NOT EXISTS (SELECT 1/0 FROM collect.query_stats)
AND NOT EXISTS (SELECT 1/0 FROM config.collection_log WHERE collector_name = N'query_stats_collector')
BEGIN
- SET @cutoff_time = CONVERT(datetime2(7), '19000101');
+ SET @cutoff_time = DATEADD(HOUR, -1, SYSDATETIME());
IF @debug = 1
BEGIN
- RAISERROR(N'First run detected - collecting all queries from sys.dm_exec_query_stats', 0, 1) WITH NOWAIT;
+ RAISERROR(N'First run detected - collecting last 1 hour of query stats', 0, 1) WITH NOWAIT;
END;
END;
ELSE
@@ -153,6 +153,19 @@ BEGIN
RAISERROR(N'Collecting queries executed since %s', 0, 1, @cutoff_time_string) WITH NOWAIT;
END;
+ /*
+ Read collection flags for query text and plans
+ */
+ DECLARE
+ @collect_query bit = 1,
+ @collect_plan bit = 1;
+
+ SELECT
+ @collect_query = cs.collect_query,
+ @collect_plan = cs.collect_plan
+ FROM config.collection_schedule AS cs
+ WHERE cs.collector_name = N'query_stats_collector';
+
/*
Collect query statistics directly from DMV
Only collects queries executed since last collection
@@ -255,6 +268,8 @@ BEGIN
max_spills = qs.max_spills,
query_text =
CASE
+ WHEN @collect_query = 0
+ THEN NULL
WHEN qs.statement_start_offset = 0
AND qs.statement_end_offset = -1
THEN st.text
@@ -272,7 +287,12 @@ BEGIN
) / 2 + 1
)
END,
- query_plan_text = tqp.query_plan
+ query_plan_text =
+ CASE
+ WHEN @collect_plan = 1
+ THEN tqp.query_plan
+ ELSE NULL
+ END
FROM sys.dm_exec_query_stats AS qs
OUTER APPLY sys.dm_exec_sql_text(qs.sql_handle) AS st
OUTER APPLY
diff --git a/install/09_collect_query_store.sql b/install/09_collect_query_store.sql
index f8a13b2..6794ea9 100644
--- a/install/09_collect_query_store.sql
+++ b/install/09_collect_query_store.sql
@@ -145,16 +145,16 @@ BEGIN
END;
/*
- First run detection - collect 3 days of history if this is the first execution
+ First run detection - collect last 1 hour of history if this is the first execution
*/
IF NOT EXISTS (SELECT 1/0 FROM collect.query_store_data)
AND NOT EXISTS (SELECT 1/0 FROM config.collection_log WHERE collector_name = N'query_store_collector')
BEGIN
- SET @cutoff_time = TODATETIMEOFFSET(DATEADD(DAY, -3, SYSUTCDATETIME()), 0);
+ SET @cutoff_time = TODATETIMEOFFSET(DATEADD(HOUR, -1, SYSUTCDATETIME()), 0);
IF @debug = 1
BEGIN
- RAISERROR(N'First run detected - collecting last 3 days of Query Store data', 0, 1) WITH NOWAIT;
+ RAISERROR(N'First run detected - collecting last 1 hour of Query Store data', 0, 1) WITH NOWAIT;
END;
END;
ELSE
@@ -200,6 +200,19 @@ BEGIN
RAISERROR(@debug_msg, 0, 1) WITH NOWAIT;
END;
+ /*
+ Read collection flags for query text and plans
+ */
+ DECLARE
+ @collect_query bit = 1,
+ @collect_plan bit = 1;
+
+ SELECT
+ @collect_query = cs.collect_query,
+ @collect_plan = cs.collect_plan
+ FROM config.collection_schedule AS cs
+ WHERE cs.collector_name = N'query_store_collector';
+
/*
Create temp table to hold Query Store data from all databases
*/
@@ -291,6 +304,23 @@ BEGIN
AND d.is_read_only = 0
AND d.name <> N'PerformanceMonitor'
AND d.database_id < 32761 /*exclude contained AG system databases*/
+ AND d.database_id NOT IN
+ (
+ SELECT
+ d2.database_id
+ FROM sys.databases AS d2
+ JOIN sys.availability_replicas AS r
+ ON d2.replica_id = r.replica_id
+ WHERE NOT EXISTS
+ (
+ SELECT
+ 1/0
+ FROM sys.dm_hadr_availability_group_states AS s
+ WHERE s.primary_replica = r.replica_server_name
+ )
+ AND r.secondary_role_allow_connections_desc = N'READ_ONLY'
+ AND r.replica_server_name = @@SERVERNAME
+ )
OPTION(RECOMPILE);
OPEN @db_check_cursor;
@@ -391,8 +421,20 @@ BEGIN
WHEN q.object_id > 0
AND o.object_id IS NOT NULL
THEN o.object_name
- END,
- query_sql_text = qt.query_sql_text,
+ END,';
+
+ IF @collect_query = 1
+ BEGIN
+ SET @sql += N'
+ query_sql_text = qt.query_sql_text,';
+ END;
+ ELSE
+ BEGIN
+ SET @sql += N'
+ query_sql_text = NULL,';
+ END;
+
+ SET @sql += N'
query_hash = q.query_hash,
count_executions = rs.count_executions,
avg_duration = rs.avg_duration,
@@ -484,8 +526,20 @@ BEGIN
is_forced_plan = p.is_forced_plan,
p.force_failure_count,
p.last_force_failure_reason_desc,
- p.compatibility_level,
- query_plan_text = CONVERT(nvarchar(max), p.query_plan),
+ p.compatibility_level,';
+
+ IF @collect_plan = 1
+ BEGIN
+ SET @sql += N'
+ query_plan_text = CONVERT(nvarchar(max), p.query_plan),';
+ END;
+ ELSE
+ BEGIN
+ SET @sql += N'
+ query_plan_text = NULL,';
+ END;
+
+ SET @sql += N'
compilation_metrics =
(
SELECT
diff --git a/install/10_collect_procedure_stats.sql b/install/10_collect_procedure_stats.sql
index e2dd106..006616a 100644
--- a/install/10_collect_procedure_stats.sql
+++ b/install/10_collect_procedure_stats.sql
@@ -107,16 +107,16 @@ BEGIN
END;
/*
- First run detection - collect all procedures if this is the first execution
+ First run detection - collect last 1 hour of procedures if this is the first execution
*/
IF NOT EXISTS (SELECT 1/0 FROM collect.procedure_stats)
AND NOT EXISTS (SELECT 1/0 FROM config.collection_log WHERE collector_name = N'procedure_stats_collector')
BEGIN
- SET @cutoff_time = CONVERT(datetime2(7), '19000101');
+ SET @cutoff_time = DATEADD(HOUR, -1, SYSDATETIME());
IF @debug = 1
BEGIN
- RAISERROR(N'First run detected - collecting all procedures from sys.dm_exec_procedure_stats', 0, 1) WITH NOWAIT;
+ RAISERROR(N'First run detected - collecting last 1 hour of procedure stats', 0, 1) WITH NOWAIT;
END;
END;
ELSE
@@ -153,6 +153,17 @@ BEGIN
RAISERROR(N'Collecting procedure stats with cutoff time: %s', 0, 1, @cutoff_time_string) WITH NOWAIT;
END;
+ /*
+ Read collection flag for plans
+ */
+ DECLARE
+ @collect_plan bit = 1;
+
+ SELECT
+ @collect_plan = cs.collect_plan
+ FROM config.collection_schedule AS cs
+ WHERE cs.collector_name = N'procedure_stats_collector';
+
/*
Collect procedure, trigger, and function statistics
Single query with UNION ALL to collect from all three DMVs
@@ -223,7 +234,12 @@ BEGIN
total_spills = ps.total_spills,
min_spills = ps.min_spills,
max_spills = ps.max_spills,
- query_plan_text = CONVERT(nvarchar(max), tqp.query_plan)
+ query_plan_text =
+ CASE
+ WHEN @collect_plan = 1
+ THEN CONVERT(nvarchar(max), tqp.query_plan)
+ ELSE NULL
+ END
FROM sys.dm_exec_procedure_stats AS ps
OUTER APPLY
sys.dm_exec_text_query_plan
@@ -385,7 +401,12 @@ BEGIN
total_spills = ts.total_spills,
min_spills = ts.min_spills,
max_spills = ts.max_spills,
- query_plan_text = CONVERT(nvarchar(max), tqp.query_plan)
+ query_plan_text =
+ CASE
+ WHEN @collect_plan = 1
+ THEN CONVERT(nvarchar(max), tqp.query_plan)
+ ELSE NULL
+ END
FROM sys.dm_exec_trigger_stats AS ts
CROSS APPLY sys.dm_exec_sql_text(ts.sql_handle) AS st
OUTER APPLY
@@ -444,7 +465,12 @@ BEGIN
total_spills = NULL,
min_spills = NULL,
max_spills = NULL,
- query_plan_text = CONVERT(nvarchar(max), tqp.query_plan)
+ query_plan_text =
+ CASE
+ WHEN @collect_plan = 1
+ THEN CONVERT(nvarchar(max), tqp.query_plan)
+ ELSE NULL
+ END
FROM sys.dm_exec_function_stats AS fs
OUTER APPLY
sys.dm_exec_text_query_plan
diff --git a/install/18_collect_cpu_utilization_stats.sql b/install/18_collect_cpu_utilization_stats.sql
index 536ceca..6849a13 100644
--- a/install/18_collect_cpu_utilization_stats.sql
+++ b/install/18_collect_cpu_utilization_stats.sql
@@ -112,7 +112,7 @@ BEGIN
/*
Collect CPU utilization data from ring buffers
Only collects samples newer than the most recent sample we have
- On first run (NULL max_sample_time), looks back 7 days to populate initial data
+ On first run (NULL max_sample_time), looks back 1 hour to populate initial data
Avoids duplicate collection of same ring buffer events
*/
INSERT INTO
@@ -156,7 +156,7 @@ BEGIN
SECOND,
-((@current_ms_ticks - t.timestamp) / 1000),
@start_time
- ) > ISNULL(@max_sample_time, DATEADD(DAY, -7, @start_time))
+ ) > ISNULL(@max_sample_time, DATEADD(HOUR, -1, @start_time))
ORDER BY
t.timestamp DESC
OPTION(RECOMPILE);
diff --git a/install/22_collect_blocked_processes.sql b/install/22_collect_blocked_processes.sql
index 5032ad3..33e0e62 100644
--- a/install/22_collect_blocked_processes.sql
+++ b/install/22_collect_blocked_processes.sql
@@ -140,17 +140,17 @@ BEGIN
END;
/*
- First run detection - collect 3 days of history if this is the first execution
+ First run detection - collect last 1 hour of history if this is the first execution
*/
IF NOT EXISTS (SELECT 1/0 FROM collect.blocked_process_xml)
AND NOT EXISTS (SELECT 1/0 FROM config.collection_log WHERE collector_name = N'blocked_process_xml_collector')
BEGIN
- SET @minutes_back = 4320; /*3 days*/
+ SET @minutes_back = 60; /*1 hour*/
SET @cutoff_time = DATEADD(MINUTE, -@minutes_back, SYSUTCDATETIME());
IF @debug = 1
BEGIN
- RAISERROR(N'First run detected - collecting last 3 days of blocked process events', 0, 1) WITH NOWAIT;
+ RAISERROR(N'First run detected - collecting last 1 hour of blocked process events', 0, 1) WITH NOWAIT;
END;
END;
diff --git a/install/24_collect_deadlock_xml.sql b/install/24_collect_deadlock_xml.sql
index 6bf8f87..6314e7f 100644
--- a/install/24_collect_deadlock_xml.sql
+++ b/install/24_collect_deadlock_xml.sql
@@ -101,17 +101,17 @@ BEGIN
END;
/*
- First run detection - collect 3 days of history if this is the first execution
+ First run detection - collect last 1 hour of history if this is the first execution
*/
IF NOT EXISTS (SELECT 1/0 FROM collect.deadlock_xml)
AND NOT EXISTS (SELECT 1/0 FROM config.collection_log WHERE collector_name = N'deadlock_xml_collector')
BEGIN
- SET @minutes_back = 4320; /*3 days*/
+ SET @minutes_back = 60; /*1 hour*/
SET @cutoff_time = DATEADD(MINUTE, -@minutes_back, SYSUTCDATETIME());
IF @debug = 1
BEGIN
- RAISERROR(N'First run detected - collecting last 3 days of deadlock events', 0, 1) WITH NOWAIT;
+ RAISERROR(N'First run detected - collecting last 1 hour of deadlock events', 0, 1) WITH NOWAIT;
END;
END;
diff --git a/install/28_collect_system_health_wrapper.sql b/install/28_collect_system_health_wrapper.sql
index 1c2b7fc..2a96f3d 100644
--- a/install/28_collect_system_health_wrapper.sql
+++ b/install/28_collect_system_health_wrapper.sql
@@ -101,16 +101,16 @@ BEGIN
END;
/*
- First run detection - collect 3 days of history if this is the first execution
+ First run detection - collect last 1 hour of history if this is the first execution
*/
IF NOT EXISTS (SELECT 1/0 FROM config.collection_log WHERE collector_name = N'system_health_collector')
BEGIN
- SET @hours_back = 72; /*3 days*/
+ SET @hours_back = 1; /*1 hour*/
SET @start_date = DATEADD(HOUR, -@hours_back, SYSDATETIMEOFFSET());
IF @debug = 1
BEGIN
- RAISERROR(N'First run detected - collecting last 3 days of system health data', 0, 1) WITH NOWAIT;
+ RAISERROR(N'First run detected - collecting last 1 hour of system health data', 0, 1) WITH NOWAIT;
END;
END;
diff --git a/install/29_collect_default_trace.sql b/install/29_collect_default_trace.sql
index 9760fab..917466d 100644
--- a/install/29_collect_default_trace.sql
+++ b/install/29_collect_default_trace.sql
@@ -110,17 +110,17 @@ BEGIN
END;
/*
- First run detection - collect all available trace data if this is the first execution
+ First run detection - collect last 1 hour of trace data if this is the first execution
Ignore CONFIG_CHANGE entries when checking for first run (those are just from enabling the trace)
*/
IF NOT EXISTS (SELECT 1/0 FROM collect.default_trace_events)
AND NOT EXISTS (SELECT 1/0 FROM config.collection_log WHERE collector_name = N'default_trace_collector' AND collection_status = N'SUCCESS')
BEGIN
- SET @cutoff_time = CONVERT(datetime2(7), '19000101');
+ SET @cutoff_time = DATEADD(HOUR, -1, SYSDATETIME());
IF @debug = 1
BEGIN
- RAISERROR(N'First run detected - collecting all available default trace events', 0, 1) WITH NOWAIT;
+ RAISERROR(N'First run detected - collecting last 1 hour of default trace events', 0, 1) WITH NOWAIT;
END;
END;
diff --git a/install/39_collect_database_configuration.sql b/install/39_collect_database_configuration.sql
index 199f155..1bac443 100644
--- a/install/39_collect_database_configuration.sql
+++ b/install/39_collect_database_configuration.sql
@@ -165,6 +165,23 @@ BEGIN
AND d.name != DB_NAME()
AND d.state_desc = N'ONLINE'
AND d.database_id < 32761 /*exclude contained AG system databases*/
+ AND d.database_id NOT IN
+ (
+ SELECT
+ d2.database_id
+ FROM sys.databases AS d2
+ JOIN sys.availability_replicas AS r
+ ON d2.replica_id = r.replica_id
+ WHERE NOT EXISTS
+ (
+ SELECT
+ 1/0
+ FROM sys.dm_hadr_availability_group_states AS s
+ WHERE s.primary_replica = r.replica_server_name
+ )
+ AND r.secondary_role_allow_connections_desc = N'READ_ONLY'
+ AND r.replica_server_name = @@SERVERNAME
+ )
ORDER BY
d.name
OPTION (RECOMPILE);
diff --git a/install/45_create_agent_jobs.sql b/install/45_create_agent_jobs.sql
index e7ce07c..c9fe080 100644
--- a/install/45_create_agent_jobs.sql
+++ b/install/45_create_agent_jobs.sql
@@ -21,9 +21,21 @@ GO
/*
Create SQL Server Agent Jobs for Performance Monitor
These jobs automate data collection and retention
+
+When @preserve_jobs = 1, existing jobs are left untouched (owner,
+schedule, notifications, etc.) and only missing jobs are created.
+The installer sets this to 1 when --preserve-jobs is specified.
*/
+DECLARE
+ @preserve_jobs bit = 0;
+
PRINT 'Creating SQL Server Agent jobs for Performance Monitor';
+
+IF @preserve_jobs = 1
+BEGIN
+ PRINT '(preserve mode — existing jobs will not be modified)';
+END;
PRINT '';
/*
@@ -31,11 +43,8 @@ Job 1: PerformanceMonitor - Collection
Runs scheduled master collector every 1 minute
The collector checks config.collection_schedule to determine which collectors should run
*/
-
-/*
-Drop existing job if it exists
-*/
-IF EXISTS
+IF @preserve_jobs = 0
+AND EXISTS
(
SELECT
1/0
@@ -72,48 +81,55 @@ BEGIN
PRINT 'Dropped existing PerformanceMonitor - Collection job';
END;
-/*
-Create the collection job
-*/
-EXECUTE msdb.dbo.sp_add_job
- @job_name = N'PerformanceMonitor - Collection',
- @enabled = 1,
- @description = N'Runs scheduled master collector to execute collectors based on config.collection_schedule',
- @category_name = N'Data Collector';
-
-EXECUTE msdb.dbo.sp_add_jobstep
- @job_name = N'PerformanceMonitor - Collection',
- @step_name = N'Run Scheduled Master Collector',
- @subsystem = N'TSQL',
- @database_name = N'PerformanceMonitor',
- @command = N'EXECUTE collect.scheduled_master_collector @debug = 0;',
- @retry_attempts = 0,
- @on_success_action = 1; /*Quit with success*/
-
-EXECUTE msdb.dbo.sp_add_jobschedule
- @job_name = N'PerformanceMonitor - Collection',
- @name = N'Every 1 Minute',
- @freq_type = 4, /*Daily*/
- @freq_interval = 1,
- @freq_subday_type = 4, /*Minutes*/
- @freq_subday_interval = 1; /*Every 1 minute*/
-
-EXECUTE msdb.dbo.sp_add_jobserver
- @job_name = N'PerformanceMonitor - Collection',
- @server_name = N'(local)';
-
-PRINT 'Created PerformanceMonitor - Collection job (runs every 1 minute)';
+IF NOT EXISTS
+(
+ SELECT
+ 1/0
+ FROM msdb.dbo.sysjobs AS j
+ WHERE j.name = N'PerformanceMonitor - Collection'
+)
+BEGIN
+ EXECUTE msdb.dbo.sp_add_job
+ @job_name = N'PerformanceMonitor - Collection',
+ @enabled = 1,
+ @description = N'Runs scheduled master collector to execute collectors based on config.collection_schedule',
+ @category_name = N'Data Collector';
+
+ EXECUTE msdb.dbo.sp_add_jobstep
+ @job_name = N'PerformanceMonitor - Collection',
+ @step_name = N'Run Scheduled Master Collector',
+ @subsystem = N'TSQL',
+ @database_name = N'PerformanceMonitor',
+ @command = N'EXECUTE collect.scheduled_master_collector @debug = 0;',
+ @retry_attempts = 0,
+ @on_success_action = 1; /*Quit with success*/
+
+ EXECUTE msdb.dbo.sp_add_jobschedule
+ @job_name = N'PerformanceMonitor - Collection',
+ @name = N'Every 1 Minute',
+ @freq_type = 4, /*Daily*/
+ @freq_interval = 1,
+ @freq_subday_type = 4, /*Minutes*/
+ @freq_subday_interval = 1; /*Every 1 minute*/
+
+ EXECUTE msdb.dbo.sp_add_jobserver
+ @job_name = N'PerformanceMonitor - Collection',
+ @server_name = N'(local)';
+
+ PRINT 'Created PerformanceMonitor - Collection job (runs every 1 minute)';
+END;
+ELSE IF @preserve_jobs = 1
+BEGIN
+ PRINT 'PerformanceMonitor - Collection job already exists - preserving current settings';
+END;
PRINT '';
/*
Job 2: PerformanceMonitor - Data Retention
Purges old performance monitoring data daily at 2am
*/
-
-/*
-Drop existing job if it exists
-*/
-IF EXISTS
+IF @preserve_jobs = 0
+AND EXISTS
(
SELECT
1/0
@@ -150,47 +166,54 @@ BEGIN
PRINT 'Dropped existing PerformanceMonitor - Data Retention job';
END;
-/*
-Create the data retention job
-*/
-EXECUTE msdb.dbo.sp_add_job
- @job_name = N'PerformanceMonitor - Data Retention',
- @enabled = 1,
- @description = N'Purges old performance monitoring data',
- @category_name = N'Data Collector';
-
-EXECUTE msdb.dbo.sp_add_jobstep
- @job_name = N'PerformanceMonitor - Data Retention',
- @step_name = N'Run Data Retention',
- @subsystem = N'TSQL',
- @database_name = N'PerformanceMonitor',
- @command = N'EXECUTE config.data_retention @debug = 1;',
- @retry_attempts = 0,
- @on_success_action = 1; /*Quit with success*/
-
-EXECUTE msdb.dbo.sp_add_jobschedule
- @job_name = N'PerformanceMonitor - Data Retention',
- @name = N'Daily at 2am',
- @freq_type = 4, /*Daily*/
- @freq_interval = 1,
- @active_start_time = 20000; /*2:00 AM*/
-
-EXECUTE msdb.dbo.sp_add_jobserver
- @job_name = N'PerformanceMonitor - Data Retention',
- @server_name = N'(local)';
-
-PRINT 'Created PerformanceMonitor - Data Retention job (runs daily at 2:00 AM)';
+IF NOT EXISTS
+(
+ SELECT
+ 1/0
+ FROM msdb.dbo.sysjobs AS j
+ WHERE j.name = N'PerformanceMonitor - Data Retention'
+)
+BEGIN
+ EXECUTE msdb.dbo.sp_add_job
+ @job_name = N'PerformanceMonitor - Data Retention',
+ @enabled = 1,
+ @description = N'Purges old performance monitoring data',
+ @category_name = N'Data Collector';
+
+ EXECUTE msdb.dbo.sp_add_jobstep
+ @job_name = N'PerformanceMonitor - Data Retention',
+ @step_name = N'Run Data Retention',
+ @subsystem = N'TSQL',
+ @database_name = N'PerformanceMonitor',
+ @command = N'EXECUTE config.data_retention @debug = 1;',
+ @retry_attempts = 0,
+ @on_success_action = 1; /*Quit with success*/
+
+ EXECUTE msdb.dbo.sp_add_jobschedule
+ @job_name = N'PerformanceMonitor - Data Retention',
+ @name = N'Daily at 2am',
+ @freq_type = 4, /*Daily*/
+ @freq_interval = 1,
+ @active_start_time = 20000; /*2:00 AM*/
+
+ EXECUTE msdb.dbo.sp_add_jobserver
+ @job_name = N'PerformanceMonitor - Data Retention',
+ @server_name = N'(local)';
+
+ PRINT 'Created PerformanceMonitor - Data Retention job (runs daily at 2:00 AM)';
+END;
+ELSE IF @preserve_jobs = 1
+BEGIN
+ PRINT 'PerformanceMonitor - Data Retention job already exists - preserving current settings';
+END;
PRINT '';
/*
Job 3: PerformanceMonitor - Hung Job Monitor
Monitors the collection job for hung state every 5 minutes
*/
-
-/*
-Drop existing job if it exists
-*/
-IF EXISTS
+IF @preserve_jobs = 0
+AND EXISTS
(
SELECT
1/0
@@ -227,42 +250,52 @@ BEGIN
PRINT 'Dropped existing PerformanceMonitor - Hung Job Monitor job';
END;
-/*
-Create the hung job monitor job
-*/
-EXECUTE msdb.dbo.sp_add_job
- @job_name = N'PerformanceMonitor - Hung Job Monitor',
- @enabled = 1,
- @description = N'Monitors collection job for hung state and stops it if needed',
- @category_name = N'Data Collector';
-
-EXECUTE msdb.dbo.sp_add_jobstep
- @job_name = N'PerformanceMonitor - Hung Job Monitor',
- @step_name = N'Check for Hung Collection Job',
- @subsystem = N'TSQL',
- @database_name = N'PerformanceMonitor',
- @command = N'EXECUTE config.check_hung_collector_job
+IF NOT EXISTS
+(
+ SELECT
+ 1/0
+ FROM msdb.dbo.sysjobs AS j
+ WHERE j.name = N'PerformanceMonitor - Hung Job Monitor'
+)
+BEGIN
+ EXECUTE msdb.dbo.sp_add_job
+ @job_name = N'PerformanceMonitor - Hung Job Monitor',
+ @enabled = 1,
+ @description = N'Monitors collection job for hung state and stops it if needed',
+ @category_name = N'Data Collector';
+
+ EXECUTE msdb.dbo.sp_add_jobstep
+ @job_name = N'PerformanceMonitor - Hung Job Monitor',
+ @step_name = N'Check for Hung Collection Job',
+ @subsystem = N'TSQL',
+ @database_name = N'PerformanceMonitor',
+ @command = N'EXECUTE config.check_hung_collector_job
@job_name = N''PerformanceMonitor - Collection'',
@normal_max_duration_minutes = 5,
@first_run_max_duration_minutes = 30,
@stop_hung_job = 1,
@debug = 0;',
- @retry_attempts = 0,
- @on_success_action = 1; /*Quit with success*/
-
-EXECUTE msdb.dbo.sp_add_jobschedule
- @job_name = N'PerformanceMonitor - Hung Job Monitor',
- @name = N'Every 5 Minutes',
- @freq_type = 4, /*Daily*/
- @freq_interval = 1,
- @freq_subday_type = 4, /*Minutes*/
- @freq_subday_interval = 5; /*Every 5 minutes*/
-
-EXECUTE msdb.dbo.sp_add_jobserver
- @job_name = N'PerformanceMonitor - Hung Job Monitor',
- @server_name = N'(local)';
-
-PRINT 'Created PerformanceMonitor - Hung Job Monitor job (runs every 5 minutes)';
+ @retry_attempts = 0,
+ @on_success_action = 1; /*Quit with success*/
+
+ EXECUTE msdb.dbo.sp_add_jobschedule
+ @job_name = N'PerformanceMonitor - Hung Job Monitor',
+ @name = N'Every 5 Minutes',
+ @freq_type = 4, /*Daily*/
+ @freq_interval = 1,
+ @freq_subday_type = 4, /*Minutes*/
+ @freq_subday_interval = 5; /*Every 5 minutes*/
+
+ EXECUTE msdb.dbo.sp_add_jobserver
+ @job_name = N'PerformanceMonitor - Hung Job Monitor',
+ @server_name = N'(local)';
+
+ PRINT 'Created PerformanceMonitor - Hung Job Monitor job (runs every 5 minutes)';
+END;
+ELSE IF @preserve_jobs = 1
+BEGIN
+ PRINT 'PerformanceMonitor - Hung Job Monitor job already exists - preserving current settings';
+END;
PRINT '';
/*