Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
c625ac9
add log_pipelines table
Polliog Mar 20, 2026
56a8ab9
add pipeline types
Polliog Mar 20, 2026
ebc9f55
add built-in log parsers
Polliog Mar 20, 2026
0b3ff65
add grok engine
Polliog Mar 20, 2026
74e7cbd
add pipeline executor and geoip step
Polliog Mar 20, 2026
421774d
add pipeline service with CRUD and yaml import
Polliog Mar 20, 2026
f8eec43
add pipeline API routes
Polliog Mar 20, 2026
995e872
add pipeline BullMQ job and worker
Polliog Mar 20, 2026
8c54992
add pipeline frontend API client and store
Polliog Mar 20, 2026
41c62d6
add pipeline settings pages and components
Polliog Mar 20, 2026
4237ccb
fix TS errors in geoip step and pipeline job
Polliog Mar 20, 2026
fd7f402
fix route prefix to log-pipelines and import-yaml path
Polliog Mar 20, 2026
21b3ddb
fix jsonb serialization for steps column
Polliog Mar 20, 2026
b80fa1d
add project selector to new pipeline form
Polliog Mar 20, 2026
b650176
fix org switch not reloading pipeline pages
Polliog Mar 20, 2026
e53c62c
redirect to list on org switch from pipeline edit page
Polliog Mar 20, 2026
81192bb
add 0.9.0 changelog for pipeline feature
Polliog Mar 21, 2026
0a3224d
add docker compose commands for local infrastructure management
Polliog Mar 21, 2026
c56c603
fix delete test: remove content-type from bodyless requests
Polliog Mar 21, 2026
4af9ec5
refactor session creation: remove unused sessionId and improve orderB…
Polliog Mar 21, 2026
3eb92be
Merge pull request #177 from logtide-dev/feature/153-feature-log-pars…
Polliog Mar 21, 2026
c316152
add service health monitoring and status pages
Polliog Mar 22, 2026
38c68a6
fix monitoring: slug collision, transaction, httpconfig, types, ux
Polliog Mar 23, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,22 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).


## [0.9.0] - unreleased

### Added
- **Log parsing and enrichment pipelines**: define multi-step processing rules that automatically parse and enrich incoming log messages before they are stored
- **5 built-in parsers**: nginx (combined log format), apache (combined log format, same parser as nginx), syslog (RFC 3164 and RFC 5424), logfmt, and JSON message body
- **Custom grok patterns**: `%{PATTERN:field}` and `%{PATTERN:field:type}` syntax with 22 built-in patterns (IPV4, WORD, NOTSPACE, NUMBER, POSINT, DATA, GREEDYDATA, QUOTEDSTRING, METHOD, URIPATH, HTTPDATE, etc.) and optional type coercion (`:int`, `:float`)
- **GeoIP enrichment**: extract country, city, coordinates, timezone, and ISP data from any IP field using the embedded MaxMind GeoLite2 database
- **Async processing via BullMQ**: pipelines run as background jobs after ingestion — zero impact on ingestion latency
- **Project-scoped vs org-wide**: pipelines can target a specific project or apply to all projects in the organization; project-specific pipelines take priority over org-wide ones
- **Pipeline preview**: test any combination of steps against a sample log message and inspect per-step extracted fields and the final merged result before saving
- **YAML import/export**: import pipeline definitions from YAML with `name`, `description`, `enabled`, and `steps` fields; upserts (replace existing pipeline for the same scope)
- **In-memory cache**: `getForProject` caches the resolved pipeline per project for 5 minutes, automatically invalidated on create/update/delete
- **Settings UI** (`/dashboard/settings/pipelines`): list, enable/disable toggle, create, edit, and delete pipelines with live org-switch reactivity (`$effect` instead of `onMount`)
- **Step builder**: interactive UI for adding, reordering, and configuring parser, grok, and geoip steps with per-type configuration forms
- **Pipeline edit page** redirects to the list when the active organization is switched, preventing stale-ID errors

## [0.8.4] - 2026-03-19

### Added
Expand Down
2 changes: 2 additions & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@
"dev:frontend": "pnpm --filter \"@logtide/frontend\" dev",
"dev:worker": "pnpm --filter \"@logtide/backend\" dev:worker",
"dev:all": "concurrently -n fe,be,wk -c blue,green,yellow \"pnpm dev:frontend\" \"pnpm dev:backend\" \"pnpm dev:worker\"",
"infra:up": "docker compose -f docker/docker-compose.dev.yml up -d",
"infra:down": "docker compose -f docker/docker-compose.dev.yml down",
"build": "pnpm --recursive --filter \"./packages/**\" build",
"build:shared": "pnpm --filter \"@logtide/shared\" build",
"test": "pnpm --recursive --filter \"./packages/**\" test",
Expand Down
28 changes: 28 additions & 0 deletions packages/backend/migrations/033_log_pipelines.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
-- ============================================================================
-- Migration 033: Log pipelines
-- One pipeline per (organization, project), plus at most one org-wide default
-- per organization (project_id IS NULL). Steps are stored as an ordered JSONB
-- array of step definitions.
-- ============================================================================
CREATE TABLE IF NOT EXISTS log_pipelines (
  id UUID NOT NULL DEFAULT gen_random_uuid(),
  -- Owning organization; pipelines are removed together with the org.
  organization_id UUID NOT NULL REFERENCES organizations(id) ON DELETE CASCADE,
  -- NULL means the pipeline applies org-wide (all projects in the org).
  project_id UUID REFERENCES projects(id) ON DELETE CASCADE,
  name VARCHAR(200) NOT NULL,
  description TEXT,
  enabled BOOLEAN NOT NULL DEFAULT TRUE,
  -- Ordered array of step objects; defaults to an empty pipeline.
  steps JSONB NOT NULL DEFAULT '[]'::jsonb,
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  PRIMARY KEY (id)
);

-- Lookup of all pipelines belonging to an organization.
CREATE INDEX IF NOT EXISTS idx_log_pipelines_org
ON log_pipelines(organization_id);

-- Lookup of project-scoped pipelines only (partial index skips org-wide rows).
CREATE INDEX IF NOT EXISTS idx_log_pipelines_project
ON log_pipelines(project_id)
WHERE project_id IS NOT NULL;

-- Only one pipeline per project (or one org-wide default when project_id IS NULL)
CREATE UNIQUE INDEX IF NOT EXISTS idx_log_pipelines_org_null_project
ON log_pipelines(organization_id)
WHERE project_id IS NULL;

CREATE UNIQUE INDEX IF NOT EXISTS idx_log_pipelines_org_project
ON log_pipelines(organization_id, project_id)
WHERE project_id IS NOT NULL;
158 changes: 158 additions & 0 deletions packages/backend/migrations/034_service_health_monitoring.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
-- ============================================================================
-- Migration 034: Service Health Monitoring
-- Adds project slugs, monitor definitions, per-monitor current status, a
-- TimescaleDB hypertable of check results (compressed, 30-day retention), a
-- daily uptime continuous aggregate, and incident source tracking.
-- ============================================================================

-- 1. Add slug to projects
ALTER TABLE projects ADD COLUMN IF NOT EXISTS slug VARCHAR(255);

-- Generate slugs for existing projects (handles duplicate base slugs per org)
-- base_slug: lowercase the name, collapse runs of non-alphanumerics into '-',
-- trim leading/trailing '-'. rn numbers duplicates within (org, base_slug),
-- oldest project first, so the oldest keeps the clean slug.
WITH ranked AS (
  SELECT
    id,
    organization_id,
    BTRIM(LOWER(REGEXP_REPLACE(TRIM(name), '[^a-zA-Z0-9]+', '-', 'g')), '-') AS base_slug,
    ROW_NUMBER() OVER (
      PARTITION BY
        organization_id,
        BTRIM(LOWER(REGEXP_REPLACE(TRIM(name), '[^a-zA-Z0-9]+', '-', 'g')), '-')
      ORDER BY created_at
    ) AS rn
  FROM projects
)
UPDATE projects p
SET slug = CASE
  WHEN r.rn = 1 THEN r.base_slug
  ELSE r.base_slug || '-' || r.rn::text
END
FROM ranked r
WHERE p.id = r.id;
-- NOTE(review): a duplicate of "foo" becomes "foo-2", which can still collide
-- with a sibling project literally named "foo 2" (base_slug "foo-2", rn = 1).
-- The unique index below would then abort the migration — confirm this cannot
-- occur in existing data, or loop until no collisions remain.

-- Fallback for names that produce empty slugs (all special chars)
UPDATE projects
SET slug = 'project-' || SUBSTRING(id::text, 1, 8)
WHERE slug IS NULL OR slug = '' OR slug = '-';

ALTER TABLE projects ALTER COLUMN slug SET NOT NULL;

-- Slugs are unique per organization.
CREATE UNIQUE INDEX IF NOT EXISTS idx_projects_org_slug ON projects (organization_id, slug);

-- ============================================================================
-- 2. Monitors table
-- ============================================================================
-- One row per configured health check.

CREATE TABLE IF NOT EXISTS monitors (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  organization_id UUID NOT NULL REFERENCES organizations(id) ON DELETE CASCADE,
  project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
  name VARCHAR(255) NOT NULL,
  type VARCHAR(20) NOT NULL CHECK (type IN ('http', 'tcp', 'heartbeat')),
  -- target: URL for HTTP, host:port for TCP, null for heartbeat
  target TEXT,
  -- How often the check runs; floor of 30s guards against hammering targets.
  interval_seconds INTEGER NOT NULL DEFAULT 60 CHECK (interval_seconds >= 30),
  timeout_seconds INTEGER NOT NULL DEFAULT 10 CHECK (timeout_seconds >= 1 AND timeout_seconds <= 60),
  -- Consecutive failures required before the monitor is considered down.
  failure_threshold INTEGER NOT NULL DEFAULT 2 CHECK (failure_threshold >= 1),
  auto_resolve BOOLEAN NOT NULL DEFAULT true,
  enabled BOOLEAN NOT NULL DEFAULT true,
  created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_monitors_project ON monitors (project_id);
CREATE INDEX IF NOT EXISTS idx_monitors_org ON monitors (organization_id);
-- Partial index: the scheduler only ever scans enabled monitors.
CREATE INDEX IF NOT EXISTS idx_monitors_enabled ON monitors (enabled) WHERE enabled = true;

-- ============================================================================
-- 3. Monitor status table (current state, one row per monitor)
-- ============================================================================

CREATE TABLE IF NOT EXISTS monitor_status (
  monitor_id UUID PRIMARY KEY REFERENCES monitors(id) ON DELETE CASCADE,
  status VARCHAR(20) NOT NULL DEFAULT 'unknown' CHECK (status IN ('up', 'down', 'unknown')),
  -- Streak counters drive the failure_threshold / auto_resolve transitions.
  consecutive_failures INTEGER NOT NULL DEFAULT 0,
  consecutive_successes INTEGER NOT NULL DEFAULT 0,
  last_checked_at TIMESTAMPTZ,
  last_status_change_at TIMESTAMPTZ,
  response_time_ms INTEGER,
  last_error_code VARCHAR(50),
  -- Open incident created for the current outage, if any.
  incident_id UUID REFERENCES incidents(id) ON DELETE SET NULL,
  updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- ============================================================================
-- 4. Monitor results hypertable (time-series of all check results)
-- ============================================================================
-- Note: No FK on monitor_id for hypertable performance

CREATE TABLE IF NOT EXISTS monitor_results (
  time TIMESTAMPTZ NOT NULL,
  id UUID NOT NULL DEFAULT gen_random_uuid(),
  monitor_id UUID NOT NULL,
  organization_id UUID NOT NULL,
  project_id UUID NOT NULL,
  status VARCHAR(20) NOT NULL CHECK (status IN ('up', 'down')),
  response_time_ms INTEGER,
  status_code INTEGER,
  -- sanitized error code (never raw OS/network error messages)
  error_code VARCHAR(50),
  -- true when written by POST /monitors/:id/heartbeat, false for worker-initiated checks
  is_heartbeat BOOLEAN NOT NULL DEFAULT false,
  -- Hypertables require the partitioning column in the primary key.
  PRIMARY KEY (time, id)
);

SELECT create_hypertable('monitor_results', 'time', if_not_exists => TRUE);

CREATE INDEX IF NOT EXISTS idx_monitor_results_monitor ON monitor_results (monitor_id, time DESC);
CREATE INDEX IF NOT EXISTS idx_monitor_results_org ON monitor_results (organization_id, time DESC);

-- Compress per-monitor segments after 7 days; drop raw data after 30 days.
ALTER TABLE monitor_results SET (
  timescaledb.compress,
  timescaledb.compress_segmentby = 'monitor_id',
  timescaledb.compress_orderby = 'time DESC'
);

SELECT add_compression_policy('monitor_results', INTERVAL '7 days', if_not_exists => TRUE);
SELECT add_retention_policy('monitor_results', INTERVAL '30 days', if_not_exists => TRUE);

-- ============================================================================
-- 5. Continuous aggregate: daily uptime percentage per monitor
-- ============================================================================
-- materialized_only = false: queries merge materialized buckets with recent
-- raw rows, so "today" is included even before the hourly refresh runs.

CREATE MATERIALIZED VIEW IF NOT EXISTS monitor_uptime_daily
WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
SELECT
  time_bucket('1 day', time) AS bucket,
  monitor_id,
  organization_id,
  project_id,
  COUNT(*) AS total_checks,
  COUNT(*) FILTER (WHERE status = 'up') AS successful_checks,
  -- NULLIF guards the (empty bucket) division by zero.
  ROUND(
    100.0 * COUNT(*) FILTER (WHERE status = 'up') / NULLIF(COUNT(*), 0),
    2
  ) AS uptime_pct
FROM monitor_results
GROUP BY bucket, monitor_id, organization_id, project_id
WITH NO DATA;

SELECT add_continuous_aggregate_policy('monitor_uptime_daily',
  start_offset => INTERVAL '3 days',
  end_offset => INTERVAL '1 minute',
  schedule_interval => INTERVAL '1 hour',
  if_not_exists => TRUE
);

-- ============================================================================
-- 6. Extend incidents table with source tracking
-- ============================================================================

-- Existing rows were all created by the sigma detection engine.
ALTER TABLE incidents
ADD COLUMN IF NOT EXISTS source VARCHAR(50) NOT NULL DEFAULT 'sigma',
ADD COLUMN IF NOT EXISTS monitor_id UUID REFERENCES monitors(id) ON DELETE SET NULL;

-- NOT VALID: only new/updated rows are checked, avoiding a full-table scan on
-- large incident tables during the migration.
ALTER TABLE incidents
ADD CONSTRAINT incidents_source_check
CHECK (source IN ('sigma', 'monitor', 'manual'))
NOT VALID;

CREATE INDEX IF NOT EXISTS idx_incidents_source ON incidents (source);
CREATE INDEX IF NOT EXISTS idx_incidents_monitor ON incidents (monitor_id) WHERE monitor_id IS NOT NULL;
15 changes: 15 additions & 0 deletions packages/backend/migrations/035_monitoring_enhancements.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
-- ============================================================================
-- Migration 035: Monitoring enhancements
-- - Add http_config JSONB and severity to monitors
-- - Add status_page_public to projects
-- ============================================================================

-- 1. Add HTTP config column to monitors (for method, expectedStatus, headers, bodyAssertion)
-- NULL means the worker uses its default HTTP behavior.
ALTER TABLE monitors ADD COLUMN IF NOT EXISTS http_config JSONB;

-- 2. Add per-monitor incident severity (default 'high' matches previous hardcoded behavior)
-- NOTE: if the column already exists, ADD COLUMN IF NOT EXISTS is a no-op and
-- the inline CHECK is NOT applied on re-run — acceptable for a one-shot migration.
ALTER TABLE monitors ADD COLUMN IF NOT EXISTS severity VARCHAR(20) NOT NULL DEFAULT 'high'
CHECK (severity IN ('critical', 'high', 'medium', 'low', 'informational'));

-- 3. Add status page visibility to projects (default false = private)
ALTER TABLE projects ADD COLUMN IF NOT EXISTS status_page_public BOOLEAN NOT NULL DEFAULT false;
2 changes: 1 addition & 1 deletion packages/backend/src/database/migrator.ts
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ export async function migrateToLatest() {
if (error) {
console.error('Migration failed');
console.error(error);
process.exit(1);
throw error;
}

console.log('All migrations completed');
Expand Down
99 changes: 99 additions & 0 deletions packages/backend/src/database/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,9 @@ export interface ProjectsTable {
organization_id: string;
user_id: string; // Keep for tracking who created the project
name: string;
slug: string;
description: string | null;
status_page_public: Generated<boolean>;
created_at: Generated<Timestamp>;
updated_at: Generated<Timestamp>;
}
Expand Down Expand Up @@ -420,11 +422,81 @@ export interface IncidentsTable {
mitre_techniques: string[] | null;
ip_reputation: ColumnType<Record<string, unknown> | null, Record<string, unknown> | null, Record<string, unknown> | null>;
geo_data: ColumnType<Record<string, unknown> | null, Record<string, unknown> | null, Record<string, unknown> | null>;
source: Generated<string>;
monitor_id: string | null;
created_at: Generated<Timestamp>;
updated_at: Generated<Timestamp>;
resolved_at: Timestamp | null;
}

// ============================================================================
// SERVICE HEALTH MONITORING TABLES
// ============================================================================

/** Kind of health check a monitor performs (mirrors the `monitors.type` CHECK). */
export type MonitorType = 'http' | 'tcp' | 'heartbeat';
/** Current monitor state (mirrors the `monitor_status.status` CHECK). */
export type MonitorStatusValue = 'up' | 'down' | 'unknown';

/**
 * Optional HTTP-specific check configuration stored in `monitors.http_config`
 * (JSONB, migration 035). All fields are optional; absent fields fall back to
 * the check worker's defaults — NOTE(review): confirm those defaults in the
 * worker, they are not visible here.
 */
export interface MonitorHttpConfig {
  /** HTTP request method for the check (e.g. 'GET'). */
  method?: string;
  /** Exact response status code expected for the check to count as 'up'. */
  expectedStatus?: number;
  /** Extra request headers to send with the check. */
  headers?: Record<string, string>;
  /** Assertion against the response body: substring match or regex pattern. */
  bodyAssertion?: { type: 'contains'; value: string } | { type: 'regex'; pattern: string };
}

/**
 * Kysely row type for `monitors` (migrations 034/035): one row per configured
 * health check. `Generated<>` marks columns with DB defaults that may be
 * omitted on insert.
 */
export interface MonitorsTable {
  id: Generated<string>;
  organization_id: string;
  project_id: string;
  name: string;
  type: MonitorType;
  /** URL for 'http', host:port for 'tcp', null for 'heartbeat'. */
  target: string | null;
  /** Check cadence in seconds (DB default 60, CHECK enforces >= 30). */
  interval_seconds: Generated<number>;
  /** Per-check timeout in seconds (DB default 10, CHECK enforces 1..60). */
  timeout_seconds: Generated<number>;
  /** Consecutive failures before the monitor is marked down (DB default 2). */
  failure_threshold: Generated<number>;
  auto_resolve: Generated<boolean>;
  enabled: Generated<boolean>;
  /** HTTP-only settings; null for tcp/heartbeat or when using defaults. */
  http_config: MonitorHttpConfig | null;
  /** Incident severity ('critical'..'informational'); DB default 'high'. */
  severity: Generated<string>;
  created_at: Generated<Timestamp>;
  updated_at: Generated<Timestamp>;
}

/**
 * Kysely row type for `monitor_status`: the current state of each monitor,
 * exactly one row per monitor (PK is `monitor_id`).
 */
export interface MonitorStatusTable {
  monitor_id: string;
  status: Generated<MonitorStatusValue>;
  /** Current unbroken failure streak; drives the failure_threshold transition. */
  consecutive_failures: Generated<number>;
  /** Current unbroken success streak; drives auto-resolve. */
  consecutive_successes: Generated<number>;
  last_checked_at: Timestamp | null;
  last_status_change_at: Timestamp | null;
  /** Latency of the most recent check, if it completed. */
  response_time_ms: number | null;
  /** Sanitized error code from the last failed check. */
  last_error_code: string | null;
  /** Open incident for the current outage; nulled if the incident is deleted. */
  incident_id: string | null;
  updated_at: Generated<Timestamp>;
}

/**
 * Kysely row type for the `monitor_results` TimescaleDB hypertable: one row
 * per individual check result. No FK on monitor_id (hypertable performance),
 * so rows may outlive a deleted monitor until retention drops them.
 */
export interface MonitorResultsTable {
  /** Check timestamp; hypertable partitioning column, part of the PK. */
  time: Timestamp;
  id: Generated<string>;
  monitor_id: string;
  organization_id: string;
  project_id: string;
  /** Result of this single check ('unknown' never appears here). */
  status: 'up' | 'down';
  response_time_ms: number | null;
  /** HTTP response status code; null for tcp/heartbeat checks. */
  status_code: number | null;
  /** Sanitized error code — never raw OS/network error messages. */
  error_code: string | null;
  /** True when written by POST /monitors/:id/heartbeat, false for worker checks. */
  is_heartbeat: Generated<boolean>;
}

/**
 * Kysely row type for the `monitor_uptime_daily` continuous aggregate
 * (read-only): per-monitor daily check counts and uptime percentage.
 */
export interface MonitorUptimeDailyTable {
  /** Start of the 1-day time bucket. */
  bucket: Timestamp;
  monitor_id: string;
  organization_id: string;
  project_id: string;
  total_checks: number;
  successful_checks: number;
  /**
   * Uptime % rounded to 2 decimals; null for empty buckets.
   * NOTE(review): ROUND() yields NUMERIC, which node-postgres returns as a
   * string by default — confirm a parser override exists or this type lies.
   */
  uptime_pct: number | null;
}

export interface IncidentAlertsTable {
id: Generated<string>;
incident_id: string;
Expand Down Expand Up @@ -834,6 +906,26 @@ export interface MetricExemplarsTable {
attributes: ColumnType<Record<string, unknown> | null, Record<string, unknown> | null, Record<string, unknown> | null>;
}

// ============================================================================
// LOG PIPELINES TABLE
// ============================================================================

/**
 * Kysely row type for `log_pipelines` (migration 033): ordered log
 * parsing/enrichment steps scoped to a project or org-wide.
 */
export interface LogPipelinesTable {
  id: Generated<string>;
  organization_id: string;
  /** Null means the pipeline applies org-wide (all projects in the org). */
  project_id: string | null;
  name: string;
  description: string | null;
  enabled: Generated<boolean>;
  /**
   * Ordered JSONB array of step definitions (select/insert/update all use the
   * same array-of-objects shape).
   */
  steps: ColumnType<
    Record<string, unknown>[],
    Record<string, unknown>[],
    Record<string, unknown>[]
  >;
  created_at: Generated<Timestamp>;
  updated_at: Generated<Timestamp>;
}

export interface Database {
logs: LogsTable;
users: UsersTable;
Expand Down Expand Up @@ -898,4 +990,11 @@ export interface Database {
// Metrics (OTLP)
metrics: MetricsTable;
metric_exemplars: MetricExemplarsTable;
// Log pipelines
log_pipelines: LogPipelinesTable;
// Service health monitoring
monitors: MonitorsTable;
monitor_status: MonitorStatusTable;
monitor_results: MonitorResultsTable;
monitor_uptime_daily: MonitorUptimeDailyTable;
}
3 changes: 2 additions & 1 deletion packages/backend/src/modules/auth/plugin.ts
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,8 @@ const authPlugin: FastifyPluginAsync = async (fastify) => {
request.url.startsWith('/api/v1/projects') ||
request.url.startsWith('/api/v1/alerts') ||
request.url.startsWith('/api/v1/notifications') ||
request.url.startsWith('/api/v1/invitations')
request.url.startsWith('/api/v1/invitations') ||
request.url.startsWith('/api/v1/status')
) {
return;
}
Expand Down
Loading
Loading