-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy path.env.example
More file actions
120 lines (86 loc) · 3.86 KB
/
.env.example
File metadata and controls
120 lines (86 loc) · 3.86 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
# VideoAnnotator Environment Configuration
# Copy this file to .env and customize for your environment
# Values can also be set as system environment variables
# ==============================================================================
# External Service Tokens
# ==============================================================================
# HuggingFace Authentication Token
# Get your token from: https://huggingface.co/settings/tokens
# You need to accept the terms for pyannote/speaker-diarization-3.1
HUGGINGFACE_TOKEN=your_huggingface_token_here
# Optional: OpenAI API Key for speech recognition
OPENAI_API_KEY=your_openai_api_key_here
# ==============================================================================
# Worker Configuration
# ==============================================================================
# Maximum number of jobs to process concurrently
# Lower values reduce GPU memory pressure but decrease throughput
# Recommendation: 1-2 for 6GB GPU, 2-4 for 8-12GB GPU, 4+ for 24GB+ GPU
MAX_CONCURRENT_JOBS=2
# Poll interval for checking new jobs (seconds)
WORKER_POLL_INTERVAL=5
# Maximum retry attempts for failed jobs
MAX_JOB_RETRIES=3
# Base delay for exponential backoff retry (seconds)
RETRY_DELAY_BASE=2.0
# ==============================================================================
# Storage Configuration
# ==============================================================================
# Base directory for job storage (results, logs, temp files)
STORAGE_BASE_DIR=./batch_results
# Days to retain completed job data (leave empty to never auto-delete)
# Only applies to terminal states (COMPLETED, FAILED, CANCELLED)
# STORAGE_RETENTION_DAYS=30
# ==============================================================================
# Security Configuration
# ==============================================================================
# Require API key authentication
AUTH_REQUIRED=true
# Path to token storage directory
TOKEN_DIR=./tokens
# Auto-generate an API key on first startup if none exists
AUTO_GENERATE_KEY=true
# ==============================================================================
# API Server Configuration
# ==============================================================================
# Host to bind to (use 0.0.0.0 for all interfaces, 127.0.0.1 for localhost only)
API_HOST=0.0.0.0
# Port to listen on
API_PORT=18011
# CORS allowed origins (comma-separated)
# Default: localhost only (secure-by-default)
CORS_ORIGINS=http://localhost,http://localhost:18011
# Enable CORS credentials support
CORS_ALLOW_CREDENTIALS=true
# ==============================================================================
# Database Configuration
# ==============================================================================
# Database URL (defaults to SQLite)
# Examples:
# SQLite: sqlite:///./videoannotator.db
# PostgreSQL: postgresql://user:password@localhost/dbname
# MySQL: mysql://user:password@localhost/dbname
DATABASE_URL=sqlite:///./videoannotator.db
# Enable database connection pool
DB_POOL_ENABLED=true
# Pool size for database connections
DB_POOL_SIZE=5
# ==============================================================================
# Logging Configuration
# ==============================================================================
# Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
LOG_LEVEL=INFO
# Log directory
LOG_DIR=./logs
# Enable structured JSON logging (useful for log aggregation)
LOG_JSON=false
# ==============================================================================
# Model Configuration
# ==============================================================================
# Cache directory for downloaded models
MODEL_CACHE_DIR=./models
# Device to use for inference (cpu, cuda, auto)
# auto = use CUDA if available, otherwise CPU
DEVICE=auto
# Use FP16 (half-precision) when available (reduces memory usage)
USE_FP16=true