-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy path.env.example
More file actions
126 lines (108 loc) · 4.52 KB
/
.env.example
File metadata and controls
126 lines (108 loc) · 4.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
# ===================================================================
# DATABASE CONFIGURATION
# ===================================================================
# Two deployment options:
#
# OPTION 1 - EXTERNAL DATABASE (use with docker-compose.yml):
# - Point to your existing PostgreSQL server
# - Run: docker compose up
DB_HOST=localhost
DB_PORT=5432
DB_NAME=codedox
DB_USER=postgres
DB_PASSWORD=your_password_here
# OPTION 2 - INTERNAL DATABASE (use with docker-compose.internal-db.yml):
# - Uses bundled PostgreSQL container
# - Run: docker compose -f docker-compose.internal-db.yml up
# - Uncomment these to use internal DB:
# DB_HOST=postgres
# DB_PORT=5432
# DB_NAME=codedox
# DB_USER=postgres
# DB_PASSWORD=postgres
# LOCAL DEVELOPMENT:
# DB_HOST=localhost
# Code Extraction LLM Configuration
# Enable/disable LLM extraction (when false, uses page title and context for basic extraction)
CODE_ENABLE_LLM_EXTRACTION=false
# Required only if LLM extraction is enabled: Set your LLM API key
CODE_LLM_API_KEY=your-api-key-here
# Model name - use the exact model name your LLM provider expects; this also works with a local model
# Examples: gpt-4o-mini, Qwen/Qwen3-Coder-30B-A3B-Instruct, Qwen/Qwen3-4B-Instruct-2507 (good open-source local model options)
CODE_LLM_EXTRACTION_MODEL=gpt-4o-mini
# Optional: Custom LLM endpoint (for local LLMs like LM STUDIO, Ollama, etc.)
# Leave commented to use OpenAI
# CODE_LLM_BASE_URL=http://localhost:8001/v1
# Optional: Custom parameters for LLM requests (JSON format)
# IMPORTANT: Use colons (:) not equals (=), and lowercase 'false' not 'False'
# Example for vLLM/SGLang to disable thinking mode:
# CODE_LLM_EXTRA_PARAMS={"extra_body": {"chat_template_kwargs": {"enable_thinking": false}}}
# Example with temperature:
# CODE_LLM_EXTRA_PARAMS={"temperature": 0.7, "extra_body": {"chat_template_kwargs": {"enable_thinking": false}}}
CODE_LLM_EXTRA_PARAMS={}
# Number of parallel LLM requests for code description
CODE_LLM_NUM_PARALLEL=5
# API Server Configuration
API_HOST=0.0.0.0
API_PORT=8000
# CORS origins as comma-separated values
API_CORS_ORIGINS=http://localhost:3000,http://localhost:5173,http://localhost:8000
# MCP Authentication Configuration
# Enable authentication for MCP endpoints (recommended for remote deployments)
MCP_AUTH_ENABLED=false
# Authentication token for MCP access - generate with: openssl rand -hex 32
MCP_AUTH_TOKEN=your-secure-token-here
# Optional: Multiple tokens for different clients (comma-separated)
# MCP_AUTH_TOKENS=token1,token2,token3
# Crawling Configuration
CRAWL_DEFAULT_MAX_DEPTH=2
CRAWL_MAX_PAGES_PER_JOB=500
CRAWL_RESPECT_ROBOTS_TXT=true
CRAWL_MAX_CONCURRENT_PAGES=3
CRAWL_CONTENT_SIZE_LIMIT=50000
# Custom user agent for HTTP requests (defaults to Chrome browser if not set)
CRAWL_USER_AGENT="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
CRAWL_MAX_CONCURRENT_SESSIONS=20
# Maximum concurrent crawls per job (default: 5)
CRAWL_MAX_CONCURRENT_CRAWLS=5
# Timeout in seconds when cancelling crawl tasks (default: 5.0)
CRAWL_TASK_CANCELLATION_TIMEOUT=5.0
# Seconds without heartbeat before considering job stalled (default: 60)
CRAWL_HEARTBEAT_STALL_THRESHOLD=60
# Code Extraction Configuration
CODE_MAX_CODE_BLOCK_SIZE=50000
CODE_PRESERVE_CONTEXT_CHARS=500
CODE_MIN_CODE_LINES=2
CODE_EXTRACT_FUNCTIONS=true
CODE_EXTRACT_IMPORTS=true
CODE_DETECT_FRAMEWORKS=true
CODE_ENABLE_CONTEXT_EXTRACTION=true
CODE_MAX_CONTEXT_LENGTH=1000
# Optional: set to false to disable auto code formatting
# Search Configuration
SEARCH_MAX_RESULTS=50
SEARCH_ENABLE_FUZZY_SEARCH=true
SEARCH_BOOST_RECENT_DAYS=7
SEARCH_SNIPPET_PREVIEW_LENGTH=200
SEARCH_DEFAULT_MAX_RESULTS=10
SEARCH_MIN_SCORE=0.1
# Upload Configuration
UPLOAD_MAX_FILE_SIZE=10485760 # 10MB per file
UPLOAD_MAX_TOTAL_SIZE=1073741824 # 1GB total for batch uploads (1024 * 1024 * 1024 bytes)
# Environment Configuration
ENVIRONMENT=development
DEBUG=false
OUTPUT_SEPARATOR=----------------------------------------
# Logging Configuration
LOG_LEVEL=INFO
LOG_FORMAT=%(asctime)s - %(name)s - %(levelname)s - %(message)s
LOG_FILE=logs/codedox.log
LOG_MAX_SIZE=10485760 # 10MB
LOG_BACKUP_COUNT=5
# Optional: Test Environment
# TESTING=true # Set to skip certain startup tasks during tests
# TEST_DB_HOST=localhost # Override DB_HOST for tests
# TEST_DB_PORT=5432 # Override DB_PORT for tests
# TEST_DB_NAME=codedox_test # Override DB_NAME for tests
# TEST_DB_USER=postgres # Override DB_USER for tests
# TEST_DB_PASSWORD=postgres # Override DB_PASSWORD for tests