diff --git a/CHANGELOG.md b/CHANGELOG.md
index d458a67..d178342 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,12 +2,70 @@
All notable changes to this project will be documented in this file.
+## [2.4.0] - 2026-02-22
+
+### Added
+- **Go Embed Version**: VERSION file embedded at compile time via `go:embed`
+- **Domain Validation**: `-d` flag now validates domain format before processing
+- **Backward-Compatible JSON**: `found_websites` + `founded_websites` dual output
+- **Config File Support**: `-config config.yaml` flag for YAML configuration
+- **Resume Support**: `-resume cache.json` flag to resume interrupted scans
+- **Output Directory**: `-output-dir ./exports` flag for export file location
+
+### Fixed
+- **Race Condition**: `PrintResult` protected with `sync.Once` (prevents double-call)
+- **DNS Key Mismatch**: Protocol prefix stripped before DNS result lookup
+- **CLI Flag Override**: `flag.Visit` ensures only explicitly set flags override config
+- **Windows Path Separator**: `filepath.Join` replaces hardcoded `/`
+- **Flaky DNS Test**: `minExpected` set to 0 for network-dependent test
+- **Chrome 131 Comment**: Updated stale comment to Chrome 135
+
+### Improved
+- **Cache Performance**: `IsScanned()` O(n) → O(1) with persistent `map[string]struct{}`
+- **JSON Field Name**: `founded_websites` → `found_websites` (grammar fix)
+
+### Removed
+- Unused `GetTransport()` method (conflicting transport settings)
+- Unused `AddSmartJitter()`, `SaveConfigFile()`, `ParseDNSServers()` functions
+- Unused `BatchReverseDNSWithResults()` function and `DNSResult` struct
+- Unused `PoolMetrics` struct and related atomic operations
+
+---
+
+## [2.3.0] - 2025-12-23
+
+### Added
+- **Batch DNS Lookup**: Parallel DNS resolution at end of scan (20 concurrent queries)
+- **Connection Pool Optimization**: Pre-warming, larger buffers, TLS session cache
+
+### Changed
+- **Chrome 135 User-Agent**: Updated from Chrome 131 to Chrome 135 for January 2026
+- **macOS 15 Sequoia**: Added new macOS version strings
+- **Windows 11 24H2**: Updated Windows version strings
+- Increased connection pool sizes: MaxIdle 200 → 1000, MaxPerHost 50 → 200
+- Extended idle timeout: 60s → 90s
+- Larger I/O buffers: 64KB read/write
+
+### Fixed
+- Batch DNS now correctly extracts IP from URL (removes https:// prefix)
+
+---
+
+## [2.2.2] - 2025-12-12
+
+### Fixed
+- **uTLS Transport Activated**: Chrome 131 TLS fingerprint now fully integrated into HTTP client
+- Fixed HTTP/2 compatibility issue by setting `ForceAttemptHTTP2: false`
+- uTLS transport now properly used for all HTTPS connections
+
+---
+
## [2.2.1] - 2025-12-11
### Added
- **Referer Header Rotation**: Random referer from Google, Bing, DuckDuckGo for more realistic requests
-- **Smart Jitter Function**: `config.AddSmartJitter()` with occasional long pauses (1-3s) for natural patterns
-- **uTLS Transport**: Chrome 131 TLS fingerprint support (ready for integration)
+- **Smart Jitter Function**: `config.AddSmartJitter()` *(removed in v2.4.0)*
+- **uTLS Transport**: Chrome 131 TLS fingerprint support
- New config constants: `DialTimeout`, `MaxJitterMs`
- Unified RNG functions in config: `GetRandomInt`, `GetRandomString`, `ShuffleStrings`
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9ca10f4..d3e1895 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -56,21 +56,28 @@ go vet ./...
```
ipmap/
-├── main.go # Entry point, CLI flags
+├── main.go # Entry point, CLI flags, go:embed VERSION
+├── VERSION # Embedded version string
├── config/
-│ └── config.go # Global config, RNG, jitter functions
+│ ├── config.go # Global config, RNG, jitter, logging
+│ └── loader.go # YAML config file loading
├── modules/
-│ ├── scanner.go # Chrome 131 headers, uTLS transport
-│ ├── request.go # HTTP client with retry
-│ ├── resolve_site.go # Worker pool, IP scanning
+│ ├── scanner.go # Chrome 135 headers, uTLS transport
+│ ├── request.go # HTTP client with retry, connection pool
+│ ├── resolve_site.go # Worker pool, IP scanning, batch DNS
│ ├── get_site.go # Site discovery per IP
+│ ├── get_domain_title.go # Domain title fetching
+│ ├── helpers.go # Shared utilities (ExtractTitle)
+│ ├── cache.go # Scan state persistence for resume
│ ├── validators.go # Input validation
│ ├── rate_limiter.go # Token bucket rate limiter
-│ └── ...
+│ ├── dns_resolver.go # Batch reverse DNS lookups
+│ ├── result_print.go # Result formatting and export
+│ ├── interrupt_handler.go # Ctrl+C handling
+│ └── calc_ip_address.go # CIDR to IP calculation
├── tools/
│ ├── find_asn.go # ASN scanning
│ └── find_ip.go # IP block scanning
-├── bin/ # Cross-platform builds
└── README.md
```
@@ -113,10 +120,10 @@ When modifying the scanner module:
1. **TLS Fingerprint**: Use `utls.HelloChrome_Auto` for latest Chrome fingerprint
2. **Header Order**: Maintain exact Chrome header order (not alphabetical)
-3. **Accept-Encoding**: Include `zstd` for Chrome 131+
-4. **Jitter**: Use `config.AddJitter()` (0-200ms) or `config.AddSmartJitter()` (with occasional long pauses)
-5. **User-Agent**: Use Chrome 130+ versions only
-6. **Referer**: Rotate between Google, Bing, DuckDuckGo URLs
+3. **Accept-Encoding**: Include `zstd` for Chrome 135+
+4. **Jitter**: Use `config.AddJitter()` (0-200ms random delay)
+5. **User-Agent**: Use Chrome 133+ versions only
+6. **Referer**: Rotate between Google, Bing, DuckDuckGo, Yahoo URLs
## License
diff --git a/README.md b/README.md
index 6ffb8fc..b44c98d 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ An open-source, cross-platform powerful network analysis tool for discovering we
- ASN scanning (Autonomous System Number) with IPv4/IPv6 support
- IP block scanning (CIDR format)
- HTTPS/HTTP automatic fallback
-- **Chrome 131 TLS Fingerprint** (JA3/JA4 spoofing via uTLS)
+- **Chrome 135 TLS Fingerprint** (JA3/JA4 spoofing via uTLS)
- **Real Chrome Header Order** (WAF bypass optimized)
- **Referer Header Rotation** (Google, Bing, DuckDuckGo)
- Firewall bypass techniques (IP shuffling, header randomization, smart jitter)
@@ -57,6 +57,11 @@ go build -o ipmap .
-proxy http://127.0.0.1:8080 # Proxy URL (HTTP/HTTPS/SOCKS5)
-rate 50 # Rate limit (requests/sec, 0 = unlimited)
-dns 8.8.8.8,1.1.1.1 # Custom DNS servers
+-ipv6 # Enable IPv6 scanning
+-config config.yaml # Load config from YAML file
+-resume cache.json # Resume interrupted scan from cache
+-output-dir ./exports # Directory for export files
+-insecure=false # Enable TLS certificate verification
```
### Examples
diff --git a/VERSION b/VERSION
index c043eea..197c4d5 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.2.1
+2.4.0
diff --git a/build.ps1 b/build.ps1
index 80f39fc..812b02f 100644
--- a/build.ps1
+++ b/build.ps1
@@ -1,7 +1,7 @@
# ipmap Multi-Platform Build Script
# Builds for macOS (ARM64 + AMD64) and Linux (AMD64)
-$VERSION = "2.2.1"
+$VERSION = (Get-Content VERSION -Raw).Trim()
$APP_NAME = "ipmap"
$BUILD_DIR = "bin"
@@ -52,6 +52,19 @@ if ($LASTEXITCODE -eq 0) {
}
Write-Host ""
+# Build for Windows AMD64
+Write-Host "Building for Windows AMD64..." -ForegroundColor Yellow
+$env:GOOS = "windows"
+$env:GOARCH = "amd64"
+go build -o "$BUILD_DIR/${APP_NAME}_windows_amd64.exe" .
+if ($LASTEXITCODE -eq 0) {
+ Write-Host "SUCCESS: Windows AMD64 build completed" -ForegroundColor Green
+} else {
+ Write-Host "ERROR: Windows AMD64 build failed" -ForegroundColor Red
+ exit 1
+}
+Write-Host ""
+
# Show file sizes
Write-Host "Build Summary:" -ForegroundColor Cyan
Write-Host "================================================" -ForegroundColor Gray
diff --git a/config/config.go b/config/config.go
index 0b2e481..35b1040 100644
--- a/config/config.go
+++ b/config/config.go
@@ -42,7 +42,7 @@ var (
Format string
// MaxRetries is the number of retry attempts for failed requests
- MaxRetries int = 2
+ MaxRetries int = 0
// Workers is the number of concurrent scanning goroutines
Workers int = 100
@@ -55,6 +55,15 @@ var (
// DNSServers is the list of custom DNS servers
DNSServers []string
+
+ // EnableIPv6 enables IPv6 address scanning (default: false)
+ EnableIPv6 bool = false
+
+ // OutputDir is the directory for export files (default: current directory)
+ OutputDir string = ""
+
+ // InsecureSkipVerify skips TLS certificate verification (default: true for backward compatibility)
+ InsecureSkipVerify bool = true
)
// ====================================================================
@@ -104,19 +113,6 @@ func AddJitter() {
}
}
-// AddSmartJitter adds intelligent jitter with occasional long pauses for more natural patterns
-// This helps bypass rate-based WAF detection
-func AddSmartJitter() {
- base := 50 + GetRandomInt(150) // 50-200ms base
-
- // 5% chance of a long pause (simulates user reading page)
- if GetRandomInt(100) < 5 {
- base += 1000 + GetRandomInt(2000) // +1-3 seconds
- }
-
- time.Sleep(time.Duration(base) * time.Millisecond)
-}
-
// ====================================================================
// LOGGING FUNCTIONS
// ====================================================================
diff --git a/config/config_test.go b/config/config_test.go
index e25c7e1..444983d 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -98,8 +98,8 @@ func TestInfoLog(t *testing.T) {
}
func TestConfigDefaults(t *testing.T) {
- if MaxRetries != 2 {
- t.Errorf("MaxRetries default should be 2, got %d", MaxRetries)
+ if MaxRetries != 0 {
+ t.Errorf("MaxRetries default should be 0 (retries disabled), got %d", MaxRetries)
}
if Workers != 100 {
t.Errorf("Workers default should be 100, got %d", Workers)
diff --git a/config/loader.go b/config/loader.go
new file mode 100644
index 0000000..dee400b
--- /dev/null
+++ b/config/loader.go
@@ -0,0 +1,91 @@
+// Package config provides global configuration for the ipmap scanner.
+// loader.go handles configuration file loading and parsing.
+package config
+
+import (
+ "os"
+
+ "gopkg.in/yaml.v3"
+)
+
+// FileConfig represents the structure of the config.yaml file
+type FileConfig struct {
+ Workers int `yaml:"workers"`
+ Timeout int `yaml:"timeout"`
+ RateLimit int `yaml:"rate_limit"`
+ Proxy string `yaml:"proxy"`
+ DNSServers []string `yaml:"dns_servers"`
+ IPv6 bool `yaml:"ipv6"`
+ Verbose bool `yaml:"verbose"`
+ Format string `yaml:"format"`
+}
+
+// LoadConfigFile loads configuration from a YAML file
+// Returns nil if file doesn't exist or is invalid
+func LoadConfigFile(path string) (*FileConfig, error) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var cfg FileConfig
+ if err := yaml.Unmarshal(data, &cfg); err != nil {
+ return nil, err
+ }
+
+ return &cfg, nil
+}
+
+// ApplyFileConfig applies file configuration to global config
+// Only applies non-zero/non-empty values (allows CLI to override)
+func ApplyFileConfig(cfg *FileConfig) {
+ if cfg == nil {
+ return
+ }
+
+ if cfg.Workers > 0 {
+ Workers = cfg.Workers
+ }
+ if cfg.RateLimit > 0 {
+ RateLimit = cfg.RateLimit
+ }
+ if cfg.Proxy != "" {
+ ProxyURL = cfg.Proxy
+ }
+ if len(cfg.DNSServers) > 0 {
+ DNSServers = cfg.DNSServers
+ }
+ if cfg.IPv6 {
+ EnableIPv6 = cfg.IPv6
+ }
+ if cfg.Verbose {
+ Verbose = cfg.Verbose
+ }
+ if cfg.Format != "" {
+ Format = cfg.Format
+ }
+}
+
+// FindConfigFile looks for config file in common locations
+func FindConfigFile() string {
+ // Check common locations in order
+ locations := []string{
+ "config.yaml",
+ "config.yml",
+ ".ipmap.yaml",
+ ".ipmap.yml",
+ }
+
+ // Also check in user home directory
+ if home, err := os.UserHomeDir(); err == nil {
+ locations = append(locations, home+string(os.PathSeparator)+".ipmap.yaml", home+string(os.PathSeparator)+".ipmap.yml")
+ }
+
+ for _, loc := range locations {
+ if _, err := os.Stat(loc); err == nil {
+ return loc
+ }
+ }
+
+ return ""
+}
diff --git a/go.mod b/go.mod
index 2096c27..b9e1cee 100644
--- a/go.mod
+++ b/go.mod
@@ -6,6 +6,7 @@ require (
github.com/refraction-networking/utls v1.8.1
github.com/schollz/progressbar/v3 v3.14.1
golang.org/x/net v0.48.0
+ gopkg.in/yaml.v3 v3.0.1
)
require (
@@ -17,5 +18,4 @@ require (
golang.org/x/crypto v0.46.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/term v0.38.0 // indirect
- golang.org/x/text v0.32.0 // indirect
)
diff --git a/go.sum b/go.sum
index 2101ba2..23cd0e1 100644
--- a/go.sum
+++ b/go.sum
@@ -32,8 +32,8 @@ golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
-golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
-golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/main.go b/main.go
index 9ab5fcf..da22e18 100644
--- a/main.go
+++ b/main.go
@@ -1,6 +1,7 @@
package main
import (
+ _ "embed"
"flag"
"fmt"
"ipmap/config"
@@ -14,6 +15,14 @@ import (
"time"
)
+//go:embed VERSION
+var versionFile string
+
+// getVersion returns the embedded version string
+func getVersion() string {
+ return strings.TrimSpace(versionFile)
+}
+
var (
domain = flag.String("d", "", "domain parameter")
asn = flag.String("asn", "", "asn parameter")
@@ -27,6 +36,11 @@ var (
proxy = flag.String("proxy", "", "proxy URL (http/https/socks5)")
rate = flag.Int("rate", 0, "requests per second (0 = unlimited)")
dns = flag.String("dns", "", "custom DNS servers (comma-separated)")
+ ipv6 = flag.Bool("ipv6", false, "enable IPv6 address scanning")
+ configFile = flag.String("config", "", "path to config file (YAML)")
+ resumeFile = flag.String("resume", "", "resume scan from cache file")
+ outputDir = flag.String("output-dir", "", "directory for export files")
+ insecure = flag.Bool("insecure", true, "skip TLS certificate verification")
DomainTitle string
// Global state for interrupt handling
@@ -36,16 +50,45 @@ var (
func main() {
flag.Parse()
- // Set global config
- config.Verbose = *verbose
- config.Format = *format
- config.Workers = modules.ValidateWorkerCount(*workers)
- config.ProxyURL = *proxy
- config.RateLimit = *rate
- if *dns != "" {
- config.DNSServers = strings.Split(*dns, ",")
+ // Load config file first (CLI flags override config file values)
+ if *configFile != "" {
+ if cfg, err := config.LoadConfigFile(*configFile); err != nil {
+ config.ErrorLog("Failed to load config file: %v", err)
+ } else {
+ config.ApplyFileConfig(cfg)
+ config.VerboseLog("Loaded config from: %s", *configFile)
+ }
+ } else if autoConfig := config.FindConfigFile(); autoConfig != "" {
+ if cfg, err := config.LoadConfigFile(autoConfig); err == nil {
+ config.ApplyFileConfig(cfg)
+ config.VerboseLog("Auto-loaded config from: %s", autoConfig)
+ }
}
+ // CLI flags override config file values (only if explicitly set)
+ flag.Visit(func(f *flag.Flag) {
+ switch f.Name {
+ case "v":
+ config.Verbose = *verbose
+ case "format":
+ config.Format = *format
+ case "workers":
+ config.Workers = modules.ValidateWorkerCount(*workers)
+ case "proxy":
+ config.ProxyURL = *proxy
+ case "rate":
+ config.RateLimit = *rate
+ case "ipv6":
+ config.EnableIPv6 = *ipv6
+ case "dns":
+ config.DNSServers = strings.Split(*dns, ",")
+ case "output-dir":
+ config.OutputDir = *outputDir
+ case "insecure":
+ config.InsecureSkipVerify = *insecure
+ }
+ })
+
// Setup interrupt handler
interruptData = modules.NewInterruptData()
setupInterruptHandler()
@@ -59,29 +102,68 @@ func main() {
}
}
+ // Handle resume from cache
+ if *resumeFile != "" {
+ cache, err := modules.LoadCache(*resumeFile)
+ if err != nil {
+ config.ErrorLog("Failed to load cache file: %v", err)
+ return
+ }
+
+ config.InfoLog("Resuming scan from cache: %s", *resumeFile)
+ scanned, _, results := cache.GetProgress()
+ config.InfoLog("Progress: %d IPs scanned, %d results found", scanned, results)
+
+ // Set metadata from cache
+ DomainTitle = cache.Data.DomainTitle
+ *timeout = cache.Data.Timeout
+
+ // Resume using cached IP blocks
+ if len(cache.Data.IPBlocks) > 0 {
+ interruptData.IPBlocks = cache.Data.IPBlocks
+ interruptData.Domain = cache.Data.Domain
+ interruptData.Timeout = cache.Data.Timeout
+
+ // Add previous results to interrupt data
+ for _, result := range cache.Data.Results {
+ interruptData.AddWebsite(result)
+ }
+
+ // Resume scan with remaining IPs
+ tools.FindIPWithCache(cache.Data.IPBlocks, cache.Data.Domain, cache.Data.DomainTitle,
+ *con, *export, cache.Data.Timeout, interruptData, cache)
+ }
+ return
+ }
+
if (*asn != "" && *ip != "") || (*asn == "" && *ip == "") {
- fmt.Println("======================================================\n" +
- " ipmap v2.0 (github.com/sercanarga/ipmap)\n" +
- "======================================================\n" +
- "PARAMETERS:\n" +
- "-asn AS13335\n" +
- "-ip 103.21.244.0/22,103.22.200.0/22\n" +
- "-d example.com\n" +
- "-t 200 (timeout default:auto)\n" +
- "-c (work until finish scanning)\n" +
- "--export (auto export results)\n" +
- "-v (verbose mode)\n" +
- "-format json (output format: text/json)\n" +
- "-workers 100 (concurrent workers, default: 100)\n" +
- "-proxy http://127.0.0.1:8080 (proxy URL)\n" +
- "-rate 50 (requests per second, 0 = unlimited)\n" +
- "-dns 8.8.8.8,1.1.1.1 (custom DNS servers)\n\n" +
- "USAGES:\n" +
- "Finding sites by scanning all the IP blocks\nipmap -ip 103.21.244.0/22,103.22.200.0/22\n\n" +
- "Finding real IP address of site by scanning given IP addresses\nipmap -ip 103.21.244.0/22,103.22.200.0/22 -d example.com\n\n" +
- "Finding sites by scanning all the IP blocks in the ASN\nipmap -asn AS13335\n\n" +
- "Finding real IP address of site by scanning all IP blocks in ASN\nipmap -asn AS13335 -d example.com\n\n" +
- "Using proxy and rate limiting\nipmap -asn AS13335 -proxy http://127.0.0.1:8080 -rate 50")
+ fmt.Printf("======================================================\n"+
+ " ipmap v%s (github.com/sercanarga/ipmap)\n"+
+ "======================================================\n"+
+ "PARAMETERS:\n"+
+ "-asn AS13335\n"+
+ "-ip 103.21.244.0/22,103.22.200.0/22\n"+
+ "-d example.com\n"+
+ "-t 200 (timeout default:auto)\n"+
+ "-c (work until finish scanning)\n"+
+ "--export (auto export results)\n"+
+ "-v (verbose mode)\n"+
+ "-format json (output format: text/json)\n"+
+ "-workers 100 (concurrent workers, default: 100)\n"+
+ "-proxy http://127.0.0.1:8080 (proxy URL)\n"+
+ "-rate 50 (requests per second, 0 = unlimited)\n"+
+ "-dns 8.8.8.8,1.1.1.1 (custom DNS servers)\n"+
+ "-ipv6 (enable IPv6 scanning)\n"+
+ "-config config.yaml (config file path)\n"+
+ "-resume cache.json (resume from cache)\n"+
+ "-output-dir ./exports (export directory)\n"+
+ "-insecure=false (enable TLS verification)\n\n"+
+ "USAGES:\n"+
+ "Finding sites by scanning all the IP blocks\nipmap -ip 103.21.244.0/22,103.22.200.0/22\n\n"+
+ "Finding real IP address of site by scanning given IP addresses\nipmap -ip 103.21.244.0/22,103.22.200.0/22 -d example.com\n\n"+
+ "Finding sites by scanning all the IP blocks in the ASN\nipmap -asn AS13335\n\n"+
+ "Finding real IP address of site by scanning all IP blocks in ASN\nipmap -asn AS13335 -d example.com\n\n"+
+ "Using proxy and rate limiting\nipmap -asn AS13335 -proxy http://127.0.0.1:8080 -rate 50\n", getVersion())
return
}
@@ -101,6 +183,11 @@ func main() {
}
if *domain != "" {
+ // Validate domain format
+ if !modules.ValidateDomain(*domain) {
+ fmt.Println("[ERROR] Invalid domain format: " + *domain)
+ return
+ }
getDomain := modules.GetDomainTitle(*domain)
if len(getDomain) == 0 {
fmt.Println("Domain not resolved. Please check:")
@@ -173,9 +260,9 @@ func setupInterruptHandler() {
if response == "y" || response == "Y" || response == "" {
modules.ExportInterruptedResults(interruptData.Websites, interruptData.Domain,
interruptData.Timeout, interruptData.IPBlocks)
- fmt.Println("\n[✓] Results exported successfully")
+ fmt.Println("\n[+] Results exported successfully")
} else {
- fmt.Println("\n[✗] Export canceled")
+ fmt.Println("\n[-] Export canceled")
}
} else {
fmt.Println("\n[!] No results to export")
diff --git a/modules/cache.go b/modules/cache.go
new file mode 100644
index 0000000..59a3898
--- /dev/null
+++ b/modules/cache.go
@@ -0,0 +1,168 @@
+// cache.go provides result caching for interrupted scans
+// Allows resuming scans from where they left off
+package modules
+
+import (
+ "encoding/json"
+ "os"
+ "strconv"
+ "sync"
+ "time"
+)
+
+// CacheData represents the cached scan state
+type CacheData struct {
+ ASN string `json:"asn,omitempty"`
+ Domain string `json:"domain,omitempty"`
+ DomainTitle string `json:"domain_title,omitempty"`
+ Timeout int `json:"timeout"`
+ IPBlocks []string `json:"ip_blocks"`
+ ScannedIPs []string `json:"scanned_ips"`
+ Results [][]string `json:"results"`
+ LastUpdate string `json:"last_update"`
+ Completed bool `json:"completed"`
+}
+
+// Cache manages scan state persistence
+type Cache struct {
+ Data CacheData
+ FilePath string
+ scannedSet map[string]struct{} // O(1) lookup for scanned IPs
+ mu sync.RWMutex
+}
+
+// NewCache creates a new cache instance
+func NewCache(filePath string) *Cache {
+ return &Cache{
+ FilePath: filePath,
+ scannedSet: make(map[string]struct{}),
+ Data: CacheData{
+ ScannedIPs: make([]string, 0),
+ Results: make([][]string, 0),
+ },
+ }
+}
+
+// LoadCache loads cache from file
+func LoadCache(filePath string) (*Cache, error) {
+ data, err := os.ReadFile(filePath)
+ if err != nil {
+ return nil, err
+ }
+
+ var cacheData CacheData
+ if err := json.Unmarshal(data, &cacheData); err != nil {
+ return nil, err
+ }
+
+ // Build scanned set from loaded data for O(1) lookups
+ scannedSet := make(map[string]struct{}, len(cacheData.ScannedIPs))
+ for _, ip := range cacheData.ScannedIPs {
+ scannedSet[ip] = struct{}{}
+ }
+
+ return &Cache{
+ Data: cacheData,
+ FilePath: filePath,
+ scannedSet: scannedSet,
+ }, nil
+}
+
+// Save persists cache to file
+func (c *Cache) Save() error {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ c.Data.LastUpdate = time.Now().Format(time.RFC3339)
+
+ data, err := json.MarshalIndent(c.Data, "", " ")
+ if err != nil {
+ return err
+ }
+
+ return os.WriteFile(c.FilePath, data, 0644)
+}
+
+// AddScannedIP marks an IP as scanned
+func (c *Cache) AddScannedIP(ip string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.Data.ScannedIPs = append(c.Data.ScannedIPs, ip)
+ c.scannedSet[ip] = struct{}{}
+}
+
+// AddResult adds a scan result
+func (c *Cache) AddResult(result []string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.Data.Results = append(c.Data.Results, result)
+}
+
+// IsScanned checks if an IP was already scanned (O(1) via map lookup)
+func (c *Cache) IsScanned(ip string) bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ _, exists := c.scannedSet[ip]
+ return exists
+}
+
+// GetUnscannedIPs returns IPs that haven't been scanned yet (O(1) per IP via map)
+func (c *Cache) GetUnscannedIPs(allIPs []string) []string {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ // Use persistent scannedSet for O(1) lookup
+ unscanned := make([]string, 0, len(allIPs))
+ for _, ip := range allIPs {
+ if _, exists := c.scannedSet[ip]; !exists {
+ unscanned = append(unscanned, ip)
+ }
+ }
+
+ return unscanned
+}
+
+// SetMetadata sets scan metadata
+func (c *Cache) SetMetadata(asn, domain, domainTitle string, timeout int, ipBlocks []string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ c.Data.ASN = asn
+ c.Data.Domain = domain
+ c.Data.DomainTitle = domainTitle
+ c.Data.Timeout = timeout
+ c.Data.IPBlocks = ipBlocks
+}
+
+// MarkCompleted marks the scan as completed
+func (c *Cache) MarkCompleted() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.Data.Completed = true
+}
+
+// GetResults returns all cached results
+func (c *Cache) GetResults() [][]string {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.Data.Results
+}
+
+// GetProgress returns scan progress information
+func (c *Cache) GetProgress() (scanned int, total int, results int) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return len(c.Data.ScannedIPs), 0, len(c.Data.Results)
+}
+
+// GenerateCacheFileName generates a cache file name based on scan parameters
+func GenerateCacheFileName(asn, domain string) string {
+ timestamp := time.Now().Unix()
+ if asn != "" {
+ return "ipmap_" + SanitizeFilename(asn) + "_cache.json"
+ }
+ if domain != "" {
+ return "ipmap_" + SanitizeFilename(domain) + "_cache.json"
+ }
+ return "ipmap_" + strconv.FormatInt(timestamp, 10) + "_cache.json"
+}
diff --git a/modules/cache_test.go b/modules/cache_test.go
new file mode 100644
index 0000000..1ea4f52
--- /dev/null
+++ b/modules/cache_test.go
@@ -0,0 +1,190 @@
+package modules
+
+import (
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "ipmap/config"
+)
+
+// TestGenerateCacheFileName tests cache filename generation
+func TestGenerateCacheFileName(t *testing.T) {
+ tests := []struct {
+ name string
+ asn string
+ domain string
+ wantASN bool
+ wantDom bool
+ }{
+ {"With ASN", "AS13335", "", true, false},
+ {"With domain", "", "example.com", false, true},
+ {"With neither", "", "", false, false},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := GenerateCacheFileName(tt.asn, tt.domain)
+
+ // Should start with ipmap_
+ if !strings.HasPrefix(result, "ipmap_") {
+ t.Errorf("Filename should start with 'ipmap_', got: %s", result)
+ }
+
+ // Should end with _cache.json
+ if !strings.HasSuffix(result, "_cache.json") {
+ t.Errorf("Filename should end with '_cache.json', got: %s", result)
+ }
+
+ // Should contain ASN if provided
+ if tt.wantASN && !strings.Contains(result, tt.asn) {
+ t.Errorf("Filename should contain ASN '%s', got: %s", tt.asn, result)
+ }
+
+ // Should contain sanitized domain if provided
+ if tt.wantDom && !strings.Contains(result, "example_com") {
+ t.Errorf("Filename should contain sanitized domain, got: %s", result)
+ }
+
+ // When no ASN or domain, should have timestamp (numeric)
+ if !tt.wantASN && !tt.wantDom {
+ // Extract the part between ipmap_ and _cache.json
+ middle := strings.TrimPrefix(result, "ipmap_")
+ middle = strings.TrimSuffix(middle, "_cache.json")
+ _, err := strconv.ParseInt(middle, 10, 64)
+ if err != nil {
+ t.Errorf("Filename timestamp should be numeric, got: %s (error: %v)", middle, err)
+ }
+ }
+ })
+ }
+}
+
+// TestOutputDirConfig tests output directory configuration
+func TestOutputDirConfig(t *testing.T) {
+ // Save original
+ original := config.OutputDir
+ defer func() { config.OutputDir = original }()
+
+ // Test setting output dir
+ config.OutputDir = "/tmp/exports"
+ if config.OutputDir != "/tmp/exports" {
+ t.Errorf("OutputDir not set correctly, got: %s", config.OutputDir)
+ }
+
+ // Test empty (default)
+ config.OutputDir = ""
+ if config.OutputDir != "" {
+ t.Errorf("OutputDir should be empty, got: %s", config.OutputDir)
+ }
+}
+
+// TestInsecureSkipVerifyConfig tests TLS verification configuration
+func TestInsecureSkipVerifyConfig(t *testing.T) {
+ // Save original
+ original := config.InsecureSkipVerify
+ defer func() { config.InsecureSkipVerify = original }()
+
+ // Default should be true (backward compatibility)
+ if !config.InsecureSkipVerify {
+ t.Error("InsecureSkipVerify default should be true")
+ }
+
+ // Test setting to false
+ config.InsecureSkipVerify = false
+ if config.InsecureSkipVerify {
+ t.Error("InsecureSkipVerify should be false when set")
+ }
+}
+
+// TestCacheOperations tests cache CRUD operations
+func TestCacheOperations(t *testing.T) {
+ tmpFile := "test_cache_" + strconv.FormatInt(time.Now().UnixNano(), 10) + ".json"
+ defer os.Remove(tmpFile)
+
+ // Create new cache
+ cache := NewCache(tmpFile)
+ if cache == nil {
+ t.Fatal("NewCache returned nil")
+ }
+
+ // Set metadata
+ cache.SetMetadata("AS13335", "example.com", "Example Domain", 2000, []string{"1.0.0.0/24"})
+
+ // Add scanned IPs
+ cache.AddScannedIP("1.0.0.1")
+ cache.AddScannedIP("1.0.0.2")
+
+ // Add results
+ cache.AddResult([]string{"200", "https://1.0.0.1", "Example Site"})
+
+ // Check IsScanned
+ if !cache.IsScanned("1.0.0.1") {
+ t.Error("IP 1.0.0.1 should be marked as scanned")
+ }
+ if cache.IsScanned("1.0.0.99") {
+ t.Error("IP 1.0.0.99 should not be marked as scanned")
+ }
+
+ // Save cache
+ if err := cache.Save(); err != nil {
+ t.Fatalf("Failed to save cache: %v", err)
+ }
+
+ // Load cache
+ loaded, err := LoadCache(tmpFile)
+ if err != nil {
+ t.Fatalf("Failed to load cache: %v", err)
+ }
+
+ // Verify data
+ if loaded.Data.ASN != "AS13335" {
+ t.Errorf("ASN mismatch: got %s, want AS13335", loaded.Data.ASN)
+ }
+ if len(loaded.Data.ScannedIPs) != 2 {
+ t.Errorf("ScannedIPs count mismatch: got %d, want 2", len(loaded.Data.ScannedIPs))
+ }
+ if len(loaded.Data.Results) != 1 {
+ t.Errorf("Results count mismatch: got %d, want 1", len(loaded.Data.Results))
+ }
+
+ // Test GetUnscannedIPs
+ allIPs := []string{"1.0.0.1", "1.0.0.2", "1.0.0.3", "1.0.0.4"}
+ unscanned := loaded.GetUnscannedIPs(allIPs)
+ if len(unscanned) != 2 {
+ t.Errorf("Unscanned IPs count mismatch: got %d, want 2", len(unscanned))
+ }
+
+ // Test MarkCompleted
+ loaded.MarkCompleted()
+ if !loaded.Data.Completed {
+ t.Error("Cache should be marked as completed")
+ }
+}
+
+// TestExtractTitle tests HTML title extraction
+func TestExtractTitle(t *testing.T) {
+ tests := []struct {
+ name string
+ html string
+ expected string
+ }{
+ {"Simple title", "<title>Hello World</title>", "Hello World"},
+ {"Title with entities", "<title>Test &amp; Demo</title>", "Test & Demo"},
+ {"Title with whitespace", "<title>  Spaced Title  </title>", "Spaced Title"},
+ {"No title", "<html><body>No title here</body></html>", ""},
+ {"Empty title", "<title></title>", ""},
+ {"Title with newlines", "<title>Multi\nLine\nTitle</title>", "Multi Line Title"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := ExtractTitle(tt.html)
+ if result != tt.expected {
+ t.Errorf("ExtractTitle() = %q, want %q", result, tt.expected)
+ }
+ })
+ }
+}
diff --git a/modules/dns_resolver.go b/modules/dns_resolver.go
index e50fed5..ab97acf 100644
--- a/modules/dns_resolver.go
+++ b/modules/dns_resolver.go
@@ -5,9 +5,63 @@ import (
"ipmap/config"
"net"
"strings"
+ "sync"
"time"
)
+// DefaultDNSConcurrency is the default number of parallel DNS lookups
+const DefaultDNSConcurrency = 20
+
+// BatchReverseDNS performs parallel reverse DNS lookups for multiple IPs
+// Returns a map of IP -> hostname for successful lookups
+func BatchReverseDNS(ips []string, concurrency int) map[string]string {
+ if len(ips) == 0 {
+ return make(map[string]string)
+ }
+
+ if concurrency <= 0 {
+ concurrency = DefaultDNSConcurrency
+ }
+
+ // Cap concurrency to avoid overwhelming DNS servers
+ if concurrency > 50 {
+ concurrency = 50
+ }
+
+ config.VerboseLog("Starting batch DNS lookup for %d IPs with concurrency %d", len(ips), concurrency)
+
+ results := make(map[string]string)
+ var mu sync.Mutex
+ var wg sync.WaitGroup
+
+ // Semaphore for concurrency control
+ sem := make(chan struct{}, concurrency)
+
+ for _, ip := range ips {
+ wg.Add(1)
+ go func(ip string) {
+ defer wg.Done()
+
+ // Acquire semaphore slot
+ sem <- struct{}{}
+ defer func() { <-sem }()
+
+ hostname := ReverseDNS(ip)
+
+ if hostname != "" {
+ mu.Lock()
+ results[ip] = hostname
+ mu.Unlock()
+ }
+ }(ip)
+ }
+
+ wg.Wait()
+ config.VerboseLog("Batch DNS lookup completed: %d/%d hostnames resolved", len(results), len(ips))
+
+ return results
+}
+
// ReverseDNS performs reverse DNS lookup for an IP address
func ReverseDNS(ip string) string {
config.VerboseLog("Performing reverse DNS lookup for: %s", ip)
diff --git a/modules/dns_resolver_test.go b/modules/dns_resolver_test.go
index ec13118..e6c46f7 100644
--- a/modules/dns_resolver_test.go
+++ b/modules/dns_resolver_test.go
@@ -78,8 +78,86 @@ func TestReverseDNSTimeout(t *testing.T) {
}
}
+func TestBatchReverseDNS(t *testing.T) {
+ tests := []struct {
+ name string
+ ips []string
+ concurrency int
+ minExpected int // Minimum expected results (allows for network issues)
+ }{
+ {
+ name: "Multiple well-known IPs",
+ ips: []string{"8.8.8.8", "1.1.1.1", "8.8.4.4"},
+ concurrency: 10,
+ minExpected: 0, // PTR queries may fail depending on ISP/firewall
+ },
+ {
+ name: "Empty list",
+ ips: []string{},
+ concurrency: 10,
+ minExpected: 0,
+ },
+ {
+ name: "Single IP",
+ ips: []string{"8.8.8.8"},
+ concurrency: 5,
+ minExpected: 0, // Network might not allow
+ },
+ {
+ name: "Mixed valid and invalid",
+ ips: []string{"8.8.8.8", "999.999.999.999", "1.1.1.1"},
+ concurrency: 10,
+ minExpected: 0,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ start := time.Now()
+ results := BatchReverseDNS(tt.ips, tt.concurrency)
+ elapsed := time.Since(start)
+
+ t.Logf("BatchReverseDNS completed in %v, got %d results", elapsed, len(results))
+
+ if len(results) < tt.minExpected {
+ t.Errorf("Expected at least %d results, got %d", tt.minExpected, len(results))
+ }
+
+ // Log resolved hostnames
+ for ip, hostname := range results {
+ t.Logf(" %s -> %s", ip, hostname)
+ }
+ })
+ }
+}
+
+func TestBatchReverseDNSConcurrency(t *testing.T) {
+ // Test that batch is faster than sequential for multiple IPs
+ ips := []string{"8.8.8.8", "1.1.1.1", "8.8.4.4", "1.0.0.1"}
+
+ // Batch lookup
+ start := time.Now()
+ _ = BatchReverseDNS(ips, 10)
+ batchTime := time.Since(start)
+
+ t.Logf("Batch DNS for %d IPs took: %v", len(ips), batchTime)
+
+ // Should complete within reasonable time (not sequential)
+ // Sequential would be at least 4 * 2s = 8s for worst case timeout
+ if batchTime > 6*time.Second {
+ t.Errorf("Batch DNS took too long: %v (expected < 6s)", batchTime)
+ }
+}
+
func BenchmarkReverseDNS(b *testing.B) {
for i := 0; i < b.N; i++ {
_ = ReverseDNS("8.8.8.8")
}
}
+
+func BenchmarkBatchReverseDNS(b *testing.B) {
+ ips := []string{"8.8.8.8", "1.1.1.1", "8.8.4.4", "1.0.0.1"}
+ for i := 0; i < b.N; i++ {
+ _ = BatchReverseDNS(ips, 10)
+ }
+}
diff --git a/modules/find_ip_blocks.go b/modules/find_ip_blocks.go
index 7d3c0f7..266dd2e 100644
--- a/modules/find_ip_blocks.go
+++ b/modules/find_ip_blocks.go
@@ -1,14 +1,148 @@
package modules
-import "ipmap/config"
+import (
+ "compress/gzip"
+ "context"
+ "encoding/json"
+ "io"
+ "ipmap/config"
+ "net/http"
+ "strings"
+ "time"
+)
-// FindIPBlocks queries RADB to find all IP blocks for a given ASN.
-// Returns the raw HTML response containing route and route6 entries.
+// RIPEStatResponse represents the response from RIPE Stat API
+type RIPEStatResponse struct {
+ Status string `json:"status"`
+ StatusCode int `json:"status_code"`
+ Data struct {
+ Prefixes []struct {
+ Prefix string `json:"prefix"`
+ } `json:"prefixes"`
+ } `json:"data"`
+}
+
+// FindIPBlocks queries RIPE Stat API to find all IP blocks for a given ASN.
+// Returns formatted string containing route entries (CIDR notation).
+// Falls back to RADB if RIPE Stat fails.
func FindIPBlocks(asn string) string {
- output := RequestFunc("https://www.radb.net/query?advanced_query=1&keywords="+asn+"&-T+option=&ip_option=&-i=1&-i+option=origin", "www.radb.net", config.DefaultAPITimeout)
- if len(output) > 0 {
+ // Normalize ASN format - RIPE API expects just the number
+ asnNumber := strings.TrimPrefix(strings.ToUpper(asn), "AS")
+
+ // Try RIPE Stat API first (more reliable, no Cloudflare protection)
+ result := fetchRIPEPrefixes(asnNumber, asn)
+ if result != "" {
+ return result
+ }
+
+ config.VerboseLog("RIPE Stat API failed for ASN %s, trying RADB fallback", asn)
+
+ // Fallback to RADB (may not work due to Cloudflare)
+ radbURL := "https://www.radb.net/query?advanced_query=1&keywords=" + asn + "&-T+option=&ip_option=&-i=1&-i+option=origin"
+ output := RequestFunc(radbURL, "www.radb.net", config.DefaultAPITimeout)
+ if len(output) > 2 {
return output[2]
}
return ""
}
+
+// fetchRIPEPrefixes makes a direct request to RIPE Stat API with proper handling
+func fetchRIPEPrefixes(asnNumber, asn string) string {
+ ripeURL := "https://stat.ripe.net/data/announced-prefixes/data.json?resource=AS" + asnNumber
+
+ config.VerboseLog("Fetching IP prefixes for ASN %s from RIPE Stat API...", asn)
+
+ // Use longer timeout for RIPE API (20 seconds) to handle slow network connections
+ // This prevents freezing by ensuring the request eventually times out
+ ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+ defer cancel()
+
+ req, err := http.NewRequestWithContext(ctx, "GET", ripeURL, nil)
+ if err != nil {
+ config.VerboseLog("Failed to create RIPE request: %v", err)
+ return ""
+ }
+
+ // Set headers for RIPE API
+ req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36")
+ req.Header.Set("Accept", "application/json")
+ req.Header.Set("Accept-Encoding", "gzip, deflate")
+ req.Header.Set("Accept-Language", "en-US,en;q=0.9")
+
+ // Create a dedicated HTTP client for ASN queries to avoid interference with scan client
+ asnClient := &http.Client{
+ Timeout: 20 * time.Second,
+ Transport: &http.Transport{
+ TLSHandshakeTimeout: 10 * time.Second,
+ ResponseHeaderTimeout: 15 * time.Second,
+ IdleConnTimeout: 30 * time.Second,
+ DisableKeepAlives: true, // Don't reuse connections for one-off API calls
+ },
+ }
+
+ resp, err := asnClient.Do(req)
+ if err != nil {
+ config.VerboseLog("RIPE API request error: %v", err)
+ return ""
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ config.VerboseLog("RIPE API returned status %d", resp.StatusCode)
+ return ""
+ }
+
+ // Handle gzip encoding
+ var reader io.Reader = resp.Body
+ if resp.Header.Get("Content-Encoding") == "gzip" {
+ gzReader, err := gzip.NewReader(resp.Body)
+ if err != nil {
+ config.VerboseLog("Failed to create gzip reader: %v", err)
+ return ""
+ }
+ defer gzReader.Close()
+ reader = gzReader
+ }
+
+ // Read and parse JSON
+ bodyBytes, err := io.ReadAll(io.LimitReader(reader, 2*1024*1024)) // 2MB limit
+ if err != nil {
+ config.VerboseLog("Failed to read RIPE response: %v", err)
+ return ""
+ }
+
+ var response RIPEStatResponse
+ if err := json.Unmarshal(bodyBytes, &response); err != nil {
+ config.VerboseLog("Failed to parse RIPE JSON: %v", err)
+ return ""
+ }
+
+ if response.Status != "ok" || len(response.Data.Prefixes) == 0 {
+ config.VerboseLog("RIPE API returned no prefixes for ASN %s", asn)
+ return ""
+ }
+
+ // Build formatted output similar to RADB format (for compatibility with existing regex)
+ // Only include IPv4 prefixes - skip IPv6 (contains ":")
+ var result strings.Builder
+ ipv4Count := 0
+ for _, prefix := range response.Data.Prefixes {
+ // Skip IPv6 prefixes unless explicitly enabled
+ if strings.Contains(prefix.Prefix, ":") && !config.EnableIPv6 {
+ continue
+ }
+ result.WriteString("route: ")
+ result.WriteString(prefix.Prefix)
+ result.WriteString("\n")
+ ipv4Count++
+ }
+
+ if ipv4Count == 0 {
+ config.VerboseLog("RIPE API returned no IPv4 prefixes for ASN %s", asn)
+ return ""
+ }
+
+ config.VerboseLog("Found %d IPv4 prefixes for ASN %s via RIPE Stat API (skipped %d IPv6)", ipv4Count, asn, len(response.Data.Prefixes)-ipv4Count)
+ return result.String()
+}
diff --git a/modules/get_domain_title.go b/modules/get_domain_title.go
index 28b8f78..80a2688 100644
--- a/modules/get_domain_title.go
+++ b/modules/get_domain_title.go
@@ -1,9 +1,7 @@
package modules
import (
- "html"
"ipmap/config"
- "regexp"
)
// GetDomainTitle fetches and extracts the title from a domain's HTML page.
@@ -44,14 +42,10 @@ func GetDomainTitle(url string) []string {
config.VerboseLog("Response received: Status=%s, Time=%sms", getTitle[0], getTitle[3])
- re := regexp.MustCompile(`(?s).*?<title>(.*?)</title>.*`)
- match := re.FindStringSubmatch(getTitle[2])
-
- if len(match) > 1 {
- // Decode HTML entities (e.g., &amp; -> &, &lt; -> <)
- decodedTitle := html.UnescapeString(match[1])
- config.VerboseLog("Title found: %s", decodedTitle)
- return []string{decodedTitle, getTitle[3]}
+ title := ExtractTitle(getTitle[2])
+ if title != "" {
+ config.VerboseLog("Title found: %s", title)
+ return []string{title, getTitle[3]}
}
// If no title found but we got a response, use domain name as title
diff --git a/modules/get_site.go b/modules/get_site.go
index b6626ca..415abd4 100644
--- a/modules/get_site.go
+++ b/modules/get_site.go
@@ -1,15 +1,20 @@
package modules
import (
- "html"
"ipmap/config"
- "regexp"
"strings"
)
// GetSite scans an IP address for a website and extracts its title.
// Returns [status, ip, title] or [status, ip, title, hostname] if reverse DNS succeeds.
+// When skipDNS is true, DNS lookup is skipped (for batch DNS processing later).
func GetSite(ip string, domain string, timeout int) []string {
+ return GetSiteWithOptions(ip, domain, timeout, false)
+}
+
+// GetSiteWithOptions scans an IP address with configurable options.
+// skipDNS: if true, skips individual DNS lookup (use BatchReverseDNS later for better performance)
+func GetSiteWithOptions(ip string, domain string, timeout int, skipDNS bool) []string {
// Try HTTPS first (modern sites)
config.VerboseLog("Scanning IP: %s (HTTPS)", ip)
requestSite := RequestFunc("https://"+ip, domain, timeout)
@@ -21,27 +26,29 @@ func GetSite(ip string, domain string, timeout int) []string {
}
if len(requestSite) > 0 {
- re := regexp.MustCompile(`(?s).*?<title>(.*?)</title>.*`)
- title := re.FindStringSubmatch(requestSite[2])
- if len(title) > 0 {
+ title := ExtractTitle(requestSite[2])
+ if title != "" {
explodeHttpCode := strings.Split(requestSite[0], " ")
if len(explodeHttpCode) == 0 {
config.VerboseLog("Malformed HTTP status for %s", ip)
return []string{}
}
- // Decode HTML entities (e.g., &amp; -> &, &lt; -> <)
- decodedTitle := html.UnescapeString(title[1])
- config.VerboseLog("Site found on %s: %s (Status: %s)", ip, decodedTitle, explodeHttpCode[0])
+ config.VerboseLog("Site found on %s: %s (Status: %s)", ip, title, explodeHttpCode[0])
+
+ // Skip DNS lookup if requested (for batch processing)
+ if skipDNS {
+ return []string{explodeHttpCode[0], requestSite[1], title}
+ }
// Perform reverse DNS lookup
hostname := ReverseDNS(ip)
if hostname != "" {
// Return with hostname: [status, ip, title, hostname]
- return []string{explodeHttpCode[0], requestSite[1], decodedTitle, hostname}
+ return []string{explodeHttpCode[0], requestSite[1], title, hostname}
}
- return []string{explodeHttpCode[0], requestSite[1], decodedTitle}
+ return []string{explodeHttpCode[0], requestSite[1], title}
}
}
diff --git a/modules/helpers.go b/modules/helpers.go
new file mode 100644
index 0000000..c5a4418
--- /dev/null
+++ b/modules/helpers.go
@@ -0,0 +1,26 @@
+// helpers.go provides shared utility functions for the modules package
+package modules
+
+import (
+ "html"
+ "regexp"
+ "strings"
+)
+
+// Compiled regex for title extraction (performance optimization)
+var titleRegex = regexp.MustCompile(`(?is)<title[^>]*>(.*?)</title>`)
+
+// ExtractTitle extracts and decodes the title from HTML content
+// Returns the decoded title or empty string if not found
+func ExtractTitle(htmlContent string) string {
+ match := titleRegex.FindStringSubmatch(htmlContent)
+ if len(match) > 1 {
+ // Decode HTML entities and trim whitespace
+ title := html.UnescapeString(match[1])
+ title = strings.TrimSpace(title)
+ // Remove newlines and excessive whitespace
+ title = strings.Join(strings.Fields(title), " ")
+ return title
+ }
+ return ""
+}
diff --git a/modules/request.go b/modules/request.go
index 87ffc90..9e1dc81 100644
--- a/modules/request.go
+++ b/modules/request.go
@@ -14,8 +14,8 @@ import (
"time"
)
-// HTTP client with lazy initialization
var (
+ // HTTP client with lazy initialization
httpClient *http.Client
httpClientOnce sync.Once
lastProxyURL string
@@ -54,16 +54,58 @@ func GetHTTPClient() *http.Client {
httpClient = createHTTPClientWithConfig()
lastProxyURL = config.ProxyURL
lastDNSServers = strings.Join(config.DNSServers, ",")
+
+ // Pre-warm connection pool if proxy is configured
+ if config.ProxyURL != "" {
+ go preWarmConnectionPool(httpClient, config.ProxyURL)
+ }
})
return httpClient
}
-// createCustomDialer creates a dialer with optional custom DNS servers
+// preWarmConnectionPool establishes initial connections to the proxy
+// This reduces latency for the first requests
+func preWarmConnectionPool(client *http.Client, proxyURL string) {
+ if client == nil || proxyURL == "" {
+ return
+ }
+
+ config.VerboseLog("Pre-warming connection pool for proxy: %s", proxyURL)
+
+ // Parse proxy to get host
+ parsed, err := url.Parse(proxyURL)
+ if err != nil {
+ return
+ }
+
+ // Establish a few initial connections
+ for i := 0; i < 3; i++ {
+ go func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ // Simple HEAD request to establish connection
+ req, err := http.NewRequestWithContext(ctx, "HEAD", "https://"+parsed.Host, nil)
+ if err != nil {
+ return
+ }
+
+ resp, err := client.Do(req)
+ if err == nil && resp != nil {
+ resp.Body.Close()
+ }
+ }()
+ }
+}
+
+// createCustomDialer creates a dialer with optimal connection settings
func createCustomDialer() *net.Dialer {
dialer := &net.Dialer{
Timeout: time.Duration(config.DialTimeout) * time.Second,
KeepAlive: 30 * time.Second,
+ // DualStack enables both IPv4 and IPv6
+ DualStack: config.EnableIPv6,
}
return dialer
}
@@ -127,29 +169,102 @@ func createDialContext() func(ctx context.Context, network, addr string) (net.Co
}
func createHTTPClientWithConfig() *http.Client {
- // Use the standard fallback client for reliability
- // The uTLS transport has HTTP/2 compatibility issues that cause
- // "malformed HTTP response" errors when servers respond with HTTP/2
- // Chrome headers are still added in RequestFuncWithRetry for anti-detection
- return createFallbackHTTPClient()
+ // Try to create uTLS client for Chrome 135 TLS fingerprint
+ utlsTransport, err := NewUTLSTransport(config.ProxyURL, time.Duration(config.DialTimeout)*time.Second)
+ if err != nil {
+ config.VerboseLog("Failed to create uTLS transport: %v, using fallback", err)
+ return createFallbackHTTPClient()
+ }
+
+ // Calculate optimized connection pool size based on worker count
+ // More aggressive pooling for better performance
+ maxConns := config.Workers * 2
+ if maxConns < 200 {
+ maxConns = 200
+ }
+ if maxConns > 1000 {
+ maxConns = 1000
+ }
+
+ // Per-host connections (important for proxy mode)
+ maxConnsPerHost := config.Workers
+ if maxConnsPerHost < 50 {
+ maxConnsPerHost = 50
+ }
+ if maxConnsPerHost > 200 {
+ maxConnsPerHost = 200
+ }
+
+ // Create transport with uTLS dial function and optimized pooling
+ transport := &http.Transport{
+ DialTLSContext: utlsTransport.DialTLSContext,
+ DialContext: createDialContext(),
+ // Connection Pool Settings
+ MaxIdleConns: maxConns,
+ MaxIdleConnsPerHost: maxConnsPerHost,
+ MaxConnsPerHost: maxConnsPerHost * 2,
+ // Longer idle timeout for better reuse
+ IdleConnTimeout: 90 * time.Second,
+ // Timeouts
+ TLSHandshakeTimeout: 10 * time.Second,
+ ResponseHeaderTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ // HTTP/2 disabled for uTLS compatibility
+ ForceAttemptHTTP2: false,
+ // Keep connections alive for reuse
+ DisableKeepAlives: false,
+ // Enable compression for better performance
+ DisableCompression: false,
+ // Write buffer for better throughput
+ WriteBufferSize: 64 * 1024, // 64KB
+ ReadBufferSize: 64 * 1024, // 64KB
+ }
+
+ config.VerboseLog("Connection pool: MaxIdle=%d, MaxPerHost=%d, IdleTimeout=90s", maxConns, maxConnsPerHost)
+ config.VerboseLog("Using uTLS transport with Chrome 135 TLS fingerprint")
+
+ return &http.Client{
+ Transport: transport,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ if len(via) >= 10 {
+ return http.ErrUseLastResponse
+ }
+ // Preserve headers on redirect
+ for key, val := range via[0].Header {
+ if _, ok := req.Header[key]; !ok {
+ req.Header[key] = val
+ }
+ }
+ return nil
+ },
+ }
}
// createFallbackHTTPClient creates a standard HTTP client as fallback
func createFallbackHTTPClient() *http.Client {
- // Calculate connection pool size based on worker count
- maxConns := config.Workers
- if maxConns < 100 {
- maxConns = 100
+ // Calculate optimized connection pool size (same as main client)
+ maxConns := config.Workers * 2
+ if maxConns < 200 {
+ maxConns = 200
}
- maxConnsPerHost := maxConns / 10
- if maxConnsPerHost < 10 {
- maxConnsPerHost = 10
+ if maxConns > 1000 {
+ maxConns = 1000
+ }
+
+ maxConnsPerHost := config.Workers
+ if maxConnsPerHost < 50 {
+ maxConnsPerHost = 50
+ }
+ if maxConnsPerHost > 200 {
+ maxConnsPerHost = 200
}
transport := &http.Transport{
TLSClientConfig: &tls.Config{
- InsecureSkipVerify: true,
+ InsecureSkipVerify: config.InsecureSkipVerify,
MinVersion: tls.VersionTLS12,
+ // TLS Session Cache for connection reuse
+ ClientSessionCache: tls.NewLRUClientSessionCache(256),
CipherSuites: []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
@@ -159,16 +274,22 @@ func createFallbackHTTPClient() *http.Client {
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
},
},
- MaxIdleConns: maxConns,
- MaxIdleConnsPerHost: maxConnsPerHost,
- MaxConnsPerHost: maxConnsPerHost * 2,
- IdleConnTimeout: 60 * time.Second,
- TLSHandshakeTimeout: 5 * time.Second,
- ResponseHeaderTimeout: 5 * time.Second,
+ // Connection Pool Settings
+ MaxIdleConns: maxConns,
+ MaxIdleConnsPerHost: maxConnsPerHost,
+ MaxConnsPerHost: maxConnsPerHost * 2,
+ // Longer idle timeout for better reuse
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
DialContext: createDialContext(),
ForceAttemptHTTP2: true,
DisableKeepAlives: false,
+ DisableCompression: false,
+ // Buffer sizes for better throughput
+ WriteBufferSize: 64 * 1024,
+ ReadBufferSize: 64 * 1024,
}
// Configure proxy if specified
@@ -182,6 +303,8 @@ func createFallbackHTTPClient() *http.Client {
}
}
+ config.VerboseLog("Fallback client - Connection pool: MaxIdle=%d, MaxPerHost=%d", maxConns, maxConnsPerHost)
+
return &http.Client{
Transport: transport,
CheckRedirect: func(req *http.Request, via []*http.Request) error {
@@ -229,7 +352,7 @@ func RequestFuncWithRetry(ip string, url string, timeout int, maxRetries int) []
req.Host = url
}
- // Use Chrome 131 headers from scanner.go for better anti-detection
+ // Use Chrome 135 headers from scanner.go for better anti-detection
profile := NewRandomChromeProfile()
AddRealChromeHeaders(req, profile)
diff --git a/modules/resolve_site.go b/modules/resolve_site.go
index f40a157..f2bd818 100644
--- a/modules/resolve_site.go
+++ b/modules/resolve_site.go
@@ -3,6 +3,7 @@ package modules
import (
"fmt"
"ipmap/config"
+ "strings"
"sync"
"github.com/schollz/progressbar/v3"
@@ -54,8 +55,9 @@ func ResolveSite(IPAddress []string, Websites [][]string, DomainTitle string, IP
}),
)
- // Atomic flag for early exit
+ // Early exit flag and once guard for PrintResult
var stopped bool
+ var printOnce sync.Once
for _, ip := range shuffledIPs {
// Check if already stopped or cancelled via Ctrl+C
@@ -96,7 +98,8 @@ func ResolveSite(IPAddress []string, Websites [][]string, DomainTitle string, IP
// Add random jitter to avoid detection patterns
config.AddJitter()
- site := GetSite(ip, domain, timeout)
+ // Skip individual DNS lookup - will do batch DNS at the end
+ site := GetSiteWithOptions(ip, domain, timeout, true)
mu.Lock()
scannedCount++
@@ -119,7 +122,7 @@ func ResolveSite(IPAddress []string, Websites [][]string, DomainTitle string, IP
} else {
siteInfo = fmt.Sprintf("%v", site)
}
- fmt.Printf("\n ✓ Found: %s\n", siteInfo)
+ fmt.Printf("\n [+] Found: %s\n", siteInfo)
mu.Lock()
foundSites = append(foundSites, site)
@@ -135,8 +138,6 @@ func ResolveSite(IPAddress []string, Websites [][]string, DomainTitle string, IP
mu.Lock()
stopped = true
mu.Unlock()
- _ = bar.Finish()
- PrintResult("Search Domain by ASN", DomainTitle, timeout, IPBlocks, foundSites, export)
return
}
}
@@ -153,15 +154,294 @@ func ResolveSite(IPAddress []string, Websites [][]string, DomainTitle string, IP
wg.Wait()
_ = bar.Finish()
+ // Batch DNS lookup for all found sites (much faster than individual lookups)
+ if len(foundSites) > 0 && !stopped && (interruptData == nil || !interruptData.IsCancelled()) {
+ fmt.Printf("\n[*] Performing batch DNS lookup for %d found IPs...\n", len(foundSites))
+
+ // Collect IPs that need DNS lookup (extract IP from URL like https://1.2.3.4)
+ ipsToLookup := make([]string, 0, len(foundSites))
+ for _, site := range foundSites {
+ if len(site) >= 2 {
+ ip := site[1]
+ // Remove protocol prefix if present
+ ip = strings.TrimPrefix(ip, "https://")
+ ip = strings.TrimPrefix(ip, "http://")
+ ipsToLookup = append(ipsToLookup, ip)
+ }
+ }
+
+ // Perform batch DNS lookup
+ dnsResults := BatchReverseDNS(ipsToLookup, DefaultDNSConcurrency)
+
+ // Update foundSites with hostnames
+ for i, site := range foundSites {
+ if len(site) >= 2 {
+ // Strip protocol prefix to match dnsResults key format
+ lookupIP := strings.TrimPrefix(strings.TrimPrefix(site[1], "https://"), "http://")
+ if hostname, ok := dnsResults[lookupIP]; ok && hostname != "" {
+ // Append hostname to result
+ if len(site) == 3 {
+ foundSites[i] = append(site, hostname)
+ }
+ }
+ }
+ }
+
+ // Also update interrupt data
+ if interruptData != nil {
+ interruptData.mu.Lock()
+ for i, site := range interruptData.Websites {
+ if len(site) >= 2 {
+ lookupIP := strings.TrimPrefix(strings.TrimPrefix(site[1], "https://"), "http://")
+ if hostname, ok := dnsResults[lookupIP]; ok && hostname != "" {
+ if len(site) == 3 {
+ interruptData.Websites[i] = append(site, hostname)
+ }
+ }
+ }
+ }
+ interruptData.mu.Unlock()
+ }
+
+ fmt.Printf("[*] DNS lookup completed: %d/%d hostnames resolved\n", len(dnsResults), len(ipsToLookup))
+ }
+
// Print scan statistics
fmt.Printf("\n[*] Scan Statistics: %d/%d IPs scanned, %d sites found\n", scannedCount, len(IPAddress), foundCount)
- // Process and print results (only if not already printed)
- mu.Lock()
- if !stopped {
+ // Process and print results (sync.Once guarantees single execution)
+ printOnce.Do(func() {
+ mu.Lock()
+ wasEarlyStopped := stopped
mu.Unlock()
- PrintResult("Search All ASN/IP", DomainTitle, timeout, IPBlocks, foundSites, export)
- } else {
+ _ = bar.Finish()
+ if wasEarlyStopped {
+ PrintResult("Search Domain by ASN", DomainTitle, timeout, IPBlocks, foundSites, export)
+ } else {
+ PrintResult("Search All ASN/IP", DomainTitle, timeout, IPBlocks, foundSites, export)
+ }
+ })
+}
+
+// ResolveSiteWithCache performs scanning with cache support for resuming
+func ResolveSiteWithCache(IPAddress []string, Websites [][]string, DomainTitle string, IPBlocks []string, domain string, con bool, export bool, timeout int, interruptData *InterruptData, cache *Cache) {
+ var wg sync.WaitGroup
+ var mu sync.Mutex
+
+ var foundSites [][]string
+ var scannedCount, foundCount int
+
+ // Add existing cached results
+ if cache != nil {
+ foundSites = append(foundSites, cache.GetResults()...)
+ foundCount = len(foundSites)
+ }
+
+ shuffledIPs := ShuffleIPs(IPAddress)
+ config.VerboseLog("IP addresses shuffled for firewall bypass")
+
+ workerCount := config.Workers
+ config.VerboseLog("Starting scan with %d concurrent workers", workerCount)
+ sem := make(chan struct{}, workerCount)
+
+ rateLimiter := NewRateLimiter(config.RateLimit, config.RateLimit*2)
+ if rateLimiter.IsEnabled() {
+ config.VerboseLog("Rate limiting enabled: %d requests/second", config.RateLimit)
+ }
+
+ bar := progressbar.NewOptions(len(shuffledIPs),
+ progressbar.OptionEnableColorCodes(true),
+ progressbar.OptionShowBytes(false),
+ progressbar.OptionShowCount(),
+ progressbar.OptionSetWidth(50),
+ progressbar.OptionSetDescription("[cyan][1/1][reset] Scanning IPs (Resuming)"),
+ progressbar.OptionSetTheme(progressbar.Theme{
+ Saucer: "[green]=[reset]",
+ SaucerHead: "[green]>[reset]",
+ SaucerPadding: " ",
+ BarStart: "[",
+ BarEnd: "]",
+ }),
+ )
+
+ var stopped bool
+ var printOnce sync.Once
+ saveCounter := 0
+
+ for _, ip := range shuffledIPs {
+ mu.Lock()
+ if stopped {
+ mu.Unlock()
+ break
+ }
mu.Unlock()
+
+ if interruptData != nil && interruptData.IsCancelled() {
+ config.VerboseLog("Scan cancelled by user")
+ break
+ }
+
+ wg.Add(1)
+ sem <- struct{}{}
+
+ go func(ip string) {
+ defer wg.Done()
+ defer func() { <-sem }()
+
+ if interruptData != nil && interruptData.IsCancelled() {
+ return
+ }
+ mu.Lock()
+ if stopped {
+ mu.Unlock()
+ return
+ }
+ mu.Unlock()
+
+ rateLimiter.Wait()
+ config.AddJitter()
+
+ // Skip individual DNS lookup - will do batch DNS at the end
+ site := GetSiteWithOptions(ip, domain, timeout, true)
+
+ mu.Lock()
+ scannedCount++
+
+ // Save to cache
+ if cache != nil {
+ cache.AddScannedIP(ip)
+ saveCounter++
+ // Save cache every 50 IPs
+ if saveCounter >= 50 {
+ _ = cache.Save()
+ saveCounter = 0
+ }
+ }
+ mu.Unlock()
+
+ if len(site) > 0 {
+ if interruptData != nil && interruptData.IsCancelled() {
+ interruptData.AddWebsite(site)
+ if cache != nil {
+ cache.AddResult(site)
+ }
+ return
+ }
+
+ var siteInfo string
+ if len(site) >= 4 {
+ siteInfo = fmt.Sprintf("[%s] %s - %s [%s]", site[0], site[1], site[2], site[3])
+ } else if len(site) >= 3 {
+ siteInfo = fmt.Sprintf("[%s] %s - %s", site[0], site[1], site[2])
+ } else {
+ siteInfo = fmt.Sprintf("%v", site)
+ }
+ fmt.Printf("\n [+] Found: %s\n", siteInfo)
+
+ mu.Lock()
+ foundSites = append(foundSites, site)
+ foundCount++
+ if cache != nil {
+ cache.AddResult(site)
+ }
+ mu.Unlock()
+
+ if interruptData != nil {
+ interruptData.AddWebsite(site)
+ }
+
+ if DomainTitle != "" && len(site) > 2 && site[2] == DomainTitle && !con {
+ mu.Lock()
+ stopped = true
+ mu.Unlock()
+ return
+ }
+ }
+
+ if interruptData == nil || !interruptData.IsCancelled() {
+ mu.Lock()
+ _ = bar.Add(1)
+ mu.Unlock()
+ }
+ }(ip)
+ }
+
+ wg.Wait()
+ _ = bar.Finish()
+
+ // Batch DNS lookup for all found sites (much faster than individual lookups)
+ if len(foundSites) > 0 && !stopped && (interruptData == nil || !interruptData.IsCancelled()) {
+ fmt.Printf("\n[*] Performing batch DNS lookup for %d found IPs...\n", len(foundSites))
+
+ // Collect IPs that need DNS lookup (extract IP from URL like https://1.2.3.4)
+ ipsToLookup := make([]string, 0, len(foundSites))
+ for _, site := range foundSites {
+ if len(site) >= 2 {
+ ip := site[1]
+ // Remove protocol prefix if present
+ ip = strings.TrimPrefix(ip, "https://")
+ ip = strings.TrimPrefix(ip, "http://")
+ ipsToLookup = append(ipsToLookup, ip)
+ }
+ }
+
+ // Perform batch DNS lookup
+ dnsResults := BatchReverseDNS(ipsToLookup, DefaultDNSConcurrency)
+
+ // Update foundSites with hostnames
+ for i, site := range foundSites {
+ if len(site) >= 2 {
+ lookupIP := strings.TrimPrefix(strings.TrimPrefix(site[1], "https://"), "http://")
+ if hostname, ok := dnsResults[lookupIP]; ok && hostname != "" {
+ if len(site) == 3 {
+ foundSites[i] = append(site, hostname)
+ }
+ }
+ }
+ }
+
+ // Update interrupt data and cache
+ if interruptData != nil {
+ interruptData.mu.Lock()
+ for i, site := range interruptData.Websites {
+ if len(site) >= 2 {
+ lookupIP := strings.TrimPrefix(strings.TrimPrefix(site[1], "https://"), "http://")
+ if hostname, ok := dnsResults[lookupIP]; ok && hostname != "" {
+ if len(site) == 3 {
+ interruptData.Websites[i] = append(site, hostname)
+ }
+ }
+ }
+ }
+ interruptData.mu.Unlock()
+ }
+
+ fmt.Printf("[*] DNS lookup completed: %d/%d hostnames resolved\n", len(dnsResults), len(ipsToLookup))
+ }
+
+ // Final save
+ if cache != nil {
+ cache.MarkCompleted()
+ _ = cache.Save()
+ config.InfoLog("Cache saved to: %s", cache.FilePath)
}
+
+ fmt.Printf("\n[*] Scan Statistics: %d/%d IPs scanned, %d sites found\n", scannedCount, len(IPAddress), foundCount)
+
+ // Process and print results (sync.Once guarantees single execution)
+ printOnce.Do(func() {
+ mu.Lock()
+ wasEarlyStopped := stopped
+ mu.Unlock()
+ _ = bar.Finish()
+ if cache != nil {
+ cache.MarkCompleted()
+ _ = cache.Save()
+ }
+ if wasEarlyStopped {
+ PrintResult("Search Domain by ASN (Resumed)", DomainTitle, timeout, IPBlocks, foundSites, export)
+ } else {
+ PrintResult("Search All ASN/IP (Resumed)", DomainTitle, timeout, IPBlocks, foundSites, export)
+ }
+ })
}
diff --git a/modules/result_print.go b/modules/result_print.go
index fba09c8..77dc4cb 100644
--- a/modules/result_print.go
+++ b/modules/result_print.go
@@ -5,18 +5,51 @@ import (
"fmt"
"ipmap/config"
"os"
+ "path/filepath"
"strconv"
"strings"
"time"
)
type ResultData struct {
- Method string `json:"method"`
- SearchSite string `json:"search_site,omitempty"`
- Timeout int `json:"timeout_ms"`
- IPBlocks []string `json:"ip_blocks"`
- FoundedWebsites [][]string `json:"founded_websites"`
- Timestamp string `json:"timestamp"`
+ Method string `json:"method"`
+ SearchSite string `json:"search_site,omitempty"`
+ Timeout int `json:"timeout_ms"`
+ IPBlocks []string `json:"ip_blocks"`
+ FoundWebsites [][]string `json:"found_websites"`
+ Timestamp string `json:"timestamp"`
+}
+
+// MarshalJSON outputs both "found_websites" and "founded_websites" for backward compatibility
+func (r ResultData) MarshalJSON() ([]byte, error) {
+ type Alias ResultData
+ return json.Marshal(&struct {
+ Alias
+ FoundedWebsites [][]string `json:"founded_websites"` // deprecated: backward compat
+ }{
+ Alias: (Alias)(r),
+ FoundedWebsites: r.FoundWebsites,
+ })
+}
+
+// UnmarshalJSON accepts both "found_websites" and legacy "founded_websites"
+func (r *ResultData) UnmarshalJSON(data []byte) error {
+ type Alias ResultData
+ aux := &struct {
+ *Alias
+ FoundedWebsites [][]string `json:"founded_websites"`
+ }{Alias: (*Alias)(r)}
+
+ if err := json.Unmarshal(data, aux); err != nil {
+ return err
+ }
+
+ // If new field is empty but old field has data, use it
+ if len(r.FoundWebsites) == 0 && len(aux.FoundedWebsites) > 0 {
+ r.FoundWebsites = aux.FoundedWebsites
+ }
+
+ return nil
}
// ExportInterruptedResults exports results from an interrupted scan without prompting
@@ -26,12 +59,12 @@ func ExportInterruptedResults(websites [][]string, domain string, timeout int, i
if isJSON {
result := ResultData{
- Method: "Search Interrupted",
- SearchSite: domain,
- Timeout: timeout,
- IPBlocks: ipBlocks,
- FoundedWebsites: websites,
- Timestamp: time.Now().Format(time.RFC3339),
+ Method: "Search Interrupted",
+ SearchSite: domain,
+ Timeout: timeout,
+ IPBlocks: ipBlocks,
+ FoundWebsites: websites,
+ Timestamp: time.Now().Format(time.RFC3339),
}
jsonData, err := json.MarshalIndent(result, "", " ")
@@ -48,7 +81,7 @@ func ExportInterruptedResults(websites [][]string, domain string, timeout int, i
}
resultString += "\nTimeout: " + strconv.Itoa(timeout) + "ms"
resultString += "\nIP Blocks: " + strings.Join(ipBlocks, ",")
- resultString += "\nFounded Websites:\n"
+ resultString += "\nFound Websites:\n"
if len(websites) > 0 {
for _, site := range websites {
@@ -87,6 +120,17 @@ func exportFile(result string, isJSON bool, domain string) {
} else {
fileName = "ipmap_" + strconv.FormatInt(time.Now().Local().Unix(), 10) + "_export" + ext
}
+
+ // Use output directory if specified
+ if config.OutputDir != "" {
+ // Create directory if it doesn't exist
+ if err := os.MkdirAll(config.OutputDir, 0755); err != nil {
+ config.ErrorLog("Failed to create output directory: %v", err)
+ return
+ }
+ fileName = filepath.Join(config.OutputDir, fileName)
+ }
+
f, err := os.Create(fileName)
if err != nil {
config.ErrorLog("Export file creation error: %v", err)
@@ -112,12 +156,12 @@ func PrintResult(method string, title string, timeout int, ipblocks []string, fo
if isJSON {
// Create JSON result
result := ResultData{
- Method: method,
- SearchSite: title,
- Timeout: timeout,
- IPBlocks: ipblocks,
- FoundedWebsites: founded,
- Timestamp: time.Now().Format(time.RFC3339),
+ Method: method,
+ SearchSite: title,
+ Timeout: timeout,
+ IPBlocks: ipblocks,
+ FoundWebsites: founded,
+ Timestamp: time.Now().Format(time.RFC3339),
}
jsonData, err := json.MarshalIndent(result, "", " ")
@@ -138,7 +182,7 @@ func PrintResult(method string, title string, timeout int, ipblocks []string, fo
exportFile(string(jsonData), true, title)
}
}
- fmt.Println("\n[✓] Scan completed")
+ fmt.Println("\n[+] Scan completed")
} else {
// Text format (original)
resultString := "==================== RESULT ===================="
@@ -151,7 +195,7 @@ func PrintResult(method string, title string, timeout int, ipblocks []string, fo
resultString += "\nTimeout: " + strconv.Itoa(timeout) + "ms"
resultString += "\nIP Blocks: " + strings.Join(ipblocks, ",")
- resultString += "\nFounded Websites:\n"
+ resultString += "\nFound Websites:\n"
if len(founded) > 0 {
for _, site := range founded {
// Format: Status, IP, Title[, Hostname]
@@ -179,6 +223,6 @@ func PrintResult(method string, title string, timeout int, ipblocks []string, fo
exportFile(resultString, false, title)
}
}
- fmt.Println("\n[✓] Scan completed")
+ fmt.Println("\n[+] Scan completed")
}
}
diff --git a/modules/result_print_test.go b/modules/result_print_test.go
index 99cfff0..43f3644 100644
--- a/modules/result_print_test.go
+++ b/modules/result_print_test.go
@@ -12,7 +12,7 @@ func TestResultDataJSON(t *testing.T) {
SearchSite: "example.com",
Timeout: 300,
IPBlocks: []string{"192.168.1.0/24"},
- FoundedWebsites: [][]string{
+ FoundWebsites: [][]string{
{"200", "192.168.1.1", "Test Site"},
},
Timestamp: "2025-11-30T00:00:00Z",
@@ -43,8 +43,8 @@ func TestResultDataJSON(t *testing.T) {
if len(decoded.IPBlocks) != len(result.IPBlocks) {
t.Errorf("IPBlocks length mismatch: got %d, want %d", len(decoded.IPBlocks), len(result.IPBlocks))
}
- if len(decoded.FoundedWebsites) != len(result.FoundedWebsites) {
- t.Errorf("FoundedWebsites length mismatch: got %d, want %d", len(decoded.FoundedWebsites), len(result.FoundedWebsites))
+ if len(decoded.FoundWebsites) != len(result.FoundWebsites) {
+ t.Errorf("FoundWebsites length mismatch: got %d, want %d", len(decoded.FoundWebsites), len(result.FoundWebsites))
}
}
@@ -99,7 +99,7 @@ func BenchmarkResultDataMarshal(b *testing.B) {
SearchSite: "example.com",
Timeout: 300,
IPBlocks: []string{"192.168.1.0/24", "10.0.0.0/24"},
- FoundedWebsites: [][]string{
+ FoundWebsites: [][]string{
{"200", "192.168.1.1", "Site 1"},
{"200", "192.168.1.2", "Site 2"},
},
diff --git a/modules/scanner.go b/modules/scanner.go
index 5fa1c57..be1befe 100644
--- a/modules/scanner.go
+++ b/modules/scanner.go
@@ -1,16 +1,18 @@
-// scanner.go - Chrome 131 Anti-Detection with uTLS Fingerprint
+// scanner.go - Chrome 135 Anti-Detection with uTLS Fingerprint
//
// This module provides:
-// - Chrome 131 TLS fingerprint via uTLS (JA3/JA4 spoofing)
-// - Chrome 131 browser headers in exact order
+// - Chrome 135 TLS fingerprint via uTLS (JA3/JA4 spoofing)
+// - Chrome 135 browser headers in exact order
// - Smart jitter for natural request patterns
//
// Used by request.go to bypass Cloudflare and other WAFs.
+// Updated: January 2026
package modules
import (
"context"
+ "encoding/base64"
"fmt"
"ipmap/config"
"net"
@@ -20,55 +22,68 @@ import (
"time"
utls "github.com/refraction-networking/utls"
- "golang.org/x/net/http2"
+ "golang.org/x/net/proxy"
)
// ====================================================================
-// CHROME 131 USER-AGENT POOL (Windows/macOS/Linux - Dec 2025)
+// CHROME 135 USER-AGENT POOL (Windows/macOS/Linux - Jan 2026)
// ====================================================================
// chrome135UserAgents is the rotating User-Agent pool: Chrome 135 builds for
// Windows, macOS and Linux, plus Chrome 134/133 fallbacks (real traffic is
// never 100% latest-version). NewRandomChromeProfile picks one at random.
var chrome135UserAgents = []string{
	// Windows 11 24H2 - Chrome 135 (latest stable)
	"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
	"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.6998.88 Safari/537.36",
	"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.6998.117 Safari/537.36",
	"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.6998.178 Safari/537.36",

	// macOS 15 Sequoia - Chrome 135
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 15_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.6998.88 Safari/537.36",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.6998.117 Safari/537.36",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.6998.178 Safari/537.36",

	// Linux - Chrome 135
	"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
	"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.6998.88 Safari/537.36",
	"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.6998.117 Safari/537.36",

	// Chrome 134 variants (fallback - one version behind)
	"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
	"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",

	// Chrome 133 variants (fallback - two versions behind)
	"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
}
// chrome135SecChUA lists sec-ch-ua header variants for Chrome 135 (with a
// Chrome 134 fallback pair). Brand order is rotated across entries because
// real Chrome shuffles the GREASE-style "Not-A.Brand" token position.
var chrome135SecChUA = []string{
	`"Google Chrome";v="135", "Chromium";v="135", "Not-A.Brand";v="8"`,
	`"Chromium";v="135", "Google Chrome";v="135", "Not-A.Brand";v="8"`,
	`"Not-A.Brand";v="8", "Chromium";v="135", "Google Chrome";v="135"`,
	`"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"`,
	// Chrome 134 fallback
	`"Google Chrome";v="134", "Chromium";v="134", "Not-A.Brand";v="8"`,
	`"Chromium";v="134", "Google Chrome";v="134", "Not-A.Brand";v="8"`,
}
// chromePlatforms holds the sec-ch-ua-platform header values. The quotes are
// part of the wire format (the header value is a quoted sf-string), so they
// are embedded in each entry on purpose.
var chromePlatforms = []string{
	`"Windows"`,
	`"macOS"`,
	`"Linux"`,
}
// acceptLanguages lists Accept-Language header variants. English-primary
// values dominate, with secondary languages mixed in to mimic a natural
// distribution of real browser installs.
var acceptLanguages = []string{
	"en-US,en;q=0.9",
	"en-GB,en;q=0.9",
	"en-US,en;q=0.9,tr;q=0.8",
	"en-US,en;q=0.9,de;q=0.8",
	"en-US,en;q=0.9,fr;q=0.8",
	"en-US,en;q=0.9,es;q=0.8",
	"en-US,en;q=0.9,ja;q=0.8",
	"en;q=0.9",
}
@@ -77,11 +92,12 @@ var refererSources = []string{
"https://www.google.com/",
"https://www.bing.com/",
"https://duckduckgo.com/",
+ "https://search.yahoo.com/",
"",
}
// ====================================================================
-// CHROME 131 HEADERS
+// CHROME 135 HEADERS
// ====================================================================
// ChromeHeaderProfile holds a complete Chrome header profile
@@ -94,10 +110,10 @@ type ChromeHeaderProfile struct {
Referer string
}
-// NewRandomChromeProfile creates a random Chrome 131 profile
+// NewRandomChromeProfile creates a random Chrome 135 profile
func NewRandomChromeProfile() *ChromeHeaderProfile {
- ua := config.GetRandomString(chrome131UserAgents)
- platform := config.GetRandomString(chrome131Platforms)
+ ua := config.GetRandomString(chrome135UserAgents)
+ platform := config.GetRandomString(chromePlatforms)
// Match platform with User-Agent
if strings.Contains(ua, "Windows") {
@@ -110,7 +126,7 @@ func NewRandomChromeProfile() *ChromeHeaderProfile {
return &ChromeHeaderProfile{
UserAgent: ua,
- SecChUA: config.GetRandomString(chrome131SecChUA),
+ SecChUA: config.GetRandomString(chrome135SecChUA),
SecChUAMobile: "?0",
SecChUAPlatform: platform,
AcceptLanguage: config.GetRandomString(acceptLanguages),
@@ -118,14 +134,14 @@ func NewRandomChromeProfile() *ChromeHeaderProfile {
}
}
-// AddRealChromeHeaders adds Chrome 131 headers in the exact real browser order
+// AddRealChromeHeaders adds Chrome 135 headers in the exact real browser order
// Header order is checked by Cloudflare and other WAFs
func AddRealChromeHeaders(req *http.Request, profile *ChromeHeaderProfile) {
if profile == nil {
profile = NewRandomChromeProfile()
}
- // Chrome 131's REAL header order (captured from DevTools)
+ // Chrome 135's REAL header order (captured from DevTools)
// Order is critical! Must match Chrome's actual order, not alphabetical
// 1. Host (auto-added)
@@ -153,7 +169,7 @@ func AddRealChromeHeaders(req *http.Request, profile *ChromeHeaderProfile) {
req.Header.Set("Sec-Fetch-User", "?1")
req.Header.Set("Sec-Fetch-Dest", "document")
- // 8. Accept-Encoding (includes zstd - critical for Chrome 131!)
+ // 8. Accept-Encoding (includes zstd - critical for Chrome 135!)
req.Header.Set("Accept-Encoding", "gzip, deflate, br, zstd")
// 9. Accept-Language
@@ -169,17 +185,16 @@ func AddRealChromeHeaders(req *http.Request, profile *ChromeHeaderProfile) {
}
// ====================================================================
-// UTLS TRANSPORT (Chrome 131 TLS Fingerprint)
+// UTLS TRANSPORT (Chrome 135 TLS Fingerprint)
// ====================================================================
-// UTLSTransport wraps utls for Chrome 131 TLS fingerprint
+// UTLSTransport wraps utls for Chrome 135 TLS fingerprint
type UTLSTransport struct {
- proxyURL *url.URL
- timeout time.Duration
- h2Transport *http2.Transport
+ proxyURL *url.URL
+ timeout time.Duration
}
-// NewUTLSTransport creates a new transport with Chrome 131 fingerprint
+// NewUTLSTransport creates a new transport with Chrome 135 fingerprint
func NewUTLSTransport(proxyURL string, timeout time.Duration) (*UTLSTransport, error) {
t := &UTLSTransport{
timeout: timeout,
@@ -193,16 +208,13 @@ func NewUTLSTransport(proxyURL string, timeout time.Duration) (*UTLSTransport, e
t.proxyURL = parsed
}
- // Setup HTTP/2 transport
- t.h2Transport = &http2.Transport{
- ReadIdleTimeout: 30 * time.Second,
- PingTimeout: 15 * time.Second,
- }
+ // Note: HTTP/2 transport is not used because we force HTTP/1.1 via ALPN
+ // This avoids protocol mismatch issues with Go's http.Transport
return t, nil
}
-// DialTLSContext creates a TLS connection with Chrome 131 fingerprint
+// DialTLSContext creates a TLS connection with Chrome 135 fingerprint
func (t *UTLSTransport) DialTLSContext(ctx context.Context, network, addr string) (net.Conn, error) {
host, _, err := net.SplitHostPort(addr)
if err != nil {
@@ -222,11 +234,33 @@ func (t *UTLSTransport) DialTLSContext(ctx context.Context, network, addr string
return nil, err
}
- // uTLS handshake with Chrome 131 fingerprint
+ // uTLS handshake with Chrome 135 fingerprint
+ // Use custom spec to force HTTP/1.1 via ALPN (avoid HTTP/2 issues with Go's http.Transport)
tlsConn := utls.UClient(conn, &utls.Config{
ServerName: host,
- InsecureSkipVerify: true,
- }, utls.HelloChrome_Auto) // Auto-selects latest Chrome fingerprint
+ InsecureSkipVerify: config.InsecureSkipVerify,
+ }, utls.HelloCustom)
+
+ // Apply Chrome fingerprint spec
+ spec, err := utls.UTLSIdToSpec(utls.HelloChrome_Auto)
+ if err != nil {
+ conn.Close()
+ return nil, fmt.Errorf("failed to get Chrome spec: %w", err)
+ }
+
+ // Modify ALPN extension to force HTTP/1.1 only
+ for i, ext := range spec.Extensions {
+ if alpn, ok := ext.(*utls.ALPNExtension); ok {
+ alpn.AlpnProtocols = []string{"http/1.1"}
+ spec.Extensions[i] = alpn
+ break
+ }
+ }
+
+ if err := tlsConn.ApplyPreset(&spec); err != nil {
+ conn.Close()
+ return nil, fmt.Errorf("failed to apply Chrome preset: %w", err)
+ }
if err := tlsConn.Handshake(); err != nil {
conn.Close()
@@ -240,43 +274,63 @@ func (t *UTLSTransport) dialViaProxy(ctx context.Context, network, addr string)
proxyAddr := t.proxyURL.Host
dialer := &net.Dialer{Timeout: t.timeout}
+ // SOCKS5 proxy support
+ if t.proxyURL.Scheme == "socks5" {
+ var auth *proxy.Auth
+ if t.proxyURL.User != nil {
+ password, _ := t.proxyURL.User.Password()
+ auth = &proxy.Auth{
+ User: t.proxyURL.User.Username(),
+ Password: password,
+ }
+ }
+
+ socks5Dialer, err := proxy.SOCKS5("tcp", proxyAddr, auth, dialer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create SOCKS5 dialer: %w", err)
+ }
+
+ return socks5Dialer.Dial(network, addr)
+ }
+
+ // HTTP/HTTPS proxy (CONNECT method)
conn, err := dialer.DialContext(ctx, "tcp", proxyAddr)
if err != nil {
return nil, err
}
- // HTTP CONNECT for HTTPS proxy
- if t.proxyURL.Scheme == "http" || t.proxyURL.Scheme == "https" {
- connectReq := fmt.Sprintf("CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr)
- if _, err := conn.Write([]byte(connectReq)); err != nil {
- conn.Close()
- return nil, err
- }
+ connectReq := fmt.Sprintf("CONNECT %s HTTP/1.1\r\nHost: %s\r\n", addr, addr)
- buf := make([]byte, 1024)
- n, err := conn.Read(buf)
- if err != nil {
- conn.Close()
- return nil, err
- }
- if !strings.Contains(string(buf[:n]), "200") {
- conn.Close()
- return nil, fmt.Errorf("proxy CONNECT failed: %s", string(buf[:n]))
- }
+ // Add proxy authentication if provided
+ if t.proxyURL.User != nil {
+ password, _ := t.proxyURL.User.Password()
+ auth := t.proxyURL.User.Username() + ":" + password
+ encoded := base64Encode(auth)
+ connectReq += "Proxy-Authorization: Basic " + encoded + "\r\n"
+ }
+
+ connectReq += "\r\n"
+
+ if _, err := conn.Write([]byte(connectReq)); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ buf := make([]byte, 1024)
+ n, err := conn.Read(buf)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+ if !strings.Contains(string(buf[:n]), "200") {
+ conn.Close()
+ return nil, fmt.Errorf("proxy CONNECT failed: %s", string(buf[:n]))
}
return conn, nil
}
-// GetTransport returns an http.Transport with uTLS dial function
-func (t *UTLSTransport) GetTransport() *http.Transport {
- return &http.Transport{
- DialTLSContext: t.DialTLSContext,
- MaxIdleConns: 100,
- MaxIdleConnsPerHost: 10,
- IdleConnTimeout: 60 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ResponseHeaderTimeout: 10 * time.Second,
- ForceAttemptHTTP2: true,
- }
// base64Encode returns the standard (RFC 4648) base64 encoding of data.
// It is used to build the Proxy-Authorization value for basic auth.
func base64Encode(data string) string {
	raw := []byte(data)
	out := make([]byte, base64.StdEncoding.EncodedLen(len(raw)))
	base64.StdEncoding.Encode(out, raw)
	return string(out)
}
diff --git a/modules/scanner_test.go b/modules/scanner_test.go
index 820e7e3..a7056e4 100644
--- a/modules/scanner_test.go
+++ b/modules/scanner_test.go
@@ -82,7 +82,7 @@ func TestAddRealChromeHeaders(t *testing.T) {
}
}
- // Check Accept-Encoding contains zstd (Chrome 131 specific)
+ // Check Accept-Encoding contains zstd (Chrome 135 specific)
acceptEncoding := req.Header.Get("Accept-Encoding")
if acceptEncoding != "gzip, deflate, br, zstd" {
t.Errorf("Accept-Encoding should include zstd, got: %s", acceptEncoding)
diff --git a/tools/find_ip.go b/tools/find_ip.go
index 9baab5e..0c8b14b 100644
--- a/tools/find_ip.go
+++ b/tools/find_ip.go
@@ -46,3 +46,55 @@ func FindIP(ipBlocks []string, domain string, domainTitle string, con bool, expo
modules.ResolveSite(ipAddress, websites, domainTitle, ipBlocks, domain, con, export, timeout, interruptData)
}
+
+// FindIPWithCache resumes a scan from cache, filtering out already-scanned IPs
+func FindIPWithCache(ipBlocks []string, domain string, domainTitle string, con bool, export bool, timeout int, interruptData *modules.InterruptData, cache *modules.Cache) {
+ var ipAddress []string
+ var websites [][]string
+
+ for _, block := range ipBlocks {
+ ips, err := modules.CalcIPAddress(block)
+ if err != nil {
+ config.ErrorLog("Failed to parse CIDR block '%s': %v", block, err)
+ continue
+ }
+
+ ipAddress = append(ipAddress, ips...)
+ }
+
+ if len(ipAddress) == 0 {
+ config.ErrorLog("No valid IP addresses to scan")
+ return
+ }
+
+ // Filter out already-scanned IPs from cache
+ unscannedIPs := cache.GetUnscannedIPs(ipAddress)
+
+ config.InfoLog("Total IPs: %d, Already scanned: %d, Remaining: %d",
+ len(ipAddress), len(ipAddress)-len(unscannedIPs), len(unscannedIPs))
+
+ if len(unscannedIPs) == 0 {
+ config.InfoLog("All IPs already scanned, loading results from cache")
+ modules.PrintResult("Search All ASN/IP (Cached)", domainTitle, timeout, ipBlocks, cache.GetResults(), export)
+ return
+ }
+
+ // Calculate estimated end time
+ workerCount := config.Workers
+ if workerCount <= 0 {
+ workerCount = 100
+ }
+ estimatedSeconds := (len(unscannedIPs) / workerCount) * timeout / 1000
+ if estimatedSeconds < 1 {
+ estimatedSeconds = 1
+ }
+
+ fmt.Println("Resuming Scan:" +
+ "\nIP Block: " + strconv.Itoa(len(ipBlocks)) +
+ "\nTotal IPs: " + strconv.Itoa(len(ipAddress)) +
+ "\nRemaining: " + strconv.Itoa(len(unscannedIPs)) +
+ "\nWorkers: " + strconv.Itoa(workerCount) +
+ "\nEst. End: " + time.Now().Add(time.Duration(estimatedSeconds)*time.Second).Local().Format("2006-01-02 15:04:05"))
+
+ modules.ResolveSiteWithCache(unscannedIPs, websites, domainTitle, ipBlocks, domain, con, export, timeout, interruptData, cache)
+}