package database

import (
	"context"
	"path/filepath"
	"strconv"
	"testing"
	"time"

	"huntr/internal/engine"
)

// TestWriter_QueueAndWrite tests that findings are queued and written in batches.
func TestWriter_QueueAndWrite(t *testing.T) {
	// Setup temp database
	dbPath := filepath.Join(t.TempDir(), "test.db")
	db, err := Open(DefaultConfig(dbPath))
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	defer db.Close()

	if err := EnsureSchema(db); err != nil {
		t.Fatalf("EnsureSchema: %v", err)
	}

	// Create writer with small batch size for testing
	ctx, cancel := context.WithCancel(context.Background())
	cfg := WriterConfig{
		BatchSize:      10,
		ChannelSize:    100,
		MaxRetries:     3,
		BaseRetryDelay: 1 * time.Millisecond,
	}
	w := NewWriter(ctx, db, cfg)

	// Start writer in background
	done := make(chan error)
	go func() {
		done <- w.Start()
	}()

	// Queue 25 findings
	for i := 0; i < 25; i++ {
		f := engine.Finding{
			Domain:      "example.com",
			Path:        "/.env",
			StatusCode:  200,
			ContentType: "text/plain",
			BodySnippet: "DB_PASSWORD=secret",
			Patterns:    []string{"DB_PASSWORD"},
			Exposed:     true,
			FoundAt:     time.Now(),
		}
		if err := w.QueueFinding(f); err != nil {
			t.Fatalf("QueueFinding: %v", err)
		}
	}

	// Small delay to let batches process
	time.Sleep(100 * time.Millisecond)

	// Cancel to trigger shutdown
	cancel()

	// Wait for writer to finish
	select {
	case <-done:
	case <-time.After(5 * time.Second):
		t.Fatal("writer did not shut down in time")
	}

	// Verify stats
	stats := w.Stats()
	if stats.TotalWrites != 25 {
		t.Errorf("TotalWrites = %d, want 25", stats.TotalWrites)
	}
	// With adaptive batch sizing, we may get fewer batches due to low-latency mode
	// collecting all available findings. At minimum we should have 1 batch.
	if stats.TotalBatches < 1 {
		t.Errorf("TotalBatches = %d, want >= 1", stats.TotalBatches)
	}
	if stats.FailedWrites != 0 {
		t.Errorf("FailedWrites = %d, want 0", stats.FailedWrites)
	}

	// Verify data in database
	var count int
	err = db.QueryRow("SELECT COUNT(*) FROM findings").Scan(&count)
	if err != nil {
		t.Fatalf("count query: %v", err)
	}
	// Note: Due to UNIQUE(domain, path) constraint, all 25 inserts map to 1 row
	// because we use the same domain+path. This is correct behavior for deduplication.
	if count != 1 {
		t.Errorf("database count = %d, want 1 (deduplicated)", count)
	}
}

// TestWriter_QueueAndWrite_UniqueFindings tests that unique findings are all written.
func TestWriter_QueueAndWrite_UniqueFindings(t *testing.T) {
	// Setup temp database.
	dbPath := filepath.Join(t.TempDir(), "test.db")
	db, err := Open(DefaultConfig(dbPath))
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	defer db.Close()

	if err := EnsureSchema(db); err != nil {
		t.Fatalf("EnsureSchema: %v", err)
	}

	// Create writer with small batch size for testing.
	ctx, cancel := context.WithCancel(context.Background())
	// Deferred so the context is released even if the test fails before the
	// explicit cancel() below; calling cancel twice is safe.
	defer cancel()
	cfg := WriterConfig{
		BatchSize:      10,
		ChannelSize:    100,
		MaxRetries:     3,
		BaseRetryDelay: 1 * time.Millisecond,
	}
	w := NewWriter(ctx, db, cfg)

	// Start writer in background. Buffered (cap 1) so the goroutine cannot
	// leak if the test gives up on the timeout branch below.
	done := make(chan error, 1)
	go func() {
		done <- w.Start()
	}()

	// Queue 25 unique findings (different paths). strconv.Itoa keeps paths
	// unique for any count, unlike single-rune arithmetic which breaks past 26.
	for i := 0; i < 25; i++ {
		f := engine.Finding{
			Domain:      "example.com",
			Path:        "/.env" + strconv.Itoa(i),
			StatusCode:  200,
			ContentType: "text/plain",
			BodySnippet: "DB_PASSWORD=secret",
			Patterns:    []string{"DB_PASSWORD"},
			Exposed:     true,
			FoundAt:     time.Now(),
		}
		if err := w.QueueFinding(f); err != nil {
			t.Fatalf("QueueFinding: %v", err)
		}
	}

	// Small delay to let batches process.
	time.Sleep(100 * time.Millisecond)

	// Cancel to trigger shutdown.
	cancel()

	// Wait for writer to finish.
	select {
	case <-done:
	case <-time.After(5 * time.Second):
		t.Fatal("writer did not shut down in time")
	}

	// Verify stats.
	stats := w.Stats()
	if stats.TotalWrites != 25 {
		t.Errorf("TotalWrites = %d, want 25", stats.TotalWrites)
	}
	// With adaptive batch sizing, we may get fewer batches due to low-latency mode.
	if stats.TotalBatches < 1 {
		t.Errorf("TotalBatches = %d, want >= 1", stats.TotalBatches)
	}

	// Verify all unique rows in database.
	var count int
	err = db.QueryRow("SELECT COUNT(*) FROM findings").Scan(&count)
	if err != nil {
		t.Fatalf("count query: %v", err)
	}
	if count != 25 {
		t.Errorf("database count = %d, want 25", count)
	}
}

// TestWriter_GracefulShutdown tests that queue drains completely on shutdown.
func TestWriter_GracefulShutdown(t *testing.T) {
	// Setup temp database.
	dbPath := filepath.Join(t.TempDir(), "test.db")
	db, err := Open(DefaultConfig(dbPath))
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	defer db.Close()

	if err := EnsureSchema(db); err != nil {
		t.Fatalf("EnsureSchema: %v", err)
	}

	// Create writer with larger batch size (so items sit in queue longer).
	ctx, cancel := context.WithCancel(context.Background())
	// Deferred so the context is released even if the test fails before the
	// explicit cancel() below; calling cancel twice is safe.
	defer cancel()
	cfg := WriterConfig{
		BatchSize:      100, // Large batch, findings won't auto-flush
		ChannelSize:    1000,
		MaxRetries:     3,
		BaseRetryDelay: 1 * time.Millisecond,
	}
	w := NewWriter(ctx, db, cfg)

	// Start writer in background. Buffered (cap 1) so the goroutine cannot
	// leak if the test gives up on the timeout branch below.
	done := make(chan error, 1)
	go func() {
		done <- w.Start()
	}()

	// Small delay to ensure writer goroutine has started.
	time.Sleep(10 * time.Millisecond)

	// Queue findings with unique paths. strconv.Itoa guarantees uniqueness for
	// any findingsCount, unlike base-26 rune arithmetic.
	findingsCount := 50
	for i := 0; i < findingsCount; i++ {
		f := engine.Finding{
			Domain:      "drain-test.com",
			Path:        "/" + strconv.Itoa(i),
			StatusCode:  200,
			ContentType: "text/plain",
			BodySnippet: "SECRET=value",
			Patterns:    []string{"SECRET"},
			Exposed:     true,
			FoundAt:     time.Now(),
		}
		if err := w.QueueFinding(f); err != nil {
			t.Fatalf("QueueFinding: %v", err)
		}
	}

	// Cancel after queueing (triggers graceful shutdown with drain).
	cancel()

	// Wait for writer to finish draining.
	select {
	case <-done:
	case <-time.After(5 * time.Second):
		t.Fatal("writer did not shut down in time")
	}

	// Verify all findings were written during drain.
	stats := w.Stats()
	if stats.TotalWrites != int64(findingsCount) {
		t.Errorf("TotalWrites = %d, want %d (graceful drain)", stats.TotalWrites, findingsCount)
	}
	if stats.FailedWrites != 0 {
		t.Errorf("FailedWrites = %d, want 0", stats.FailedWrites)
	}

	// Verify in database.
	var count int
	err = db.QueryRow("SELECT COUNT(*) FROM findings WHERE domain = 'drain-test.com'").Scan(&count)
	if err != nil {
		t.Fatalf("count query: %v", err)
	}
	if count != findingsCount {
		t.Errorf("database count = %d, want %d", count, findingsCount)
	}
}

// TestWriter_Backpressure tests that writers block when channel is full.
func TestWriter_Backpressure(t *testing.T) {
	// Setup temp database.
	dbPath := filepath.Join(t.TempDir(), "test.db")
	db, err := Open(DefaultConfig(dbPath))
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	defer db.Close()

	if err := EnsureSchema(db); err != nil {
		t.Fatalf("EnsureSchema: %v", err)
	}

	// Create writer with very small channel.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cfg := WriterConfig{
		BatchSize:      1000, // Very large batch (won't flush quickly)
		ChannelSize:    5,    // Very small channel for backpressure testing
		MaxRetries:     3,
		BaseRetryDelay: 1 * time.Millisecond,
	}
	w := NewWriter(ctx, db, cfg)

	// Start writer in background. Buffered (cap 1) so the goroutine cannot
	// leak even if the receive below were skipped.
	done := make(chan error, 1)
	go func() {
		done <- w.Start()
	}()

	// Fill the channel (should succeed for channel capacity).
	for i := 0; i < 5; i++ {
		f := engine.Finding{
			Domain:      "backpressure.com",
			Path:        "/" + string(rune('a'+i)),
			StatusCode:  200,
			ContentType: "text/plain",
			BodySnippet: "DATA",
			Patterns:    []string{},
			Exposed:     false,
			FoundAt:     time.Now(),
		}
		if err := w.QueueFinding(f); err != nil {
			t.Fatalf("QueueFinding %d: %v", i, err)
		}
	}

	// Verify queue is at capacity.
	stats := w.Stats()
	// Note: Queue depth may be slightly less if batch collection started.
	if stats.QueueDepth < 3 {
		t.Logf("QueueDepth = %d (some may have been collected for batch)", stats.QueueDepth)
	}

	// Attempting to queue more should block briefly; we test this with a
	// select timeout. The channel is buffered (cap 1) so the goroutine can
	// deliver its result and exit even when the timeout branch is taken.
	blocked := make(chan error, 1)
	go func() {
		f := engine.Finding{
			Domain:      "backpressure.com",
			Path:        "/extra",
			StatusCode:  200,
			ContentType: "text/plain",
			BodySnippet: "DATA",
			Patterns:    []string{},
			Exposed:     false,
			FoundAt:     time.Now(),
		}
		// May block until batch processing frees space, or fail if the writer
		// shuts down first; the result is reported instead of being discarded.
		blocked <- w.QueueFinding(f)
	}()

	// Give it a moment - if it returns very quickly, batch processing freed space.
	// If it takes longer, backpressure is working.
	select {
	case qErr := <-blocked:
		// Either way is acceptable - the important thing is it eventually returns.
		t.Logf("QueueFinding completed (batch processing freed space), err=%v", qErr)
	case <-time.After(200 * time.Millisecond):
		// Backpressure is working - the goroutine is blocked.
		t.Log("QueueFinding blocked as expected (backpressure working)")
	}

	// Cleanup
	cancel()
	<-done
}

// TestWriter_AdaptiveBatchSize verifies that determineBatchSize scales with
// queue depth: the configured default when the queue is empty, depth+1 in
// low-latency mode for shallow queues, and 500 for deep queues.
func TestWriter_AdaptiveBatchSize(t *testing.T) {
	// A database handle is needed to construct the Writer, even though only
	// the internal determineBatchSize method is exercised here.
	db, err := Open(DefaultConfig(filepath.Join(t.TempDir(), "test.db")))
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	defer db.Close()

	// Writer with default-like config; it is never started.
	w := NewWriter(context.Background(), db, WriterConfig{
		BatchSize:      250,
		ChannelSize:    1000,
		MaxRetries:     3,
		BaseRetryDelay: 1 * time.Millisecond,
	})

	// enqueue pushes a finding directly into the internal channel.
	enqueue := func(path string) {
		w.findings <- engine.Finding{Domain: "test.com", Path: path}
	}
	// drain empties the internal channel between scenarios.
	drain := func() {
		for len(w.findings) > 0 {
			<-w.findings
		}
	}

	// Empty queue (depth 0): the default batch size is returned.
	if got := w.determineBatchSize(); got != 250 {
		t.Errorf("empty queue batch size = %d, want 250 (default)", got)
	}

	// Shallow queue (50 items, < 100): low-latency mode returns depth plus
	// one for the "first" finding the caller already holds.
	for i := 0; i < 50; i++ {
		enqueue("/" + string(rune('a'+i%26)))
	}
	if got := w.determineBatchSize(); got != 51 {
		t.Errorf("low queue batch size = %d, want 51", got)
	}
	drain()

	// Deep queue (850 items, > 800): batch size jumps to 500 so the backlog
	// flushes faster.
	for i := 0; i < 850; i++ {
		enqueue("/" + string(rune('a'+i%26)) + string(rune('a'+i/26%26)))
	}
	if got := w.determineBatchSize(); got != 500 {
		t.Errorf("deep queue batch size = %d, want 500", got)
	}
	drain()
}

// TestWriter_DefaultConfig verifies each field of the config returned by
// DefaultWriterConfig against its documented default.
func TestWriter_DefaultConfig(t *testing.T) {
	got := DefaultWriterConfig()

	// Each field is checked individually so a single wrong default produces
	// a precise failure message.
	if got.BatchSize != 250 {
		t.Errorf("BatchSize = %d, want 250", got.BatchSize)
	}
	if got.ChannelSize != 1000 {
		t.Errorf("ChannelSize = %d, want 1000", got.ChannelSize)
	}
	if got.MaxRetries != 3 {
		t.Errorf("MaxRetries = %d, want 3", got.MaxRetries)
	}
	if got.BaseRetryDelay != 10*time.Millisecond {
		t.Errorf("BaseRetryDelay = %v, want 10ms", got.BaseRetryDelay)
	}
}

// TestWriter_NewWriter_AppliesDefaults verifies that NewWriter replaces every
// zero-valued WriterConfig field with its default, and sizes the internal
// channel from the defaulted ChannelSize.
func TestWriter_NewWriter_AppliesDefaults(t *testing.T) {
	db, err := Open(DefaultConfig(filepath.Join(t.TempDir(), "test.db")))
	if err != nil {
		t.Fatalf("Open: %v", err)
	}
	defer db.Close()

	// Pass the zero-value config; every field should be defaulted.
	w := NewWriter(context.Background(), db, WriterConfig{})

	got := w.cfg
	if got.BatchSize != 250 {
		t.Errorf("BatchSize = %d, want 250", got.BatchSize)
	}
	if got.ChannelSize != 1000 {
		t.Errorf("ChannelSize = %d, want 1000", got.ChannelSize)
	}
	if got.MaxRetries != 3 {
		t.Errorf("MaxRetries = %d, want 3", got.MaxRetries)
	}
	if got.BaseRetryDelay != 10*time.Millisecond {
		t.Errorf("BaseRetryDelay = %v, want 10ms", got.BaseRetryDelay)
	}
	// The findings channel must reflect the defaulted ChannelSize.
	if cap(w.findings) != 1000 {
		t.Errorf("channel cap = %d, want 1000", cap(w.findings))
	}
}
