package loader

import (
	"fmt"
	"os"
	"strings"
)

// PlainTextLoader reads scan targets from plain-text files that list
// one domain per line. Extracted from main.go's executeBulkScan so the
// logic can be reused elsewhere.
type PlainTextLoader struct{}

// NewPlainTextLoader returns a ready-to-use PlainTextLoader.
func NewPlainTextLoader() *PlainTextLoader {
	return new(PlainTextLoader)
}

// Load reads targets from a plain-text file and returns normalized, deduplicated URLs.
// Implements the Loader interface.
//
// Format:
//   - One domain per line
//   - Lines starting with "#" are comments (skipped)
//   - Empty lines are skipped
//   - http://, https:// prefixes are stripped
//   - Trailing slashes are stripped
//
// Note: PlainTextLoader does NOT filter private IPs or localhost.
// This maintains backward compatibility with existing targets files.
// Load reads targets from the plain-text file at path and returns the
// normalized, deduplicated URLs. Implements the Loader interface.
//
// File format:
//   - one domain per line
//   - lines beginning with "#" are comments and are skipped
//   - blank lines are skipped
//   - leading http:// / https:// schemes are removed
//   - trailing slashes are removed
//
// Note: this loader does NOT filter private IPs or localhost, which
// keeps it backward compatible with existing targets files.
func (l *PlainTextLoader) Load(path string) (*LoadResult, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("read file: %w", err)
	}

	res := &LoadResult{}
	dedup := map[string]bool{}

	for _, entry := range strings.Split(strings.TrimSpace(string(raw)), "\n") {
		entry = strings.TrimSpace(entry)
		res.TotalRows++

		// Blank lines and "#" comments are counted as filtered rows.
		if entry == "" || strings.HasPrefix(entry, "#") {
			res.Filtered++
			continue
		}

		// Strip an explicit scheme and any trailing slashes.
		entry = strings.TrimPrefix(entry, "http://")
		entry = strings.TrimPrefix(entry, "https://")
		entry = strings.TrimRight(entry, "/")

		// Re-attach a temporary scheme so the shared normalizeURL helper
		// can parse the value consistently with other loaders.
		canonical := normalizeURL("http://" + entry)

		// Keep only the first occurrence of each normalized URL.
		if dedup[canonical] {
			res.Duplicates++
			continue
		}
		dedup[canonical] = true
		res.Targets = append(res.Targets, canonical)
	}

	return res, nil
}
