#!/bin/bash
# Test bulk_file_import CLI command
# Usage: ./test_bulk_import.sh [db_path]
# Default: /home/david/go/src/skraak/db/test.duckdb (ALWAYS USE TEST DATABASE!)

# Shared test helpers — presumably defines the color vars (RED/GREEN/YELLOW/NC)
# and check_binary used below; confirm in test_lib.sh.
source "$(dirname "$0")/test_lib.sh"

# Get absolute paths before changing directory
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
# Optional first argument overrides the database path; defaults to the test DB.
DB_PATH="${1:-$PROJECT_DIR/db/test.duckdb}"

# Refuse to run unless the database file already exists.
if [ ! -f "$DB_PATH" ]; then
    echo -e "${RED}Error: Database not found at $DB_PATH${NC}"
    exit 1
fi

echo "=== Testing bulk_file_import CLI Command ==="
echo ""
echo "Database: $DB_PATH"
echo ""

# Verify the skraak binary is available (helper from test_lib.sh).
check_binary

# Navigate to the project directory where skraak binary is located
cd "$PROJECT_DIR" || exit 1

# Helper to run CLI command and capture JSON output
# Run the skraak CLI, keeping only stdout (the JSON payload).
# stderr is discarded and a non-zero exit status is swallowed so
# callers can inspect the captured output instead of the status.
run_cli() {
    local -a argv=("$@")
    if ! "$PROJECT_DIR/skraak" "${argv[@]}" 2>/dev/null; then
        :  # ignore failures; callers decide based on the output
    fi
}

# Run the skraak CLI with stderr merged into stdout, for tests that need
# to inspect error messages. Always returns 0.
run_cli_with_stderr() {
    "$PROJECT_DIR/skraak" "$@" 2>&1
    return 0
}

# Helper to check for error in CLI output
# Decide whether captured CLI output represents an error.
# NOTE(review): not called anywhere in this script — possibly kept for
# use by sourcing tests; confirm before removing.
# Returns 0 (error detected) when the text contains an "error" key,
# or when it parses as JSON with a non-empty .error field; 1 otherwise.
cli_is_error() {
    local text="$1"
    # Fast path: a literal "error" key anywhere in the text.
    echo "$text" | grep -q '"error"' 2>/dev/null && return 0
    # Fallback: parse as JSON and probe the .error field.
    echo "$text" | jq -e '.error // empty' >/dev/null 2>&1 && return 0
    return 1
}

echo "Step 1: Create test dataset and locations"
echo "------------------------------------------"

#######################################
# Create an entity via the CLI and store its new ID.
# Globals:   GREEN, RED, NC (read, from test_lib.sh)
# Arguments:
#   $1 - name of the variable (nameref) that receives the new ID
#   $2 - human label used in messages (e.g. "dataset", "location A")
#   $3 - jq path to the ID in the JSON response (e.g. '.dataset.id')
#   $@ - remaining args: the CLI subcommand and its flags
# Outputs:   progress messages to stdout
# Exits:     terminates the whole script with status 1 on failure
#######################################
create_entity() {
    local -n _id_out=$1
    local label=$2 jq_path=$3
    shift 3
    local result
    echo -n "Creating test $label... "
    result=$(run_cli "$@")
    _id_out=$(echo "$result" | jq -r "$jq_path // empty")
    if [ -n "$_id_out" ]; then
        echo -e "${GREEN}✓${NC} Created $label: $_id_out"
    else
        echo -e "${RED}✗${NC} Failed to create $label"
        echo "$result" | jq '.'
        exit 1
    fi
}

create_entity DATASET_ID "dataset" '.dataset.id' create dataset --db "$DB_PATH" --name "Bulk Import Test Dataset" --type structured --description "Dataset for testing bulk import"
create_entity LOCATION_A_ID "location A" '.location.id' create location --db "$DB_PATH" --dataset "$DATASET_ID" --name "Test Location A" --lat -41.2865 --lon 174.7762 --timezone "Pacific/Auckland" --description "Test site A"
create_entity LOCATION_B_ID "location B" '.location.id' create location --db "$DB_PATH" --dataset "$DATASET_ID" --name "Test Location B" --lat -36.8485 --lon 174.7633 --timezone "Pacific/Auckland" --description "Test site B"
echo ""

echo "Step 2: Create test CSV file"
echo "-----------------------------"

# Use mktemp for an unpredictable temp name; derive the log path from it
# so the pair stays together. Remove both on any exit (including the
# early exit-1 paths above/below).
CSV_FILE=$(mktemp "/tmp/test_bulk_import_XXXXXX.csv") || exit 1
LOG_FILE="${CSV_FILE%.csv}.log"
trap 'rm -f -- "$CSV_FILE" "$LOG_FILE"' EXIT

# Directories deliberately do not exist: the import should parse the CSV
# and validate locations without importing any files.
cat > "$CSV_FILE" << EOF
location_name,location_id,directory_path,date_range,sample_rate,file_count
Test Location A,$LOCATION_A_ID,/nonexistent/path/a,2024-01,250000,0
Test Location B,$LOCATION_B_ID,/nonexistent/path/b,2024-02,384000,0
EOF

echo -e "${GREEN}✓${NC} Created test CSV at $CSV_FILE"
echo "Contents:"
cat "$CSV_FILE"
echo ""

echo "Step 3: Test bulk_file_import CLI command"
echo "------------------------------------------"

# Note: Directories don't exist, so no files will be imported
# This validates:
# - CSV parsing
# - Location ID validation
# - Cluster auto-creation logic
# - JSON output format

echo "Running bulk import (directories don't exist)..."
IMPORT_RESULT=$(run_cli_with_stderr import bulk --db "$DB_PATH" --dataset "$DATASET_ID" --csv "$CSV_FILE" --log "$LOG_FILE")

# Extract the JSON object: everything from the first line starting with '{'
# through the end of the output. (The previous grep -A 100 | head -20
# pipeline silently truncated JSON longer than 20 lines.)
JSON_OUTPUT=$(printf '%s\n' "$IMPORT_RESULT" | sed -n '/^{/,$p')

# Check for valid JSON output with expected structure
FILES_IMPORTED=$(echo "$JSON_OUTPUT" | jq -r '.files_imported // empty' 2>/dev/null)
if [ -n "$FILES_IMPORTED" ]; then
    echo -e "${GREEN}✓${NC} Tool executed successfully"
    echo "  Files imported: $FILES_IMPORTED"
    echo "  Total locations: $(echo "$JSON_OUTPUT" | jq -r '.total_locations')"
    echo "  Processing time: $(echo "$JSON_OUTPUT" | jq -r '.processing_time')"
else
    # No parsable JSON — distinguish an explicit error from garbage output.
    if echo "$IMPORT_RESULT" | grep -qi "error"; then
        echo -e "${YELLOW}?${NC} Tool returned error:"
        echo "$IMPORT_RESULT" | grep -i "error" | head -3
    else
        echo -e "${RED}✗${NC} Unexpected result:"
        echo "$IMPORT_RESULT" | head -5
    fi
fi
echo ""

# Check if log file was created
if [ -f "$LOG_FILE" ]; then
    echo -e "${GREEN}✓${NC} Log file created at $LOG_FILE"
    echo "  Log entries: $(wc -l < "$LOG_FILE")"
    rm -f "$LOG_FILE"
else
    echo -e "${YELLOW}!${NC} Log file not created (expected if no files processed)"
fi
echo ""

#######################################
# Assert that CLI output contains an expected rejection message.
# Globals:   GREEN, RED, NC (read, from test_lib.sh)
# Arguments:
#   $1 - captured CLI output
#   $2 - case-insensitive grep pattern of acceptable error text
#   $3 - description of the input that should have been rejected
# Outputs:   pass/fail line; first 3 lines of the output on failure
#######################################
expect_rejection() {
    local output=$1 pattern=$2 what=$3
    if echo "$output" | grep -qi "$pattern"; then
        echo -e "${GREEN}✓${NC} Correctly rejected $what"
    else
        echo -e "${RED}✗${NC} Should have rejected $what"
        echo "$output" | head -3
    fi
}

echo "Step 4: Test validation - invalid CSV path"
echo "-------------------------------------------"
INVALID_CSV=$(run_cli_with_stderr import bulk --db "$DB_PATH" --dataset "$DATASET_ID" --csv "/nonexistent/file.csv" --log "$LOG_FILE")
expect_rejection "$INVALID_CSV" "error\|no such file\|not found\|not accessible" "non-existent CSV file"
echo ""

echo "Step 5: Test validation - invalid dataset ID"
echo "---------------------------------------------"
INVALID_DATASET=$(run_cli_with_stderr import bulk --db "$DB_PATH" --dataset "INVALID_ID_123" --csv "$CSV_FILE" --log "$LOG_FILE")
expect_rejection "$INVALID_DATASET" "error\|not found\|no such\|does not exist" "invalid dataset ID"
echo ""

echo "Step 6: Test validation - missing required flags"
echo "-------------------------------------------------"
MISSING_FLAGS=$(run_cli_with_stderr import bulk --db "$DB_PATH" --dataset "$DATASET_ID")
expect_rejection "$MISSING_FLAGS" "missing\|required" "missing required flags"
echo ""

echo "=== TEST SUMMARY ==="
echo "Bulk import CLI command validation complete!"
echo "Note: Directory errors are expected (using non-existent paths)"
echo "The test validates CSV parsing and validation logic."
echo ""

# Cleanup
echo "Cleaning up test files..."
rm -f -- "$CSV_FILE" "$LOG_FILE"
echo -e "${GREEN}✓${NC} Cleanup complete"
echo ""