RMWLXG5HGB44LH3CEA7FWTAFPSZQZQGH52OHVQJUDP6ASPVDVQJAC QQOATNCITSSIPKVUFNZEPN73TGU244GIAW6K37SILAGQWVQ4TCQQC W3A2EECCD23SVHJZN6MXPH2PAVFHH5CNFD2XHPQRRW6M4GUTG3FAC 4VO5HC4R37PDGNAN6SX24X2M5G2V4RXIX2WCTFXDOH2TPRZA7DZQC DORZF5HSV672ZP5HUDYB3J6TBH5O2LMXJE4HPSE7H5SOGZQBDCXQC 7NS27QXZMVTZBK4VPMYL5IKGSTTAWR6NDG5SOVITNX44VNIRZPMAC JCXEAHITH33V2SFDZX6JNPNFHSZPVSVLF556TMXDVJPDWS2TBINAC OGLLBQQYE5KICDMI6EX7ZI4TZT5RB7UFHH7O2DUOZ44QQXVL5YAAC L4STQEXDGCPZXDHTEUBCOQKBMTFDRVXRLNFQHPDHOVXDCJO33LQQC 65G4H2V6262GLHTPQQ5H4NIPDJB7HRPBRVNAD2EL26N75YUM5PWQC NKQAT3RE4IBIWXVMI5LJUINDPHTANNMORZ5N2JFA4AN6UUB72KGAC GXVVTHNXT2IZPR4OB77VMU6GXFEA5TUFZ2MHMA5ASU2DSTFPLDLQC IFVRAERTCCDICNTYTG3TX2WASB6RXQQEJWWXQMQZJSQDQ3HLE5OQC 2Y2ZW565SRONQ2UXPLX5SRP2HDFWMRF5KDXKSKVRCHBBGEGMTVIQC VZGXBNYYO3E7EPFQ4GOLNVMRXXTQDDQZUU2BZ6JHNBDY4B2QLDAAC NAZQZRYQTXWVE2VFY65ONSD6O3EUMNRHARCDVH2D2HKM3YH4RGUAC 5IIWZLQR5ZKOW4OUJCQMJDPWXZKDLGRTK4DQIELXHX7VUBTXUJ4AC _, err = database.Exec(query, args...)
// Begin logged transaction for updatetx, err := db.BeginLoggedTx(ctx, database, "create_or_update_pattern")if err != nil {return output, fmt.Errorf("failed to begin transaction: %w", err)}defer func() {if err != nil {tx.Rollback()}}()_, err = tx.Exec(query, args...)
_, err = database.Exec(query, args...)
// Begin logged transaction for updatetx, err := db.BeginLoggedTx(ctx, database, "create_or_update_location")if err != nil {return output, fmt.Errorf("failed to begin transaction: %w", err)}defer func() {if err != nil {tx.Rollback()}}()_, err = tx.Exec(query, args...)
_, err = database.Exec(query, args...)
// Begin logged transaction for updatetx, err := db.BeginLoggedTx(ctx, database, "create_or_update_dataset")if err != nil {return output, fmt.Errorf("failed to begin transaction: %w", err)}defer func() {if err != nil {tx.Rollback()}}()_, err = tx.Exec(query, args...)
_, err = database.Exec(query, args...)
// Begin logged transaction for updatetx, err := db.BeginLoggedTx(ctx, database, "create_or_update_cluster")if err != nil {return output, fmt.Errorf("failed to begin transaction: %w", err)}defer func() {if err != nil {tx.Rollback()}}()_, err = tx.Exec(query, args...)
#!/bin/bash
# Test event log functionality
# Usage: ./test_event_log.sh [database_path]
set -e

DB="${1:-../db/test.duckdb}"
LOG="$DB.events.jsonl"

echo "=== Testing Event Log ==="
echo "Database: $DB"
echo "Event log: $LOG"
echo ""

# Clean up any event log left over from a previous run so counts start at zero.
rm -f "$LOG"

# Check if database exists and has schema
if [ ! -f "$DB" ]; then
echo "Error: Database $DB does not exist"
exit 1
fi

# Test 1: Create dataset
echo "Test 1: Create dataset..."
RESULT=$(./skraak dataset create --db "$DB" --name "EventLogTest_$(date +%s)" --type structured 2>&1)
DATASET_ID=$(echo "$RESULT" | jq -r '.dataset.id')
echo " Created dataset: $DATASET_ID"

# Check event log
if [ ! -f "$LOG" ]; then
echo " ERROR: Event log not created!"
exit 1
fi
EVENT_COUNT=$(wc -l < "$LOG")
if [ "$EVENT_COUNT" -lt 1 ]; then
echo " ERROR: No events logged!"
exit 1
fi
echo " Event log has $EVENT_COUNT entry/entries"

# Test 2: Verify event structure
# jq -e exits non-zero when the field is missing/null; set -e then aborts the script.
echo ""
echo "Test 2: Verify event structure..."
EVENT=$(head -1 "$LOG")
echo "$EVENT" | jq -e '.id' > /dev/null && echo " ✓ Has id"
echo "$EVENT" | jq -e '.timestamp' > /dev/null && echo " ✓ Has timestamp"
echo "$EVENT" | jq -e '.tool' > /dev/null && echo " ✓ Has tool"
echo "$EVENT" | jq -e '.queries' > /dev/null && echo " ✓ Has queries"
echo "$EVENT" | jq -e '.success' > /dev/null && echo " ✓ Has success"

# Test 3: Create location
echo ""
echo "Test 3: Create location..."
RESULT=$(./skraak location create --db "$DB" --dataset "$DATASET_ID" --name "TestLoc_$(date +%s)" --lat -36.85 --lon 174.76 --timezone Pacific/Auckland 2>&1)
LOCATION_ID=$(echo "$RESULT" | jq -r '.location.id')
echo " Created location: $LOCATION_ID"

# Test 4: Verify multiple events
EVENT_COUNT=$(wc -l < "$LOG")
if [ "$EVENT_COUNT" -lt 2 ]; then
echo " ERROR: Expected at least 2 events, got $EVENT_COUNT"
exit 1
fi
echo " Event log has $EVENT_COUNT entries"

# Test 5: Dry-run replay
echo ""
echo "Test 5: Dry-run replay..."
./skraak replay events --db "$DB" --log "$LOG" --dry-run > /dev/null 2>&1
echo " ✓ Dry-run succeeded"

# Test 6: Verify replay command flags
echo ""
echo "Test 6: Verify replay flags..."
./skraak replay events --db "$DB" --log "$LOG" --last 1 --dry-run > /dev/null 2>&1
echo " ✓ --last flag works"

echo ""
echo "=== All tests passed ==="
echo ""
echo "Event log contents:"
# Read the file with jq directly (no useless cat).
jq -c '{id, tool, queries: (.queries | length), success}' "$LOG"
package db

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	gonanoid "github.com/matoous/go-nanoid/v2"
)

// LoggedTx wraps *sql.Tx and records all Exec/ExecContext calls for mutation
// logging. Recorded queries are appended to the event log only on a successful
// Commit; Rollback discards them.
type LoggedTx struct {
	tx        *sql.Tx
	queries   []QueryRecord
	mu        sync.Mutex // guards queries
	toolName  string
	startTime time.Time
}

// QueryRecord represents a single SQL statement with parameters.
type QueryRecord struct {
	SQL        string        `json:"sql"`
	Parameters []interface{} `json:"parameters"`
}

// TransactionEvent represents a complete transaction for the event log.
type TransactionEvent struct {
	ID        string        `json:"id"`
	Timestamp time.Time     `json:"timestamp"`
	Tool      string        `json:"tool,omitempty"`
	Queries   []QueryRecord `json:"queries"`
	Success   bool          `json:"success"`
	Duration  int64         `json:"duration_ms"`
}

// LoggedStmt wraps *sql.Stmt to intercept Exec calls on prepared statements.
type LoggedStmt struct {
	stmt *sql.Stmt
	tx   *LoggedTx
	sql  string
}

// EventLogConfig holds configuration for event logging.
type EventLogConfig struct {
	Enabled bool
	Path    string
}

// Package-level event log state. All four variables are guarded by eventLogMu.
var (
	eventLogConfig EventLogConfig
	eventLogMu     sync.Mutex
	eventLogFile   *os.File
	eventLogEnc    *json.Encoder
)

// SetEventLogConfig configures event logging globally.
func SetEventLogConfig(cfg EventLogConfig) {
	eventLogMu.Lock()
	defer eventLogMu.Unlock()
	// Close the existing file if the path changed so the next write reopens
	// at the new location.
	if eventLogFile != nil && eventLogConfig.Path != cfg.Path {
		eventLogFile.Close()
		eventLogFile = nil
		eventLogEnc = nil
	}
	eventLogConfig = cfg
}

// GetEventLogConfig returns the current event log configuration.
func GetEventLogConfig() EventLogConfig {
	eventLogMu.Lock()
	defer eventLogMu.Unlock()
	return eventLogConfig
}

// BeginLoggedTx starts a new transaction that logs all mutations.
// toolName is optional and identifies which tool initiated the transaction.
func BeginLoggedTx(ctx context.Context, db *sql.DB, toolName string) (*LoggedTx, error) {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return nil, err
	}
	return &LoggedTx{
		tx:        tx,
		queries:   make([]QueryRecord, 0),
		toolName:  toolName,
		startTime: time.Now(),
	}, nil
}

// ExecContext executes and records the SQL statement if it's a mutation.
func (l *LoggedTx) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
	result, err := l.tx.ExecContext(ctx, query, args...)
	// Record only statements that both succeeded and mutate data; failed
	// statements never reach the log.
	if err == nil && isMutation(query) {
		l.mu.Lock()
		l.queries = append(l.queries, QueryRecord{
			SQL:        query,
			Parameters: args,
		})
		l.mu.Unlock()
	}
	return result, err
}

// Exec executes and records the SQL statement if it's a mutation.
func (l *LoggedTx) Exec(query string, args ...interface{}) (sql.Result, error) {
	return l.ExecContext(context.Background(), query, args...)
}

// QueryRowContext delegates to underlying tx (not logged - read operation).
func (l *LoggedTx) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
	return l.tx.QueryRowContext(ctx, query, args...)
}

// QueryRow delegates to underlying tx (not logged - read operation).
func (l *LoggedTx) QueryRow(query string, args ...interface{}) *sql.Row {
	return l.tx.QueryRow(query, args...)
}

// QueryContext delegates to underlying tx (not logged - read operation).
func (l *LoggedTx) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
	return l.tx.QueryContext(ctx, query, args...)
}

// Query delegates to underlying tx (not logged - read operation).
func (l *LoggedTx) Query(query string, args ...interface{}) (*sql.Rows, error) {
	return l.tx.Query(query, args...)
}

// PrepareContext creates a logged prepared statement.
func (l *LoggedTx) PrepareContext(ctx context.Context, query string) (*LoggedStmt, error) {
	stmt, err := l.tx.PrepareContext(ctx, query)
	if err != nil {
		return nil, err
	}
	return &LoggedStmt{stmt: stmt, tx: l, sql: query}, nil
}

// Prepare creates a logged prepared statement.
func (l *LoggedTx) Prepare(query string) (*LoggedStmt, error) {
	return l.PrepareContext(context.Background(), query)
}

// Rollback rolls back the transaction (discards recorded queries).
func (l *LoggedTx) Rollback() error {
	l.mu.Lock()
	l.queries = nil // Discard recorded queries
	l.mu.Unlock()
	return l.tx.Rollback()
}

// Commit commits the transaction and logs all recorded queries on success.
func (l *LoggedTx) Commit() error {
	err := l.tx.Commit()
	if err != nil {
		return err
	}
	// Log on success only.
	l.mu.Lock()
	queries := l.queries
	l.mu.Unlock()
	// Fix: previously this also read eventLogConfig.Enabled here WITHOUT
	// holding eventLogMu, racing with SetEventLogConfig. writeEvent already
	// re-checks Enabled under the lock, so defer the check to it.
	if len(queries) > 0 {
		l.writeEvent(queries)
	}
	return nil
}

// writeEvent writes the transaction to the event log. Event-log failures are
// reported on stderr but never fail the (already committed) transaction.
func (l *LoggedTx) writeEvent(queries []QueryRecord) {
	eventLogMu.Lock()
	defer eventLogMu.Unlock()
	if !eventLogConfig.Enabled {
		return
	}
	// Ensure file is open
	if err := ensureEventLogFile(); err != nil {
		// Log to stderr but don't fail the commit
		fmt.Fprintf(os.Stderr, "Warning: failed to open event log: %v\n", err)
		return
	}
	id, err := gonanoid.New(21)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Warning: failed to generate event ID: %v\n", err)
		return
	}
	event := TransactionEvent{
		ID:        id,
		Timestamp: time.Now(),
		Tool:      l.toolName,
		Queries:   queries,
		Success:   true,
		Duration:  time.Since(l.startTime).Milliseconds(),
	}
	if err := eventLogEnc.Encode(event); err != nil {
		fmt.Fprintf(os.Stderr, "Warning: failed to write event log: %v\n", err)
	}
}

// LoggedStmt methods

// ExecContext executes the prepared statement and logs if it's a mutation.
func (s *LoggedStmt) ExecContext(ctx context.Context, args ...interface{}) (sql.Result, error) {
	result, err := s.stmt.ExecContext(ctx, args...)
	if err == nil && isMutation(s.sql) {
		s.tx.mu.Lock()
		s.tx.queries = append(s.tx.queries, QueryRecord{
			SQL:        s.sql,
			Parameters: args,
		})
		s.tx.mu.Unlock()
	}
	return result, err
}

// Exec executes the prepared statement and logs if it's a mutation.
func (s *LoggedStmt) Exec(args ...interface{}) (sql.Result, error) {
	return s.ExecContext(context.Background(), args...)
}

// QueryRowContext delegates to underlying statement.
func (s *LoggedStmt) QueryRowContext(ctx context.Context, args ...interface{}) *sql.Row {
	return s.stmt.QueryRowContext(ctx, args...)
}

// QueryRow delegates to underlying statement.
func (s *LoggedStmt) QueryRow(args ...interface{}) *sql.Row {
	return s.stmt.QueryRow(args...)
}

// QueryContext delegates to underlying statement.
func (s *LoggedStmt) QueryContext(ctx context.Context, args ...interface{}) (*sql.Rows, error) {
	return s.stmt.QueryContext(ctx, args...)
}

// Query delegates to underlying statement.
func (s *LoggedStmt) Query(args ...interface{}) (*sql.Rows, error) {
	return s.stmt.Query(args...)
}

// Close closes the prepared statement.
func (s *LoggedStmt) Close() error {
	return s.stmt.Close()
}

// isMutation returns true if the SQL is a mutation (INSERT, UPDATE, DELETE).
// NOTE: the WITH branch is a substring heuristic — a CTE whose text merely
// contains "UPDATE" (e.g. a column named last_updated) is logged as a
// mutation. Over-logging is the safe direction for replay.
func isMutation(sqlStr string) bool {
	upper := strings.ToUpper(strings.TrimSpace(sqlStr))
	// Handle WITH clauses (CTEs) that may contain mutations
	if strings.HasPrefix(upper, "WITH") {
		// Check for INSERT/UPDATE/DELETE within the query
		return strings.Contains(upper, "INSERT") ||
			strings.Contains(upper, "UPDATE") ||
			strings.Contains(upper, "DELETE")
	}
	return strings.HasPrefix(upper, "INSERT") ||
		strings.HasPrefix(upper, "UPDATE") ||
		strings.HasPrefix(upper, "DELETE")
}

// ensureEventLogFile opens the event log file if not already open.
// Caller must hold eventLogMu.
func ensureEventLogFile() error {
	if eventLogFile != nil {
		return nil
	}
	dir := filepath.Dir(eventLogConfig.Path)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("failed to create event log directory: %w", err)
	}
	f, err := os.OpenFile(eventLogConfig.Path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return fmt.Errorf("failed to open event log file: %w", err)
	}
	eventLogFile = f
	eventLogEnc = json.NewEncoder(f)
	eventLogEnc.SetEscapeHTML(false)
	return nil
}

// CloseEventLog closes the event log file.
func CloseEventLog() error {
	eventLogMu.Lock()
	defer eventLogMu.Unlock()
	// Disable logging before closing
	eventLogConfig.Enabled = false
	if eventLogFile != nil {
		err := eventLogFile.Close()
		eventLogFile = nil
		eventLogEnc = nil
		return err
	}
	return nil
}

// MarshalJSON implements json.Marshaler for QueryRecord.
// Handles special types like time.Time, nil, and nullable types.
func (q QueryRecord) MarshalJSON() ([]byte, error) {
	// Create a helper struct with string parameters. The local type has no
	// MarshalJSON method, so encoding it does not recurse into this one.
	type QueryRecordJSON struct {
		SQL        string        `json:"sql"`
		Parameters []interface{} `json:"parameters"`
	}
	result := QueryRecordJSON{
		SQL:        q.SQL,
		Parameters: make([]interface{}, len(q.Parameters)),
	}
	for i, param := range q.Parameters {
		result.Parameters[i] = marshalParam(param)
	}
	return json.Marshal(result)
}

// marshalParam converts a parameter to a JSON-serializable value.
func marshalParam(param interface{}) interface{} {
	if param == nil {
		return nil
	}
	switch v := param.(type) {
	case time.Time:
		return v.Format(time.RFC3339Nano)
	case *time.Time:
		if v == nil {
			return nil
		}
		return v.Format(time.RFC3339Nano)
	case string:
		return v
	case *string:
		if v == nil {
			return nil
		}
		return *v
	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
		return v
	case float32, float64, bool:
		return v
	case []byte:
		// NOTE(review): encoding/json encodes []byte as base64; the replay
		// path passes the decoded string back to Exec, not raw bytes —
		// confirm blob columns round-trip correctly.
		return v
	default:
		// For other types, try to convert to string
		return fmt.Sprintf("%v", v)
	}
}
package cmd

import (
	"bufio"
	"context"
	"database/sql"
	"encoding/json"
	"flag"
	"fmt"
	"os"
	"strings"

	"skraak/db"
)

// RunReplay handles the "replay" subcommand.
func RunReplay(args []string) {
	if len(args) < 1 {
		printReplayUsage()
		os.Exit(1)
	}
	switch args[0] {
	case "events":
		runReplayEvents(args[1:])
	default:
		fmt.Fprintf(os.Stderr, "Unknown replay subcommand: %s\n\n", args[0])
		printReplayUsage()
		os.Exit(1)
	}
}

// printReplayUsage prints top-level usage for the replay subcommand.
func printReplayUsage() {
	fmt.Fprintf(os.Stderr, "Usage: skraak replay <subcommand> [options]\n\n")
	fmt.Fprintf(os.Stderr, "Subcommands:\n")
	fmt.Fprintf(os.Stderr, " events Replay event log into database\n")
	fmt.Fprintf(os.Stderr, "\nExamples:\n")
	fmt.Fprintf(os.Stderr, " skraak replay events --db ./backup.duckdb --log ./skraak.duckdb.events.jsonl\n")
	fmt.Fprintf(os.Stderr, " skraak replay events --db ./backup.duckdb --log ./events.jsonl --dry-run\n")
	fmt.Fprintf(os.Stderr, " skraak replay events --db ./backup.duckdb --log ./events.jsonl --last 10\n")
}

// runReplayEvents parses flags, reads and filters the event log, then either
// prints the selected events (--dry-run) or replays each one as its own
// transaction against the target database.
func runReplayEvents(args []string) {
	fs := flag.NewFlagSet("replay events", flag.ExitOnError)
	dbPath := fs.String("db", "", "Path to target database (required)")
	logPath := fs.String("log", "", "Path to event log file (required)")
	dryRun := fs.Bool("dry-run", false, "Print events without executing")
	fromID := fs.String("from", "", "Start from event ID (inclusive)")
	toID := fs.String("to", "", "Stop at event ID (inclusive)")
	lastN := fs.Int("last", 0, "Replay last N events (0 = all)")
	continueOnError := fs.Bool("continue", false, "Continue past errors")
	fs.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: skraak replay events [options]\n\n")
		fmt.Fprintf(os.Stderr, "Replay event log into database.\n\n")
		fmt.Fprintf(os.Stderr, "Options:\n")
		fs.PrintDefaults()
		fmt.Fprintf(os.Stderr, "\nExamples:\n")
		fmt.Fprintf(os.Stderr, " skraak replay events --db ./backup.duckdb --log ./events.jsonl\n")
		fmt.Fprintf(os.Stderr, " skraak replay events --db ./backup.duckdb --log ./events.jsonl --dry-run\n")
		fmt.Fprintf(os.Stderr, " skraak replay events --db ./backup.duckdb --log ./events.jsonl --last 10\n")
	}
	if err := fs.Parse(args); err != nil {
		os.Exit(1)
	}

	// Validate required flags
	missing := []string{}
	if *dbPath == "" {
		missing = append(missing, "--db")
	}
	if *logPath == "" {
		missing = append(missing, "--log")
	}
	if len(missing) > 0 {
		fmt.Fprintf(os.Stderr, "Error: missing required flags: %v\n\n", missing)
		fs.Usage()
		os.Exit(1)
	}

	// Read events
	events, err := readEvents(*logPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error reading events: %v\n", err)
		os.Exit(1)
	}

	// Filter events
	events = filterEvents(events, *fromID, *toID, *lastN)
	fmt.Fprintf(os.Stderr, "Found %d events to replay\n", len(events))

	if *dryRun {
		for i, event := range events {
			fmt.Printf("\n[%d/%d] Event %s (%s)\n", i+1, len(events), event.ID, event.Tool)
			for _, q := range event.Queries {
				fmt.Printf(" SQL: %s\n", truncateSQL(q.SQL, 80))
				fmt.Printf(" Params: %v\n", q.Parameters)
			}
		}
		return
	}

	// Open database
	database, err := db.OpenWriteableDB(*dbPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error opening database: %v\n", err)
		os.Exit(1)
	}
	defer database.Close()

	// Disable event logging for replay, otherwise replayed mutations would be
	// re-recorded into an event log of their own.
	db.SetEventLogConfig(db.EventLogConfig{Enabled: false})

	// Replay each event
	successCount := 0
	failCount := 0
	for i, event := range events {
		fmt.Fprintf(os.Stderr, "\n[%d/%d] Replaying event %s (%s)...\n", i+1, len(events), event.ID, event.Tool)
		err := replayEvent(database, event)
		if err != nil {
			failCount++
			fmt.Fprintf(os.Stderr, " ERROR: %v\n", err)
			if !*continueOnError {
				fmt.Fprintf(os.Stderr, "Stopping due to error. Use --continue to skip errors.\n")
				os.Exit(1)
			}
		} else {
			successCount++
			fmt.Fprintf(os.Stderr, " OK (%d queries)\n", len(event.Queries))
		}
	}
	fmt.Fprintf(os.Stderr, "\nReplay complete: %d succeeded, %d failed\n", successCount, failCount)
}

// TransactionEvent represents a transaction event from the log.
// Deliberately duplicated from package db (with Timestamp as a plain string)
// so the replay command depends only on the JSONL wire format.
type TransactionEvent struct {
	ID        string        `json:"id"`
	Timestamp string        `json:"timestamp"`
	Tool      string        `json:"tool,omitempty"`
	Queries   []QueryRecord `json:"queries"`
	Success   bool          `json:"success"`
	Duration  int64         `json:"duration_ms"`
}

// QueryRecord represents a single SQL statement with parameters.
type QueryRecord struct {
	SQL        string        `json:"sql"`
	Parameters []interface{} `json:"parameters"`
}

// readEvents reads all events from a JSONL file. Malformed lines are skipped
// with a warning rather than aborting the whole replay.
func readEvents(path string) ([]TransactionEvent, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("failed to open event log: %w", err)
	}
	defer file.Close()

	var events []TransactionEvent
	scanner := bufio.NewScanner(file)
	// Fix: bufio.Scanner's default 64 KiB token limit silently truncates the
	// read (ErrTooLong) for events with many queries, e.g. batch imports.
	// Allow lines up to 16 MiB.
	scanner.Buffer(make([]byte, 0, 64*1024), 16*1024*1024)
	lineNum := 0
	for scanner.Scan() {
		lineNum++
		line := scanner.Bytes()
		if len(line) == 0 {
			continue
		}
		var event TransactionEvent
		if err := json.Unmarshal(line, &event); err != nil {
			fmt.Fprintf(os.Stderr, "Warning: failed to parse line %d: %v\n", lineNum, err)
			continue
		}
		events = append(events, event)
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("error reading event log: %w", err)
	}
	return events, nil
}

// filterEvents filters events based on criteria: an inclusive [fromID, toID]
// window, then the last N of the remainder, then successful events only.
// Unknown fromID/toID leave that boundary at the start/end respectively.
func filterEvents(events []TransactionEvent, fromID, toID string, lastN int) []TransactionEvent {
	// Filter by fromID
	if fromID != "" {
		startIdx := 0
		for i, e := range events {
			if e.ID == fromID {
				startIdx = i
				break
			}
		}
		events = events[startIdx:]
	}
	// Filter by toID
	if toID != "" {
		endIdx := len(events)
		for i, e := range events {
			if e.ID == toID {
				endIdx = i + 1
				break
			}
		}
		events = events[:endIdx]
	}
	// Filter by lastN
	if lastN > 0 && len(events) > lastN {
		events = events[len(events)-lastN:]
	}
	// Only replay successful events
	var filtered []TransactionEvent
	for _, e := range events {
		if e.Success {
			filtered = append(filtered, e)
		}
	}
	return filtered
}

// replayEvent replays a single transaction event. All queries of the event
// run in one transaction; any failure rolls the whole event back.
func replayEvent(database *sql.DB, event TransactionEvent) error {
	ctx := context.Background()
	tx, err := database.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	for _, q := range event.Queries {
		// Parameters come straight from JSON (strings, float64, bool, nil).
		_, err := tx.ExecContext(ctx, q.SQL, q.Parameters...)
		if err != nil {
			tx.Rollback()
			return fmt.Errorf("query failed: %w (SQL: %s)", err, truncateSQL(q.SQL, 50))
		}
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("failed to commit transaction: %w", err)
	}
	return nil
}

// truncateSQL truncates a SQL string for display.
func truncateSQL(sql string, maxLen int) string {
	sql = strings.Join(strings.Fields(sql), " ") // Normalize whitespace
	if len(sql) <= maxLen {
		return sql
	}
	return sql[:maxLen] + "..."
}
## Event Log

All mutating SQL operations logged to `<database>.events.jsonl` for backup synchronization.

**What's logged:**

- INSERT, UPDATE, DELETE operations
- On successful commit only (rollbacks discarded)
- Includes tool name, SQL, parameters, timestamp

**Replay on backup:**

```bash
skraak replay events --db backup.duckdb --log skraak.duckdb.events.jsonl
skraak replay events --db backup.duckdb --log events.jsonl --dry-run
skraak replay events --db backup.duckdb --log events.jsonl --last 10
```
**CLI Commands:** `mcp`, `sql`, `dataset`, `location`, `cluster`, `pattern`, `import`**Test Scripts:** 8 comprehensive shell scripts
**CLI Commands:** `mcp`, `sql`, `dataset`, `location`, `cluster`, `pattern`, `import`, `replay`**Event Log:** SQL-level mutation capture for backup sync (`<db>.events.jsonl`)**Test Scripts:** 9 comprehensive shell scripts
## [2026-02-18] Event Log for Database Mutation Replay

**New feature: SQL-level event logging for backup synchronization**

**Purpose:** Capture all mutating SQL operations (INSERT, UPDATE, DELETE) to enable replay on backup databases for synchronization.

**Architecture:**

- Transaction wrapper (`db.LoggedTx`) intercepts all mutations
- Logged only on successful commit (rollback discards recorded queries)
- Events written to JSONL file (`<database>.events.jsonl`)
- Prepared statements fully supported via `LoggedStmt` wrapper

**Added:**

- `db/tx_logger.go` — LoggedTx, LoggedStmt, TransactionEvent types
- `cmd/replay.go` — `skraak replay events` CLI command
- `shell_scripts/test_event_log.sh` — Integration test script

**Modified:**

- All CLI commands initialize event log with defer close
- All tools use `db.BeginLoggedTx()` instead of `database.BeginTx()`
- `utils/cluster_import.go` updated for batch imports

**Event format (JSONL):**

```json
{
  "id": "V1StGXR8_Z5jdHi6B-myT",
  "timestamp": "2026-02-18T14:30:22+13:00",
  "tool": "create_or_update_dataset",
  "queries": [{"sql": "INSERT INTO ...", "parameters": [...]}],
  "success": true,
  "duration_ms": 45
}
```

**Replay command:**

```bash
skraak replay events --db backup.duckdb --log skraak.duckdb.events.jsonl
skraak replay events --db backup.duckdb --log events.jsonl --dry-run
skraak replay events --db backup.duckdb --log events.jsonl --last 10
```
docs/.pi/