import { Hono } from "hono";
import { eq } from "drizzle-orm";
import { nanoid } from "nanoid";
import {
  file,
  fileDataset,
  mothMetadata,
  location,
  cluster,
  dataset,
} from "../../../db/schema";
import { authenticate, checkUserPermission } from "../middleware/auth";
import { createDatabase } from "../utils/database";
import { standardErrorResponse, handleAuthError } from "../utils/errorHandler";
import type { Env, JWTPayload } from "../types";

const fileImport = new Hono<{ Bindings: Env }>();

/**
 * Interface for file import request body
 */
interface FileImportRequest {
  files: Array<{
    fileName: string;
    path?: string;
    xxh64Hash: string;
    locationId: string;
    timestampLocal: string; // ISO 8601 timestamp
    clusterId?: string;
    duration: number;
    sampleRate: number;
    description?: string;
    upload?: boolean;
    maybeSolarNight?: boolean;
    maybeCivilNight?: boolean;
    moonPhase?: number;
    // Optional moth metadata
    mothMetadata?: {
      timestamp: string; // ISO 8601 timestamp
      recorderId?: string;
      gain?: 'low' | 'low-medium' | 'medium' | 'medium-high' | 'high';
      batteryV?: number;
      tempC?: number;
    };
  }>;
  datasetId: string;
}

/**
 * POST /api/file-import
 * Bulk import files into the database.
 * Creates records in the file, file_dataset, and optionally moth_metadata tables.
 */
fileImport.post("/", authenticate, async (c) => {
  try {
    const jwtPayload = (c as unknown as { jwtPayload: JWTPayload }).jwtPayload;
    const userId = jwtPayload.sub;
    const db = createDatabase(c.env);
    const body = await c.req.json() as FileImportRequest;

    // Check user permission
    const hasPermission = await checkUserPermission(db, userId, body.datasetId, "ADMIN");
    if (!hasPermission) {
      return c.json(
        {
          error: "Forbidden",
          message: "You do not have permission to upload files to this dataset",
        },
        403
      );
    }

    // Validate request
    if (!body.files || !Array.isArray(body.files) || body.files.length === 0) {
      return c.json(
        {
          error: "Bad Request",
          message: "Files array is required and must not be empty",
        },
        400
      );
    }

    if (!body.datasetId) {
      return c.json(
        {
          error: "Bad Request",
          message: "datasetId is required",
        },
        400
      );
    }

    // Validate that the dataset exists
    const datasetResult = await db
      .select({ id: dataset.id })
      .from(dataset)
      .where(eq(dataset.id, body.datasetId))
      .limit(1);

    if (datasetResult.length === 0) {
      return c.json(
        {
          error: "Not Found",
          message: "Dataset not found",
        },
        404
      );
    }

    // Validate that referenced locations and clusters exist in this dataset
    const locationIds = [...new Set(body.files.map(f => f.locationId))];
    const clusterIds = [...new Set(body.files.map(f => f.clusterId).filter(Boolean))];

    const locationResults = await db
      .select({ id: location.id })
      .from(location)
      .where(eq(location.datasetId, body.datasetId));
    const validLocationIds = new Set(locationResults.map(l => l.id));

    if (clusterIds.length > 0) {
      const clusterResults = await db
        .select({ id: cluster.id })
        .from(cluster)
        .where(eq(cluster.datasetId, body.datasetId));
      const validClusterIds = new Set(clusterResults.map(c => c.id));

      // Check that all cluster IDs are valid
      for (const clusterId of clusterIds) {
        if (clusterId && !validClusterIds.has(clusterId)) {
          return c.json(
            {
              error: "Bad Request",
              message: `Invalid clusterId: ${clusterId}`,
            },
            400
          );
        }
      }
    }

    // Check that all location IDs are valid
    for (const locationId of locationIds) {
      if (!validLocationIds.has(locationId)) {
        return c.json(
          {
            error: "Bad Request",
            message: `Invalid locationId: ${locationId}`,
          },
          400
        );
      }
    }

    const now = new Date();
    const createdFiles: string[] = [];

    // Process files one by one (sequential operations instead of a transaction,
    // because the Neon HTTP driver does not support transactions)
    for (const fileData of body.files) {
      try {
        const fileId = nanoid(21); // Generate nanoid for file

        // Validate required fields
        if (
          !fileData.fileName || !fileData.xxh64Hash || !fileData.locationId ||
          !fileData.timestampLocal || fileData.duration === undefined ||
          fileData.sampleRate === undefined
        ) {
          return c.json(
            {
              error: "Bad Request",
              message: "Missing required file fields: fileName, xxh64Hash, locationId, timestampLocal, duration, sampleRate",
            },
            400
          );
        }

        // Insert into file table
        await db.insert(file).values({
          id: fileId,
          fileName: fileData.fileName,
          path: fileData.path || null,
          xxh64Hash: fileData.xxh64Hash,
          locationId: fileData.locationId,
          timestampLocal: new Date(fileData.timestampLocal),
          clusterId: fileData.clusterId || null,
          duration: fileData.duration.toString(),
          sampleRate: fileData.sampleRate,
          description: fileData.description || null,
          upload: fileData.upload || false,
          maybeSolarNight: fileData.maybeSolarNight || null,
          maybeCivilNight: fileData.maybeCivilNight || null,
          moonPhase: fileData.moonPhase?.toString() || null,
          createdBy: userId,
          createdAt: now,
          lastModified: now,
          modifiedBy: userId,
          active: true,
        });

        // Insert into file_dataset table
        await db.insert(fileDataset).values({
          fileId: fileId,
          datasetId: body.datasetId,
          createdAt: now,
          createdBy: userId,
          lastModified: now,
          modifiedBy: userId,
        });

        // Insert moth metadata if provided
        if (fileData.mothMetadata) {
          await db.insert(mothMetadata).values({
            fileId: fileId,
            timestamp: new Date(fileData.mothMetadata.timestamp),
            recorderId: fileData.mothMetadata.recorderId || null,
            gain: fileData.mothMetadata.gain || null,
            batteryV: fileData.mothMetadata.batteryV?.toString() || null,
            tempC: fileData.mothMetadata.tempC?.toString() || null,
            createdAt: now,
            createdBy: userId,
            lastModified: now,
            modifiedBy: userId,
            active: true,
          });
        }

        createdFiles.push(fileId);
      } catch (error) {
        console.error(`Error processing file ${fileData.fileName}:`, error);
        // Without a transaction there is nothing to roll back, so fail fast
        // rather than silently leaving the batch partially imported.
        throw error;
      }
    }

    return c.json({
      message: "Files imported successfully",
      data: {
        importedCount: createdFiles.length,
        fileIds: createdFiles,
      },
    });
  } catch (error) {
    console.error("Error importing files:", error);

    if (error && typeof error === "object" && "message" in error) {
      const errorResponse = handleAuthError("file import");
      return c.json(errorResponse, 401);
    }

    const errorResponse = standardErrorResponse(error, "Failed to import files");
    return c.json(errorResponse, 500);
  }
});

export default fileImport;
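/*
 * Example request (a minimal sketch of how a client might call this route).
 * The mount path, bearer token, and all IDs below are illustrative assumptions,
 * not values defined elsewhere in this codebase.
 *
 *   const response = await fetch("/api/file-import", {
 *     method: "POST",
 *     headers: {
 *       "Content-Type": "application/json",
 *       Authorization: `Bearer ${token}`, // hypothetical JWT
 *     },
 *     body: JSON.stringify({
 *       datasetId: "dataset-id",            // hypothetical
 *       files: [
 *         {
 *           fileName: "20250224_210000.WAV",
 *           xxh64Hash: "a1b2c3d4e5f60708",  // hypothetical hash
 *           locationId: "location-id",      // hypothetical
 *           timestampLocal: "2025-02-24T21:00:00.000+13:00",
 *           duration: 60,
 *           sampleRate: 48000,
 *           mothMetadata: {
 *             timestamp: "2025-02-24T08:00:00.000Z",
 *             recorderId: "248AB50153AB0549",
 *             gain: "medium",
 *             batteryV: 4.3,
 *             tempC: 15.8,
 *           },
 *         },
 *       ],
 *     }),
 *   });
 *   // On success the response body is { message, data: { importedCount, fileIds } }.
 */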
/**
 * Filename parsing utilities for non-AudioMoth files
 * Based on Julia code logic for extracting timestamps from filenames
 * Works on clusters of filenames to determine date formats with proper context
 */

export interface ParsedTimestamp {
  timestampLocal: string; // ISO 8601 timestamp in local timezone
  timestampUTC: string; // ISO 8601 timestamp in UTC
}

export interface FilenameParsing {
  fileName: string;
  timestamp: ParsedTimestamp;
}

/**
 * Parses a vector of filenames to extract DateTime objects.
 * Assumes it is given a folder from one recording session, not random files.
 * For 6-digit dates the year digits need to have less variation than the day digits.
 * Does not handle misshapen dates.
 */
export function dateTimeOfFilename(filenames: string[]): Date[] {
  // Assert we have files to work with
  if (filenames.length === 0) {
    throw new Error("This is an empty vector of filenames");
  }

  // Extract date_time patterns using regex (\d{6}|\d{8})_\d{6}
  const pattern = /(\d{6}|\d{8})_\d{6}/;
  const f2 = filenames.map(filename => {
    const match = filename.match(pattern);
    if (!match) {
      throw new Error(`No date_time pattern found in filename: ${filename}`);
    }
    return match[0];
  });

  // Date_time format must be the same for the whole vector
  const lengths = f2.map(x => x.length);
  const uniqueLengths = [...new Set(lengths)];
  if (uniqueLengths.length !== 1) {
    throw new Error("Different date formats in vector");
  }

  // Assert proper length
  const firstLength = f2[0].length;
  if (firstLength !== 15 && firstLength !== 13) {
    throw new Error("Wrong length to be a date_time");
  }

  // Split into date and time parts
  const rawDt = f2.map(x => {
    const parts = x.split('_');
    return [parts[0], parts[1]];
  });

  // Parse date and time strings
  const dates = rawDt.map(x => x[0]);
  const times = rawDt.map(x => x[1]);
  const parsedDates = parseDateStrings(dates);
  const parsedTimes = parseTimeStrings(times);

  // Combine into Date objects
  const dateTimeObjects = parsedDates.map((date, index) => {
    const time = parsedTimes[index];
    return new Date(date.year, date.month - 1, date.day, time.hour, time.minute, time.second);
  });

  return dateTimeObjects;
}

/**
 * Parse time strings (always HHMMSS format)
 */
function parseTimeStrings(times: string[]): Array<{ hour: number, minute: number, second: number }> {
  return times.map(timeStr => {
    const h = parseInt(timeStr.slice(0, 2), 10);
    const m = parseInt(timeStr.slice(2, 4), 10);
    const s = parseInt(timeStr.slice(4, 6), 10);
    if (isNaN(h) || isNaN(m) || isNaN(s)) {
      throw new Error(`Invalid time format: ${timeStr}`);
    }
    return { hour: h, minute: m, second: s };
  });
}

/**
 * Parse date strings - handles both 8-digit (YYYYMMDD) and 6-digit (YYMMDD/DDMMYY) formats
 */
function parseDateStrings(dates: string[]): Array<{ year: number, month: number, day: number }> {
  const firstDateLength = dates[0].length;

  if (firstDateLength === 8) {
    // 8-digit format: YYYYMMDD
    return dates.map(dateStr => {
      const y = parseInt(dateStr.slice(0, 4), 10);
      const m = parseInt(dateStr.slice(4, 6), 10);
      const d = parseInt(dateStr.slice(6, 8), 10);
      if (isNaN(y) || isNaN(m) || isNaN(d)) {
        throw new Error(`Invalid 8-digit date format: ${dateStr}`);
      }
      return { year: y, month: m, day: d };
    });
  } else if (firstDateLength === 6) {
    // 6-digit format: need to determine YYMMDD vs DDMMYY
    return parseShortDateStrings(dates);
  } else {
    throw new Error("Date is not 8 or 6 digits long");
  }
}

/**
 * Parse 6-digit date strings - determines YYMMDD vs DDMMYY by comparing digit variation
 */
function parseShortDateStrings(dates: string[]): Array<{ year: number, month: number, day: number }> {
  if (dates.length <= 1) {
    throw new Error("Not enough files to work out YYMMDD v DDMMYY");
  }

  // Extract first 2, middle 2, and last 2 digits
  const x1 = dates.map(dateStr => parseInt(dateStr.slice(0, 2), 10));
  const m = dates.map(dateStr => parseInt(dateStr.slice(2, 4), 10));
  const x2 = dates.map(dateStr => parseInt(dateStr.slice(4, 6), 10));

  // Check for parsing errors
  if (x1.some(isNaN) || m.some(isNaN) || x2.some(isNaN)) {
    throw new Error("Invalid 6-digit date format");
  }

  // Determine year and day positions by comparing uniqueness:
  // if x2 has at least as many unique values as x1, then x1 is the year (YYMMDD);
  // otherwise x2 is the year (DDMMYY).
  const uniqueX1 = new Set(x1);
  const uniqueX2 = new Set(x2);

  let year: number[];
  let day: number[];

  if (uniqueX2.size >= uniqueX1.size) {
    // YYMMDD format
    year = x1.map(y => y + 2000);
    day = x2;
  } else {
    // DDMMYY format
    year = x2.map(y => y + 2000);
    day = x1;
  }

  return dates.map((_, index) => ({
    year: year[index],
    month: m[index],
    day: day[index],
  }));
}

/**
 * Calculate the timezone offset for a given date and timezone ID.
 * Uses the earliest file's date to determine the offset for consistency across a cluster.
 */
function calculateTimezoneOffset(date: Date, timezoneId: string): number {
  try {
    // Re-express the date in the target timezone
    const utcTime = date.getTime();
    const localTime = new Date(date.toLocaleString("en-US", { timeZone: timezoneId })).getTime();

    // Calculate offset in minutes (UTC - local time)
    const offsetMinutes = (utcTime - localTime) / (1000 * 60);
    return offsetMinutes;
  } catch (error) {
    console.error(`Error calculating timezone offset for ${timezoneId}:`, error);
    // Fall back to UTC
    return 0;
  }
}

/**
 * Convert a local timestamp to UTC using a fixed timezone offset
 */
function convertToUTC(localDate: Date, offsetMinutes: number): Date {
  return new Date(localDate.getTime() + (offsetMinutes * 60 * 1000));
}

/**
 * Parse filenames in a cluster to extract timestamps.
 * Uses the cluster's timezone and maintains a consistent offset across all files.
 */
export function parseClusterFilenames(
  filenames: string[],
  clusterTimezoneId: string
): FilenameParsing[] {
  if (filenames.length === 0) {
    throw new Error("No filenames provided");
  }

  console.log(`Parsing ${filenames.length} filenames with timezone ${clusterTimezoneId}`);

  // Extract DateTime objects from filenames
  const localDateTimes = dateTimeOfFilename(filenames);

  // Find the earliest file to determine the timezone offset
  const earliestDate = new Date(Math.min(...localDateTimes.map(d => d.getTime())));
  const timezoneOffsetMinutes = calculateTimezoneOffset(earliestDate, clusterTimezoneId);

  console.log(`Using timezone offset: ${timezoneOffsetMinutes} minutes for all files`);

  // Convert all timestamps using the same offset
  const results: FilenameParsing[] = filenames.map((fileName, index) => {
    const localDateTime = localDateTimes[index];
    const utcDateTime = convertToUTC(localDateTime, timezoneOffsetMinutes);

    // Format timestamps (offsetMinutes is UTC - local, so the display sign is inverted)
    const offsetHours = Math.floor(Math.abs(timezoneOffsetMinutes) / 60);
    const offsetMins = Math.abs(timezoneOffsetMinutes) % 60;
    const offsetSign = timezoneOffsetMinutes <= 0 ? '+' : '-';
    const offsetString = `${offsetSign}${offsetHours.toString().padStart(2, '0')}:${offsetMins.toString().padStart(2, '0')}`;

    const timestampLocal = `${localDateTime.getFullYear()}-${(localDateTime.getMonth() + 1).toString().padStart(2, '0')}-${localDateTime.getDate().toString().padStart(2, '0')}T${localDateTime.getHours().toString().padStart(2, '0')}:${localDateTime.getMinutes().toString().padStart(2, '0')}:${localDateTime.getSeconds().toString().padStart(2, '0')}.000${offsetString}`;
    const timestampUTC = utcDateTime.toISOString();

    return {
      fileName,
      timestamp: {
        timestampLocal,
        timestampUTC,
      },
    };
  });

  console.log(`Successfully parsed ${results.length} filenames`);
  return results;
}
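/*
 * Example usage (a minimal sketch; the filenames and timezone ID below are
 * illustrative assumptions):
 *
 *   const parsed = parseClusterFilenames(
 *     ["250224_210000.wav", "250225_210000.wav"],
 *     "Pacific/Auckland"
 *   );
 *   // parsed[0].timestamp.timestampLocal -> e.g. "2025-02-24T21:00:00.000+13:00"
 *   //   (the offset depends on the earliest file's date in the given timezone)
 *   // parsed[0].timestamp.timestampUTC   -> the same instant expressed in UTC
 *
 * Note that 6-digit dates need at least two files so parseShortDateStrings can
 * distinguish YYMMDD from DDMMYY; a single-file cluster with a 6-digit date throws.
 */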
/**
 * General file utilities for processing audio files
 * Contains functionality used for all audio files, regardless of recorder type
 */

import { parseBuffer } from 'music-metadata';
import xxhash from 'xxhash-wasm';

export interface BasicFileMetadata {
  fileName: string;
  fileSize: number;
  duration?: number | undefined;
  sampleRate?: number | undefined;
  comment?: string | undefined;
  artist?: string | undefined;
}

/**
 * Generates xxHash64 for the file using the same method as the Node.js example.
 * This ensures consistent hashing across different machines.
 */
export async function generateFileHash(file: File): Promise<string> {
  try {
    console.log(`Generating xxHash64 for file: ${file.name} (${file.size} bytes)`);

    // Read file as ArrayBuffer
    const fileBuffer = await file.arrayBuffer();

    // Convert to Uint8Array and then to a latin1 string (same as Node.js Buffer.toString('latin1'))
    const uint8Array = new Uint8Array(fileBuffer);
    let fileString = '';
    for (let i = 0; i < uint8Array.length; i++) {
      fileString += String.fromCharCode(uint8Array[i]);
    }

    console.log(`File converted to latin1 string, length: ${fileString.length}`);

    // Initialize xxhash-wasm
    const { h64ToString } = await xxhash();

    // Calculate hash using the same method as the Node.js example
    const hash = h64ToString(fileString);
    console.log(`xxHash64 calculated: ${hash}`);

    return hash;
  } catch (error) {
    console.error('Error generating xxHash64:', error);

    // Fall back to a deterministic hash if xxhash fails
    const fallbackStr = `${file.name}_${file.size}_${file.lastModified || 0}`;
    let fallbackHash = 0;
    for (let i = 0; i < fallbackStr.length; i++) {
      fallbackHash = ((fallbackHash << 5) - fallbackHash) + fallbackStr.charCodeAt(i);
      fallbackHash = fallbackHash & fallbackHash;
    }
    const fallbackResult = Math.abs(fallbackHash).toString(16).padStart(16, '0');
    console.warn(`Using fallback hash: ${fallbackResult}`);
    return fallbackResult;
  }
}

/**
 * Extracts audio file metadata using the music-metadata library.
 * Works for all audio file types (WAV, MP3, etc.)
 */
export async function extractAudioMetadata(file: File): Promise<BasicFileMetadata> {
  try {
    // Convert File to ArrayBuffer for music-metadata
    const arrayBuffer = await file.arrayBuffer();
    const metadata = await parseBuffer(new Uint8Array(arrayBuffer), file.type || 'audio/wav');

    return {
      fileName: file.name,
      fileSize: file.size,
      duration: metadata.format.duration ?? undefined,
      sampleRate: metadata.format.sampleRate ?? undefined,
      comment: Array.isArray(metadata.common.comment)
        ? metadata.common.comment[0]?.text
        : metadata.common.comment ?? undefined,
      artist: metadata.common.artist ?? undefined,
    };
  } catch (error) {
    console.error('Error extracting audio metadata:', error);
    return {
      fileName: file.name,
      fileSize: file.size,
    };
  }
}

/**
 * Checks if a file is likely an audio file based on its extension
 */
export function isAudioFile(fileName: string): boolean {
  const audioExtensions = ['.wav', '.WAV', '.mp3', '.MP3', '.m4a', '.M4A', '.flac', '.FLAC'];
  return audioExtensions.some(ext => fileName.endsWith(ext));
}

/**
 * Filters audio files from a list of file handles
 */
export function filterAudioFiles(fileHandles: FileSystemFileHandle[]): FileSystemFileHandle[] {
  return fileHandles.filter(handle => isAudioFile(handle.name));
}

/**
 * Basic file information for any file type
 */
export interface ProcessedFileInfo {
  fileName: string;
  fileSize: number;
  xxh64Hash: string;
  duration?: number | undefined;
  sampleRate?: number | undefined;
}

/**
 * Processes any audio file to extract basic information and hash.
 * This is the base processing that applies to all audio files.
 */
export async function processBasicFileInfo(file: File): Promise<ProcessedFileInfo> {
  try {
    // Extract basic metadata and generate hash in parallel
    const [metadata, hash] = await Promise.all([
      extractAudioMetadata(file),
      generateFileHash(file),
    ]);

    return {
      fileName: metadata.fileName,
      fileSize: metadata.fileSize,
      xxh64Hash: hash,
      duration: metadata.duration,
      sampleRate: metadata.sampleRate,
    };
  } catch (error) {
    console.error('Error processing basic file info:', error);

    // Fall back to minimal information
    const fallbackHash = await generateFileHash(file);
    return {
      fileName: file.name,
      fileSize: file.size,
      xxh64Hash: fallbackHash,
    };
  }
}
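/*
 * Example usage (a minimal sketch; assumes a browser context with a File object,
 * e.g. from a file picker or drag-and-drop):
 *
 *   const info = await processBasicFileInfo(file);
 *   // info -> { fileName, fileSize, xxh64Hash, duration?, sampleRate? }
 *
 * Note that metadata extraction and hashing each call file.arrayBuffer(), so a
 * large recording is held in memory twice while the two run in parallel.
 */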
/**
 * AudioMoth metadata parsing utilities
 * Based on the Comment field format from AudioMoth recordings
 */

import { extractAudioMetadata, processBasicFileInfo } from './fileUtils';
import { type ParsedTimestamp } from './filenameParser';

export interface AudioMothMetadata {
  timestampLocal: string; // ISO 8601 timestamp in local timezone
  timestampUTC: string; // ISO 8601 timestamp in UTC
  mothId?: string | undefined;
  gain?: 'low' | 'low-medium' | 'medium' | 'medium-high' | 'high' | undefined;
  batteryV?: number | undefined;
  tempC?: number | undefined;
  duration: number;
  sampleRate: number;
}

export interface ParsedFileMetadata {
  isAudioMoth: boolean;
  audioMothData?: AudioMothMetadata | undefined;
  fileName: string;
  fileSize: number;
  duration?: number | undefined;
  sampleRate?: number | undefined;
  xxh64Hash?: string | undefined;
}

/**
 * Parses the AudioMoth comment field to extract metadata.
 * Based on Julia code logic - matches the exact indexing pattern.
 * Example: "Recorded at 21:00:00 24/02/2025 (UTC+13) by AudioMoth 248AB50153AB0549 at medium gain while battery was 4.3V and temperature was 15.8C."
 */
export function parseAudioMothComment(comment: string): AudioMothMetadata | null {
  try {
    console.log('Parsing AudioMoth comment:', comment);

    // Split the comment into parts (same as Julia: split(comment, " "))
    const c_v = comment.split(' ');

    // Keep only the first 19 parts if there are 22 or more (same as Julia logic)
    const comment_vector = c_v.length < 22 ? c_v : c_v.slice(0, 19);
    console.log('Comment vector:', comment_vector);

    // Ensure we have enough parts for a valid AudioMoth comment
    if (comment_vector.length < 15 || !comment.includes('AudioMoth')) {
      console.log('Invalid comment - not enough parts or not AudioMoth');
      return null;
    }

    // Extract date and time using Julia indices (adjusted for 0-based indexing)
    // Julia: time = split(comment_vector[3], ":") -> JS: comment_vector[2]
    // Julia: date = split(comment_vector[4], "/") -> JS: comment_vector[3]
    // Julia: tz = chop(comment_vector[5], head = 4, tail = 1) -> JS: comment_vector[4]
    const timeStr = comment_vector[2]; // "21:00:00"
    const dateStr = comment_vector[3]; // "24/02/2025"
    const timezoneStr = comment_vector[4]; // "(UTC+13)"

    console.log('Parsed time/date/tz:', { timeStr, dateStr, timezoneStr });

    if (!timeStr || !dateStr || !timezoneStr) {
      console.log('Missing time, date, or timezone');
      return null;
    }

    // Parse time components
    const timeParts = timeStr.split(':');
    if (timeParts.length !== 3) {
      console.log('Invalid time format');
      return null;
    }
    const hours = parseInt(timeParts[0], 10);
    const minutes = parseInt(timeParts[1], 10);
    const seconds = parseInt(timeParts[2], 10);

    // Parse date components
    const dateParts = dateStr.split('/');
    if (dateParts.length !== 3) {
      console.log('Invalid date format');
      return null;
    }
    const day = parseInt(dateParts[0], 10);
    const month = parseInt(dateParts[1], 10);
    const year = parseInt(dateParts[2], 10);

    // Parse timezone - extract the offset from the (UTC+XX) format
    // Julia: tz = chop(comment_vector[5], head = 4, tail = 1)
    const tzMatch = timezoneStr.match(/UTC([+-]\d+)/);
    const timezoneOffset = tzMatch ? tzMatch[1] : '+00';
    console.log('Timezone offset:', timezoneOffset);

    // Create local timestamp string (Julia format)
    // Julia: date[3] * "-" * date[2] * "-" * date[1] * "T" * time[1] * ":" * time[2] * ":" * time[3] * "." * "000" * time_zone
    const localTimestamp = `${year.toString().padStart(4, '0')}-${month.toString().padStart(2, '0')}-${day.toString().padStart(2, '0')}T${hours.toString().padStart(2, '0')}:${minutes.toString().padStart(2, '0')}:${seconds.toString().padStart(2, '0')}.000${timezoneOffset}:00`;

    // Convert to UTC
    const localDate = new Date(localTimestamp);
    const utcTimestamp = localDate.toISOString();
    console.log('Timestamps:', { localTimestamp, utcTimestamp });

    // Extract moth ID using Julia index
    // Julia: moth_id = comment_vector[8] -> JS: comment_vector[7]
    const mothId = comment_vector.length > 7 ? comment_vector[7] : undefined;

    // Extract gain using Julia index
    // Julia: gain = comment_vector[10] -> JS: comment_vector[9]
    const gain = comment_vector.length > 9 ? comment_vector[9] as AudioMothMetadata['gain'] : undefined;

    // Extract battery voltage - indexed back from the end (Julia logic)
    // Julia: battery = parse(Float64, chop(comment_vector[end-4], tail = 1))
    // JS: comment_vector[length - 5] with the last character removed
    let batteryV: number | undefined;
    if (comment_vector.length >= 5) {
      const batteryStr = comment_vector[comment_vector.length - 5]; // end-4 in 0-based indexing
      if (batteryStr && batteryStr.endsWith('V')) {
        const voltageStr = batteryStr.slice(0, -1); // Remove 'V' (chop tail=1)
        const voltage = parseFloat(voltageStr);
        if (!isNaN(voltage)) {
          batteryV = voltage;
        }
      }
    }

    // Extract temperature - last element (Julia logic)
    // Julia: temperature = parse(Float64, chop(comment_vector[end], tail = 2))
    // JS: comment_vector[length - 1] with the last 2 characters removed
    let tempC: number | undefined;
    if (comment_vector.length > 0) {
      const tempStr = comment_vector[comment_vector.length - 1]; // end in 0-based indexing
      if (tempStr && tempStr.endsWith('C.')) {
        const temperatureStr = tempStr.slice(0, -2); // Remove 'C.' (chop tail=2)
        const temperature = parseFloat(temperatureStr);
        if (!isNaN(temperature)) {
          tempC = temperature;
        }
      }
    }

    console.log('Extracted metadata:', { mothId, gain, batteryV, tempC });

    return {
      timestampLocal: localTimestamp,
      timestampUTC: utcTimestamp,
      mothId,
      gain,
      batteryV,
      tempC,
      duration: 0, // Will be filled in from audio file metadata
      sampleRate: 0, // Will be filled in from audio file metadata
    };
  } catch (error) {
    console.error('Error parsing AudioMoth comment:', error);
    return null;
  }
}

/**
 * Checks if a file is an AudioMoth recording based on metadata
 */
export function isAudioMothFile(metadata: { artist?: string | undefined; comment?: string | undefined }): boolean {
  // Check for AudioMoth in the artist field or comment field
  const artist = metadata?.artist || '';
  const comment = metadata?.comment || '';
  return artist.includes('AudioMoth') || comment.includes('AudioMoth');
}

/**
 * Processes a file to extract all relevant metadata.
 * Returns structured metadata for the import API.
 */
export async function processAudioFile(file: File): Promise<ParsedFileMetadata> {
  try {
    // Extract full audio metadata
    const audioMetadata = await extractAudioMetadata(file);

    // Check if this is an AudioMoth file
    const isAudioMoth = isAudioMothFile({
      artist: audioMetadata.artist,
      comment: audioMetadata.comment,
    });

    if (!isAudioMoth) {
      return {
        isAudioMoth: false,
        fileName: audioMetadata.fileName,
        fileSize: audioMetadata.fileSize,
        duration: audioMetadata.duration,
        sampleRate: audioMetadata.sampleRate,
      };
    }

    // Parse the AudioMoth comment if available
    let audioMothData: AudioMothMetadata | undefined;
    if (audioMetadata.comment) {
      const parsedComment = parseAudioMothComment(audioMetadata.comment);
      if (parsedComment) {
        audioMothData = {
          ...parsedComment,
          duration: audioMetadata.duration || 0,
          sampleRate: audioMetadata.sampleRate || 8000,
        };
      }
    }

    // If we couldn't parse the comment but it's still an AudioMoth file,
    // create a basic metadata structure
    if (!audioMothData) {
      audioMothData = {
        timestampLocal: new Date().toISOString(),
        timestampUTC: new Date().toISOString(),
        duration: audioMetadata.duration || 0,
        sampleRate: audioMetadata.sampleRate || 8000,
      };
    }

    return {
      isAudioMoth: true,
      fileName: audioMetadata.fileName,
      fileSize: audioMetadata.fileSize,
      duration: audioMetadata.duration,
      sampleRate: audioMetadata.sampleRate,
      audioMothData,
    };
  } catch (error) {
    console.error('Error processing audio file:', error);
    return {
      isAudioMoth: false,
      fileName: file.name,
      fileSize: file.size,
    };
  }
}

/**
 * Creates a complete file import object for the API
 */
export async function createFileImportData(
  file: File,
  locationId: string,
  clusterId?: string,
  precomputedTimestamp?: ParsedTimestamp
): Promise<object> {
  // Get basic file info (hash, duration, sample rate) - works for all files
  const basicInfo = await processBasicFileInfo(file);

  // Get AudioMoth-specific metadata if applicable
  const metadata = await processAudioFile(file);

  const baseFileData = {
    fileName: basicInfo.fileName,
    xxh64Hash: basicInfo.xxh64Hash,
    locationId: locationId,
    clusterId: clusterId,
    duration: basicInfo.duration || 0,
    sampleRate: basicInfo.sampleRate || 44100,
    upload: false, // Will be set to true after a successful upload
  };

  if (metadata.isAudioMoth && metadata.audioMothData) {
    return {
      ...baseFileData,
      timestampLocal: metadata.audioMothData.timestampLocal,
      mothMetadata: {
        timestamp: metadata.audioMothData.timestampUTC,
        recorderId: metadata.audioMothData.mothId,
        gain: metadata.audioMothData.gain,
        batteryV: metadata.audioMothData.batteryV,
        tempC: metadata.audioMothData.tempC,
      },
    };
  } else {
    // For non-AudioMoth files, use the precomputed timestamp from filename parsing
    if (precomputedTimestamp) {
      return {
        ...baseFileData,
        timestampLocal: precomputedTimestamp.timestampLocal,
        // No moth metadata for non-AudioMoth files
      };
    } else {
      // Fall back to the current time if no precomputed timestamp is available
      console.warn(`No timestamp data available for non-AudioMoth file: ${file.name}`);
      return {
        ...baseFileData,
        timestampLocal: new Date().toISOString(),
        // No moth metadata for non-AudioMoth files
      };
    }
  }
}
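/*
 * Example usage (a minimal sketch tying the pieces together; the File object,
 * locationId, clusterId, and timezone ID are illustrative assumptions):
 *
 *   // For AudioMoth recordings the timestamp comes from the WAV comment field,
 *   // so no precomputed timestamp is needed:
 *   const audioMothEntry = await createFileImportData(file, "location-id");
 *
 *   // For other recorders, parse the cluster's filenames first
 *   // (see parseClusterFilenames in filenameParser) and pass the matching timestamp:
 *   const parsed = parseClusterFilenames(fileNames, "Pacific/Auckland");
 *   const match = parsed.find(p => p.fileName === file.name);
 *   const otherEntry = await createFileImportData(file, "location-id", "cluster-id", match?.timestamp);
 *
 *   // Each returned object is one element of the `files` array expected by POST /api/file-import.
 */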