From cbbfa25975f146f2f0f9239109cb5fc40ddffe52 Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Tue, 21 Oct 2025 00:16:54 -0300 Subject: [PATCH 01/17] Initial PR to support both Qstash and BullMQ --- .cursor/rules/queues.mdc | 283 +++++++ apps/web/app/api/clean/route.ts | 7 +- apps/web/app/api/queue/[queueName]/route.ts | 61 ++ apps/web/app/api/resend/digest/all/route.ts | 14 +- apps/web/app/api/resend/summary/all/route.ts | 22 +- apps/web/env.ts | 15 +- apps/web/package.json | 1 + apps/web/utils/actions/clean.ts | 26 +- apps/web/utils/digest/index.ts | 34 +- apps/web/utils/queue/bullmq-manager.ts | 199 +++++ apps/web/utils/queue/qstash-manager.ts | 111 +++ apps/web/utils/queue/queue-manager.ts | 114 +++ apps/web/utils/queue/queue.test.ts | 775 ++++++++++++++++++ apps/web/utils/queue/queues.ts | 178 ++++ apps/web/utils/queue/types.ts | 101 +++ apps/web/utils/queue/worker.ts | 160 ++++ apps/web/utils/scheduled-actions/scheduler.ts | 87 +- apps/web/utils/upstash/categorize-senders.ts | 16 +- pnpm-lock.yaml | 118 +++ 19 files changed, 2188 insertions(+), 134 deletions(-) create mode 100644 .cursor/rules/queues.mdc create mode 100644 apps/web/app/api/queue/[queueName]/route.ts create mode 100644 apps/web/utils/queue/bullmq-manager.ts create mode 100644 apps/web/utils/queue/qstash-manager.ts create mode 100644 apps/web/utils/queue/queue-manager.ts create mode 100644 apps/web/utils/queue/queue.test.ts create mode 100644 apps/web/utils/queue/queues.ts create mode 100644 apps/web/utils/queue/types.ts create mode 100644 apps/web/utils/queue/worker.ts diff --git a/.cursor/rules/queues.mdc b/.cursor/rules/queues.mdc new file mode 100644 index 0000000000..4169e08d35 --- /dev/null +++ b/.cursor/rules/queues.mdc @@ -0,0 +1,283 @@ +# Queue System + +Unified queue system supporting both QStash and Redis (BullMQ) with automatic system selection based on `QUEUE_SYSTEM` environment variable. + +## Quick Start + +### 1. Job Enqueueing + +```typescript +import { enqueueJob } from "@/utils/queue/queue-manager"; + +// Basic job +const job = await enqueueJob("my-queue", { + message: "Hello from the queue system!", + userId: "user-123", +}); + +// Delayed job (5 seconds) +const delayedJob = await enqueueJob("my-queue", { + message: "This job was delayed by 5 seconds", +}, { + delay: 5000, +}); + +console.log("Jobs enqueued:", job.id || job, delayedJob.id || delayedJob); +``` + +### 2. Bulk Job Enqueueing + +```typescript +import { bulkEnqueueJobs } from "@/utils/queue/queue-manager"; + +const jobs = await bulkEnqueueJobs("my-queue", { + jobs: [ + { data: { message: "Bulk job 1" } }, + { data: { message: "Bulk job 2" } }, + { data: { message: "Bulk job 3" } }, + ], +}); + +console.log("Bulk jobs enqueued:", jobs.length); +``` + +### 3. 
Worker Setup (Redis only) + +```typescript +import { registerWorker } from "@/utils/queue/worker"; + +// Register a worker for processing jobs +const worker = registerWorker("my-queue", async (job) => { + console.log("Processing job:", job.id, job.data); + + // Your processing logic here + await processJob(job.data); + + console.log("Job completed:", job.id); +}, { + concurrency: 3, // Default concurrency is 3 +}); + +// Graceful shutdown +process.on("SIGINT", async () => { + await shutdownAllWorkers(); + process.exit(0); +}); +``` + +## Configuration + +### Environment Variables + +```bash +# Choose queue system +QUEUE_SYSTEM=redis # Use Redis + BullMQ +QUEUE_SYSTEM=upstash # Use QStash (default) + +# For Redis system +REDIS_URL=redis://localhost:6379 + +# For QStash system +QSTASH_TOKEN=your_qstash_token +``` + +### System Information + +```typescript +import { getQueueSystemInfo } from "@/utils/queue/queue-manager"; + +const info = getQueueSystemInfo(); +console.log("Queue system:", info.system); +console.log("Is Redis:", info.isRedis); +console.log("Is QStash:", info.isQStash); +``` + +### Retry Configuration + +- **QStash**: Automatically retries up to **10 times** (handled by QStash service) +- **BullMQ (Redis)**: Retries up to **5 times** (configured in BullMQ manager) +- **Parallelism**: Default concurrency of **3** for both systems + +## Migration Examples + +### From Direct QStash Usage + +**Old way (direct QStash):** +```typescript +import { publishToQstashQueue } from "@/utils/upstash"; + +await publishToQstashQueue({ + queueName: "digest-item-summarize", + parallelism: 3, + url: "/api/ai/digest", + body: { emailAccountId: "user-123", message: {...} }, +}); +``` + +**New way (unified queue system):** +```typescript +import { enqueueJob } from "@/utils/queue/queue-manager"; + +await enqueueJob("digest-item-summarize", { + emailAccountId: "user-123", + message: {...}, +}); +// Retry and parallelism handled automatically by system defaults +``` + +## Error Handling + +```typescript +try { + const job = await enqueueJob("my-queue", data, { + delay: 5000, + priority: 1, + }); +} catch (error) { + console.error("Failed to enqueue job:", error); +} +``` + +### Worker Error Handling + +```typescript +import { registerWorker } from "@/utils/queue/worker"; + +registerWorker("my-queue", async (job) => { + try { + await processJob(job.data); + } catch (error) { + console.error("Job failed:", job.id, error); + throw error; // Will trigger retry logic + } +}); +``` + +## Monitoring + +### Queue Health Check + +```typescript +import { getQueueSystemInfo } from "@/utils/queue/queue-manager"; + +function checkQueueHealth() { + const info = getQueueSystemInfo(); + + if (info.isRedis) { + console.log("Using Redis + BullMQ - Full feature set available"); + // Set up BullMQ monitoring here + } else { + console.log("Using QStash - HTTP-based processing"); + // Use QStash dashboard for monitoring + } +} +``` + +### Worker Monitoring + +```typescript +import { getAllWorkers } from "@/utils/queue/worker"; + +const workers = getAllWorkers(); +console.log("Active workers:", workers.size); + +for (const [queueName, worker] of workers) { + console.log(`Worker for ${queueName}:`, { + isRunning: worker.isRunning(), + concurrency: worker.opts.concurrency, + }); +} +``` + +## Complete Example + +Here's a complete example showing how to set up and use the queue system: + +```typescript +import { + enqueueJob, + bulkEnqueueJobs, + getQueueSystemInfo, + closeQueueManager +} from "@/utils/queue/queue-manager"; 
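// Note: the worker registration below only takes effect when QUEUE_SYSTEM=redis (BullMQ).
// With QStash there are no in-process workers; jobs are delivered over HTTP to /api/queue/{queueName}.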
+import { registerWorker, shutdownAllWorkers } from "@/utils/queue/worker"; + +async function main() { + // Check system info + const systemInfo = getQueueSystemInfo(); + console.log("Queue system:", systemInfo.system); + + // Set up worker (Redis only) + if (systemInfo.isRedis) { + registerWorker("demo-queue", async (job) => { + console.log("Processing:", job.data); + await new Promise(resolve => setTimeout(resolve, 1000)); + console.log("Completed:", job.id); + }); + } + + // Enqueue some jobs + await enqueueJob("demo-queue", { message: "Hello!" }); + await bulkEnqueueJobs("demo-queue", { + jobs: [ + { data: { message: "Bulk 1" } }, + { data: { message: "Bulk 2" } }, + ], + }); + + // Wait for processing + await new Promise(resolve => setTimeout(resolve, 5000)); + + // Cleanup + await shutdownAllWorkers(); + await closeQueueManager(); +} + +main().catch(console.error); +``` + +## Best Practices + +1. **Choose the right system**: Use Redis for self-hosted deployments, QStash for managed cloud deployments (default) +2. **Use system defaults**: Retry and parallelism are configured automatically - don't override unless necessary +3. **Handle errors gracefully**: Always implement proper error handling in job processors +4. **Monitor queue health**: Set up monitoring for queue depth, processing rates, and error rates +5. **Use job IDs for deduplication**: Prevent duplicate jobs by using meaningful job IDs +6. **Clean up completed jobs**: Default cleanup policies are set appropriately + +## Troubleshooting + +### Common Issues + +- **Workers not processing jobs**: Ensure you're using Redis queue system and workers are properly registered +- **Connection errors**: Check your Redis/QStash credentials and network connectivity +- **Jobs stuck in queue**: Check worker logs for errors and ensure workers are running +- **Memory issues**: Adjust concurrency settings and job cleanup policies + + +## API Reference + +### Core Functions + +- `enqueueJob(queueName, data, options?)`: Enqueue a single job +- `bulkEnqueueJobs(queueName, options)`: Enqueue multiple jobs +- `createQueueWorker(queueName, processor, options?)`: Create a worker (Redis only) +- `getQueueManager()`: Get the queue manager instance +- `getQueueSystemInfo()`: Get current system information + +### Worker Functions + +- `registerWorker(queueName, processor, config?)`: Register a worker +- `unregisterWorker(queueName)`: Unregister a worker +- `shutdownAllWorkers()`: Shutdown all workers gracefully + +## File Structure + +``` +apps/web/utils/queue/ +├── queue-manager.ts # Main queue abstraction +├── bullmq-manager.ts # BullMQ implementation +├── qstash-manager.ts # QStash implementation +├── types.ts # Type definitions +└── worker.ts # Worker management +``` \ No newline at end of file diff --git a/apps/web/app/api/clean/route.ts b/apps/web/app/api/clean/route.ts index 559f0b6e18..1788c72393 100644 --- a/apps/web/app/api/clean/route.ts +++ b/apps/web/app/api/clean/route.ts @@ -2,7 +2,7 @@ import { verifySignatureAppRouter } from "@upstash/qstash/nextjs"; import { z } from "zod"; import { NextResponse } from "next/server"; import { withError } from "@/utils/middleware"; -import { publishToQstash } from "@/utils/upstash"; +import { enqueueJob } from "@/utils/queue/queue-manager"; import { getThreadMessages } from "@/utils/gmail/thread"; import { getGmailClientWithRefresh } from "@/utils/gmail/client"; import type { CleanGmailBody } from "@/app/api/clean/gmail/route"; @@ -271,10 +271,7 @@ function getPublish({ }); await Promise.all([ - 
publishToQstash("/api/clean/gmail", cleanGmailBody, { - key: `gmail-action-${emailAccountId}`, - ratePerSecond: maxRatePerSecond, - }), + enqueueJob("clean-gmail", cleanGmailBody), updateThread({ emailAccountId, jobId, diff --git a/apps/web/app/api/queue/[queueName]/route.ts b/apps/web/app/api/queue/[queueName]/route.ts new file mode 100644 index 0000000000..00dc9d7a01 --- /dev/null +++ b/apps/web/app/api/queue/[queueName]/route.ts @@ -0,0 +1,61 @@ +/** + * Generic queue handler API route + * This handles jobs from both QStash and BullMQ systems + * + * Usage: POST /api/queue/{queueName} + * Body: Job data + */ + +import { type NextRequest, NextResponse } from "next/server"; +import { createScopedLogger } from "@/utils/logger"; +import { getQueueHandler, isValidQueueName } from "@/utils/queue/queues"; + +const logger = createScopedLogger("queue-api"); + +export async function POST( + request: NextRequest, + { params }: { params: { queueName: string } }, +) { + const { queueName } = params; + + try { + const body = await request.json(); + + logger.info("Received queue job", { + queueName, + body: JSON.stringify(body), + }); + + // Validate queue name + if (!isValidQueueName(queueName)) { + logger.warn("Unknown queue name", { queueName }); + return NextResponse.json( + { error: "Unknown queue name" }, + { status: 400 }, + ); + } + + // Get the appropriate handler + const handler = getQueueHandler(queueName); + if (!handler) { + logger.error("No handler found for queue", { queueName }); + return NextResponse.json( + { error: "No handler found for queue" }, + { status: 500 }, + ); + } + + // Execute the handler + return await handler(body); + } catch (error) { + logger.error("Queue job processing failed", { + queueName, + error: error instanceof Error ? 
error.message : String(error), + }); + + return NextResponse.json( + { error: "Job processing failed" }, + { status: 500 }, + ); + } +} diff --git a/apps/web/app/api/resend/digest/all/route.ts b/apps/web/app/api/resend/digest/all/route.ts index 556496dd9a..8c2c6d3fc4 100644 --- a/apps/web/app/api/resend/digest/all/route.ts +++ b/apps/web/app/api/resend/digest/all/route.ts @@ -2,11 +2,10 @@ import { NextResponse } from "next/server"; import subDays from "date-fns/subDays"; import prisma from "@/utils/prisma"; import { withError } from "@/utils/middleware"; -import { env } from "@/env"; import { hasCronSecret, hasPostCronSecret } from "@/utils/cron"; import { captureException } from "@/utils/error"; import { createScopedLogger } from "@/utils/logger"; -import { publishToQstashQueue } from "@/utils/upstash"; +import { enqueueJob } from "@/utils/queue/queue-manager"; const logger = createScopedLogger("cron/resend/digest/all"); @@ -47,18 +46,13 @@ async function sendDigestAllUpdate() { eligibleAccounts: emailAccounts.length, }); - const url = `${env.NEXT_PUBLIC_BASE_URL}/api/resend/digest`; - for (const emailAccount of emailAccounts) { try { - await publishToQstashQueue({ - queueName: "email-digest-all", - parallelism: 3, // Allow up to 3 concurrent jobs from this queue - url, - body: { emailAccountId: emailAccount.id }, + await enqueueJob("email-digest-all", { + emailAccountId: emailAccount.id, }); } catch (error) { - logger.error("Failed to publish to Qstash", { + logger.error("Failed to enqueue digest job", { email: emailAccount.email, error, }); diff --git a/apps/web/app/api/resend/summary/all/route.ts b/apps/web/app/api/resend/summary/all/route.ts index 6f73f4bed8..a39e201e6b 100644 --- a/apps/web/app/api/resend/summary/all/route.ts +++ b/apps/web/app/api/resend/summary/all/route.ts @@ -2,16 +2,12 @@ import { NextResponse } from "next/server"; import subDays from "date-fns/subDays"; import prisma from "@/utils/prisma"; import { withError } from "@/utils/middleware"; -import { env } from "@/env"; -import { - getCronSecretHeader, - hasCronSecret, - hasPostCronSecret, -} from "@/utils/cron"; +import { hasCronSecret, hasPostCronSecret } from "@/utils/cron"; import { Frequency } from "@prisma/client"; import { captureException } from "@/utils/error"; import { createScopedLogger } from "@/utils/logger"; -import { publishToQstashQueue } from "@/utils/upstash"; +import { enqueueJob } from "@/utils/queue/queue-manager"; +import { QUEUES } from "@/utils/queue/queues"; const logger = createScopedLogger("cron/resend/summary/all"); @@ -45,19 +41,13 @@ async function sendSummaryAllUpdate() { logger.info("Sending summary to users", { count: emailAccounts.length }); - const url = `${env.NEXT_PUBLIC_BASE_URL}/api/resend/summary`; - for (const emailAccount of emailAccounts) { try { - await publishToQstashQueue({ - queueName: "email-summary-all", - parallelism: 3, // Allow up to 3 concurrent jobs from this queue - url, - body: { email: emailAccount.email }, - headers: getCronSecretHeader(), + await enqueueJob("email-summary-all", { + email: emailAccount.email, }); } catch (error) { - logger.error("Failed to publish to Qstash", { + logger.error("Failed to enqueue summary job", { email: emailAccount.email, error, }); diff --git a/apps/web/env.ts b/apps/web/env.ts index 3d8615d9c9..b0e8288017 100644 --- a/apps/web/env.ts +++ b/apps/web/env.ts @@ -60,11 +60,9 @@ export const env = createEnv({ UPSTASH_REDIS_URL: z.string().optional(), UPSTASH_REDIS_TOKEN: z.string().optional(), - REDIS_URL: z.string().optional(), 
// used for subscriptions - + REDIS_URL: z.string().optional(), + QUEUE_SYSTEM: z.enum(["redis", "upstash"]).default("upstash"), QSTASH_TOKEN: z.string().optional(), - QSTASH_CURRENT_SIGNING_KEY: z.string().optional(), - QSTASH_NEXT_SIGNING_KEY: z.string().optional(), GOOGLE_PUBSUB_TOPIC_NAME: z.string().min(1), GOOGLE_PUBSUB_VERIFICATION_TOKEN: z.string().optional(), @@ -240,3 +238,12 @@ export const env = createEnv({ process.env.NEXT_PUBLIC_DISABLE_REFERRAL_SIGNATURE, }, }); + +// Validate queue system configuration once at bootstrap +if (env.QUEUE_SYSTEM === "redis" && !env.REDIS_URL) { + throw new Error("REDIS_URL is required when QUEUE_SYSTEM is set to 'redis'"); +} + +if (env.QUEUE_SYSTEM === "upstash" && !env.QSTASH_TOKEN) { + console.warn("QSTASH_TOKEN is not set - QStash functionality may be limited"); +} diff --git a/apps/web/package.json b/apps/web/package.json index 44e9751ca8..4e5c68f003 100644 --- a/apps/web/package.json +++ b/apps/web/package.json @@ -92,6 +92,7 @@ "ai": "5.0.28", "better-auth": "1.3.7", "braintrust": "0.3.6", + "bullmq": "^5.61.0", "capital-case": "2.0.0", "cheerio": "1.0.0", "class-variance-authority": "0.7.1", diff --git a/apps/web/utils/actions/clean.ts b/apps/web/utils/actions/clean.ts index eddb34f3a7..50478870c9 100644 --- a/apps/web/utils/actions/clean.ts +++ b/apps/web/utils/actions/clean.ts @@ -6,7 +6,7 @@ import { undoCleanInboxSchema, changeKeepToDoneSchema, } from "@/utils/actions/clean.validation"; -import { bulkPublishToQstash } from "@/utils/upstash"; +import { bulkEnqueueJobs } from "@/utils/queue/queue-manager"; import { env } from "@/env"; import { getLabel, @@ -138,19 +138,17 @@ export const cleanInboxAction = actionClient if (threads.length === 0) break; - const url = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}/api/clean`; - - logger.info("Pushing to Qstash", { + logger.info("Pushing to queue system", { threadCount: threads.length, nextPageToken, + queueSystem: env.QUEUE_SYSTEM, }); - const items = threads + const jobs = threads .map((thread) => { if (!thread.id) return; return { - url, - body: { + data: { emailAccountId, threadId: thread.id, markedDoneLabelId, @@ -160,20 +158,18 @@ export const cleanInboxAction = actionClient instructions, skips, } satisfies CleanThreadBody, - // give every user their own queue for ai processing. 
if we get too many parallel users we may need more - // api keys or a global queue - // problem with a global queue is that if there's a backlog users will have to wait for others to finish first - flowControl: { - key: `ai-clean-${emailAccountId}`, - parallelism: 3, + opts: { + // Add any job-specific options here }, }; }) .filter(isDefined); - await bulkPublishToQstash({ items }); + await bulkEnqueueJobs("ai-clean", { + jobs, + }); - totalEmailsProcessed += items.length; + totalEmailsProcessed += jobs.length; } while ( nextPageToken && !isMaxEmailsReached(totalEmailsProcessed, maxEmails) diff --git a/apps/web/utils/digest/index.ts b/apps/web/utils/digest/index.ts index e90e153506..88a0ba7e15 100644 --- a/apps/web/utils/digest/index.ts +++ b/apps/web/utils/digest/index.ts @@ -1,8 +1,6 @@ -import { env } from "@/env"; -import { publishToQstashQueue } from "@/utils/upstash"; +import { enqueueJob } from "@/utils/queue/queue-manager"; import { createScopedLogger } from "@/utils/logger"; import { emailToContent } from "@/utils/mail"; -import type { DigestBody } from "@/app/api/ai/digest/validation"; import type { ParsedMessage } from "@/utils/types"; import type { EmailForAction } from "@/utils/ai/types"; @@ -19,28 +17,22 @@ export async function enqueueDigestItem({ actionId?: string; coldEmailId?: string; }) { - const url = `${env.NEXT_PUBLIC_BASE_URL}/api/ai/digest`; try { - await publishToQstashQueue({ - queueName: "digest-item-summarize", - parallelism: 3, // Allow up to 3 concurrent jobs from this queue - url, - body: { - emailAccountId, - actionId, - coldEmailId, - message: { - id: email.id, - threadId: email.threadId, - from: email.headers.from, - to: email.headers.to || "", - subject: email.headers.subject, - content: emailToContent(email), - }, + await enqueueJob("digest-item-summarize", { + emailAccountId, + actionId, + coldEmailId, + message: { + id: email.id, + threadId: email.threadId, + from: email.headers.from, + to: email.headers.to || "", + subject: email.headers.subject, + content: emailToContent(email), }, }); } catch (error) { - logger.error("Failed to publish to Qstash", { + logger.error("Failed to enqueue digest job", { emailAccountId, error, }); diff --git a/apps/web/utils/queue/bullmq-manager.ts b/apps/web/utils/queue/bullmq-manager.ts new file mode 100644 index 0000000000..818cb3b12a --- /dev/null +++ b/apps/web/utils/queue/bullmq-manager.ts @@ -0,0 +1,199 @@ +import { + Queue, + Worker, + QueueEvents, + type Job, + type ConnectionOptions, +} from "bullmq"; +import { env } from "@/env"; +import { createScopedLogger } from "@/utils/logger"; +import type { + QueueJobData, + EnqueueOptions, + BulkEnqueueOptions, + QueueManager, +} from "./types"; + +// Default concurrency for BullMQ workers +const DEFAULT_CONCURRENCY = 3; + +// Default retry attempts for BullMQ jobs +const DEFAULT_ATTEMPTS = 5; + +const logger = createScopedLogger("queue-bullmq"); + +export class BullMQManager implements QueueManager { + private readonly queues: Map = new Map(); + private readonly workers: Map = new Map(); + private readonly queueEvents: Map = new Map(); + private readonly connection: ConnectionOptions; + + constructor() { + this.connection = { + host: env.REDIS_URL!, + }; + } + + async enqueue( + queueName: string, + data: T, + options: EnqueueOptions = {}, + ): Promise> { + const queue = this.getOrCreateQueue(queueName); + + const jobOptions = { + delay: options.delay, + attempts: options.attempts || DEFAULT_ATTEMPTS, + priority: options.priority, + removeOnComplete: 
options.removeOnComplete || 10, + removeOnFail: options.removeOnFail || 5, + jobId: options.jobId, + }; + + const job = await queue.add(queueName, data, jobOptions); + + logger.info("Job enqueued with BullMQ", { + queueName, + jobId: job.id, + data: JSON.stringify(data), + }); + + return job as Job; + } + + async bulkEnqueue( + queueName: string, + options: BulkEnqueueOptions, + ): Promise[]> { + const queue = this.getOrCreateQueue(queueName); + + const jobs = options.jobs.map((jobData) => ({ + name: jobData.name || queueName, + data: jobData.data, + opts: { + delay: options.delay, + attempts: options.attempts || DEFAULT_ATTEMPTS, + priority: options.priority, + removeOnComplete: options.removeOnComplete || 10, + removeOnFail: options.removeOnFail || 5, + jobId: jobData.opts?.jobId, + ...jobData.opts, + }, + })); + + const addedJobs = await queue.addBulk(jobs); + + logger.info("Bulk jobs enqueued with BullMQ", { + queueName, + jobCount: addedJobs.length, + }); + + return addedJobs as Job[]; + } + + createWorker( + queueName: string, + processor: (job: Job) => Promise, + options: { + concurrency?: number; + connection?: ConnectionOptions; + } = {}, + ): Worker { + const worker = new Worker( + queueName, + async (job) => { + logger.info("Processing job", { + queueName, + jobId: job.id, + data: JSON.stringify(job.data), + }); + + try { + await processor(job); + logger.info("Job completed successfully", { + queueName, + jobId: job.id, + }); + } catch (error) { + logger.error("Job failed", { + queueName, + jobId: job.id, + error: error instanceof Error ? error.message : String(error), + }); + throw error; + } + }, + { + connection: options.connection || this.connection, + concurrency: options.concurrency || DEFAULT_CONCURRENCY, + removeOnComplete: { count: 10 }, + removeOnFail: { count: 5 }, + }, + ); + + this.workers.set(queueName, worker); + return worker; + } + + createQueue( + queueName: string, + options: { + connection?: ConnectionOptions; + defaultJobOptions?: Record; + } = {}, + ): Queue { + const queue = new Queue(queueName, { + connection: options.connection || this.connection, + defaultJobOptions: { + removeOnComplete: { count: 10 }, + removeOnFail: { count: 5 }, + attempts: DEFAULT_ATTEMPTS, + ...options.defaultJobOptions, + }, + }); + + this.queues.set(queueName, queue); + return queue; + } + + getQueueEvents(queueName: string): QueueEvents { + if (!this.queueEvents.has(queueName)) { + const queueEvents = new QueueEvents(queueName, { + connection: this.connection, + }); + this.queueEvents.set(queueName, queueEvents); + } + return this.queueEvents.get(queueName)!; + } + + private getOrCreateQueue(queueName: string): Queue { + if (!this.queues.has(queueName)) { + this.createQueue(queueName); + } + return this.queues.get(queueName)!; + } + + async close(): Promise { + // Close all workers + for (const [name, worker] of this.workers) { + logger.info("Closing worker", { queueName: name }); + await worker.close(); + } + + // Close all queues + for (const [name, queue] of this.queues) { + logger.info("Closing queue", { queueName: name }); + await queue.close(); + } + + // Close all queue events + for (const [name, queueEvents] of this.queueEvents) { + logger.info("Closing queue events", { queueName: name }); + await queueEvents.close(); + } + + this.queues.clear(); + this.workers.clear(); + this.queueEvents.clear(); + } +} diff --git a/apps/web/utils/queue/qstash-manager.ts b/apps/web/utils/queue/qstash-manager.ts new file mode 100644 index 0000000000..30e5429f93 --- /dev/null +++ 
b/apps/web/utils/queue/qstash-manager.ts @@ -0,0 +1,111 @@ +import type { Job, ConnectionOptions } from "bullmq"; +import { env } from "@/env"; +import { createScopedLogger } from "@/utils/logger"; +import { publishToQstashQueue } from "@/utils/upstash"; +import { Client } from "@upstash/qstash"; +import type { + QueueJobData, + EnqueueOptions, + BulkEnqueueOptions, + QueueManager, +} from "./types"; + +const logger = createScopedLogger("queue-qstash"); + +// Default parallelism for QStash flow control +const DEFAULT_PARALLELISM = 3; + +export class QStashManager implements QueueManager { + async enqueue( + queueName: string, + data: T, + options: EnqueueOptions = {}, + ): Promise { + const url = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}/api/queue/${queueName}`; + + if (options.delay) { + const notBefore = Math.floor(Date.now() / 1000) + options.delay / 1000; + const client = new Client({ token: env.QSTASH_TOKEN! }); + const response = await client.publishJSON({ + url, + body: data, + notBefore, + deduplicationId: options.jobId, + }); + return response?.messageId || "unknown"; + } else { + const response = await publishToQstashQueue({ + queueName, + parallelism: DEFAULT_PARALLELISM, + url, + body: data, + }); + return response?.messageId || "unknown"; + } + } + + async bulkEnqueue( + queueName: string, + options: BulkEnqueueOptions, + ): Promise { + const url = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}/api/queue/${queueName}`; + + const items = options.jobs.map((job) => { + const item: { + url: string; + body: QueueJobData; + notBefore?: number; + deduplicationId?: string; + } = { + url, + body: job.data, + }; + + if (options.delay) { + item.notBefore = Math.floor(Date.now() / 1000) + options.delay / 1000; + } + + if (job.opts?.jobId) { + item.deduplicationId = job.opts.jobId; + } + + return item; + }); + + const client = new Client({ token: env.QSTASH_TOKEN! 
}); + const response = await client.batchJSON(items); + return response?.map((r) => r.messageId || "unknown") || []; + } + + createWorker( + _queueName: string, + _processor: (job: Job) => Promise, + _options: { + concurrency?: number; + connection?: ConnectionOptions; + } = {}, + ): never { + throw new Error( + "QStash workers are handled via HTTP endpoints, not BullMQ workers", + ); + } + + createQueue( + _queueName: string, + _options: { + connection?: ConnectionOptions; + defaultJobOptions?: Record; + } = {}, + ): never { + throw new Error("QStash queues are managed by QStash, not BullMQ"); + } + + getQueueEvents(_queueName: string): never { + throw new Error("QStash queue events are not available through BullMQ"); + } + + async close(): Promise { + // QStash doesn't require closing connections + logger.info("QStash manager closed"); + } +} diff --git a/apps/web/utils/queue/queue-manager.ts b/apps/web/utils/queue/queue-manager.ts new file mode 100644 index 0000000000..9b259dd67d --- /dev/null +++ b/apps/web/utils/queue/queue-manager.ts @@ -0,0 +1,114 @@ +import type { + Queue, + Worker, + QueueEvents, + Job, + ConnectionOptions, +} from "bullmq"; +import { env } from "@/env"; +import { createScopedLogger } from "@/utils/logger"; +import { BullMQManager } from "./bullmq-manager"; +import { QStashManager } from "./qstash-manager"; +import type { + QueueJobData, + EnqueueOptions, + BulkEnqueueOptions, + QueueManager, +} from "./types"; + +const logger = createScopedLogger("queue"); + +export function createQueueManager(): QueueManager { + const queueSystem = env.QUEUE_SYSTEM; + + logger.info("Creating queue manager", { queueSystem }); + + switch (queueSystem) { + case "redis": + // Use BullMQ with Redis + return new BullMQManager(); + case "upstash": + // Use QStash (HTTP-based, no Redis needed for BullMQ) + return new QStashManager(); + default: + throw new Error(`Unsupported queue system: ${queueSystem}`); + } +} + +let queueManager: QueueManager | null = null; + +export function getQueueManager(): QueueManager { + if (!queueManager) { + queueManager = createQueueManager(); + } + return queueManager; +} + +// Utility functions for common queue operations +export async function enqueueJob( + queueName: string, + data: T, + options?: EnqueueOptions, +): Promise | string> { + const manager = getQueueManager(); + return manager.enqueue(queueName, data, options); +} + +export async function bulkEnqueueJobs( + queueName: string, + options: BulkEnqueueOptions, +): Promise[] | string[]> { + const manager = getQueueManager(); + return manager.bulkEnqueue(queueName, options); +} + +export function createQueueWorker( + queueName: string, + processor: (job: Job) => Promise, + options?: { + concurrency?: number; + connection?: ConnectionOptions; + }, +): Worker | null { + const manager = getQueueManager(); + return manager.createWorker(queueName, processor, options); +} + +export function createQueue( + queueName: string, + options?: { + connection?: ConnectionOptions; + defaultJobOptions?: Record; + }, +): Queue | null { + const manager = getQueueManager(); + return manager.createQueue(queueName, options); +} + +export async function closeQueueManager(): Promise { + if (queueManager) { + await queueManager.close(); + queueManager = null; + } +} + +export function getQueueSystemInfo() { + return { + system: env.QUEUE_SYSTEM, + isRedis: env.QUEUE_SYSTEM === "redis", + isQStash: env.QUEUE_SYSTEM === "upstash", + }; +} + +export type { Queue, Worker, QueueEvents, Job, ConnectionOptions }; +export type 
{ + QueueSystem, + QueueJobData, + QueueConfig, + EnqueueOptions, + BulkEnqueueOptions, + QueueManager, + QueueSystemInfo, + WorkerConfig, + JobProcessor, +} from "./types"; diff --git a/apps/web/utils/queue/queue.test.ts b/apps/web/utils/queue/queue.test.ts new file mode 100644 index 0000000000..3f1da1f873 --- /dev/null +++ b/apps/web/utils/queue/queue.test.ts @@ -0,0 +1,775 @@ +import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"; + +// Mock BullMQ +const mockQueue = { + add: vi.fn(), + addBulk: vi.fn(), + close: vi.fn(), +}; + +const mockWorker = { + on: vi.fn(), + close: vi.fn(), + isRunning: vi.fn().mockReturnValue(true), + opts: { concurrency: 3 }, +}; + +const mockQueueEvents = { + close: vi.fn(), +}; + +vi.mock("bullmq", () => ({ + Queue: vi.fn().mockImplementation(() => mockQueue), + Worker: vi.fn().mockImplementation(() => mockWorker), + QueueEvents: vi.fn().mockImplementation(() => mockQueueEvents), +})); + +// Mock QStash Client +const mockClient = { + publishJSON: vi.fn(), + batchJSON: vi.fn(), +}; + +vi.mock("@upstash/qstash", () => ({ + Client: vi.fn().mockImplementation(() => mockClient), +})); + +// Mock publishToQstashQueue +const mockPublishToQstashQueue = vi.fn(); +vi.mock("@/utils/upstash", () => ({ + publishToQstashQueue: mockPublishToQstashQueue, +})); + +// Mock environment - default to upstash +vi.mock("@/env", () => ({ + env: { + QUEUE_SYSTEM: "upstash", + QSTASH_TOKEN: "test-token", + REDIS_URL: "redis://localhost:6379", + WEBHOOK_URL: "https://test.com", + NEXT_PUBLIC_BASE_URL: "https://test.com", + }, +})); + +describe("Queue System", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + afterEach(async () => { + const { closeQueueManager } = await import("./queue-manager"); + await closeQueueManager(); + vi.resetModules(); + }); + + describe("Queue Manager", () => { + describe("System Detection", () => { + it("should detect QStash system by default", async () => { + const { getQueueSystemInfo } = await import("./queue-manager"); + const info = getQueueSystemInfo(); + + expect(info.system).toBe("upstash"); + expect(info.isQStash).toBe(true); + expect(info.isRedis).toBe(false); + }); + + it("should detect Redis system when configured", async () => { + await vi.doMock("@/env", () => ({ + env: { + QUEUE_SYSTEM: "redis", + QSTASH_TOKEN: "test-token", + REDIS_URL: "redis://localhost:6379", + WEBHOOK_URL: "https://test.com", + NEXT_PUBLIC_BASE_URL: "https://test.com", + }, + })); + vi.resetModules(); + + const { getQueueSystemInfo } = await import("./queue-manager"); + const info = getQueueSystemInfo(); + + expect(info.system).toBe("redis"); + expect(info.isRedis).toBe(true); + expect(info.isQStash).toBe(false); + }); + }); + + describe("Job Enqueueing", () => { + it("should enqueue a single job with QStash", async () => { + // Ensure we're using QStash environment + await vi.doMock("@/env", () => ({ + env: { + QUEUE_SYSTEM: "upstash", + QSTASH_TOKEN: "test-token", + REDIS_URL: "redis://localhost:6379", + WEBHOOK_URL: "https://test.com", + NEXT_PUBLIC_BASE_URL: "https://test.com", + }, + })); + vi.resetModules(); + + const { enqueueJob } = await import("./queue-manager"); + const jobData = { message: "Test job", userId: "user-123" }; + + mockPublishToQstashQueue.mockResolvedValueOnce({ + messageId: "qstash-message-123", + }); + + const result = await enqueueJob("test-queue", jobData); + + expect(mockPublishToQstashQueue).toHaveBeenCalledWith({ + queueName: "test-queue", + parallelism: 3, + url: "https://test.com/api/queue/test-queue", + 
body: jobData, + }); + expect(result).toBe("qstash-message-123"); + }); + + it("should enqueue a job with options", async () => { + // Ensure we're using QStash environment + await vi.doMock("@/env", () => ({ + env: { + QUEUE_SYSTEM: "upstash", + QSTASH_TOKEN: "test-token", + REDIS_URL: "redis://localhost:6379", + WEBHOOK_URL: "https://test.com", + NEXT_PUBLIC_BASE_URL: "https://test.com", + }, + })); + vi.resetModules(); + + const { enqueueJob } = await import("./queue-manager"); + const jobData = { message: "Delayed job", userId: "user-456" }; + const options = { delay: 5000, priority: 1, jobId: "custom-job-id" }; + + mockClient.publishJSON.mockResolvedValueOnce({ + messageId: "qstash-delayed-123", + }); + + const result = await enqueueJob("test-queue", jobData, options); + + expect(mockClient.publishJSON).toHaveBeenCalledWith({ + url: "https://test.com/api/queue/test-queue", + body: jobData, + notBefore: expect.any(Number), + deduplicationId: "custom-job-id", + }); + expect(result).toBe("qstash-delayed-123"); + }); + + it("should handle job enqueueing errors", async () => { + // Ensure we're using QStash environment + await vi.doMock("@/env", () => ({ + env: { + QUEUE_SYSTEM: "upstash", + QSTASH_TOKEN: "test-token", + REDIS_URL: "redis://localhost:6379", + WEBHOOK_URL: "https://test.com", + NEXT_PUBLIC_BASE_URL: "https://test.com", + }, + })); + vi.resetModules(); + + const { enqueueJob } = await import("./queue-manager"); + const error = new Error("Enqueue failed"); + mockPublishToQstashQueue.mockRejectedValueOnce(error); + + await expect( + enqueueJob("test-queue", { message: "Test" }), + ).rejects.toThrow("Enqueue failed"); + }); + }); + + describe("Bulk Job Enqueueing", () => { + it("should enqueue multiple jobs", async () => { + // Ensure we're using QStash environment + await vi.doMock("@/env", () => ({ + env: { + QUEUE_SYSTEM: "upstash", + QSTASH_TOKEN: "test-token", + REDIS_URL: "redis://localhost:6379", + WEBHOOK_URL: "https://test.com", + NEXT_PUBLIC_BASE_URL: "https://test.com", + }, + })); + vi.resetModules(); + + const { bulkEnqueueJobs } = await import("./queue-manager"); + const jobs = [ + { data: { message: "Bulk job 1" } }, + { data: { message: "Bulk job 2" } }, + ]; + + mockClient.batchJSON.mockResolvedValueOnce([ + { messageId: "qstash-bulk-1" }, + { messageId: "qstash-bulk-2" }, + ]); + + const result = await bulkEnqueueJobs("test-queue", { jobs }); + + expect(mockClient.batchJSON).toHaveBeenCalledWith([ + { + url: "https://test.com/api/queue/test-queue", + body: { message: "Bulk job 1" }, + }, + { + url: "https://test.com/api/queue/test-queue", + body: { message: "Bulk job 2" }, + }, + ]); + expect(result).toEqual(["qstash-bulk-1", "qstash-bulk-2"]); + }); + + it("should handle bulk enqueueing errors", async () => { + // Ensure we're using QStash environment + await vi.doMock("@/env", () => ({ + env: { + QUEUE_SYSTEM: "upstash", + QSTASH_TOKEN: "test-token", + REDIS_URL: "redis://localhost:6379", + WEBHOOK_URL: "https://test.com", + NEXT_PUBLIC_BASE_URL: "https://test.com", + }, + })); + vi.resetModules(); + + const { bulkEnqueueJobs } = await import("./queue-manager"); + const error = new Error("Bulk enqueue failed"); + mockClient.batchJSON.mockRejectedValueOnce(error); + + await expect( + bulkEnqueueJobs("test-queue", { + jobs: [{ data: { message: "Test" } }], + }), + ).rejects.toThrow("Bulk enqueue failed"); + }); + }); + + describe("Error Handling", () => { + it("should handle unsupported queue system", async () => { + await vi.doMock("@/env", () => ({ + env: { 
+ QUEUE_SYSTEM: "unsupported" as any, + QSTASH_TOKEN: "test-token", + REDIS_URL: "redis://localhost:6379", + WEBHOOK_URL: "https://test.com", + NEXT_PUBLIC_BASE_URL: "https://test.com", + }, + })); + vi.resetModules(); + + const { createQueueManager } = await import("./queue-manager"); + expect(() => createQueueManager()).toThrow( + "Unsupported queue system: unsupported", + ); + }); + }); + }); + + describe("BullMQ Manager", () => { + let manager: any; + + beforeEach(async () => { + await vi.doMock("@/env", () => ({ + env: { + QUEUE_SYSTEM: "redis", + QSTASH_TOKEN: "test-token", + REDIS_URL: "redis://localhost:6379", + WEBHOOK_URL: "https://test.com", + NEXT_PUBLIC_BASE_URL: "https://test.com", + }, + })); + vi.resetModules(); + + const { BullMQManager } = await import("./bullmq-manager"); + manager = new BullMQManager(); + }); + + afterEach(async () => { + if (manager) { + await manager.close(); + } + }); + + describe("Job Enqueueing", () => { + it("should enqueue a single job", async () => { + const jobData = { message: "Test job", userId: "user-123" }; + mockQueue.add.mockResolvedValueOnce({ id: "job-123" }); + + const result = await manager.enqueue("test-queue", jobData); + + expect(mockQueue.add).toHaveBeenCalledWith("test-queue", jobData, { + delay: undefined, + attempts: 5, + priority: undefined, + removeOnComplete: 10, + removeOnFail: 5, + jobId: undefined, + }); + expect(result).toEqual({ id: "job-123" }); + }); + + it("should enqueue a job with options", async () => { + const jobData = { message: "Delayed job", userId: "user-456" }; + const options = { delay: 5000, priority: 1, jobId: "job-456" }; + mockQueue.add.mockResolvedValueOnce({ id: "job-456" }); + + const result = await manager.enqueue("test-queue", jobData, options); + + expect(mockQueue.add).toHaveBeenCalledWith("test-queue", jobData, { + delay: 5000, + attempts: 5, + priority: 1, + removeOnComplete: 10, + removeOnFail: 5, + jobId: "job-456", + }); + expect(result).toEqual({ id: "job-456" }); + }); + + it("should handle enqueue errors", async () => { + const error = new Error("Enqueue failed"); + mockQueue.add.mockRejectedValueOnce(error); + + await expect( + manager.enqueue("test-queue", { message: "Test" }), + ).rejects.toThrow("Enqueue failed"); + }); + }); + + describe("Bulk Job Enqueueing", () => { + it("should enqueue multiple jobs", async () => { + const jobs = [ + { data: { message: "Bulk job 1" } }, + { data: { message: "Bulk job 2" } }, + ]; + const mockJobs = [{ id: "bulk-job-1" }, { id: "bulk-job-2" }]; + mockQueue.addBulk.mockResolvedValueOnce(mockJobs); + + const result = await manager.bulkEnqueue("test-queue", { jobs }); + + expect(mockQueue.addBulk).toHaveBeenCalledWith([ + { + name: "test-queue", + data: { message: "Bulk job 1" }, + opts: { + delay: undefined, + attempts: 5, + priority: undefined, + removeOnComplete: 10, + removeOnFail: 5, + jobId: undefined, + }, + }, + { + name: "test-queue", + data: { message: "Bulk job 2" }, + opts: { + delay: undefined, + attempts: 5, + priority: undefined, + removeOnComplete: 10, + removeOnFail: 5, + jobId: undefined, + }, + }, + ]); + expect(result).toBe(mockJobs); + }); + }); + + describe("Worker Management", () => { + it("should create a worker", () => { + const processor = vi.fn(); + const worker = manager.createWorker("test-queue", processor); + + expect(worker).toBe(mockWorker); + }); + + it("should create a worker with concurrency", () => { + const processor = vi.fn(); + const worker = manager.createWorker("test-queue", processor, { + concurrency: 5, + 
}); + + expect(worker).toBe(mockWorker); + }); + }); + + describe("Queue Management", () => { + it("should create a queue", () => { + const queue = manager.createQueue("test-queue"); + expect(queue).toBe(mockQueue); + }); + + it("should get queue events", () => { + const events = manager.getQueueEvents("test-queue"); + expect(events).toBe(mockQueueEvents); + }); + }); + + describe("Cleanup", () => { + it("should close all workers and queues", async () => { + manager.createWorker("test-queue-1", vi.fn()); + manager.createWorker("test-queue-2", vi.fn()); + manager.createQueue("test-queue-1"); + manager.createQueue("test-queue-2"); + manager.getQueueEvents("test-queue-1"); + manager.getQueueEvents("test-queue-2"); + + await manager.close(); + + expect(mockWorker.close).toHaveBeenCalledTimes(2); + expect(mockQueue.close).toHaveBeenCalledTimes(2); + expect(mockQueueEvents.close).toHaveBeenCalledTimes(2); + }); + }); + }); + + describe("QStash Manager", () => { + let manager: any; + + beforeEach(async () => { + const { QStashManager } = await import("./qstash-manager"); + manager = new QStashManager(); + }); + + afterEach(async () => { + if (manager) { + await manager.close(); + } + }); + + describe("Job Enqueueing", () => { + it("should enqueue a single job", async () => { + const jobData = { message: "Test job", userId: "user-123" }; + mockPublishToQstashQueue.mockResolvedValueOnce({ + messageId: "qstash-message-123", + }); + + const result = await manager.enqueue("test-queue", jobData); + + expect(mockPublishToQstashQueue).toHaveBeenCalledWith({ + queueName: "test-queue", + parallelism: 3, + url: "https://test.com/api/queue/test-queue", + body: jobData, + }); + expect(result).toBe("qstash-message-123"); + }); + + it("should enqueue a job with delay", async () => { + const jobData = { message: "Delayed job", userId: "user-456" }; + const options = { delay: 5000, jobId: "delayed-job-123" }; + mockClient.publishJSON.mockResolvedValueOnce({ + messageId: "qstash-delayed-123", + }); + + const result = await manager.enqueue("test-queue", jobData, options); + + expect(mockClient.publishJSON).toHaveBeenCalledWith({ + url: "https://test.com/api/queue/test-queue", + body: jobData, + notBefore: expect.any(Number), + deduplicationId: "delayed-job-123", + }); + expect(result).toBe("qstash-delayed-123"); + }); + + it("should handle enqueue errors", async () => { + const error = new Error("Enqueue failed"); + mockPublishToQstashQueue.mockRejectedValueOnce(error); + + await expect( + manager.enqueue("test-queue", { message: "Test" }), + ).rejects.toThrow("Enqueue failed"); + }); + + it("should return 'unknown' when messageId is missing", async () => { + mockPublishToQstashQueue.mockResolvedValueOnce({ + messageId: undefined, + }); // No messageId + + const result = await manager.enqueue("test-queue", { message: "Test" }); + expect(result).toBe("unknown"); + }); + }); + + describe("Bulk Job Enqueueing", () => { + it("should enqueue multiple jobs", async () => { + const jobs = [ + { data: { message: "Bulk job 1" } }, + { data: { message: "Bulk job 2" } }, + ]; + mockClient.batchJSON.mockResolvedValueOnce([ + { messageId: "qstash-bulk-1" }, + { messageId: "qstash-bulk-2" }, + ]); + + const result = await manager.bulkEnqueue("test-queue", { jobs }); + + expect(mockClient.batchJSON).toHaveBeenCalledWith([ + { + url: "https://test.com/api/queue/test-queue", + body: { message: "Bulk job 1" }, + }, + { + url: "https://test.com/api/queue/test-queue", + body: { message: "Bulk job 2" }, + }, + ]); + 
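      // bulkEnqueue maps each batchJSON response entry to its messageId ("unknown" when missing).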
expect(result).toEqual(["qstash-bulk-1", "qstash-bulk-2"]); + }); + + it("should handle bulk enqueue errors", async () => { + const error = new Error("Bulk enqueue failed"); + mockClient.batchJSON.mockRejectedValueOnce(error); + + await expect( + manager.bulkEnqueue("test-queue", { + jobs: [{ data: { message: "Test" } }], + }), + ).rejects.toThrow("Bulk enqueue failed"); + }); + }); + + describe("Unsupported Operations", () => { + it("should throw error for createWorker", () => { + expect(() => manager.createWorker("test-queue", vi.fn())).toThrow( + "QStash workers are handled via HTTP endpoints, not BullMQ workers", + ); + }); + + it("should throw error for createQueue", () => { + expect(() => manager.createQueue("test-queue")).toThrow( + "QStash queues are managed by QStash, not BullMQ", + ); + }); + + it("should throw error for getQueueEvents", () => { + expect(() => manager.getQueueEvents("test-queue")).toThrow( + "QStash queue events are not available through BullMQ", + ); + }); + }); + + describe("URL Construction", () => { + it("should use WEBHOOK_URL when available", async () => { + await vi.doMock("@/env", () => ({ + env: { + QSTASH_TOKEN: "test-token", + WEBHOOK_URL: "https://webhook.test.com", + NEXT_PUBLIC_BASE_URL: "https://fallback.test.com", + }, + })); + vi.resetModules(); + + const { QStashManager: MockedQStashManager } = await import( + "./qstash-manager" + ); + const mockedManager = new MockedQStashManager(); + + mockPublishToQstashQueue.mockResolvedValue({ messageId: "test-123" }); + + await mockedManager.enqueue("test-queue", { message: "Test" }); + + expect(mockPublishToQstashQueue).toHaveBeenCalledWith( + expect.objectContaining({ + url: "https://webhook.test.com/api/queue/test-queue", + }), + ); + }); + + it("should fallback to NEXT_PUBLIC_BASE_URL when WEBHOOK_URL is not available", async () => { + await vi.doMock("@/env", () => ({ + env: { + QSTASH_TOKEN: "test-token", + WEBHOOK_URL: undefined, + NEXT_PUBLIC_BASE_URL: "https://fallback.test.com", + }, + })); + vi.resetModules(); + + const { QStashManager: MockedQStashManager } = await import( + "./qstash-manager" + ); + const mockedManager = new MockedQStashManager(); + + mockPublishToQstashQueue.mockResolvedValue({ messageId: "test-123" }); + + await mockedManager.enqueue("test-queue", { message: "Test" }); + + expect(mockPublishToQstashQueue).toHaveBeenCalledWith( + expect.objectContaining({ + url: "https://fallback.test.com/api/queue/test-queue", + }), + ); + }); + }); + }); + + describe("Worker Management", () => { + const mockCreateQueueWorker = vi.fn(); + const mockCloseQueueManager = vi.fn(); + + beforeEach(() => { + vi.doMock("./queue-manager", () => ({ + createQueueWorker: mockCreateQueueWorker, + closeQueueManager: mockCloseQueueManager, + })); + }); + + describe("Worker Registration", () => { + it("should register a worker", async () => { + const { registerWorker } = await import("./worker"); + const processor = vi.fn(); + + mockCreateQueueWorker.mockReturnValueOnce(mockWorker); + + const worker = registerWorker("test-queue", processor); + + expect(mockCreateQueueWorker).toHaveBeenCalledWith( + "test-queue", + processor, + { + concurrency: 1, + }, + ); + expect(worker).toBe(mockWorker); + }); + + it("should register a worker with configuration", async () => { + const { registerWorker } = await import("./worker"); + const processor = vi.fn(); + + mockCreateQueueWorker.mockReturnValueOnce(mockWorker); + + registerWorker("test-queue", processor, { concurrency: 5 }); + + 
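      // The explicit concurrency option is forwarded to createQueueWorker as-is;
      // only when omitted does the registry fall back to its default of 1.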
expect(mockCreateQueueWorker).toHaveBeenCalledWith( + "test-queue", + processor, + { + concurrency: 5, + }, + ); + }); + + it("should return existing worker if already registered", async () => { + const { registerWorker } = await import("./worker"); + const processor = vi.fn(); + + mockCreateQueueWorker.mockReturnValueOnce(mockWorker); + registerWorker("test-queue", processor); + const worker = registerWorker("test-queue", vi.fn()); // Try to register again + + expect(mockCreateQueueWorker).toHaveBeenCalledTimes(1); // Should only be called once + expect(worker).toBe(mockWorker); + }); + + it("should handle worker creation failure", async () => { + const { registerWorker } = await import("./worker"); + const processor = vi.fn(); + + mockCreateQueueWorker.mockReturnValueOnce(null); + + const worker = registerWorker("test-queue", processor); + + expect(worker).toBeNull(); + }); + }); + + describe("Worker Events", () => { + it("should set up worker event listeners", async () => { + const { registerWorker } = await import("./worker"); + const processor = vi.fn(); + + mockCreateQueueWorker.mockReturnValueOnce(mockWorker); + registerWorker("test-queue", processor); + + expect(mockWorker.on).toHaveBeenCalledWith( + "completed", + expect.any(Function), + ); + expect(mockWorker.on).toHaveBeenCalledWith( + "failed", + expect.any(Function), + ); + expect(mockWorker.on).toHaveBeenCalledWith( + "stalled", + expect.any(Function), + ); + expect(mockWorker.on).toHaveBeenCalledWith( + "error", + expect.any(Function), + ); + }); + }); + + describe("Worker Management", () => { + it("should unregister a worker", async () => { + const { registerWorker, unregisterWorker } = await import("./worker"); + const processor = vi.fn(); + + mockCreateQueueWorker.mockReturnValueOnce(mockWorker); + registerWorker("test-queue", processor); + + await unregisterWorker("test-queue"); + + expect(mockWorker.close).toHaveBeenCalledTimes(1); + }); + + it("should handle unregistering non-existent worker", async () => { + const { unregisterWorker } = await import("./worker"); + + await unregisterWorker("non-existent-queue"); + expect(mockWorker.close).not.toHaveBeenCalled(); + }); + + it("should get a specific worker", async () => { + const { registerWorker, getWorker } = await import("./worker"); + const processor = vi.fn(); + + mockCreateQueueWorker.mockReturnValueOnce(mockWorker); + registerWorker("test-queue", processor); + + const worker = getWorker("test-queue"); + expect(worker).toBe(mockWorker); + }); + + it("should return undefined for non-existent worker", async () => { + const { getWorker } = await import("./worker"); + + const worker = getWorker("non-existent-queue"); + expect(worker).toBeUndefined(); + }); + }); + + describe("Shutdown", () => { + it("should shutdown all workers", async () => { + const { registerWorker, shutdownAllWorkers } = await import("./worker"); + + mockCreateQueueWorker.mockReturnValue(mockWorker); + registerWorker("test-queue-1", vi.fn()); + registerWorker("test-queue-2", vi.fn()); + + await shutdownAllWorkers(); + + expect(mockWorker.close).toHaveBeenCalledTimes(2); + }); + + it("should handle shutdown errors gracefully", async () => { + const { registerWorker, shutdownAllWorkers } = await import("./worker"); + + mockCreateQueueWorker.mockReturnValueOnce(mockWorker); + registerWorker("test-queue", vi.fn()); + mockWorker.close.mockRejectedValueOnce(new Error("Close failed")); + + await shutdownAllWorkers(); + + expect(mockWorker.close).toHaveBeenCalledTimes(1); + }); + }); + }); +}); diff --git 
a/apps/web/utils/queue/queues.ts b/apps/web/utils/queue/queues.ts new file mode 100644 index 0000000000..99c99b2402 --- /dev/null +++ b/apps/web/utils/queue/queues.ts @@ -0,0 +1,178 @@ +import { NextResponse } from "next/server"; +import { createScopedLogger } from "@/utils/logger"; +import type { CleanAction } from "@prisma/client"; + +const logger = createScopedLogger("queue-handlers"); + +export interface DigestJobData { + emailAccountId: string; + actionId?: string; + coldEmailId?: string; + message: { + id: string; + threadId: string; + from: string; + to: string; + subject: string; + content: string; + }; +} + +export interface AiCategorizeSendersJobData { + emailAccountId: string; + senders: string[]; +} + +export interface ScheduledActionJobData { + scheduledActionId: string; +} + +export interface AiCleanJobData { + emailAccountId: string; + threadId: string; + markedDoneLabelId: string; + processedLabelId: string; + jobId: string; + action: CleanAction; + instructions?: string; + skips: { + reply: boolean; + starred: boolean; + calendar: boolean; + receipt: boolean; + attachment: boolean; + conversation: boolean; + }; +} + +export interface EmailDigestAllJobData { + emailAccountId: string; +} + +export interface EmailSummaryAllJobData { + email: string; + userId: string; +} + +export interface CleanGmailJobData { + emailAccountId: string; + threadId: string; + markDone: boolean; + action: CleanAction; + markedDoneLabelId?: string; + processedLabelId?: string; + jobId: string; +} + +async function handleDigestJob(data: DigestJobData) { + logger.info("Processing digest job", { + emailAccountId: data.emailAccountId, + actionId: data.actionId, + coldEmailId: data.coldEmailId, + messageId: data.message.id, + }); + + // TODO: Implement actual digest processing logic + await new Promise((resolve) => setTimeout(resolve, 1000)); + + logger.info("Digest job completed"); + return NextResponse.json({ success: true }); +} + +async function handleCategorizeSendersJob(data: AiCategorizeSendersJobData) { + logger.info("Processing categorize senders job", { + emailAccountId: data.emailAccountId, + senderCount: data.senders.length, + }); + + // TODO: Implement actual categorization logic + await new Promise((resolve) => setTimeout(resolve, 2000)); + + logger.info("Categorize senders job completed"); + return NextResponse.json({ success: true }); +} + +async function handleScheduledActionJob(data: ScheduledActionJobData) { + logger.info("Processing scheduled action job", { + scheduledActionId: data.scheduledActionId, + }); + + // TODO: Implement actual scheduled action logic + await new Promise((resolve) => setTimeout(resolve, 500)); + + logger.info("Scheduled action job completed"); + return NextResponse.json({ success: true }); +} + +async function handleAiCleanJob(data: AiCleanJobData) { + logger.info("Processing AI clean job", { + emailAccountId: data.emailAccountId, + threadId: data.threadId, + action: data.action, + jobId: data.jobId, + }); + + // TODO: Implement actual AI clean logic + await new Promise((resolve) => setTimeout(resolve, 3000)); + + logger.info("AI clean job completed"); + return NextResponse.json({ success: true }); +} + +async function handleEmailDigestAllJob(data: EmailDigestAllJobData) { + logger.info("Processing email digest all job", { + emailAccountId: data.emailAccountId, + }); + + // TODO: Implement actual email digest all logic + await new Promise((resolve) => setTimeout(resolve, 1500)); + + logger.info("Email digest all job completed"); + return NextResponse.json({ 
success: true }); +} + +async function handleEmailSummaryAllJob(data: EmailSummaryAllJobData) { + logger.info("Processing email summary all job", { + email: data.email, + userId: data.userId, + }); + + // TODO: Implement actual email summary all logic + await new Promise((resolve) => setTimeout(resolve, 2500)); + + logger.info("Email summary all job completed"); + return NextResponse.json({ success: true }); +} + +async function handleCleanGmailJob(data: CleanGmailJobData) { + logger.info("Processing clean Gmail job", { + emailAccountId: data.emailAccountId, + threadId: data.threadId, + jobId: data.jobId, + }); + + // TODO: Implement actual clean Gmail logic + await new Promise((resolve) => setTimeout(resolve, 2000)); + + logger.info("Clean Gmail job completed"); + return NextResponse.json({ success: true }); +} + +export const QUEUE_HANDLERS = { + "digest-item-summarize": handleDigestJob, + "ai-categorize-senders": handleCategorizeSendersJob, + "scheduled-actions": handleScheduledActionJob, + "ai-clean": handleAiCleanJob, + "email-digest-all": handleEmailDigestAllJob, + "email-summary-all": handleEmailSummaryAllJob, + "clean-gmail": handleCleanGmailJob, +} as const; + +export type QueueName = keyof typeof QUEUE_HANDLERS; +export function getQueueHandler(queueName: string) { + return QUEUE_HANDLERS[queueName as QueueName] || null; +} + +export function isValidQueueName(queueName: string): queueName is QueueName { + return queueName in QUEUE_HANDLERS; +} diff --git a/apps/web/utils/queue/types.ts b/apps/web/utils/queue/types.ts new file mode 100644 index 0000000000..314363856b --- /dev/null +++ b/apps/web/utils/queue/types.ts @@ -0,0 +1,101 @@ +import type { + Queue, + Worker, + QueueEvents, + Job, + ConnectionOptions, +} from "bullmq"; + +export type QueueSystem = "redis" | "upstash"; + +export type { QueueName } from "./queues"; + +export interface QueueJobData { + [key: string]: unknown; +} + +export interface QueueSystemConfig { + defaultParallelism: number; + defaultConcurrency: number; +} + +export interface QueueConfig { + name: string; + parallelism?: number; + delay?: number; + attempts?: number; + backoff?: { + type: "fixed" | "exponential"; + delay: number; + }; +} + +export interface EnqueueOptions { + delay?: number; + attempts?: number; + priority?: number; + removeOnComplete?: number; + removeOnFail?: number; + jobId?: string; +} + +export interface BulkEnqueueOptions extends EnqueueOptions { + jobs: Array<{ + name?: string; + data: QueueJobData; + opts?: EnqueueOptions; + }>; +} + +export interface QueueManager { + enqueue( + queueName: string, + data: T, + options?: EnqueueOptions, + ): Promise | string>; + + bulkEnqueue( + queueName: string, + options: BulkEnqueueOptions, + ): Promise[] | string[]>; + + createWorker( + queueName: string, + processor: (job: Job) => Promise, + options?: { + concurrency?: number; + connection?: ConnectionOptions; + }, + ): Worker; + + createQueue( + queueName: string, + options?: { + connection?: ConnectionOptions; + defaultJobOptions?: Record; + }, + ): Queue; + + getQueueEvents(queueName: string): QueueEvents; + + close(): Promise; +} + +export interface WorkerConfig { + concurrency?: number; + removeOnComplete?: number; + removeOnFail?: number; + maxStalledCount?: number; + stalledInterval?: number; +} + +export type JobProcessor = (job: Job) => Promise; + +export interface QueueSystemInfo { + system: string; + supportsWorkers: boolean; + supportsDelayedJobs: boolean; + supportsBulkOperations: boolean; +} + +export type { Queue, Worker, 
QueueEvents, Job, ConnectionOptions }; diff --git a/apps/web/utils/queue/worker.ts b/apps/web/utils/queue/worker.ts new file mode 100644 index 0000000000..d5aab26d01 --- /dev/null +++ b/apps/web/utils/queue/worker.ts @@ -0,0 +1,160 @@ +import type { Worker } from "bullmq"; +import { createScopedLogger } from "@/utils/logger"; +import { createQueueWorker, closeQueueManager } from "./queue-manager"; +import type { WorkerConfig, JobProcessor } from "./types"; + +const logger = createScopedLogger("queue-worker"); + +class WorkerRegistry { + private readonly workers: Map = new Map(); + private isShuttingDown = false; + + registerWorker( + queueName: string, + processor: JobProcessor, + config: WorkerConfig = {}, + ): Worker | null { + if (this.workers.has(queueName)) { + logger.warn("Worker already registered for queue", { queueName }); + return this.workers.get(queueName)!; + } + + const worker = createQueueWorker(queueName, processor as JobProcessor, { + concurrency: config.concurrency || 1, + }); + + if (!worker) { + logger.error("Failed to create worker", { queueName }); + return null; + } + + worker.on("completed", (job) => { + logger.info("Job completed", { + queueName, + jobId: job.id, + duration: Date.now() - job.processedOn!, + }); + }); + + worker.on("failed", (job, err) => { + logger.error("Job failed", { + queueName, + jobId: job?.id, + error: err.message, + attempts: job?.attemptsMade, + maxAttempts: job?.opts.attempts, + }); + }); + + worker.on("stalled", (jobId) => { + logger.warn("Job stalled", { queueName, jobId }); + }); + + worker.on("error", (err) => { + logger.error("Worker error", { + queueName, + error: err.message, + }); + }); + + this.workers.set(queueName, worker); + logger.info("Worker registered", { + queueName, + concurrency: config.concurrency, + }); + + return worker; + } + + async unregisterWorker(queueName: string): Promise { + const worker = this.workers.get(queueName); + if (!worker) { + logger.warn("No worker found for queue", { queueName }); + return; + } + + await worker.close(); + this.workers.delete(queueName); + logger.info("Worker unregistered", { queueName }); + } + + getWorkers(): Map { + return new Map(this.workers); + } + + getWorker(queueName: string): Worker | undefined { + return this.workers.get(queueName); + } + + async shutdown(): Promise { + if (this.isShuttingDown) { + logger.warn("Shutdown already in progress"); + return; + } + + this.isShuttingDown = true; + logger.info("Shutting down all workers", { + workerCount: this.workers.size, + }); + + const shutdownPromises = Array.from(this.workers.entries()).map( + async ([queueName, worker]) => { + try { + logger.info("Closing worker", { queueName }); + await worker.close(); + } catch (error) { + logger.error("Error closing worker", { + queueName, + error: error instanceof Error ? 
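+            // close() failures are caught and logged per worker so one bad
+            // shutdown does not stop the remaining workers from closing.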
error.message : String(error), + }); + } + }, + ); + + await Promise.all(shutdownPromises); + this.workers.clear(); + this.isShuttingDown = false; + logger.info("All workers shut down"); + } +} + +const workerRegistry = new WorkerRegistry(); +export function registerWorker( + queueName: string, + processor: JobProcessor, + config: WorkerConfig = {}, +): Worker | null { + return workerRegistry.registerWorker(queueName, processor, config); +} + +export function unregisterWorker(queueName: string): Promise { + return workerRegistry.unregisterWorker(queueName); +} + +export function getWorker(queueName: string): Worker | undefined { + return workerRegistry.getWorker(queueName); +} + +export function getAllWorkers(): Map { + return workerRegistry.getWorkers(); +} + +export async function shutdownAllWorkers(): Promise { + await workerRegistry.shutdown(); +} + +process.on("SIGINT", async () => { + logger.info("Received SIGINT, shutting down workers..."); + await shutdownAllWorkers(); + await closeQueueManager(); + process.exit(0); +}); + +process.on("SIGTERM", async () => { + logger.info("Received SIGTERM, shutting down workers..."); + await shutdownAllWorkers(); + await closeQueueManager(); + process.exit(0); +}); + +export { workerRegistry }; diff --git a/apps/web/utils/scheduled-actions/scheduler.ts b/apps/web/utils/scheduled-actions/scheduler.ts index 75acfe9446..1aa03a3bbe 100644 --- a/apps/web/utils/scheduled-actions/scheduler.ts +++ b/apps/web/utils/scheduled-actions/scheduler.ts @@ -4,9 +4,9 @@ import type { ActionItem } from "@/utils/ai/types"; import { createScopedLogger } from "@/utils/logger"; import { canActionBeDelayed } from "@/utils/delayed-actions"; import { env } from "@/env"; -import { getCronSecretHeader } from "@/utils/cron"; +import { addMinutes } from "date-fns"; import { Client } from "@upstash/qstash"; -import { addMinutes, getUnixTime } from "date-fns"; +import { enqueueJob } from "@/utils/queue/queue-manager"; const logger = createScopedLogger("qstash-scheduled-actions"); @@ -255,57 +255,29 @@ async function scheduleMessage({ delayInMinutes: number; deduplicationId: string; }) { - const client = getQstashClient(); - const url = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}/api/scheduled-actions/execute`; - - const notBefore = getUnixTime(addMinutes(new Date(), delayInMinutes)); - try { - if (client) { - const response = await client.publishJSON({ - url, - body: payload, - notBefore, // Absolute delay using unix timestamp - deduplicationId, - contentBasedDeduplication: false, - headers: getCronSecretHeader(), - }); + // Use the unified queue system instead of direct QStash + const delayInMs = delayInMinutes * 60 * 1000; // Convert minutes to milliseconds - // The messageId here has a different meaning because it is - // the QStash identifier and not the usual messageId of the email - const messageId = - "messageId" in response ? response.messageId : undefined; - - logger.info("Successfully scheduled with QStash", { - scheduledActionId: payload.scheduledActionId, - scheduledId: messageId, - notBefore, - delayInMinutes, - deduplicationId, - }); + const job = await enqueueJob("scheduled-actions", payload, { + delay: delayInMs, + jobId: deduplicationId, + attempts: 3, + }); - return messageId; - } else { - logger.error( - "QStash client not available, scheduled action cannot be executed", - { - scheduledActionId: payload.scheduledActionId, - }, - ); + const messageId = typeof job === "string" ? 
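+      // QStash returns the message ID as a plain string, while BullMQ returns
+      // a Job object, so both shapes are normalized to one identifier here.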
job : job.id; - await prisma.scheduledAction.update({ - where: { id: payload.scheduledActionId }, - data: { - schedulingStatus: "FAILED" as const, - }, - }); + logger.info("Successfully scheduled with queue system", { + scheduledActionId: payload.scheduledActionId, + scheduledId: messageId, + delayInMinutes, + deduplicationId, + queueSystem: env.QUEUE_SYSTEM, + }); - throw new Error( - "QStash client not available - scheduled action cannot be executed", - ); - } + return messageId; } catch (error) { - logger.error("Failed to schedule with QStash", { + logger.error("Failed to schedule with queue system", { error, scheduledActionId: payload.scheduledActionId, deduplicationId, @@ -327,13 +299,22 @@ async function cancelMessage( messageId: string, ) { try { - await client.http.request({ - path: ["v2", "messages", messageId], - method: "DELETE", - }); - logger.info("Successfully cancelled QStash message", { messageId }); + // For QStash, we can still cancel directly + if (env.QUEUE_SYSTEM === "upstash") { + await client.http.request({ + path: ["v2", "messages", messageId], + method: "DELETE", + }); + logger.info("Successfully cancelled QStash message", { messageId }); + } else { + // For Redis/BullMQ, we would need to implement job cancellation + // For now, just log that cancellation is not supported + logger.warn("Job cancellation not implemented for Redis queue system", { + messageId, + }); + } } catch (error) { - logger.error("Failed to cancel QStash message", { messageId, error }); + logger.error("Failed to cancel message", { messageId, error }); throw error; } } diff --git a/apps/web/utils/upstash/categorize-senders.ts b/apps/web/utils/upstash/categorize-senders.ts index 7e0763fb33..3dfcce193a 100644 --- a/apps/web/utils/upstash/categorize-senders.ts +++ b/apps/web/utils/upstash/categorize-senders.ts @@ -1,5 +1,6 @@ import chunk from "lodash/chunk"; -import { deleteQueue, listQueues, publishToQstashQueue } from "@/utils/upstash"; +import { deleteQueue, listQueues } from "@/utils/upstash"; +import { enqueueJob } from "@/utils/queue/queue-manager"; import { env } from "@/env"; import type { AiCategorizeSenders } from "@/app/api/user/categorize/senders/batch/handle-batch-validation"; import { createScopedLogger } from "@/utils/logger"; @@ -42,15 +43,10 @@ export async function publishToAiCategorizeSendersQueue( // Process all chunks in parallel, each as a separate queue item await Promise.all( chunks.map((senderChunk) => - publishToQstashQueue({ - queueName, - parallelism: 3, // Allow up to 3 concurrent jobs from this queue - url, - body: { - emailAccountId: body.emailAccountId, - senders: senderChunk, - } satisfies AiCategorizeSenders, - }), + enqueueJob(queueName, { + emailAccountId: body.emailAccountId, + senders: senderChunk, + } satisfies AiCategorizeSenders), ), ); } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a34a333ad7..28319411f9 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -327,6 +327,9 @@ importers: braintrust: specifier: 0.3.6 version: 0.3.6(zod@3.25.46) + bullmq: + specifier: ^5.61.0 + version: 5.61.0 capital-case: specifier: 2.0.0 version: 2.0.0 @@ -2351,6 +2354,36 @@ packages: resolution: {integrity: sha512-beedclIvFcCnPrYgHsylqiYJVJ/CI47Vyc4tY8no1/Li/O8U4BTlJfy6ZwxkYwx+Mx10nrgwSVrA7VBbhh4slg==} engines: {node: '>=18'} + '@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3': + resolution: {integrity: sha512-QZHtlVgbAdy2zAqNA9Gu1UpIuI8Xvsd1v8ic6B2pZmeFnFcMWiPLfWXh7TVw4eGEZ/C9TH281KwhVoeQUKbyjw==} + cpu: [arm64] + os: [darwin] + + 
'@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3': + resolution: {integrity: sha512-mdzd3AVzYKuUmiWOQ8GNhl64/IoFGol569zNRdkLReh6LRLHOXxU4U8eq0JwaD8iFHdVGqSy4IjFL4reoWCDFw==} + cpu: [x64] + os: [darwin] + + '@msgpackr-extract/msgpackr-extract-linux-arm64@3.0.3': + resolution: {integrity: sha512-YxQL+ax0XqBJDZiKimS2XQaf+2wDGVa1enVRGzEvLLVFeqa5kx2bWbtcSXgsxjQB7nRqqIGFIcLteF/sHeVtQg==} + cpu: [arm64] + os: [linux] + + '@msgpackr-extract/msgpackr-extract-linux-arm@3.0.3': + resolution: {integrity: sha512-fg0uy/dG/nZEXfYilKoRe7yALaNmHoYeIoJuJ7KJ+YyU2bvY8vPv27f7UKhGRpY6euFYqEVhxCFZgAUNQBM3nw==} + cpu: [arm] + os: [linux] + + '@msgpackr-extract/msgpackr-extract-linux-x64@3.0.3': + resolution: {integrity: sha512-cvwNfbP07pKUfq1uH+S6KJ7dT9K8WOE4ZiAcsrSes+UY55E/0jLYc+vq+DO7jlmqRb5zAggExKm0H7O/CBaesg==} + cpu: [x64] + os: [linux] + + '@msgpackr-extract/msgpackr-extract-win32-x64@3.0.3': + resolution: {integrity: sha512-x0fWaQtYp4E6sktbsdAqnehxDgEc/VwM7uLsRCYWaiGu0ykYdZPiS8zCWdnjHwyiumousxfBm4SO31eXqwEZhQ==} + cpu: [x64] + os: [win32] + '@mux/mux-data-google-ima@0.2.8': resolution: {integrity: sha512-0ZEkHdcZ6bS8QtcjFcoJeZxJTpX7qRIledf4q1trMWPznugvtajCjCM2kieK/pzkZj1JM6liDRFs1PJSfVUs2A==} @@ -3819,6 +3852,7 @@ packages: '@sanity/next-loader@2.1.0': resolution: {integrity: sha512-ta9HPqGpVQNv5NYTV5X9HNa2m7AJi1ixUVHVbOwGLfdm2D5FWALuCYlQ8GvtaCIxCakOtLLmUSVt9d8wFM83xQ==} engines: {node: '>=18.18'} + deprecated: This package is deprecated. Please use 'next-sanity/live' instead. peerDependencies: next: ^14.1 || ^15.0.0-0 react: ^18.3 || ^19.0.0-0 @@ -5484,6 +5518,9 @@ packages: builtins@1.0.3: resolution: {integrity: sha512-uYBjakWipfaO/bXI7E8rq6kpwHRZK5cNYrUv2OzZSI/FvmdMyXJ2tG9dKcjEC5YHmHpUAwsargWIZNWdxb/bnQ==} + bullmq@5.61.0: + resolution: {integrity: sha512-khaTjc1JnzaYFl4FrUtsSsqugAW/urRrcZ9Q0ZE+REAw8W+gkHFqxbGlutOu6q7j7n91wibVaaNlOUMdiEvoSQ==} + bytes@3.1.2: resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} engines: {node: '>= 0.8'} @@ -5911,6 +5948,10 @@ packages: crisp-sdk-web@1.0.25: resolution: {integrity: sha512-CWTHFFeHRV0oqiXoPh/aIAKhFs6xcIM4NenGPnClAMCZUDQgQsF1OWmZWmnVNjJriXUmWRgDfeUxcxygS0dCRA==} + cron-parser@4.9.0: + resolution: {integrity: sha512-p0SaNjrHOnQeR8/VnfGbmg9te2kfyYSQ7Sc/j/6DtPL3JQvKxmjO9TSjNFpujqV3vEYYBvNNvXSxzyksBWAx1Q==} + engines: {node: '>=12.0.0'} + cross-env@7.0.3: resolution: {integrity: sha512-+/HKd6EgcQCJGh2PSjZuUitQBQynKor4wrFbRg4DtAgS1aWO+gU52xpH7M9ScGgXSYmAVS9bIJ8EzuaGw0oNAw==} engines: {node: '>=10.14', npm: '>=6', yarn: '>=1'} @@ -7943,6 +7984,10 @@ packages: peerDependencies: react: ^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0 + luxon@3.7.2: + resolution: {integrity: sha512-vtEhXh/gNjI9Yg1u4jX/0YVPMvxzHuGgCm6tC5kZyb08yjGWGnqAjGJvcXbqQR2P3MyMEFnRbpcdFS6PBcLqew==} + engines: {node: '>=12'} + lz-string@1.5.0: resolution: {integrity: sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==} hasBin: true @@ -8353,6 +8398,13 @@ packages: ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + msgpackr-extract@3.0.3: + resolution: {integrity: sha512-P0efT1C9jIdVRefqjzOQ9Xml57zpOXnIuS+csaB4MdZbTdmGDLo8XhzBG1N7aO11gKDDkJvBLULeFTo46wwreA==} + hasBin: true + + msgpackr@1.11.5: + resolution: {integrity: sha512-UjkUHN0yqp9RWKy0Lplhh+wlpdt9oQBYgULZOiFhV3VclSF1JnSQWZ5r9gORQlNYaUKQoR8itv7g7z1xDDuACA==} + mustache@4.2.0: resolution: {integrity: 
sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==} hasBin: true @@ -8487,6 +8539,9 @@ packages: resolution: {integrity: sha512-WmS3EUGw+vXHlTgiUPi3NzbZNwH6+uGX0QLGgqG+aFSJ5rkX/Ee0nuwHBJfZTfQwwR8lGO819NEIwQ7CGhkdEQ==} deprecated: Use `change-case` + node-abort-controller@3.1.1: + resolution: {integrity: sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==} + node-domexception@1.0.0: resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} engines: {node: '>=10.5.0'} @@ -8509,6 +8564,10 @@ packages: resolution: {integrity: sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==} engines: {node: '>= 6.13.0'} + node-gyp-build-optional-packages@5.2.2: + resolution: {integrity: sha512-s+w+rBWnpTMwSFbaE0UXsRlg7hU4FjekKU4eyAih5T8nJuNZT1nNsskXpxmeqSK9UzkBl6UgRlnKc8hz8IEqOw==} + hasBin: true + node-html-parser@6.1.13: resolution: {integrity: sha512-qIsTMOY4C/dAa5Q5vsobRpOOvPfC4pB61UVW2uSwZNUp0QU/jCekTal1vMmbO0DgdHeLUJpv/ARmDqErVxA3Sg==} @@ -13185,6 +13244,24 @@ snapshots: transitivePeerDependencies: - supports-color + '@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3': + optional: true + + '@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3': + optional: true + + '@msgpackr-extract/msgpackr-extract-linux-arm64@3.0.3': + optional: true + + '@msgpackr-extract/msgpackr-extract-linux-arm@3.0.3': + optional: true + + '@msgpackr-extract/msgpackr-extract-linux-x64@3.0.3': + optional: true + + '@msgpackr-extract/msgpackr-extract-win32-x64@3.0.3': + optional: true + '@mux/mux-data-google-ima@0.2.8': dependencies: mux-embed: 5.9.0 @@ -16929,6 +17006,18 @@ snapshots: builtins@1.0.3: {} + bullmq@5.61.0: + dependencies: + cron-parser: 4.9.0 + ioredis: 5.7.0 + msgpackr: 1.11.5 + node-abort-controller: 3.1.1 + semver: 7.7.2 + tslib: 2.8.1 + uuid: 11.1.0 + transitivePeerDependencies: + - supports-color + bytes@3.1.2: {} cac@6.7.14: {} @@ -17367,6 +17456,10 @@ snapshots: crisp-sdk-web@1.0.25: {} + cron-parser@4.9.0: + dependencies: + luxon: 3.7.2 + cross-env@7.0.3: dependencies: cross-spawn: 7.0.6 @@ -19653,6 +19746,8 @@ snapshots: dependencies: react: 19.1.1 + luxon@3.7.2: {} + lz-string@1.5.0: {} magic-string@0.30.18: @@ -20343,6 +20438,22 @@ snapshots: ms@2.1.3: {} + msgpackr-extract@3.0.3: + dependencies: + node-gyp-build-optional-packages: 5.2.2 + optionalDependencies: + '@msgpackr-extract/msgpackr-extract-darwin-arm64': 3.0.3 + '@msgpackr-extract/msgpackr-extract-darwin-x64': 3.0.3 + '@msgpackr-extract/msgpackr-extract-linux-arm': 3.0.3 + '@msgpackr-extract/msgpackr-extract-linux-arm64': 3.0.3 + '@msgpackr-extract/msgpackr-extract-linux-x64': 3.0.3 + '@msgpackr-extract/msgpackr-extract-win32-x64': 3.0.3 + optional: true + + msgpackr@1.11.5: + optionalDependencies: + msgpackr-extract: 3.0.3 + mustache@4.2.0: {} mute-stream@0.0.8: {} @@ -20459,6 +20570,8 @@ snapshots: no-case@4.0.0: {} + node-abort-controller@3.1.1: {} + node-domexception@1.0.0: {} node-fetch@2.7.0(encoding@0.1.13): @@ -20475,6 +20588,11 @@ snapshots: node-forge@1.3.1: {} + node-gyp-build-optional-packages@5.2.2: + dependencies: + detect-libc: 2.0.4 + optional: true + node-html-parser@6.1.13: dependencies: css-select: 5.2.2 From 9d39a5fdd512abd6e58fbdfff6cc937775ae5f4b Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Fri, 24 Oct 2025 01:48:07 -0300 Subject: [PATCH 02/17] Update worker initialization. 
Add 7 queues to large queue --- .cursor/rules/queues.mdc | 138 ++++--- apps/web/app/api/queue/[queueName]/route.ts | 81 ++-- apps/web/app/api/resend/summary/all/route.ts | 1 - apps/web/package.json | 3 + apps/web/utils/queue/bullmq-manager.ts | 23 +- apps/web/utils/queue/qstash-manager.ts | 14 +- apps/web/utils/queue/queue-manager.ts | 18 + apps/web/utils/queue/queue.test.ts | 2 +- apps/web/utils/queue/queues.ts | 370 +++++++++++++++++- apps/web/utils/queue/worker.ts | 15 +- apps/web/utils/scheduled-actions/scheduler.ts | 1 + apps/web/utils/upstash/categorize-senders.ts | 31 +- apps/web/worker.ts | 113 ++++++ docker-compose.yml | 25 ++ docker/Dockerfile.prod | 2 + 15 files changed, 706 insertions(+), 131 deletions(-) create mode 100644 apps/web/worker.ts diff --git a/.cursor/rules/queues.mdc b/.cursor/rules/queues.mdc index 4169e08d35..82bb2c9364 100644 --- a/.cursor/rules/queues.mdc +++ b/.cursor/rules/queues.mdc @@ -9,15 +9,15 @@ Unified queue system supporting both QStash and Redis (BullMQ) with automatic sy ```typescript import { enqueueJob } from "@/utils/queue/queue-manager"; -// Basic job -const job = await enqueueJob("my-queue", { - message: "Hello from the queue system!", - userId: "user-123", +// Basic job (will be distributed across ai-categorize-senders-0 to ai-categorize-senders-6) +const job = await enqueueJob("ai-categorize-senders-0", { + emailAccountId: "user-123", + senders: ["sender@example.com"], }); // Delayed job (5 seconds) -const delayedJob = await enqueueJob("my-queue", { - message: "This job was delayed by 5 seconds", +const delayedJob = await enqueueJob("scheduled-actions", { + scheduledActionId: "action-456", }, { delay: 5000, }); @@ -30,11 +30,11 @@ console.log("Jobs enqueued:", job.id || job, delayedJob.id || delayedJob); ```typescript import { bulkEnqueueJobs } from "@/utils/queue/queue-manager"; -const jobs = await bulkEnqueueJobs("my-queue", { +const jobs = await bulkEnqueueJobs("ai-categorize-senders-0", { jobs: [ - { data: { message: "Bulk job 1" } }, - { data: { message: "Bulk job 2" } }, - { data: { message: "Bulk job 3" } }, + { data: { emailAccountId: "user-1", senders: ["sender1@example.com"] } }, + { data: { emailAccountId: "user-2", senders: ["sender2@example.com"] } }, + { data: { emailAccountId: "user-3", senders: ["sender3@example.com"] } }, ], }); @@ -43,28 +43,51 @@ console.log("Bulk jobs enqueued:", jobs.length); ### 3. 
Worker Setup (Redis only) -```typescript -import { registerWorker } from "@/utils/queue/worker"; +**Workers run as a separate process when `QUEUE_SYSTEM=redis`** -// Register a worker for processing jobs -const worker = registerWorker("my-queue", async (job) => { - console.log("Processing job:", job.id, job.data); - - // Your processing logic here - await processJob(job.data); - - console.log("Job completed:", job.id); -}, { - concurrency: 3, // Default concurrency is 3 -}); +Start the worker process alongside your Next.js application: -// Graceful shutdown -process.on("SIGINT", async () => { - await shutdownAllWorkers(); - process.exit(0); -}); +```bash +# Development (with auto-reload) +npm run worker:dev + +# Production +npm run worker + +# Using Docker Compose (runs both web and worker) +docker-compose up + +# Or run them separately: +docker-compose up -d web db redis # Start web, db, redis +docker-compose up -d worker # Start worker ``` +The worker process automatically: +- Creates one worker per registered queue +- Handles all queue types: + - `digest-item-summarize` - Process digest email items + - `ai-categorize-senders-0` to `ai-categorize-senders-6` - 7 distributed queues for AI sender categorization (load balanced) + - `scheduled-actions` - Execute scheduled email actions + - `ai-clean` - AI-powered email cleaning + - `email-digest-all` - Send digest emails to all users + - `email-summary-all` - Send summary emails to all users + - `clean-gmail` - Clean Gmail-specific operations +- Runs with concurrency of 3 per queue +- Implements graceful shutdown +- Reconnects on database/Redis failures + +**Why separate process?** +- Prevents blocking the main application event loop +- Allows independent scaling of workers +- Improves fault isolation +- Enables better resource management + +**Load Balancing for AI Categorization:** +- Email account IDs are distributed across 7 queues (`ai-categorize-senders-0` to `ai-categorize-senders-6`) +- Uses a simple hash function to ensure even distribution +- Each queue can process up to 3 jobs concurrently (21 total concurrent categorizations) +- QStash still uses per-email-account queues for maximum parallelization + ## Configuration ### Environment Variables @@ -94,9 +117,14 @@ console.log("Is QStash:", info.isQStash); ### Retry Configuration -- **QStash**: Automatically retries up to **10 times** (handled by QStash service) -- **BullMQ (Redis)**: Retries up to **5 times** (configured in BullMQ manager) -- **Parallelism**: Default concurrency of **3** for both systems +- **QStash**: Retry count is handled by QStash service (see [QStash documentation](https://docs.upstash.com/qstash) for details) +- **BullMQ (Redis)**: Retries up to **5 times** (configured in `bullmq-manager.ts`) +- **Concurrency**: + - **Worker Registry**: Default concurrency of **3** (configured in `worker.ts`) + - **BullMQ Manager**: Default concurrency of **3** (configured in `bullmq-manager.ts`) + - **QStash**: Uses parallelism of **3** for flow control (configured in `qstash-manager.ts`) + +> **Note**: For authoritative concurrency and retry settings, see `worker.ts` and `bullmq-manager.ts` ## Migration Examples @@ -129,7 +157,7 @@ await enqueueJob("digest-item-summarize", { ```typescript try { - const job = await enqueueJob("my-queue", data, { + const job = await enqueueJob("ai-categorize-senders-0", data, { delay: 5000, priority: 1, }); @@ -143,7 +171,7 @@ try { ```typescript import { registerWorker } from "@/utils/queue/worker"; -registerWorker("my-queue", async (job) => { 
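+  // Re-throwing below hands the failure back to BullMQ so its retry policy
+  // (up to 5 attempts, per bullmq-manager.ts) can re-run the job.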
+registerWorker("ai-categorize-senders-0", async (job) => { try { await processJob(job.data); } catch (error) { @@ -200,28 +228,21 @@ import { getQueueSystemInfo, closeQueueManager } from "@/utils/queue/queue-manager"; -import { registerWorker, shutdownAllWorkers } from "@/utils/queue/worker"; async function main() { // Check system info const systemInfo = getQueueSystemInfo(); console.log("Queue system:", systemInfo.system); - // Set up worker (Redis only) - if (systemInfo.isRedis) { - registerWorker("demo-queue", async (job) => { - console.log("Processing:", job.data); - await new Promise(resolve => setTimeout(resolve, 1000)); - console.log("Completed:", job.id); - }); - } + // Workers are automatically initialized when QUEUE_SYSTEM=redis + // No manual setup needed! // Enqueue some jobs - await enqueueJob("demo-queue", { message: "Hello!" }); - await bulkEnqueueJobs("demo-queue", { + await enqueueJob("email-digest-all", { emailAccountId: "account-123" }); + await bulkEnqueueJobs("ai-categorize-senders-0", { jobs: [ - { data: { message: "Bulk 1" } }, - { data: { message: "Bulk 2" } }, + { data: { emailAccountId: "account-123", senders: ["sender1@example.com"] } }, + { data: { emailAccountId: "account-456", senders: ["sender2@example.com"] } }, ], }); @@ -229,7 +250,6 @@ async function main() { await new Promise(resolve => setTimeout(resolve, 5000)); // Cleanup - await shutdownAllWorkers(); await closeQueueManager(); } @@ -274,10 +294,22 @@ main().catch(console.error); ## File Structure ``` -apps/web/utils/queue/ -├── queue-manager.ts # Main queue abstraction -├── bullmq-manager.ts # BullMQ implementation -├── qstash-manager.ts # QStash implementation -├── types.ts # Type definitions -└── worker.ts # Worker management +apps/web/ +├── worker.ts # Worker process entry point +└── utils/queue/ + ├── queue-manager.ts # Main queue abstraction + ├── bullmq-manager.ts # BullMQ implementation + ├── qstash-manager.ts # QStash implementation + ├── queues.ts # Queue handlers and job data types + ├── types.ts # Type definitions + ├── worker.ts # Worker management + ├── queue.test.ts # Comprehensive test suite + ├── ai-queue.ts # AI-specific queue utilities + ├── email-action-queue.ts # Email action queue utilities + └── email-actions.ts # Email action definitions + +docker/ +└── Dockerfile.prod # Production image (supports both web and worker) + +docker-compose.yml # Includes both web and worker services (reuses same image) ``` \ No newline at end of file diff --git a/apps/web/app/api/queue/[queueName]/route.ts b/apps/web/app/api/queue/[queueName]/route.ts index 00dc9d7a01..62f7277406 100644 --- a/apps/web/app/api/queue/[queueName]/route.ts +++ b/apps/web/app/api/queue/[queueName]/route.ts @@ -9,53 +9,56 @@ import { type NextRequest, NextResponse } from "next/server"; import { createScopedLogger } from "@/utils/logger"; import { getQueueHandler, isValidQueueName } from "@/utils/queue/queues"; +import { verifySignatureAppRouter } from "@upstash/qstash/nextjs"; const logger = createScopedLogger("queue-api"); -export async function POST( - request: NextRequest, - { params }: { params: { queueName: string } }, -) { - const { queueName } = params; +export const POST = verifySignatureAppRouter( + async ( + request: NextRequest, + { params }: { params: { queueName: string } }, + ) => { + const { queueName } = params; - try { - const body = await request.json(); + try { + const body = await request.json(); - logger.info("Received queue job", { - queueName, - body: JSON.stringify(body), - }); + 
logger.info("Received queue job", { + queueName, + body: JSON.stringify(body), + }); - // Validate queue name - if (!isValidQueueName(queueName)) { - logger.warn("Unknown queue name", { queueName }); - return NextResponse.json( - { error: "Unknown queue name" }, - { status: 400 }, - ); - } + // Validate queue name + if (!isValidQueueName(queueName)) { + logger.warn("Unknown queue name", { queueName }); + return NextResponse.json( + { error: "Unknown queue name" }, + { status: 400 }, + ); + } + + // Get the appropriate handler + const handler = getQueueHandler(queueName); + if (!handler) { + logger.error("No handler found for queue", { queueName }); + return NextResponse.json( + { error: "No handler found for queue" }, + { status: 500 }, + ); + } + + // Execute the handler + return await handler(body); + } catch (error) { + logger.error("Queue job processing failed", { + queueName, + error: error instanceof Error ? error.message : String(error), + }); - // Get the appropriate handler - const handler = getQueueHandler(queueName); - if (!handler) { - logger.error("No handler found for queue", { queueName }); return NextResponse.json( - { error: "No handler found for queue" }, + { error: "Job processing failed" }, { status: 500 }, ); } - - // Execute the handler - return await handler(body); - } catch (error) { - logger.error("Queue job processing failed", { - queueName, - error: error instanceof Error ? error.message : String(error), - }); - - return NextResponse.json( - { error: "Job processing failed" }, - { status: 500 }, - ); - } -} + }, +); diff --git a/apps/web/app/api/resend/summary/all/route.ts b/apps/web/app/api/resend/summary/all/route.ts index a39e201e6b..5084fae021 100644 --- a/apps/web/app/api/resend/summary/all/route.ts +++ b/apps/web/app/api/resend/summary/all/route.ts @@ -7,7 +7,6 @@ import { Frequency } from "@prisma/client"; import { captureException } from "@/utils/error"; import { createScopedLogger } from "@/utils/logger"; import { enqueueJob } from "@/utils/queue/queue-manager"; -import { QUEUES } from "@/utils/queue/queues"; const logger = createScopedLogger("cron/resend/summary/all"); diff --git a/apps/web/package.json b/apps/web/package.json index 4e5c68f003..fb4d7e33db 100644 --- a/apps/web/package.json +++ b/apps/web/package.json @@ -6,6 +6,8 @@ "dev": "cross-env NODE_OPTIONS=--max_old_space_size=16384 next dev --turbopack", "build": "cross-env NODE_OPTIONS=--max_old_space_size=16384 prisma migrate deploy && next build", "start": "next start", + "worker": "tsx worker.ts", + "worker:dev": "tsx watch worker.ts", "lint": "next lint", "test": "cross-env RUN_AI_TESTS=false vitest", "test-ai": "cross-env RUN_AI_TESTS=true vitest --run", @@ -192,6 +194,7 @@ "serwist": "9.2.0", "tailwindcss": "3.4.17", "tsconfig": "workspace:*", + "tsx": "^4.20.0", "vite-tsconfig-paths": "5.1.4", "vitest": "3.2.4", "vitest-mock-extended": "3.1.0" diff --git a/apps/web/utils/queue/bullmq-manager.ts b/apps/web/utils/queue/bullmq-manager.ts index 818cb3b12a..f83e368fc5 100644 --- a/apps/web/utils/queue/bullmq-manager.ts +++ b/apps/web/utils/queue/bullmq-manager.ts @@ -14,11 +14,8 @@ import type { QueueManager, } from "./types"; -// Default concurrency for BullMQ workers -const DEFAULT_CONCURRENCY = 3; - -// Default retry attempts for BullMQ jobs -const DEFAULT_ATTEMPTS = 5; +export const DEFAULT_CONCURRENCY = 3; +export const DEFAULT_ATTEMPTS = 5; const logger = createScopedLogger("queue-bullmq"); @@ -30,7 +27,7 @@ export class BullMQManager implements QueueManager { constructor() { 
this.connection = { - host: env.REDIS_URL!, + host: env.REDIS_URL, }; } @@ -43,10 +40,10 @@ export class BullMQManager implements QueueManager { const jobOptions = { delay: options.delay, - attempts: options.attempts || DEFAULT_ATTEMPTS, + attempts: options.attempts ?? DEFAULT_ATTEMPTS, priority: options.priority, - removeOnComplete: options.removeOnComplete || 10, - removeOnFail: options.removeOnFail || 5, + removeOnComplete: options.removeOnComplete ?? 10, + removeOnFail: options.removeOnFail ?? 5, jobId: options.jobId, }; @@ -68,14 +65,14 @@ export class BullMQManager implements QueueManager { const queue = this.getOrCreateQueue(queueName); const jobs = options.jobs.map((jobData) => ({ - name: jobData.name || queueName, + name: jobData.name ?? queueName, data: jobData.data, opts: { delay: options.delay, - attempts: options.attempts || DEFAULT_ATTEMPTS, + attempts: options.attempts ?? DEFAULT_ATTEMPTS, priority: options.priority, - removeOnComplete: options.removeOnComplete || 10, - removeOnFail: options.removeOnFail || 5, + removeOnComplete: options.removeOnComplete ?? 10, + removeOnFail: options.removeOnFail ?? 5, jobId: jobData.opts?.jobId, ...jobData.opts, }, diff --git a/apps/web/utils/queue/qstash-manager.ts b/apps/web/utils/queue/qstash-manager.ts index 30e5429f93..eaa520ebd9 100644 --- a/apps/web/utils/queue/qstash-manager.ts +++ b/apps/web/utils/queue/qstash-manager.ts @@ -15,6 +15,10 @@ const logger = createScopedLogger("queue-qstash"); // Default parallelism for QStash flow control const DEFAULT_PARALLELISM = 3; +function getQstashClient(): Client { + return new Client({ token: env.QSTASH_TOKEN! }); +} + export class QStashManager implements QueueManager { async enqueue( queueName: string, @@ -24,8 +28,9 @@ export class QStashManager implements QueueManager { const url = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}/api/queue/${queueName}`; if (options.delay) { - const notBefore = Math.floor(Date.now() / 1000) + options.delay / 1000; - const client = new Client({ token: env.QSTASH_TOKEN! }); + const notBefore = + Math.floor(Date.now() / 1000) + Math.floor(options.delay / 1000); + const client = getQstashClient(); const response = await client.publishJSON({ url, body: data, @@ -62,7 +67,8 @@ export class QStashManager implements QueueManager { }; if (options.delay) { - item.notBefore = Math.floor(Date.now() / 1000) + options.delay / 1000; + item.notBefore = + Math.floor(Date.now() / 1000) + Math.floor(options.delay / 1000); } if (job.opts?.jobId) { @@ -72,7 +78,7 @@ export class QStashManager implements QueueManager { return item; }); - const client = new Client({ token: env.QSTASH_TOKEN! 
}); + const client = getQstashClient(); const response = await client.batchJSON(items); return response?.map((r) => r.messageId || "unknown") || []; } diff --git a/apps/web/utils/queue/queue-manager.ts b/apps/web/utils/queue/queue-manager.ts index 9b259dd67d..3b7cf31afa 100644 --- a/apps/web/utils/queue/queue-manager.ts +++ b/apps/web/utils/queue/queue-manager.ts @@ -26,9 +26,17 @@ export function createQueueManager(): QueueManager { switch (queueSystem) { case "redis": // Use BullMQ with Redis + if (!env.REDIS_URL && !env.UPSTASH_REDIS_URL) { + throw new Error( + "Missing Redis URL (set REDIS_URL or UPSTASH_REDIS_URL) for QUEUE_SYSTEM=redis", + ); + } return new BullMQManager(); case "upstash": // Use QStash (HTTP-based, no Redis needed for BullMQ) + if (!env.QSTASH_TOKEN) { + throw new Error("Missing QSTASH_TOKEN for QUEUE_SYSTEM=upstash"); + } return new QStashManager(); default: throw new Error(`Unsupported queue system: ${queueSystem}`); @@ -71,6 +79,16 @@ export function createQueueWorker( }, ): Worker | null { const manager = getQueueManager(); + + // Only BullMQ supports workers; QStash uses HTTP endpoints + if (env.QUEUE_SYSTEM !== "redis") { + logger.warn("Workers not supported for queue system", { + queueSystem: env.QUEUE_SYSTEM, + queueName, + }); + return null; + } + return manager.createWorker(queueName, processor, options); } diff --git a/apps/web/utils/queue/queue.test.ts b/apps/web/utils/queue/queue.test.ts index 3f1da1f873..1c2b17f441 100644 --- a/apps/web/utils/queue/queue.test.ts +++ b/apps/web/utils/queue/queue.test.ts @@ -633,7 +633,7 @@ describe("Queue System", () => { "test-queue", processor, { - concurrency: 1, + concurrency: 3, }, ); expect(worker).toBe(mockWorker); diff --git a/apps/web/utils/queue/queues.ts b/apps/web/utils/queue/queues.ts index 99c99b2402..aef27d4a5f 100644 --- a/apps/web/utils/queue/queues.ts +++ b/apps/web/utils/queue/queues.ts @@ -1,6 +1,20 @@ import { NextResponse } from "next/server"; import { createScopedLogger } from "@/utils/logger"; import type { CleanAction } from "@prisma/client"; +import prisma from "@/utils/prisma"; +import { createEmailProvider } from "@/utils/email/provider"; +import { createUnsubscribeToken } from "@/utils/unsubscribe"; +import { calculateNextScheduleDate } from "@/utils/schedule"; +import { sendDigestEmail } from "@inboxzero/resend"; +import { env } from "@/env"; +import { DigestStatus, SystemType } from "@prisma/client"; +import { extractNameFromEmail } from "@/utils/email"; +import { camelCase } from "lodash"; +import { getRuleName } from "@/utils/rule/consts"; +import { storedDigestContentSchema } from "@/app/api/resend/digest/validation"; +import { sleep } from "@/utils/sleep"; +import type { ParsedMessage } from "@/utils/types"; +import type { Digest } from "@/app/api/resend/digest/validation"; const logger = createScopedLogger("queue-handlers"); @@ -85,11 +99,28 @@ async function handleCategorizeSendersJob(data: AiCategorizeSendersJobData) { senderCount: data.senders.length, }); - // TODO: Implement actual categorization logic - await new Promise((resolve) => setTimeout(resolve, 2000)); - - logger.info("Categorize senders job completed"); - return NextResponse.json({ success: true }); + try { + const response = await fetch( + `${process.env.NEXT_PUBLIC_BASE_URL}/api/user/categorize/senders/batch`, + { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(data), + }, + ); + + if (!response.ok) { + throw new Error(`HTTP error! 
status: ${response.status}`); + } + + await new Promise((resolve) => setTimeout(resolve, 2000)); + + logger.info("Categorize senders job completed successfully"); + return NextResponse.json({ success: true }); + } catch (error) { + logger.error("Error processing categorize senders job", { error }); + throw error; + } } async function handleScheduledActionJob(data: ScheduledActionJobData) { @@ -124,11 +155,283 @@ async function handleEmailDigestAllJob(data: EmailDigestAllJobData) { emailAccountId: data.emailAccountId, }); - // TODO: Implement actual email digest all logic - await new Promise((resolve) => setTimeout(resolve, 1500)); + try { + const result = await sendDigestEmailForAccount(data.emailAccountId); + logger.info("Email digest all job completed", { result }); + return NextResponse.json({ success: true, result }); + } catch (error) { + logger.error("Email digest all job failed", { + emailAccountId: data.emailAccountId, + error: error instanceof Error ? error.message : String(error), + }); + throw error; + } +} - logger.info("Email digest all job completed"); - return NextResponse.json({ success: true }); +async function getDigestSchedule({ + emailAccountId, +}: { + emailAccountId: string; +}) { + return prisma.schedule.findUnique({ + where: { emailAccountId }, + select: { + id: true, + intervalDays: true, + occurrences: true, + daysOfWeek: true, + timeOfDay: true, + lastOccurrenceAt: true, + nextOccurrenceAt: true, + }, + }); +} + +async function sendDigestEmailForAccount(emailAccountId: string) { + logger.info("Sending digest email"); + + const emailAccount = await prisma.emailAccount.findUnique({ + where: { id: emailAccountId }, + select: { + email: true, + account: { select: { provider: true } }, + }, + }); + + if (!emailAccount) { + throw new Error("Email account not found"); + } + + const emailProvider = await createEmailProvider({ + emailAccountId, + provider: emailAccount.account.provider, + }); + + const digestScheduleData = await getDigestSchedule({ emailAccountId }); + + const pendingDigests = await prisma.digest.findMany({ + where: { + emailAccountId, + status: DigestStatus.PENDING, + }, + select: { + id: true, + items: { + select: { + messageId: true, + content: true, + action: { + select: { + executedRule: { + select: { + rule: { + select: { + name: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }); + + if (pendingDigests.length) { + // Mark all found digests as processing + await prisma.digest.updateMany({ + where: { + id: { + in: pendingDigests.map((d) => d.id), + }, + }, + data: { + status: DigestStatus.PROCESSING, + }, + }); + } + + try { + // Return early if no digests were found + if (pendingDigests.length === 0) { + return { success: true, message: "No digests to process" }; + } + + // Store the digest IDs for the final update + const processedDigestIds = pendingDigests.map((d) => d.id); + + const messageIds = pendingDigests.flatMap((digest) => + digest.items.map((item) => item.messageId), + ); + + logger.info("Fetching batch of messages"); + + const messages: ParsedMessage[] = []; + if (messageIds.length > 0) { + const batchSize = 100; + + // Can't fetch more then 100 messages at a time, so fetch in batches + // and wait 2 seconds to avoid rate limiting + // TODO: Refactor into the provider if used elsewhere + for (let i = 0; i < messageIds.length; i += batchSize) { + const batch = messageIds.slice(i, i + batchSize); + const batchResults = await emailProvider.getMessagesBatch(batch); + messages.push(...batchResults); + + if (i + batchSize < 
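+        // Sleep only between batches; the delay is skipped after the final batch.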
messageIds.length) { + await sleep(2000); + } + } + } + + logger.info("Fetched batch of messages"); + + // Create a message lookup map for O(1) access + const messageMap = new Map(messages.map((m) => [m.id, m])); + + // Map of rules camelCase -> ruleName + const ruleNameMap = new Map(); + + // Transform and group in a single pass + const executedRulesByRule = pendingDigests.reduce((acc, digest) => { + digest.items.forEach((item) => { + const message = messageMap.get(item.messageId); + if (!message) { + logger.warn("Message not found, skipping digest item", { + messageId: item.messageId, + }); + return; + } + + const ruleName = + item.action?.executedRule?.rule?.name || + getRuleName(SystemType.COLD_EMAIL); + + const ruleNameKey = camelCase(ruleName); + if (!ruleNameMap.has(ruleNameKey)) { + ruleNameMap.set(ruleNameKey, ruleName); + } + + if (!acc[ruleNameKey]) { + acc[ruleNameKey] = []; + } + + let parsedContent: unknown; + try { + parsedContent = JSON.parse(item.content); + } catch (error) { + logger.warn("Failed to parse digest item content, skipping item", { + messageId: item.messageId, + digestId: digest.id, + error: error instanceof Error ? error.message : "Unknown error", + }); + return; // Skip this item and continue with the next one + } + + const contentResult = + storedDigestContentSchema.safeParse(parsedContent); + + if (contentResult.success) { + acc[ruleNameKey].push({ + content: contentResult.data.content, + from: extractNameFromEmail(message?.headers?.from || ""), + subject: message?.headers?.subject || "", + }); + } else { + logger.warn("Failed to validate digest content structure", { + messageId: item.messageId, + digestId: digest.id, + error: contentResult.error, + }); + } + }); + return acc; + }, {} as Digest); + + if (Object.keys(executedRulesByRule).length === 0) { + logger.info("No executed rules found, skipping digest email"); + return { + success: true, + message: "No executed rules found, skipping digest email", + }; + } + + const token = await createUnsubscribeToken({ emailAccountId }); + + logger.info("Sending digest email"); + + // First, send the digest email and wait for it to complete + await sendDigestEmail({ + from: env.RESEND_FROM_EMAIL, + to: emailAccount.email, + emailProps: { + baseUrl: env.NEXT_PUBLIC_BASE_URL, + unsubscribeToken: token, + date: new Date(), + ruleNames: Object.fromEntries(ruleNameMap), + ...executedRulesByRule, + emailAccountId, + }, + }); + + logger.info("Digest email sent"); + + // Only update database if email sending succeeded + // Use a transaction to ensure atomicity - all updates succeed or none are applied + await prisma.$transaction([ + ...(digestScheduleData + ? 
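+        // When a digest schedule exists, advance its occurrence window in the
+        // same transaction that marks these digests as sent.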
[ + prisma.schedule.update({ + where: { + id: digestScheduleData.id, + emailAccountId, + }, + data: { + lastOccurrenceAt: new Date(), + nextOccurrenceAt: calculateNextScheduleDate(digestScheduleData), + }, + }), + ] + : []), + // Mark only the processed digests as sent + prisma.digest.updateMany({ + where: { + id: { + in: processedDigestIds, + }, + }, + data: { + status: DigestStatus.SENT, + sentAt: new Date(), + }, + }), + // Redact all DigestItems for the processed digests + prisma.digestItem.updateMany({ + data: { content: "[REDACTED]" }, + where: { + digestId: { + in: processedDigestIds, + }, + }, + }), + ]); + } catch (error) { + await prisma.digest.updateMany({ + where: { + id: { + in: pendingDigests.map((d) => d.id), + }, + }, + data: { + status: DigestStatus.FAILED, + }, + }); + logger.error("Error sending digest email", { error }); + throw error; + } + + return { success: true, message: "Digest email sent successfully" }; } async function handleEmailSummaryAllJob(data: EmailSummaryAllJobData) { @@ -158,21 +461,62 @@ async function handleCleanGmailJob(data: CleanGmailJobData) { return NextResponse.json({ success: true }); } +// Configuration for distributed AI categorize senders queues +export const AI_CATEGORIZE_SENDERS_QUEUE_COUNT = 7; +const AI_CATEGORIZE_SENDERS_PREFIX = "ai-categorize-senders"; + +// Helper to get the queue index from an AI categorize senders queue name +export function getAiCategorizeSendersQueueIndex( + queueName: string, +): number | null { + if (!queueName.startsWith(`${AI_CATEGORIZE_SENDERS_PREFIX}-`)) return null; + const index = Number.parseInt(queueName.split("-").pop() || "", 10); + return Number.isNaN(index) ? null : index; +} + export const QUEUE_HANDLERS = { "digest-item-summarize": handleDigestJob, - "ai-categorize-senders": handleCategorizeSendersJob, "scheduled-actions": handleScheduledActionJob, "ai-clean": handleAiCleanJob, "email-digest-all": handleEmailDigestAllJob, "email-summary-all": handleEmailSummaryAllJob, "clean-gmail": handleCleanGmailJob, + + "ai-categorize-senders-0": handleCategorizeSendersJob, + "ai-categorize-senders-1": handleCategorizeSendersJob, + "ai-categorize-senders-2": handleCategorizeSendersJob, + "ai-categorize-senders-3": handleCategorizeSendersJob, + "ai-categorize-senders-4": handleCategorizeSendersJob, + "ai-categorize-senders-5": handleCategorizeSendersJob, + "ai-categorize-senders-6": handleCategorizeSendersJob, } as const; export type QueueName = keyof typeof QUEUE_HANDLERS; export function getQueueHandler(queueName: string) { - return QUEUE_HANDLERS[queueName as QueueName] || null; + if (queueName in QUEUE_HANDLERS) { + return QUEUE_HANDLERS[queueName as QueueName]; + } + + if (queueName.startsWith(`${AI_CATEGORIZE_SENDERS_PREFIX}-`)) { + return handleCategorizeSendersJob; + } + + return null; } -export function isValidQueueName(queueName: string): queueName is QueueName { - return queueName in QUEUE_HANDLERS; +export function isValidQueueName(queueName: string): boolean { + if (queueName in QUEUE_HANDLERS) { + return true; + } + + if (queueName.startsWith(`${AI_CATEGORIZE_SENDERS_PREFIX}-`)) { + const queueIndex = getAiCategorizeSendersQueueIndex(queueName); + return ( + queueIndex !== null && + queueIndex >= 0 && + queueIndex < AI_CATEGORIZE_SENDERS_QUEUE_COUNT + ); + } + + return false; } diff --git a/apps/web/utils/queue/worker.ts b/apps/web/utils/queue/worker.ts index d5aab26d01..8dc9be896e 100644 --- a/apps/web/utils/queue/worker.ts +++ b/apps/web/utils/queue/worker.ts @@ -2,6 +2,7 @@ import type { Worker 
} from "bullmq"; import { createScopedLogger } from "@/utils/logger"; import { createQueueWorker, closeQueueManager } from "./queue-manager"; import type { WorkerConfig, JobProcessor } from "./types"; +import { DEFAULT_CONCURRENCY } from "./bullmq-manager"; const logger = createScopedLogger("queue-worker"); @@ -20,7 +21,7 @@ class WorkerRegistry { } const worker = createQueueWorker(queueName, processor as JobProcessor, { - concurrency: config.concurrency || 1, + concurrency: config.concurrency || DEFAULT_CONCURRENCY, }); if (!worker) { @@ -29,11 +30,17 @@ class WorkerRegistry { } worker.on("completed", (job) => { - logger.info("Job completed", { + const logData: Record = { queueName, jobId: job.id, - duration: Date.now() - job.processedOn!, - }); + }; + + if (typeof job.processedOn === "number") { + logData.duration = Date.now() - job.processedOn; + logData.processedOn = job.processedOn; + } + + logger.info("Job completed", logData); }); worker.on("failed", (job, err) => { diff --git a/apps/web/utils/scheduled-actions/scheduler.ts b/apps/web/utils/scheduled-actions/scheduler.ts index 1aa03a3bbe..2887408c5a 100644 --- a/apps/web/utils/scheduled-actions/scheduler.ts +++ b/apps/web/utils/scheduled-actions/scheduler.ts @@ -12,6 +12,7 @@ const logger = createScopedLogger("qstash-scheduled-actions"); interface ScheduledActionPayload { scheduledActionId: string; + [key: string]: unknown; } function getQstashClient() { diff --git a/apps/web/utils/upstash/categorize-senders.ts b/apps/web/utils/upstash/categorize-senders.ts index 3dfcce193a..ddb391772c 100644 --- a/apps/web/utils/upstash/categorize-senders.ts +++ b/apps/web/utils/upstash/categorize-senders.ts @@ -1,19 +1,44 @@ import chunk from "lodash/chunk"; import { deleteQueue, listQueues } from "@/utils/upstash"; import { enqueueJob } from "@/utils/queue/queue-manager"; +import { AI_CATEGORIZE_SENDERS_QUEUE_COUNT } from "@/utils/queue/queues"; import { env } from "@/env"; import type { AiCategorizeSenders } from "@/app/api/user/categorize/senders/batch/handle-batch-validation"; import { createScopedLogger } from "@/utils/logger"; const logger = createScopedLogger("upstash"); -const CATEGORIZE_SENDERS_PREFIX = "ai-categorize-senders"; +// Use the same prefix as defined in queues.ts for consistency +const AI_CATEGORIZE_SENDERS_PREFIX = "ai-categorize-senders"; +/** + * Distributes email accounts across multiple queues for load balancing + * + * For Redis: Uses a simple hash of the emailAccountId to ensure consistent distribution + * - Creates hash by summing character codes + * - Example: "user-123" -> 'u'(117) + 's'(115) + 'e'(101) + 'r'(114) + '-'(45) + '1'(49) + '2'(50) + '3'(51) = 742 + * - Distributes across 7 queues (0-6) using modulo: 742 % 7 = 0 -> "ai-categorize-senders-0" + * + * For QStash: Uses per-email-account queues for maximum parallelization + */ const getCategorizeSendersQueueName = ({ emailAccountId, }: { emailAccountId: string; -}) => `${CATEGORIZE_SENDERS_PREFIX}-${emailAccountId}`; +}) => { + if (env.QUEUE_SYSTEM === "redis") { + const characterCodeSum = emailAccountId + .split("") + .reduce((total, character) => total + character.charCodeAt(0), 0); + + const targetQueueIndex = + characterCodeSum % AI_CATEGORIZE_SENDERS_QUEUE_COUNT; + + return `${AI_CATEGORIZE_SENDERS_PREFIX}-${targetQueueIndex}`; + } + + return `${AI_CATEGORIZE_SENDERS_PREFIX}-${emailAccountId}`; +}; /** * Publishes sender categorization tasks to QStash queue in batches @@ -57,7 +82,7 @@ export async function deleteEmptyCategorizeSendersQueues({ 
skipEmailAccountId: string; }) { return deleteEmptyQueues({ - prefix: CATEGORIZE_SENDERS_PREFIX, + prefix: AI_CATEGORIZE_SENDERS_PREFIX, skipEmailAccountId, }); } diff --git a/apps/web/worker.ts b/apps/web/worker.ts new file mode 100644 index 0000000000..e7991042cb --- /dev/null +++ b/apps/web/worker.ts @@ -0,0 +1,113 @@ +#!/usr/bin/env node +/** + * BullMQ Worker Process + * + * This runs as a separate process from the Next.js application + * Start with: npm run worker + */ + +import { createScopedLogger } from "@/utils/logger"; +import { registerWorker, shutdownAllWorkers } from "@/utils/queue/worker"; +import { QUEUE_HANDLERS, type QueueName } from "@/utils/queue/queues"; +import { env } from "@/env"; +import prisma from "@/utils/prisma"; + +const logger = createScopedLogger("worker-process"); + +async function startWorkers() { + if (env.QUEUE_SYSTEM !== "redis") { + logger.info("Worker process not needed - using QStash", { + queueSystem: env.QUEUE_SYSTEM, + }); + process.exit(0); + } + + logger.info("Starting BullMQ worker process", { + nodeEnv: process.env.NODE_ENV, + queueCount: Object.keys(QUEUE_HANDLERS).length, + }); + + // Worker should fail if it can't connect to the database + try { + await prisma.$connect(); + logger.info("Database connected successfully"); + } catch (error) { + logger.error("Failed to connect to database", { error }); + process.exit(1); + } + + // The recommended BullMQ approach is to register a worker for each queue + let successCount = 0; + for (const [queueName, handler] of Object.entries(QUEUE_HANDLERS)) { + const worker = registerWorker(queueName as QueueName, async (job) => { + logger.info("Processing job", { + queueName, + jobId: job.id, + data: JSON.stringify(job.data), + }); + + try { + await handler(job.data); + + logger.info("Job completed successfully", { + queueName, + jobId: job.id, + }); + } catch (error) { + logger.error("Job failed", { + queueName, + jobId: job.id, + error: error instanceof Error ? 
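+        // Log a normalized message here; the original error is re-thrown below
+        // so BullMQ records the failure and applies its retry policy.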
error.message : String(error), + }); + throw error; // Re-throw to let BullMQ handle retries + } + }); + + if (worker) { + successCount++; + logger.info("Worker registered successfully", { + queueName, + concurrency: worker.opts.concurrency, + }); + } else { + logger.error("Failed to register worker", { queueName }); + } + } + + logger.info("Worker process started", { + totalQueues: Object.keys(QUEUE_HANDLERS).length, + successfulWorkers: successCount, + }); + + process.stdin.resume(); +} + +async function shutdown() { + logger.info("Shutting down worker process..."); + + try { + await shutdownAllWorkers(); + await prisma.$disconnect(); + logger.info("Worker process shut down successfully"); + process.exit(0); + } catch (error) { + logger.error("Error during shutdown", { error }); + process.exit(1); + } +} + +process.on("SIGINT", shutdown); +process.on("SIGTERM", shutdown); +process.on("uncaughtException", (error) => { + logger.error("Uncaught exception", { error: error.message }); + shutdown(); +}); +process.on("unhandledRejection", (reason) => { + logger.error("Unhandled rejection", { reason }); + shutdown(); +}); + +startWorkers().catch((error) => { + logger.error("Failed to start workers", { error }); + process.exit(1); +}); diff --git a/docker-compose.yml b/docker-compose.yml index aa005837db..1c61901c3b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -62,6 +62,31 @@ services: UPSTASH_REDIS_URL: "http://serverless-redis-http:80" UPSTASH_REDIS_TOKEN: "${UPSTASH_REDIS_TOKEN}" + worker: + image: ghcr.io/elie222/inbox-zero:latest + pull_policy: if_not_present + # Use the same build context as web service + build: + context: . + dockerfile: ./docker/Dockerfile.prod + args: + NEXT_PUBLIC_BASE_URL: ${NEXT_PUBLIC_BASE_URL:-http://localhost:3000} + env_file: + - ./apps/web/.env + depends_on: + - db + - redis + networks: + - inbox-zero-network + environment: + DATABASE_URL: "postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-password}@db:5432/${POSTGRES_DB:-inboxzero}?schema=public" + DIRECT_URL: "postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-password}@db:5432/${POSTGRES_DB:-inboxzero}?schema=public" + REDIS_URL: "redis://redis:6379" + QUEUE_SYSTEM: "redis" + # Override the default command to run the worker instead of the web server + command: ["pnpm", "--filter", "inbox-zero-ai", "run", "worker"] + restart: unless-stopped + volumes: database-data: diff --git a/docker/Dockerfile.prod b/docker/Dockerfile.prod index 59ae4c83d4..eeb850afa1 100644 --- a/docker/Dockerfile.prod +++ b/docker/Dockerfile.prod @@ -6,6 +6,8 @@ WORKDIR /app RUN apk add --no-cache openssl # Install the specific pnpm version used by the project RUN npm install -g pnpm@10.15.0 +# Install tsx for running the worker +RUN npm install -g tsx@4.20.0 # Copy all package manager files first for caching COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc* ./ From dc265b6ce1e1a51cfc3e64069ac8ff749ab4edad Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Fri, 24 Oct 2025 12:02:30 -0300 Subject: [PATCH 03/17] PR feedback --- apps/web/app/api/queue/[queueName]/route.ts | 111 ++++++++++++-------- apps/web/utils/queue/qstash-manager.ts | 6 +- apps/web/utils/queue/queue-manager.ts | 10 ++ apps/web/utils/queue/queues.ts | 20 ++-- apps/web/worker.ts | 13 +-- 5 files changed, 95 insertions(+), 65 deletions(-) diff --git a/apps/web/app/api/queue/[queueName]/route.ts b/apps/web/app/api/queue/[queueName]/route.ts index 62f7277406..208e48da5b 100644 --- a/apps/web/app/api/queue/[queueName]/route.ts +++ 
b/apps/web/app/api/queue/[queueName]/route.ts @@ -2,6 +2,10 @@ * Generic queue handler API route * This handles jobs from both QStash and BullMQ systems * + * Authentication: + * - QStash requests: Verified via QStash signature + * - Internal Redis/BullMQ requests: Verified via internal API key + * * Usage: POST /api/queue/{queueName} * Body: Job data */ @@ -10,55 +14,78 @@ import { type NextRequest, NextResponse } from "next/server"; import { createScopedLogger } from "@/utils/logger"; import { getQueueHandler, isValidQueueName } from "@/utils/queue/queues"; import { verifySignatureAppRouter } from "@upstash/qstash/nextjs"; +import { withError } from "@/utils/middleware"; +import { isValidInternalApiKey } from "@/utils/internal-api"; +import { env } from "@/env"; const logger = createScopedLogger("queue-api"); -export const POST = verifySignatureAppRouter( - async ( - request: NextRequest, - { params }: { params: { queueName: string } }, - ) => { - const { queueName } = params; +// Internal authentication for Redis/BullMQ jobs +async function validateInternalRequest(request: NextRequest): Promise { + // Check for internal API key + if (isValidInternalApiKey(request.headers, logger)) { + return true; + } - try { - const body = await request.json(); + // Check for cron secret (for scheduled jobs) + const authHeader = request.headers.get("authorization"); + if (authHeader === `Bearer ${env.CRON_SECRET}`) { + return true; + } - logger.info("Received queue job", { - queueName, - body: JSON.stringify(body), - }); + logger.warn("Unauthorized internal request", { + hasInternalKey: !!request.headers.get("x-api-key"), + hasCronSecret: !!authHeader, + origin: request.headers.get("origin"), + userAgent: request.headers.get("user-agent"), + }); - // Validate queue name - if (!isValidQueueName(queueName)) { - logger.warn("Unknown queue name", { queueName }); - return NextResponse.json( - { error: "Unknown queue name" }, - { status: 400 }, - ); - } + return false; +} - // Get the appropriate handler - const handler = getQueueHandler(queueName); - if (!handler) { - logger.error("No handler found for queue", { queueName }); - return NextResponse.json( - { error: "No handler found for queue" }, - { status: 500 }, - ); - } +// Main handler with authentication and error handling +async function handleQueueJob( + request: NextRequest, + { params }: { params: Promise> }, +) { + const { queueName } = await params; - // Execute the handler - return await handler(body); - } catch (error) { - logger.error("Queue job processing failed", { - queueName, - error: error instanceof Error ? 
error.message : String(error), - }); + // Validate queue name first + if (!isValidQueueName(queueName)) { + logger.warn("Unknown queue name", { queueName }); + return NextResponse.json({ error: "Unknown queue name" }, { status: 400 }); + } - return NextResponse.json( - { error: "Job processing failed" }, - { status: 500 }, - ); + // For internal Redis/BullMQ requests, validate authentication + if (env.QUEUE_SYSTEM === "redis") { + const isAuthorized = await validateInternalRequest(request); + if (!isAuthorized) { + logger.error("Unauthorized internal request", { queueName }); + return NextResponse.json({ error: "Unauthorized" }, { status: 401 }); } - }, -); + } + + const body = await request.json(); + + logger.info("Processing queue job", { + queueName, + queueSystem: env.QUEUE_SYSTEM, + }); + + // Get the appropriate handler + const handler = getQueueHandler(queueName); + if (!handler) { + logger.error("No handler found for queue", { queueName }); + return NextResponse.json( + { error: "No handler found for queue" }, + { status: 500 }, + ); + } + + // Execute the handler + return await handler(body); +} + +// Export with QStash signature verification for QStash requests +// and withError middleware for consistent error handling +export const POST = verifySignatureAppRouter(withError(handleQueueJob)); diff --git a/apps/web/utils/queue/qstash-manager.ts b/apps/web/utils/queue/qstash-manager.ts index eaa520ebd9..01bc53ca10 100644 --- a/apps/web/utils/queue/qstash-manager.ts +++ b/apps/web/utils/queue/qstash-manager.ts @@ -28,8 +28,7 @@ export class QStashManager implements QueueManager { const url = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}/api/queue/${queueName}`; if (options.delay) { - const notBefore = - Math.floor(Date.now() / 1000) + Math.floor(options.delay / 1000); + const notBefore = Math.ceil((Date.now() + options.delay) / 1000); const client = getQstashClient(); const response = await client.publishJSON({ url, @@ -67,8 +66,7 @@ export class QStashManager implements QueueManager { }; if (options.delay) { - item.notBefore = - Math.floor(Date.now() / 1000) + Math.floor(options.delay / 1000); + item.notBefore = Math.ceil((Date.now() + options.delay) / 1000); } if (job.opts?.jobId) { diff --git a/apps/web/utils/queue/queue-manager.ts b/apps/web/utils/queue/queue-manager.ts index 3b7cf31afa..c7317ec5f7 100644 --- a/apps/web/utils/queue/queue-manager.ts +++ b/apps/web/utils/queue/queue-manager.ts @@ -100,6 +100,16 @@ export function createQueue( }, ): Queue | null { const manager = getQueueManager(); + + // Only BullMQ supports queue creation; QStash uses HTTP endpoints + if (env.QUEUE_SYSTEM !== "redis") { + logger.warn("Queue creation not supported for queue system", { + queueSystem: env.QUEUE_SYSTEM, + queueName, + }); + return null; + } + return manager.createQueue(queueName, options); } diff --git a/apps/web/utils/queue/queues.ts b/apps/web/utils/queue/queues.ts index aef27d4a5f..578e4d6725 100644 --- a/apps/web/utils/queue/queues.ts +++ b/apps/web/utils/queue/queues.ts @@ -15,6 +15,7 @@ import { storedDigestContentSchema } from "@/app/api/resend/digest/validation"; import { sleep } from "@/utils/sleep"; import type { ParsedMessage } from "@/utils/types"; import type { Digest } from "@/app/api/resend/digest/validation"; +import { handleBatchRequest } from "@/app/api/user/categorize/senders/batch/handle-batch"; const logger = createScopedLogger("queue-handlers"); @@ -100,21 +101,23 @@ async function handleCategorizeSendersJob(data: AiCategorizeSendersJobData) { }); try { - const 
response = await fetch( - `${process.env.NEXT_PUBLIC_BASE_URL}/api/user/categorize/senders/batch`, - { + // Call the batch categorization logic directly instead of making an HTTP call + // This eliminates unnecessary network overhead and improves performance + const response = await handleBatchRequest( + new Request("http://localhost", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify(data), - }, + }), ); if (!response.ok) { - throw new Error(`HTTP error! status: ${response.status}`); + const errorText = await response.text(); + throw new Error( + `Batch categorization failed: ${response.status} - ${errorText}`, + ); } - await new Promise((resolve) => setTimeout(resolve, 2000)); - logger.info("Categorize senders job completed successfully"); return NextResponse.json({ success: true }); } catch (error) { @@ -271,7 +274,7 @@ async function sendDigestEmailForAccount(emailAccountId: string) { if (messageIds.length > 0) { const batchSize = 100; - // Can't fetch more then 100 messages at a time, so fetch in batches + // Can't fetch more than 100 messages at a time, so fetch in batches // and wait 2 seconds to avoid rate limiting // TODO: Refactor into the provider if used elsewhere for (let i = 0; i < messageIds.length; i += batchSize) { @@ -436,7 +439,6 @@ async function sendDigestEmailForAccount(emailAccountId: string) { async function handleEmailSummaryAllJob(data: EmailSummaryAllJobData) { logger.info("Processing email summary all job", { - email: data.email, userId: data.userId, }); diff --git a/apps/web/worker.ts b/apps/web/worker.ts index e7991042cb..67621792ce 100644 --- a/apps/web/worker.ts +++ b/apps/web/worker.ts @@ -40,19 +40,12 @@ async function startWorkers() { let successCount = 0; for (const [queueName, handler] of Object.entries(QUEUE_HANDLERS)) { const worker = registerWorker(queueName as QueueName, async (job) => { - logger.info("Processing job", { - queueName, - jobId: job.id, - data: JSON.stringify(job.data), - }); + logger.info("Processing job", { queueName, jobId: job.id }); try { - await handler(job.data); + await handler(job.data as never); - logger.info("Job completed successfully", { - queueName, - jobId: job.id, - }); + logger.info("Job completed successfully", { queueName, jobId: job.id }); } catch (error) { logger.error("Job failed", { queueName, From a688952069b820035b2aefa6f90a99f1a8036e13 Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Fri, 24 Oct 2025 12:05:07 -0300 Subject: [PATCH 04/17] Restore pnpm-lock --- pnpm-lock.yaml | 118 ------------------------------------------------- 1 file changed, 118 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 28319411f9..a34a333ad7 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -327,9 +327,6 @@ importers: braintrust: specifier: 0.3.6 version: 0.3.6(zod@3.25.46) - bullmq: - specifier: ^5.61.0 - version: 5.61.0 capital-case: specifier: 2.0.0 version: 2.0.0 @@ -2354,36 +2351,6 @@ packages: resolution: {integrity: sha512-beedclIvFcCnPrYgHsylqiYJVJ/CI47Vyc4tY8no1/Li/O8U4BTlJfy6ZwxkYwx+Mx10nrgwSVrA7VBbhh4slg==} engines: {node: '>=18'} - '@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3': - resolution: {integrity: sha512-QZHtlVgbAdy2zAqNA9Gu1UpIuI8Xvsd1v8ic6B2pZmeFnFcMWiPLfWXh7TVw4eGEZ/C9TH281KwhVoeQUKbyjw==} - cpu: [arm64] - os: [darwin] - - '@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3': - resolution: {integrity: sha512-mdzd3AVzYKuUmiWOQ8GNhl64/IoFGol569zNRdkLReh6LRLHOXxU4U8eq0JwaD8iFHdVGqSy4IjFL4reoWCDFw==} - cpu: [x64] - os: [darwin] - - 
'@msgpackr-extract/msgpackr-extract-linux-arm64@3.0.3': - resolution: {integrity: sha512-YxQL+ax0XqBJDZiKimS2XQaf+2wDGVa1enVRGzEvLLVFeqa5kx2bWbtcSXgsxjQB7nRqqIGFIcLteF/sHeVtQg==} - cpu: [arm64] - os: [linux] - - '@msgpackr-extract/msgpackr-extract-linux-arm@3.0.3': - resolution: {integrity: sha512-fg0uy/dG/nZEXfYilKoRe7yALaNmHoYeIoJuJ7KJ+YyU2bvY8vPv27f7UKhGRpY6euFYqEVhxCFZgAUNQBM3nw==} - cpu: [arm] - os: [linux] - - '@msgpackr-extract/msgpackr-extract-linux-x64@3.0.3': - resolution: {integrity: sha512-cvwNfbP07pKUfq1uH+S6KJ7dT9K8WOE4ZiAcsrSes+UY55E/0jLYc+vq+DO7jlmqRb5zAggExKm0H7O/CBaesg==} - cpu: [x64] - os: [linux] - - '@msgpackr-extract/msgpackr-extract-win32-x64@3.0.3': - resolution: {integrity: sha512-x0fWaQtYp4E6sktbsdAqnehxDgEc/VwM7uLsRCYWaiGu0ykYdZPiS8zCWdnjHwyiumousxfBm4SO31eXqwEZhQ==} - cpu: [x64] - os: [win32] - '@mux/mux-data-google-ima@0.2.8': resolution: {integrity: sha512-0ZEkHdcZ6bS8QtcjFcoJeZxJTpX7qRIledf4q1trMWPznugvtajCjCM2kieK/pzkZj1JM6liDRFs1PJSfVUs2A==} @@ -3852,7 +3819,6 @@ packages: '@sanity/next-loader@2.1.0': resolution: {integrity: sha512-ta9HPqGpVQNv5NYTV5X9HNa2m7AJi1ixUVHVbOwGLfdm2D5FWALuCYlQ8GvtaCIxCakOtLLmUSVt9d8wFM83xQ==} engines: {node: '>=18.18'} - deprecated: This package is deprecated. Please use 'next-sanity/live' instead. peerDependencies: next: ^14.1 || ^15.0.0-0 react: ^18.3 || ^19.0.0-0 @@ -5518,9 +5484,6 @@ packages: builtins@1.0.3: resolution: {integrity: sha512-uYBjakWipfaO/bXI7E8rq6kpwHRZK5cNYrUv2OzZSI/FvmdMyXJ2tG9dKcjEC5YHmHpUAwsargWIZNWdxb/bnQ==} - bullmq@5.61.0: - resolution: {integrity: sha512-khaTjc1JnzaYFl4FrUtsSsqugAW/urRrcZ9Q0ZE+REAw8W+gkHFqxbGlutOu6q7j7n91wibVaaNlOUMdiEvoSQ==} - bytes@3.1.2: resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} engines: {node: '>= 0.8'} @@ -5948,10 +5911,6 @@ packages: crisp-sdk-web@1.0.25: resolution: {integrity: sha512-CWTHFFeHRV0oqiXoPh/aIAKhFs6xcIM4NenGPnClAMCZUDQgQsF1OWmZWmnVNjJriXUmWRgDfeUxcxygS0dCRA==} - cron-parser@4.9.0: - resolution: {integrity: sha512-p0SaNjrHOnQeR8/VnfGbmg9te2kfyYSQ7Sc/j/6DtPL3JQvKxmjO9TSjNFpujqV3vEYYBvNNvXSxzyksBWAx1Q==} - engines: {node: '>=12.0.0'} - cross-env@7.0.3: resolution: {integrity: sha512-+/HKd6EgcQCJGh2PSjZuUitQBQynKor4wrFbRg4DtAgS1aWO+gU52xpH7M9ScGgXSYmAVS9bIJ8EzuaGw0oNAw==} engines: {node: '>=10.14', npm: '>=6', yarn: '>=1'} @@ -7984,10 +7943,6 @@ packages: peerDependencies: react: ^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0 - luxon@3.7.2: - resolution: {integrity: sha512-vtEhXh/gNjI9Yg1u4jX/0YVPMvxzHuGgCm6tC5kZyb08yjGWGnqAjGJvcXbqQR2P3MyMEFnRbpcdFS6PBcLqew==} - engines: {node: '>=12'} - lz-string@1.5.0: resolution: {integrity: sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==} hasBin: true @@ -8398,13 +8353,6 @@ packages: ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} - msgpackr-extract@3.0.3: - resolution: {integrity: sha512-P0efT1C9jIdVRefqjzOQ9Xml57zpOXnIuS+csaB4MdZbTdmGDLo8XhzBG1N7aO11gKDDkJvBLULeFTo46wwreA==} - hasBin: true - - msgpackr@1.11.5: - resolution: {integrity: sha512-UjkUHN0yqp9RWKy0Lplhh+wlpdt9oQBYgULZOiFhV3VclSF1JnSQWZ5r9gORQlNYaUKQoR8itv7g7z1xDDuACA==} - mustache@4.2.0: resolution: {integrity: sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==} hasBin: true @@ -8539,9 +8487,6 @@ packages: resolution: {integrity: sha512-WmS3EUGw+vXHlTgiUPi3NzbZNwH6+uGX0QLGgqG+aFSJ5rkX/Ee0nuwHBJfZTfQwwR8lGO819NEIwQ7CGhkdEQ==} 
deprecated: Use `change-case` - node-abort-controller@3.1.1: - resolution: {integrity: sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==} - node-domexception@1.0.0: resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} engines: {node: '>=10.5.0'} @@ -8564,10 +8509,6 @@ packages: resolution: {integrity: sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==} engines: {node: '>= 6.13.0'} - node-gyp-build-optional-packages@5.2.2: - resolution: {integrity: sha512-s+w+rBWnpTMwSFbaE0UXsRlg7hU4FjekKU4eyAih5T8nJuNZT1nNsskXpxmeqSK9UzkBl6UgRlnKc8hz8IEqOw==} - hasBin: true - node-html-parser@6.1.13: resolution: {integrity: sha512-qIsTMOY4C/dAa5Q5vsobRpOOvPfC4pB61UVW2uSwZNUp0QU/jCekTal1vMmbO0DgdHeLUJpv/ARmDqErVxA3Sg==} @@ -13244,24 +13185,6 @@ snapshots: transitivePeerDependencies: - supports-color - '@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3': - optional: true - - '@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3': - optional: true - - '@msgpackr-extract/msgpackr-extract-linux-arm64@3.0.3': - optional: true - - '@msgpackr-extract/msgpackr-extract-linux-arm@3.0.3': - optional: true - - '@msgpackr-extract/msgpackr-extract-linux-x64@3.0.3': - optional: true - - '@msgpackr-extract/msgpackr-extract-win32-x64@3.0.3': - optional: true - '@mux/mux-data-google-ima@0.2.8': dependencies: mux-embed: 5.9.0 @@ -17006,18 +16929,6 @@ snapshots: builtins@1.0.3: {} - bullmq@5.61.0: - dependencies: - cron-parser: 4.9.0 - ioredis: 5.7.0 - msgpackr: 1.11.5 - node-abort-controller: 3.1.1 - semver: 7.7.2 - tslib: 2.8.1 - uuid: 11.1.0 - transitivePeerDependencies: - - supports-color - bytes@3.1.2: {} cac@6.7.14: {} @@ -17456,10 +17367,6 @@ snapshots: crisp-sdk-web@1.0.25: {} - cron-parser@4.9.0: - dependencies: - luxon: 3.7.2 - cross-env@7.0.3: dependencies: cross-spawn: 7.0.6 @@ -19746,8 +19653,6 @@ snapshots: dependencies: react: 19.1.1 - luxon@3.7.2: {} - lz-string@1.5.0: {} magic-string@0.30.18: @@ -20438,22 +20343,6 @@ snapshots: ms@2.1.3: {} - msgpackr-extract@3.0.3: - dependencies: - node-gyp-build-optional-packages: 5.2.2 - optionalDependencies: - '@msgpackr-extract/msgpackr-extract-darwin-arm64': 3.0.3 - '@msgpackr-extract/msgpackr-extract-darwin-x64': 3.0.3 - '@msgpackr-extract/msgpackr-extract-linux-arm': 3.0.3 - '@msgpackr-extract/msgpackr-extract-linux-arm64': 3.0.3 - '@msgpackr-extract/msgpackr-extract-linux-x64': 3.0.3 - '@msgpackr-extract/msgpackr-extract-win32-x64': 3.0.3 - optional: true - - msgpackr@1.11.5: - optionalDependencies: - msgpackr-extract: 3.0.3 - mustache@4.2.0: {} mute-stream@0.0.8: {} @@ -20570,8 +20459,6 @@ snapshots: no-case@4.0.0: {} - node-abort-controller@3.1.1: {} - node-domexception@1.0.0: {} node-fetch@2.7.0(encoding@0.1.13): @@ -20588,11 +20475,6 @@ snapshots: node-forge@1.3.1: {} - node-gyp-build-optional-packages@5.2.2: - dependencies: - detect-libc: 2.0.4 - optional: true - node-html-parser@6.1.13: dependencies: css-select: 5.2.2 From 1ca7d0f568542b84355c65f40895edd89b0d22ce Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Mon, 3 Nov 2025 11:55:00 -0300 Subject: [PATCH 05/17] Partial queue implementation with docker improvements --- .cursor/rules/queues.mdc | 49 +++++--- .dockerignore | 34 ++++++ apps/web/env.ts | 9 -- apps/web/instrumentation.ts | 44 +++++++ apps/web/next.config.ts | 1 + apps/web/package.json | 10 +- apps/web/utils/queue/bullmq-manager.ts | 8 +- apps/web/utils/queue/queue-manager.ts | 10 -- 
apps/web/worker.ts | 106 ---------------- docker-compose.yml | 2 +- docker/Dockerfile.prod | 59 ++++----- package.json | 1 + pnpm-lock.yaml | 163 ++++++++++++++++++++++++- 13 files changed, 312 insertions(+), 184 deletions(-) create mode 100644 .dockerignore delete mode 100644 apps/web/worker.ts diff --git a/.cursor/rules/queues.mdc b/.cursor/rules/queues.mdc index 82bb2c9364..6c72b25abf 100644 --- a/.cursor/rules/queues.mdc +++ b/.cursor/rules/queues.mdc @@ -43,23 +43,20 @@ console.log("Bulk jobs enqueued:", jobs.length); ### 3. Worker Setup (Redis only) -**Workers run as a separate process when `QUEUE_SYSTEM=redis`** +**How workers run** -Start the worker process alongside your Next.js application: +- Development: workers auto-start inside the Next.js server via `apps/web/instrumentation.ts` when `NODE_ENV=development`. +- Production: run a separate worker process using the standalone build output: ```bash -# Development (with auto-reload) -npm run worker:dev +# Build app and copy worker into standalone output +pnpm --filter inbox-zero-ai build -# Production -npm run worker +# Start Next.js (prod) +pnpm --filter inbox-zero-ai start:standalone -# Using Docker Compose (runs both web and worker) -docker-compose up - -# Or run them separately: -docker-compose up -d web db redis # Start web, db, redis -docker-compose up -d worker # Start worker +# Start worker (separate process) +pnpm --filter inbox-zero-ai worker ``` The worker process automatically: @@ -97,7 +94,8 @@ The worker process automatically: QUEUE_SYSTEM=redis # Use Redis + BullMQ QUEUE_SYSTEM=upstash # Use QStash (default) -# For Redis system +# For Redis system (BullMQ) +# Must be a full URL. Do NOT pass this as host/port separately to BullMQ. REDIS_URL=redis://localhost:6379 # For QStash system @@ -115,7 +113,7 @@ console.log("Is Redis:", info.isRedis); console.log("Is QStash:", info.isQStash); ``` -### Retry Configuration +### Retry & Concurrency Configuration - **QStash**: Retry count is handled by QStash service (see [QStash documentation](https://docs.upstash.com/qstash) for details) - **BullMQ (Redis)**: Retries up to **5 times** (configured in `bullmq-manager.ts`) @@ -124,7 +122,7 @@ console.log("Is QStash:", info.isQStash); - **BullMQ Manager**: Default concurrency of **3** (configured in `bullmq-manager.ts`) - **QStash**: Uses parallelism of **3** for flow control (configured in `qstash-manager.ts`) -> **Note**: For authoritative concurrency and retry settings, see `worker.ts` and `bullmq-manager.ts` +> Note: For authoritative concurrency and retry settings, see `apps/web/utils/queue/worker.ts` and `apps/web/utils/queue/bullmq-manager.ts`. ## Migration Examples @@ -269,11 +267,26 @@ main().catch(console.error); ### Common Issues -- **Workers not processing jobs**: Ensure you're using Redis queue system and workers are properly registered -- **Connection errors**: Check your Redis/QStash credentials and network connectivity +- **Workers not processing jobs**: In dev, confirm `NODE_ENV=development` and that `instrumentation.ts` is loaded. In prod, ensure `pnpm worker` is running. +- **Connection errors (BullMQ)**: Use `connection: { url: env.REDIS_URL }` with BullMQ. Passing `host: "redis://..."` causes DNS errors like `ENOTFOUND redis://localhost:6379`. - **Jobs stuck in queue**: Check worker logs for errors and ensure workers are running - **Memory issues**: Adjust concurrency settings and job cleanup policies +### Environment validation & server-only pitfalls + +- `@t3-oss/env-nextjs` validates at import time. 
If the worker is run outside Next without preloading env, you'll see "Invalid environment variables". Next dev/prod automatically load `.env`, but raw `node/tsx` doesn’t. +- The `server-only` package throws when imported outside Next’s server graph. Running app modules directly with `node/tsx` can crash. + +Solutions we use: +- Development workers run inside Next via `instrumentation.ts` (no env/server-only issues). +- Production workers use the standalone build and a simple `apps/web/worker.js` that imports from `.next/standalone/...`. + +If you need a standalone TypeScript worker entrypoint, bundle it (tsup/esbuild) and: +- preload env (`@next/env`) before importing `env.ts` +- alias `server-only` to a no-op only for the worker bundle +- register tsconfig paths +This keeps code clean but adds a bundling step. + ## API Reference @@ -295,7 +308,7 @@ main().catch(console.error); ``` apps/web/ -├── worker.ts # Worker process entry point +├── worker.js # Worker process entry point (copied to standalone on build) └── utils/queue/ ├── queue-manager.ts # Main queue abstraction ├── bullmq-manager.ts # BullMQ implementation diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..140171a432 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,34 @@ +# VCS +.git + +# Node / package managers +node_modules +pnpm-store + +# Build outputs / caches +.next +.turbo +out +coverage +*.log + +# OS/editor junk +.DS_Store +*.swp +*.swo +.idea +.vscode + +# Local env files (do not bake secrets) +*.env +*.env.* +!.env.example + +# Docker +Dockerfile* +!.dockerignore + +# Misc +*.local +_tmp +.tmp diff --git a/apps/web/env.ts b/apps/web/env.ts index b0e8288017..8765f2d91a 100644 --- a/apps/web/env.ts +++ b/apps/web/env.ts @@ -238,12 +238,3 @@ export const env = createEnv({ process.env.NEXT_PUBLIC_DISABLE_REFERRAL_SIGNATURE, }, }); - -// Validate queue system configuration once at bootstrap -if (env.QUEUE_SYSTEM === "redis" && !env.REDIS_URL) { - throw new Error("REDIS_URL is required when QUEUE_SYSTEM is set to 'redis'"); -} - -if (env.QUEUE_SYSTEM === "upstash" && !env.QSTASH_TOKEN) { - console.warn("QSTASH_TOKEN is not set - QStash functionality may be limited"); -} diff --git a/apps/web/instrumentation.ts b/apps/web/instrumentation.ts index cf789025e4..9cd0024819 100644 --- a/apps/web/instrumentation.ts +++ b/apps/web/instrumentation.ts @@ -1,6 +1,12 @@ /* eslint-disable no-process-env */ import * as Sentry from "@sentry/nextjs"; +declare global { + // Flag to avoid starting workers multiple times in dev hot-reload + // eslint-disable-next-line no-var + var __inboxZeroWorkersStarted: boolean | undefined; +} + export function register() { if (process.env.NEXT_RUNTIME === "nodejs") { // this is your Sentry.init call from `sentry.server.config.js|ts` @@ -13,6 +19,44 @@ export function register() { // uncomment the line below to enable Spotlight (https://spotlightjs.com) // spotlight: process.env.NODE_ENV === 'development', }); + + // Start BullMQ workers inside the Next.js server process in dev mode + if (process.env.NODE_ENV === "development") { + // Avoid duplicate starts during hot reloads + if (!globalThis.__inboxZeroWorkersStarted) { + globalThis.__inboxZeroWorkersStarted = true; + + // Defer heavy imports until after env is available + import("@/env").then(async ({ env }) => { + if (env.QUEUE_SYSTEM !== "redis") return; + + try { + const [{ registerWorker }, { QUEUE_HANDLERS }] = await Promise.all([ + import("@/utils/queue/worker"), + import("@/utils/queue/queues"), + ]); + + let started = 
0; + const entries = Object.entries(QUEUE_HANDLERS) as Array< + [string, (data: unknown) => Promise] + >; + for (const [queueName, handler] of entries) { + const worker = registerWorker(queueName, async (job: unknown) => { + try { + const data = (job as { data: unknown }).data; + await handler(data); + } catch (error) { + throw error instanceof Error + ? error + : new Error(String(error)); + } + }); + if (worker) started++; + } + } catch (err) {} + }); + } + } } // This is your Sentry.init call from `sentry.edge.config.js|ts` diff --git a/apps/web/next.config.ts b/apps/web/next.config.ts index d58dd1924b..390f8f0c29 100644 --- a/apps/web/next.config.ts +++ b/apps/web/next.config.ts @@ -12,6 +12,7 @@ const withMDX = nextMdx({ }); const nextConfig: NextConfig = { + output: "standalone", reactStrictMode: true, serverExternalPackages: ["@sentry/nextjs", "@sentry/node"], turbopack: { diff --git a/apps/web/package.json b/apps/web/package.json index 69685ca6a0..35564cee15 100644 --- a/apps/web/package.json +++ b/apps/web/package.json @@ -4,10 +4,10 @@ "private": true, "scripts": { "dev": "cross-env NODE_OPTIONS=--max_old_space_size=16384 next dev --turbopack", - "build": "cross-env NODE_OPTIONS=--max_old_space_size=16384 prisma migrate deploy && next build", + "build": "cross-env NODE_OPTIONS=--max_old_space_size=16384 prisma migrate deploy && next build && shx cp worker.js .next/standalone/worker.js && shx chmod +x .next/standalone/worker.js", "start": "next start", - "worker": "tsx worker.ts", - "worker:dev": "tsx watch worker.ts", + "start:standalone": "node .next/standalone/server.js", + "worker": "node .next/standalone/worker.js", "lint": "biome check .", "test": "cross-env RUN_AI_TESTS=false vitest", "test-ai": "cross-env RUN_AI_TESTS=true vitest --run", @@ -196,10 +196,10 @@ "serwist": "9.2.1", "tailwindcss": "3.4.17", "tsconfig": "workspace:*", - "tsx": "^4.20.0", "vite-tsconfig-paths": "5.1.4", "vitest": "3.2.4", - "vitest-mock-extended": "3.1.0" + "vitest-mock-extended": "3.1.0", + "shx": "^0.3.4" }, "engines": { "node": ">=22.0.0" diff --git a/apps/web/utils/queue/bullmq-manager.ts b/apps/web/utils/queue/bullmq-manager.ts index f83e368fc5..ebc2a7de4e 100644 --- a/apps/web/utils/queue/bullmq-manager.ts +++ b/apps/web/utils/queue/bullmq-manager.ts @@ -26,9 +26,13 @@ export class BullMQManager implements QueueManager { private readonly connection: ConnectionOptions; constructor() { + if (!env.REDIS_URL) { + throw new Error("REDIS_URL is required for BullMQ"); + } + this.connection = { - host: env.REDIS_URL, - }; + url: env.REDIS_URL, + } as unknown as ConnectionOptions; } async enqueue( diff --git a/apps/web/utils/queue/queue-manager.ts b/apps/web/utils/queue/queue-manager.ts index c7317ec5f7..c105863e0c 100644 --- a/apps/web/utils/queue/queue-manager.ts +++ b/apps/web/utils/queue/queue-manager.ts @@ -25,18 +25,8 @@ export function createQueueManager(): QueueManager { switch (queueSystem) { case "redis": - // Use BullMQ with Redis - if (!env.REDIS_URL && !env.UPSTASH_REDIS_URL) { - throw new Error( - "Missing Redis URL (set REDIS_URL or UPSTASH_REDIS_URL) for QUEUE_SYSTEM=redis", - ); - } return new BullMQManager(); case "upstash": - // Use QStash (HTTP-based, no Redis needed for BullMQ) - if (!env.QSTASH_TOKEN) { - throw new Error("Missing QSTASH_TOKEN for QUEUE_SYSTEM=upstash"); - } return new QStashManager(); default: throw new Error(`Unsupported queue system: ${queueSystem}`); diff --git a/apps/web/worker.ts b/apps/web/worker.ts deleted file mode 100644 index 67621792ce..0000000000 
--- a/apps/web/worker.ts +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env node -/** - * BullMQ Worker Process - * - * This runs as a separate process from the Next.js application - * Start with: npm run worker - */ - -import { createScopedLogger } from "@/utils/logger"; -import { registerWorker, shutdownAllWorkers } from "@/utils/queue/worker"; -import { QUEUE_HANDLERS, type QueueName } from "@/utils/queue/queues"; -import { env } from "@/env"; -import prisma from "@/utils/prisma"; - -const logger = createScopedLogger("worker-process"); - -async function startWorkers() { - if (env.QUEUE_SYSTEM !== "redis") { - logger.info("Worker process not needed - using QStash", { - queueSystem: env.QUEUE_SYSTEM, - }); - process.exit(0); - } - - logger.info("Starting BullMQ worker process", { - nodeEnv: process.env.NODE_ENV, - queueCount: Object.keys(QUEUE_HANDLERS).length, - }); - - // Worker should fail if it can't connect to the database - try { - await prisma.$connect(); - logger.info("Database connected successfully"); - } catch (error) { - logger.error("Failed to connect to database", { error }); - process.exit(1); - } - - // The recommended BullMQ approach is to register a worker for each queue - let successCount = 0; - for (const [queueName, handler] of Object.entries(QUEUE_HANDLERS)) { - const worker = registerWorker(queueName as QueueName, async (job) => { - logger.info("Processing job", { queueName, jobId: job.id }); - - try { - await handler(job.data as never); - - logger.info("Job completed successfully", { queueName, jobId: job.id }); - } catch (error) { - logger.error("Job failed", { - queueName, - jobId: job.id, - error: error instanceof Error ? error.message : String(error), - }); - throw error; // Re-throw to let BullMQ handle retries - } - }); - - if (worker) { - successCount++; - logger.info("Worker registered successfully", { - queueName, - concurrency: worker.opts.concurrency, - }); - } else { - logger.error("Failed to register worker", { queueName }); - } - } - - logger.info("Worker process started", { - totalQueues: Object.keys(QUEUE_HANDLERS).length, - successfulWorkers: successCount, - }); - - process.stdin.resume(); -} - -async function shutdown() { - logger.info("Shutting down worker process..."); - - try { - await shutdownAllWorkers(); - await prisma.$disconnect(); - logger.info("Worker process shut down successfully"); - process.exit(0); - } catch (error) { - logger.error("Error during shutdown", { error }); - process.exit(1); - } -} - -process.on("SIGINT", shutdown); -process.on("SIGTERM", shutdown); -process.on("uncaughtException", (error) => { - logger.error("Uncaught exception", { error: error.message }); - shutdown(); -}); -process.on("unhandledRejection", (reason) => { - logger.error("Unhandled rejection", { reason }); - shutdown(); -}); - -startWorkers().catch((error) => { - logger.error("Failed to start workers", { error }); - process.exit(1); -}); diff --git a/docker-compose.yml b/docker-compose.yml index 1c61901c3b..8a2aca1d7d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -84,7 +84,7 @@ services: REDIS_URL: "redis://redis:6379" QUEUE_SYSTEM: "redis" # Override the default command to run the worker instead of the web server - command: ["pnpm", "--filter", "inbox-zero-ai", "run", "worker"] + command: ["node", "worker.js"] restart: unless-stopped volumes: diff --git a/docker/Dockerfile.prod b/docker/Dockerfile.prod index eeb850afa1..da256450ca 100644 --- a/docker/Dockerfile.prod +++ b/docker/Dockerfile.prod @@ -1,18 +1,13 @@ -FROM node:22-alpine + +FROM 
node:22-alpine AS builder WORKDIR /app -# Install necessary tools RUN apk add --no-cache openssl -# Install the specific pnpm version used by the project RUN npm install -g pnpm@10.15.0 -# Install tsx for running the worker -RUN npm install -g tsx@4.20.0 -# Copy all package manager files first for caching +# Copy lockfiles/workspace manifests for better caching COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc* ./ - -# Create directory structure and copy package.json files COPY apps/web/package.json apps/web/package.json COPY apps/unsubscriber/package.json apps/unsubscriber/package.json COPY packages/loops/package.json packages/loops/package.json @@ -20,33 +15,23 @@ COPY packages/resend/package.json packages/resend/package.json COPY packages/tinybird/package.json packages/tinybird/package.json COPY packages/tinybird-ai-analytics/package.json packages/tinybird-ai-analytics/package.json COPY packages/tsconfig/package.json packages/tsconfig/package.json - -# Copy patches directory so patches can be applied during install COPY patches/ patches/ - -# Copy Prisma schema file needed for postinstall script COPY apps/web/prisma/schema.prisma apps/web/prisma/schema.prisma -# Install ALL dependencies (including dev, no pruning) -# Remove --ignore-scripts to allow patches to be applied -# Use --no-frozen-lockfile to allow lockfile updates for patches +# Install deps RUN pnpm install --no-frozen-lockfile --prefer-offline -# Copy the rest of the application code FIRST + # Copy the full repo COPY . . - -# Run Prisma generate with build-time variables (cached) -RUN pnpm --filter inbox-zero-ai exec -- prisma generate - -# Set NODE_ENV for build and runtime + # Build app (runs Next build and copies worker.js into .next/standalone) ENV NODE_ENV=production - -# Accept build-time arguments for all NEXT_PUBLIC variables -# Users can override any of these during build +# Increase V8 heap for Next build to avoid OOM in builder +ENV NODE_OPTIONS=--max_old_space_size=16384 ARG NEXT_PUBLIC_BASE_URL="http://localhost:3000" +ENV NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL} -# Provide dummy build-time ENV VARS (Still needed for build) +# Provide safe dummy envs so Next build can complete at image build time ENV DATABASE_URL="postgresql://dummy:dummy@dummy:5432/dummy?schema=public" ENV DIRECT_URL="postgresql://dummy:dummy@dummy:5432/dummy?schema=public" ENV AUTH_SECRET="dummy_secret_for_build_only" @@ -65,14 +50,24 @@ ENV QSTASH_TOKEN="dummy_qstash_token_for_build" ENV QSTASH_CURRENT_SIGNING_KEY="dummy_qstash_curr_key_for_build" ENV QSTASH_NEXT_SIGNING_KEY="dummy_qstash_next_key_for_build" -# Set NEXT_PUBLIC env vars from ARGs (users can override these) -ENV NEXT_PUBLIC_BASE_URL=${NEXT_PUBLIC_BASE_URL} +# Use the package script so worker.js is copied into .next/standalone +RUN pnpm --filter inbox-zero-ai exec -- prisma generate \ + && pnpm --filter inbox-zero-ai exec -- next build \ + && pnpm --filter inbox-zero-ai exec -- shx cp worker.js .next/standalone/worker.js \ + && pnpm --filter inbox-zero-ai exec -- shx chmod +x .next/standalone/worker.js \ + && rm -rf apps/web/.next/cache + +FROM node:22-alpine AS runner + +WORKDIR /app -# Ensure prisma generate runs -RUN pnpm --filter inbox-zero-ai exec -- next build + # Copy the standalone server output (contains node_modules subset + server.js) +COPY --from=builder /app/apps/web/.next/standalone ./ + # Static assets used by the server +COPY --from=builder /app/apps/web/.next/static ./apps/web/.next/static +COPY --from=builder /app/apps/web/public ./apps/web/public 
EXPOSE 3000 -# Set the default command to start the production server -# Use the simpler pnpm command, should work now as pnpm & next are installed -CMD pnpm --filter inbox-zero-ai start +# Default command runs the Next.js server from the standalone bundle +CMD ["node", "server.js"] diff --git a/package.json b/package.json index 2c81de6aff..c510fc0c48 100644 --- a/package.json +++ b/package.json @@ -17,6 +17,7 @@ "husky": "9.1.7", "lint-staged": "16.2.5", "prettier": "3.6.2", + "shx": "^0.3.4", "tsconfig-paths": "^4.2.0", "turbo": "2.5.8", "ultracite": "5.3.3" diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 0ec3dfab79..eaafc6cf12 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -32,6 +32,9 @@ importers: prettier: specifier: 3.6.2 version: 3.6.2 + shx: + specifier: ^0.3.4 + version: 0.3.4 tsconfig-paths: specifier: ^4.2.0 version: 4.2.0 @@ -327,6 +330,9 @@ importers: braintrust: specifier: 0.4.6 version: 0.4.6(@aws-sdk/credential-provider-web-identity@3.911.0)(zod@3.25.46) + bullmq: + specifier: ^5.61.0 + version: 5.62.0 capital-case: specifier: 2.0.0 version: 2.0.0 @@ -619,12 +625,18 @@ importers: serwist: specifier: 9.2.1 version: 9.2.1(typescript@5.9.3) + shx: + specifier: ^0.3.4 + version: 0.3.4 tailwindcss: specifier: 3.4.17 version: 3.4.17(ts-node@10.9.2(@types/node@24.9.1)(typescript@5.9.3)) tsconfig: specifier: workspace:* version: link:../../packages/tsconfig + tsx: + specifier: ^4.20.0 + version: 4.20.6 vite-tsconfig-paths: specifier: 5.1.4 version: 5.1.4(typescript@5.9.3)(vite@7.1.11(@types/node@24.9.1)(jiti@2.6.1)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1)) @@ -2841,6 +2853,36 @@ packages: resolution: {integrity: sha512-j/P+yuxXfgxb+mW7OEoRCM3G47zCTDqUPivJo/VzpjbG8I9csTXtOprCf5FfOfHK4whOJny0aHuBEON+kS7CCA==} engines: {node: '>=18'} + '@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3': + resolution: {integrity: sha512-QZHtlVgbAdy2zAqNA9Gu1UpIuI8Xvsd1v8ic6B2pZmeFnFcMWiPLfWXh7TVw4eGEZ/C9TH281KwhVoeQUKbyjw==} + cpu: [arm64] + os: [darwin] + + '@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3': + resolution: {integrity: sha512-mdzd3AVzYKuUmiWOQ8GNhl64/IoFGol569zNRdkLReh6LRLHOXxU4U8eq0JwaD8iFHdVGqSy4IjFL4reoWCDFw==} + cpu: [x64] + os: [darwin] + + '@msgpackr-extract/msgpackr-extract-linux-arm64@3.0.3': + resolution: {integrity: sha512-YxQL+ax0XqBJDZiKimS2XQaf+2wDGVa1enVRGzEvLLVFeqa5kx2bWbtcSXgsxjQB7nRqqIGFIcLteF/sHeVtQg==} + cpu: [arm64] + os: [linux] + + '@msgpackr-extract/msgpackr-extract-linux-arm@3.0.3': + resolution: {integrity: sha512-fg0uy/dG/nZEXfYilKoRe7yALaNmHoYeIoJuJ7KJ+YyU2bvY8vPv27f7UKhGRpY6euFYqEVhxCFZgAUNQBM3nw==} + cpu: [arm] + os: [linux] + + '@msgpackr-extract/msgpackr-extract-linux-x64@3.0.3': + resolution: {integrity: sha512-cvwNfbP07pKUfq1uH+S6KJ7dT9K8WOE4ZiAcsrSes+UY55E/0jLYc+vq+DO7jlmqRb5zAggExKm0H7O/CBaesg==} + cpu: [x64] + os: [linux] + + '@msgpackr-extract/msgpackr-extract-win32-x64@3.0.3': + resolution: {integrity: sha512-x0fWaQtYp4E6sktbsdAqnehxDgEc/VwM7uLsRCYWaiGu0ykYdZPiS8zCWdnjHwyiumousxfBm4SO31eXqwEZhQ==} + cpu: [x64] + os: [win32] + '@mux/mux-data-google-ima@0.2.8': resolution: {integrity: sha512-0ZEkHdcZ6bS8QtcjFcoJeZxJTpX7qRIledf4q1trMWPznugvtajCjCM2kieK/pzkZj1JM6liDRFs1PJSfVUs2A==} @@ -6166,6 +6208,9 @@ packages: buffer@6.0.3: resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + bullmq@5.62.0: + resolution: {integrity: sha512-Q+UwvZs53FeYeJgkGuhtnUBh+rgvi4kvoLiCLBcc36ukB1UvE3/Lw5jx7rDCEgTMWSSyUClpnGpP+B8lGE64GQ==} + bytes@3.1.2: resolution: {integrity: 
sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} engines: {node: '>= 0.8'} @@ -6593,6 +6638,10 @@ packages: crisp-sdk-web@1.0.26: resolution: {integrity: sha512-MBK/+tCNJmt/l9ZYHb0cUUaaF02rCKeqLw1cruY/HU48tPOOnhUM0JdvuPaeq1d5VB2DCILY4rGYE4NeJbXvzg==} + cron-parser@4.9.0: + resolution: {integrity: sha512-p0SaNjrHOnQeR8/VnfGbmg9te2kfyYSQ7Sc/j/6DtPL3JQvKxmjO9TSjNFpujqV3vEYYBvNNvXSxzyksBWAx1Q==} + engines: {node: '>=12.0.0'} + cross-env@10.1.0: resolution: {integrity: sha512-GsYosgnACZTADcmEyJctkJIoqAhHjttw7RsFrVoJNXbsWWqaq6Ym+7kZjq6mS45O0jij6vtiReppKQEtqWy6Dw==} engines: {node: '>=20'} @@ -8035,6 +8084,10 @@ packages: resolution: {integrity: sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==} engines: {node: '>=12'} + interpret@1.4.0: + resolution: {integrity: sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==} + engines: {node: '>= 0.10'} + ioredis@5.8.2: resolution: {integrity: sha512-C6uC+kleiIMmjViJINWk80sOQw5lEzse1ZmvD+S/s8p8CWapftSaC+kocGTx6xrbrJ4WmYQGC08ffHLr6ToR6Q==} engines: {node: '>=12.22.0'} @@ -8633,6 +8686,10 @@ packages: peerDependencies: react: ^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0 + luxon@3.7.2: + resolution: {integrity: sha512-vtEhXh/gNjI9Yg1u4jX/0YVPMvxzHuGgCm6tC5kZyb08yjGWGnqAjGJvcXbqQR2P3MyMEFnRbpcdFS6PBcLqew==} + engines: {node: '>=12'} + lz-string@1.5.0: resolution: {integrity: sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==} hasBin: true @@ -9070,6 +9127,13 @@ packages: ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + msgpackr-extract@3.0.3: + resolution: {integrity: sha512-P0efT1C9jIdVRefqjzOQ9Xml57zpOXnIuS+csaB4MdZbTdmGDLo8XhzBG1N7aO11gKDDkJvBLULeFTo46wwreA==} + hasBin: true + + msgpackr@1.11.5: + resolution: {integrity: sha512-UjkUHN0yqp9RWKy0Lplhh+wlpdt9oQBYgULZOiFhV3VclSF1JnSQWZ5r9gORQlNYaUKQoR8itv7g7z1xDDuACA==} + mustache@4.2.0: resolution: {integrity: sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==} hasBin: true @@ -9198,6 +9262,9 @@ packages: resolution: {integrity: sha512-E2wEyrgX/CqvicaQYU3Ze1PFGjc4QYPGsjUrlYkqAE0WjHEZwgOsGMPMzkMse4LjJbDmaEuDX3CM036j5K2DSQ==} engines: {node: '>=10'} + node-abort-controller@3.1.1: + resolution: {integrity: sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==} + node-domexception@1.0.0: resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} engines: {node: '>=10.5.0'} @@ -9220,6 +9287,10 @@ packages: resolution: {integrity: sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==} engines: {node: '>= 6.13.0'} + node-gyp-build-optional-packages@5.2.2: + resolution: {integrity: sha512-s+w+rBWnpTMwSFbaE0UXsRlg7hU4FjekKU4eyAih5T8nJuNZT1nNsskXpxmeqSK9UzkBl6UgRlnKc8hz8IEqOw==} + hasBin: true + node-html-parser@6.1.13: resolution: {integrity: sha512-qIsTMOY4C/dAa5Q5vsobRpOOvPfC4pB61UVW2uSwZNUp0QU/jCekTal1vMmbO0DgdHeLUJpv/ARmDqErVxA3Sg==} @@ -10281,6 +10352,10 @@ packages: react: ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 react-dom: ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + rechoir@0.6.2: + resolution: {integrity: sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==} + engines: {node: '>= 0.10'} + recma-build-jsx@1.0.0: resolution: 
{integrity: sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==} @@ -10693,12 +10768,22 @@ packages: resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} engines: {node: '>=8'} + shelljs@0.8.5: + resolution: {integrity: sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==} + engines: {node: '>=4'} + hasBin: true + shiki@3.13.0: resolution: {integrity: sha512-aZW4l8Og16CokuCLf8CF8kq+KK2yOygapU5m3+hoGw0Mdosc6fPitjM+ujYarppj5ZIKGyPDPP1vqmQhr+5/0g==} shimmer@1.2.1: resolution: {integrity: sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==} + shx@0.3.4: + resolution: {integrity: sha512-N6A9MLVqjxZYcVn8hLmtneQWIJtp8IKzMP4eMnx+nqkvXoqinUPCbUFLp2UcWTEIUONhlk0ewxr/jaVGlc+J+g==} + engines: {node: '>=6'} + hasBin: true + side-channel-list@1.0.0: resolution: {integrity: sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==} engines: {node: '>= 0.4'} @@ -14639,6 +14724,24 @@ snapshots: transitivePeerDependencies: - supports-color + '@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3': + optional: true + + '@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3': + optional: true + + '@msgpackr-extract/msgpackr-extract-linux-arm64@3.0.3': + optional: true + + '@msgpackr-extract/msgpackr-extract-linux-arm@3.0.3': + optional: true + + '@msgpackr-extract/msgpackr-extract-linux-x64@3.0.3': + optional: true + + '@msgpackr-extract/msgpackr-extract-win32-x64@3.0.3': + optional: true + '@mux/mux-data-google-ima@0.2.8': dependencies: mux-embed: 5.9.0 @@ -18098,7 +18201,7 @@ snapshots: sirv: 3.0.2 tinyglobby: 0.2.14 tinyrainbow: 2.0.0 - vitest: 3.2.4(@types/debug@4.1.12)(@types/node@24.9.1)(@vitest/ui@3.2.4)(jiti@2.6.1)(jsdom@26.1.0)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1) + vitest: 3.2.4(@types/debug@4.1.12)(@types/node@24.9.1)(@vitest/ui@3.2.4)(jiti@2.6.1)(jsdom@27.0.1(postcss@8.5.6))(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1) '@vitest/utils@3.2.4': dependencies: @@ -18701,6 +18804,18 @@ snapshots: base64-js: 1.5.1 ieee754: 1.2.1 + bullmq@5.62.0: + dependencies: + cron-parser: 4.9.0 + ioredis: 5.8.2 + msgpackr: 1.11.5 + node-abort-controller: 3.1.1 + semver: 7.7.3 + tslib: 2.8.1 + uuid: 11.1.0 + transitivePeerDependencies: + - supports-color + bytes@3.1.2: {} cac@6.7.14: {} @@ -19134,6 +19249,10 @@ snapshots: crisp-sdk-web@1.0.26: {} + cron-parser@4.9.0: + dependencies: + luxon: 3.7.2 + cross-env@10.1.0: dependencies: '@epic-web/invariant': 1.0.0 @@ -20924,6 +21043,8 @@ snapshots: internmap@2.0.3: {} + interpret@1.4.0: {} + ioredis@5.8.2: dependencies: '@ioredis/commands': 1.4.0 @@ -21508,6 +21629,8 @@ snapshots: dependencies: react: 19.1.1 + luxon@3.7.2: {} + lz-string@1.5.0: {} magic-string@0.30.18: @@ -22231,6 +22354,22 @@ snapshots: ms@2.1.3: {} + msgpackr-extract@3.0.3: + dependencies: + node-gyp-build-optional-packages: 5.2.2 + optionalDependencies: + '@msgpackr-extract/msgpackr-extract-darwin-arm64': 3.0.3 + '@msgpackr-extract/msgpackr-extract-darwin-x64': 3.0.3 + '@msgpackr-extract/msgpackr-extract-linux-arm': 3.0.3 + '@msgpackr-extract/msgpackr-extract-linux-arm64': 3.0.3 + '@msgpackr-extract/msgpackr-extract-linux-x64': 3.0.3 + '@msgpackr-extract/msgpackr-extract-win32-x64': 3.0.3 + optional: true + + msgpackr@1.11.5: + optionalDependencies: + msgpackr-extract: 3.0.3 + mustache@4.2.0: {} mute-stream@0.0.8: {} @@ -22355,6 +22494,8 @@ snapshots: dependencies: semver: 
7.7.3 + node-abort-controller@3.1.1: {} + node-domexception@1.0.0: {} node-fetch@2.7.0(encoding@0.1.13): @@ -22371,6 +22512,11 @@ snapshots: node-forge@1.3.1: {} + node-gyp-build-optional-packages@5.2.2: + dependencies: + detect-libc: 2.0.4 + optional: true + node-html-parser@6.1.13: dependencies: css-select: 5.2.2 @@ -23572,6 +23718,10 @@ snapshots: tiny-invariant: 1.3.3 victory-vendor: 36.9.2 + rechoir@0.6.2: + dependencies: + resolve: 1.22.11 + recma-build-jsx@1.0.0: dependencies: '@types/estree': 1.0.8 @@ -24281,6 +24431,12 @@ snapshots: shebang-regex@3.0.0: {} + shelljs@0.8.5: + dependencies: + glob: 7.2.3 + interpret: 1.4.0 + rechoir: 0.6.2 + shiki@3.13.0: dependencies: '@shikijs/core': 3.13.0 @@ -24294,6 +24450,11 @@ snapshots: shimmer@1.2.1: {} + shx@0.3.4: + dependencies: + minimist: 1.2.8 + shelljs: 0.8.5 + side-channel-list@1.0.0: dependencies: es-errors: 1.3.0 From e485bd2c35b1d2576fb1cfc6dbc523fa3365fce9 Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Wed, 5 Nov 2025 00:39:26 -0300 Subject: [PATCH 06/17] Fix package versions. Remove shx. Update worker command --- apps/web/env.ts | 1 + apps/web/instrumentation.ts | 75 +++++++++++++------------- apps/web/next.config.ts | 2 +- apps/web/package.json | 17 +++--- apps/web/worker.js | 104 ++++++++++++++++++++++++++++++++++++ biome.json | 1 + docker-compose.yml | 7 +-- docker/Dockerfile.prod | 9 ++-- 8 files changed, 163 insertions(+), 53 deletions(-) create mode 100644 apps/web/worker.js diff --git a/apps/web/env.ts b/apps/web/env.ts index 8765f2d91a..caf6ebb963 100644 --- a/apps/web/env.ts +++ b/apps/web/env.ts @@ -62,6 +62,7 @@ export const env = createEnv({ UPSTASH_REDIS_TOKEN: z.string().optional(), REDIS_URL: z.string().optional(), QUEUE_SYSTEM: z.enum(["redis", "upstash"]).default("upstash"), + ENABLE_WORKER_QUEUES: z.boolean().default(false), QSTASH_TOKEN: z.string().optional(), GOOGLE_PUBSUB_TOPIC_NAME: z.string().min(1), diff --git a/apps/web/instrumentation.ts b/apps/web/instrumentation.ts index 9cd0024819..27778a1f9b 100644 --- a/apps/web/instrumentation.ts +++ b/apps/web/instrumentation.ts @@ -7,6 +7,41 @@ declare global { var __inboxZeroWorkersStarted: boolean | undefined; } +export function startBullMQWorkers() { + // Avoid duplicate starts during hot reloads + if (!globalThis.__inboxZeroWorkersStarted) { + globalThis.__inboxZeroWorkersStarted = true; + + // Defer heavy imports until after env is available + import("@/env").then(async ({ env }) => { + if (env.QUEUE_SYSTEM !== "redis") return; + + try { + const [{ registerWorker }, { QUEUE_HANDLERS }] = await Promise.all([ + import("@/utils/queue/worker"), + import("@/utils/queue/queues"), + ]); + + const entries = Object.entries(QUEUE_HANDLERS) as Array< + [string, (data: unknown) => Promise] + >; + for (const [queueName, handler] of entries) { + registerWorker(queueName, async (job: unknown) => { + try { + const data = (job as { data: unknown }).data; + await handler(data); + } catch (error) { + throw error instanceof Error + ? 
error + : new Error(String(error)); + } + }); + } + } catch (err) {} + }); + } +} + export function register() { if (process.env.NEXT_RUNTIME === "nodejs") { // this is your Sentry.init call from `sentry.server.config.js|ts` @@ -20,42 +55,10 @@ export function register() { // spotlight: process.env.NODE_ENV === 'development', }); - // Start BullMQ workers inside the Next.js server process in dev mode - if (process.env.NODE_ENV === "development") { - // Avoid duplicate starts during hot reloads - if (!globalThis.__inboxZeroWorkersStarted) { - globalThis.__inboxZeroWorkersStarted = true; - - // Defer heavy imports until after env is available - import("@/env").then(async ({ env }) => { - if (env.QUEUE_SYSTEM !== "redis") return; - - try { - const [{ registerWorker }, { QUEUE_HANDLERS }] = await Promise.all([ - import("@/utils/queue/worker"), - import("@/utils/queue/queues"), - ]); - - let started = 0; - const entries = Object.entries(QUEUE_HANDLERS) as Array< - [string, (data: unknown) => Promise] - >; - for (const [queueName, handler] of entries) { - const worker = registerWorker(queueName, async (job: unknown) => { - try { - const data = (job as { data: unknown }).data; - await handler(data); - } catch (error) { - throw error instanceof Error - ? error - : new Error(String(error)); - } - }); - if (worker) started++; - } - } catch (err) {} - }); - } + // Start BullMQ workers inside the Next.js server process when enabled + // Can be enabled via ENABLE_WORKER_QUEUES=true or automatically in development mode + if (process.env.NODE_ENV === "development" && process.env.ENABLE_WORKER_QUEUES === "true") { + startBullMQWorkers(); } } diff --git a/apps/web/next.config.ts b/apps/web/next.config.ts index 390f8f0c29..63d1a89ff6 100644 --- a/apps/web/next.config.ts +++ b/apps/web/next.config.ts @@ -12,7 +12,7 @@ const withMDX = nextMdx({ }); const nextConfig: NextConfig = { - output: "standalone", + ...(process.env.DOCKER_BUILD === "true" && { output: "standalone" }), reactStrictMode: true, serverExternalPackages: ["@sentry/nextjs", "@sentry/node"], turbopack: { diff --git a/apps/web/package.json b/apps/web/package.json index 35564cee15..fba32623b8 100644 --- a/apps/web/package.json +++ b/apps/web/package.json @@ -4,7 +4,7 @@ "private": true, "scripts": { "dev": "cross-env NODE_OPTIONS=--max_old_space_size=16384 next dev --turbopack", - "build": "cross-env NODE_OPTIONS=--max_old_space_size=16384 prisma migrate deploy && next build && shx cp worker.js .next/standalone/worker.js && shx chmod +x .next/standalone/worker.js", + "build": "cross-env NODE_OPTIONS=--max_old_space_size=16384 prisma migrate deploy && next build", "start": "next start", "start:standalone": "node .next/standalone/server.js", "worker": "node .next/standalone/worker.js", @@ -29,7 +29,7 @@ "@date-fns/tz": "1.4.1", "@dub/analytics": "0.0.32", "@formkit/auto-animate": "0.9.0", - "@googleapis/calendar": "^12.0.0", + "@googleapis/calendar": "12.0.0", "@googleapis/gmail": "15.0.0", "@googleapis/people": "6.0.0", "gaxios": "7.1.2", @@ -43,7 +43,7 @@ "@mdx-js/loader": "3.1.1", "@mdx-js/react": "3.1.1", "@microsoft/microsoft-graph-client": "3.0.7", - "@modelcontextprotocol/sdk": "^1.20.1", + "@modelcontextprotocol/sdk": "1.20.1", "@mux/mux-player-react": "3.6.1", "@next/mdx": "15.5.6", "@next/third-parties": "15.5.6", @@ -95,7 +95,7 @@ "ai": "5.0.76", "better-auth": "1.3.28", "braintrust": "0.4.6", - "bullmq": "^5.61.0", + "bullmq": "5.61.0", "capital-case": "2.0.0", "cheerio": "1.0.0", "class-variance-authority": "0.7.1", @@ -103,10 +103,10 
@@ "cmdk": "1.1.1", "crisp-sdk-web": "1.0.26", "date-fns": "4.1.0", - "diff": "^8.0.0", + "diff": "8.0.0", "dompurify": "3.3.0", "dub": "0.67.0", - "easymde": "^2.20.0", + "easymde": "2.20.0", "email-reply-parser": "1.9.4", "embla-carousel-react": "8.6.0", "encoding": "0.1.13", @@ -171,7 +171,7 @@ }, "devDependencies": { "@headlessui/tailwindcss": "0.2.2", - "@microsoft/microsoft-graph-types": "^2.43.1", + "@microsoft/microsoft-graph-types": "2.43.1", "@testing-library/react": "16.3.0", "@types/diff": "8.0.0", "@types/email-reply-parser": "1.4.2", @@ -198,8 +198,7 @@ "tsconfig": "workspace:*", "vite-tsconfig-paths": "5.1.4", "vitest": "3.2.4", - "vitest-mock-extended": "3.1.0", - "shx": "^0.3.4" + "vitest-mock-extended": "3.1.0" }, "engines": { "node": ">=22.0.0" diff --git a/apps/web/worker.js b/apps/web/worker.js new file mode 100644 index 0000000000..8fe00dd55d --- /dev/null +++ b/apps/web/worker.js @@ -0,0 +1,104 @@ +/** + * DISCLAIMER: This is a precompiled file that gets copied to the standalone build. + * This file is used to start BullMQ workers in production/standalone mode. + * It loads environment variables from .env and calls startBullMQWorkers() from instrumentation.js + */ + +const path = require('path') +const fs = require('fs') + +// Set up environment similar to server.js +process.env.NODE_ENV = 'production' +process.chdir(__dirname) + +// Load environment variables from .env file (same as Next.js server.js does) +// Try to load .env file from the same directory +const envPath = path.join(__dirname, '.env') +if (fs.existsSync(envPath)) { + // Use dotenv to load the .env file + try { + // Try to use @next/env which Next.js uses + const { loadEnvConfig } = require('next/dist/server/config-utils') + loadEnvConfig(__dirname) + } catch (err) { + // Fallback to dotenv if @next/env is not available + try { + require('dotenv').config({ path: envPath }) + } catch (dotenvErr) { + console.warn('Could not load .env file:', dotenvErr.message) + } + } +} + +// In Docker standalone build, worker.js is at /app/worker.js +// and instrumentation.js is at /app/apps/web/.next/server/instrumentation.js +const instrumentationPath = path.join(__dirname, 'apps/web/.next/server/instrumentation.js') + +if (!fs.existsSync(instrumentationPath)) { + console.error('Could not find instrumentation.js at:', instrumentationPath) + console.error('Current __dirname:', __dirname) + process.exit(1) +} + +// Start workers and keep process alive +async function startWorkers() { + try { + // Try ES module import first (Next.js compiles TS to ESM) + const instrumentation = await import(instrumentationPath) + + if (instrumentation.startBullMQWorkers) { + instrumentation.startBullMQWorkers() + console.log('BullMQ workers started successfully') + } else { + throw new Error('startBullMQWorkers not found in instrumentation module') + } + } catch (importErr) { + // Fallback: try CommonJS require + try { + // Clear require cache if needed + delete require.cache[require.resolve(instrumentationPath)] + const instrumentation = require(instrumentationPath) + + if (instrumentation.startBullMQWorkers) { + instrumentation.startBullMQWorkers() + console.log('BullMQ workers started successfully') + } else { + throw new Error('startBullMQWorkers not found in instrumentation module') + } + } catch (requireErr) { + console.error('Failed to load instrumentation module') + console.error('Import error:', importErr.message) + console.error('Require error:', requireErr.message) + console.error('Attempted path:', instrumentationPath) + 
process.exit(1) + } + } +} + +startWorkers().catch((err) => { + console.error('Failed to start workers:', err) + process.exit(1) +}) + +// Handle graceful shutdown +process.on('SIGTERM', () => { + console.log('Received SIGTERM, shutting down gracefully...') + process.exit(0) +}) + +process.on('SIGINT', () => { + console.log('Received SIGINT, shutting down gracefully...') + process.exit(0) +}) + +// Keep the process running - don't exit on errors +process.on('uncaughtException', (err) => { + console.error('Uncaught exception:', err) + // Log but don't exit to keep workers running +}) + +process.on('unhandledRejection', (reason, promise) => { + console.error('Unhandled rejection at:', promise, 'reason:', reason) + // Log but don't exit to keep workers running +}) + diff --git a/biome.json b/biome.json index 72509e274f..4eb372cf90 100644 --- a/biome.json +++ b/biome.json @@ -104,6 +104,7 @@ "!.turbo", "!.next", "!sw.js", + "!worker.js", "!.vscode" ] }, diff --git a/docker-compose.yml b/docker-compose.yml index 8a2aca1d7d..5721a3b4aa 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -59,9 +59,9 @@ services: environment: DATABASE_URL: "postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-password}@db:5432/${POSTGRES_DB:-inboxzero}?schema=public" DIRECT_URL: "postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-password}@db:5432/${POSTGRES_DB:-inboxzero}?schema=public" + QUEUE_SYSTEM: "redis" + REDIS_URL: "redis://redis:6379" UPSTASH_REDIS_URL: "http://serverless-redis-http:80" - UPSTASH_REDIS_TOKEN: "${UPSTASH_REDIS_TOKEN}" - worker: image: ghcr.io/elie222/inbox-zero:latest pull_policy: if_not_present @@ -81,8 +81,9 @@ services: environment: DATABASE_URL: "postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-password}@db:5432/${POSTGRES_DB:-inboxzero}?schema=public" DIRECT_URL: "postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-password}@db:5432/${POSTGRES_DB:-inboxzero}?schema=public" - REDIS_URL: "redis://redis:6379" QUEUE_SYSTEM: "redis" + REDIS_URL: "redis://redis:6379" + UPSTASH_REDIS_URL: "http://serverless-redis-http:80" # Override the default command to run the worker instead of the web server command: ["node", "worker.js"] restart: unless-stopped diff --git a/docker/Dockerfile.prod b/docker/Dockerfile.prod index da256450ca..2e7e3939ae 100644 --- a/docker/Dockerfile.prod +++ b/docker/Dockerfile.prod @@ -49,12 +49,13 @@ ENV REDIS_URL="redis://dummy:dummy@dummy:6379" ENV QSTASH_TOKEN="dummy_qstash_token_for_build" ENV QSTASH_CURRENT_SIGNING_KEY="dummy_qstash_curr_key_for_build" ENV QSTASH_NEXT_SIGNING_KEY="dummy_qstash_next_key_for_build" +ENV DOCKER_BUILD="true" -# Use the package script so worker.js is copied into .next/standalone +# Use the package script so worker.js is copied into .next/standalone at root (same level as server.js) RUN pnpm --filter inbox-zero-ai exec -- prisma generate \ && pnpm --filter inbox-zero-ai exec -- next build \ - && pnpm --filter inbox-zero-ai exec -- shx cp worker.js .next/standalone/worker.js \ - && pnpm --filter inbox-zero-ai exec -- shx chmod +x .next/standalone/worker.js \ + && cp apps/web/worker.js apps/web/.next/standalone/worker.js \ + && chmod +x apps/web/.next/standalone/worker.js \ && rm -rf apps/web/.next/cache FROM node:22-alpine AS runner @@ -70,4 +71,4 @@ COPY --from=builder /app/apps/web/public ./apps/web/public EXPOSE 3000 # Default command runs the Next.js server from the standalone bundle -CMD ["node", "server.js"] +CMD ["node", "apps/web/server.js"] From 
452fef5954684d1eebfc852fba6aad23c413c0e8 Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Wed, 5 Nov 2025 00:59:01 -0300 Subject: [PATCH 07/17] PR feedback. fix biome checks --- apps/web/instrumentation.ts | 67 ++++++++----- apps/web/utils/queue/bullmq-manager.ts | 58 ++++++++++++ apps/web/utils/queue/qstash-manager.ts | 64 +++++++++++++ apps/web/utils/queue/queue-manager.ts | 9 +- apps/web/utils/queue/queues.ts | 99 ++++++++++++++++++++ apps/web/utils/upstash/categorize-senders.ts | 15 +-- apps/web/worker.js | 3 +- biome.json | 4 +- 8 files changed, 282 insertions(+), 37 deletions(-) diff --git a/apps/web/instrumentation.ts b/apps/web/instrumentation.ts index 27778a1f9b..6d8f9b72d6 100644 --- a/apps/web/instrumentation.ts +++ b/apps/web/instrumentation.ts @@ -13,32 +13,48 @@ export function startBullMQWorkers() { globalThis.__inboxZeroWorkersStarted = true; // Defer heavy imports until after env is available - import("@/env").then(async ({ env }) => { - if (env.QUEUE_SYSTEM !== "redis") return; + import("@/env") + .then(async ({ env }) => { + if (env.QUEUE_SYSTEM !== "redis") return; - try { - const [{ registerWorker }, { QUEUE_HANDLERS }] = await Promise.all([ - import("@/utils/queue/worker"), - import("@/utils/queue/queues"), - ]); + try { + const [{ registerWorker }, { QUEUE_HANDLERS }] = await Promise.all([ + import("@/utils/queue/worker"), + import("@/utils/queue/queues"), + ]); - const entries = Object.entries(QUEUE_HANDLERS) as Array< - [string, (data: unknown) => Promise] - >; - for (const [queueName, handler] of entries) { - registerWorker(queueName, async (job: unknown) => { - try { - const data = (job as { data: unknown }).data; - await handler(data); - } catch (error) { - throw error instanceof Error - ? error - : new Error(String(error)); - } - }); + const entries = Object.entries(QUEUE_HANDLERS) as Array< + [string, (data: unknown) => Promise] + >; + for (const [queueName, handler] of entries) { + registerWorker(queueName, async (job: unknown) => { + try { + const data = (job as { data: unknown }).data; + await handler(data); + } catch (error) { + throw error instanceof Error ? error : new Error(String(error)); + } + }); + } + } catch { + // Worker registration errors are logged by the worker module } - } catch (err) {} - }); + }) + .catch((error) => { + // biome-ignore lint/suspicious/noConsole: Required during startup before logger is available + console.error( + "[instrumentation] Failed to load environment variables during worker initialization", + { + error: error instanceof Error ? error.message : String(error), + stack: error instanceof Error ? error.stack : undefined, + }, + ); + // biome-ignore lint/suspicious/noConsole: Required during startup before logger is available + console.error( + "[instrumentation] This usually indicates invalid or missing environment variables. 
Exiting process.", + ); + process.exit(1); + }); } } @@ -57,7 +73,10 @@ export function register() { // Start BullMQ workers inside the Next.js server process when enabled // Can be enabled via ENABLE_WORKER_QUEUES=true or automatically in development mode - if (process.env.NODE_ENV === "development" && process.env.ENABLE_WORKER_QUEUES === "true") { + if ( + process.env.NODE_ENV === "development" && + process.env.ENABLE_WORKER_QUEUES === "true" + ) { startBullMQWorkers(); } } diff --git a/apps/web/utils/queue/bullmq-manager.ts b/apps/web/utils/queue/bullmq-manager.ts index ebc2a7de4e..26ef20ae7f 100644 --- a/apps/web/utils/queue/bullmq-manager.ts +++ b/apps/web/utils/queue/bullmq-manager.ts @@ -7,6 +7,7 @@ import { } from "bullmq"; import { env } from "@/env"; import { createScopedLogger } from "@/utils/logger"; +import { getAiCleanQueueName } from "./queues"; import type { QueueJobData, EnqueueOptions, @@ -66,6 +67,63 @@ export class BullMQManager implements QueueManager { queueName: string, options: BulkEnqueueOptions, ): Promise[]> { + // For ai-clean queue, use hash-based distribution across multiple queues + // This ensures per-account parallelism limits similar to ai-categorize-senders + if (queueName === "ai-clean") { + // Group jobs by their target queue (based on emailAccountId hash) + const jobsByQueue = new Map(); + for (const job of options.jobs) { + const emailAccountId = (job.data as { emailAccountId?: string }) + .emailAccountId; + if (!emailAccountId) { + logger.warn( + "Job missing emailAccountId, skipping per-account queue grouping", + { + queueName, + }, + ); + continue; + } + const targetQueueName = getAiCleanQueueName({ emailAccountId }); + if (!jobsByQueue.has(targetQueueName)) { + jobsByQueue.set(targetQueueName, []); + } + jobsByQueue.get(targetQueueName)!.push(job); + } + + // Enqueue jobs to their respective queues + const allJobs: Job[] = []; + for (const [targetQueueName, queueJobs] of jobsByQueue) { + const queue = this.getOrCreateQueue(targetQueueName); + + const jobs = queueJobs.map((jobData) => ({ + name: jobData.name ?? targetQueueName, + data: jobData.data, + opts: { + delay: options.delay, + attempts: options.attempts ?? DEFAULT_ATTEMPTS, + priority: options.priority, + removeOnComplete: options.removeOnComplete ?? 10, + removeOnFail: options.removeOnFail ?? 5, + jobId: jobData.opts?.jobId, + ...jobData.opts, + }, + })); + + const addedJobs = await queue.addBulk(jobs); + allJobs.push(...(addedJobs as Job[])); + } + + logger.info("Bulk jobs enqueued with BullMQ (distributed)", { + queueName, + jobCount: allJobs.length, + queuesUsed: Array.from(jobsByQueue.keys()), + }); + + return allJobs; + } + + // For other queues, use the original single-queue approach const queue = this.getOrCreateQueue(queueName); const jobs = options.jobs.map((jobData) => ({ diff --git a/apps/web/utils/queue/qstash-manager.ts b/apps/web/utils/queue/qstash-manager.ts index 01bc53ca10..2009fbc233 100644 --- a/apps/web/utils/queue/qstash-manager.ts +++ b/apps/web/utils/queue/qstash-manager.ts @@ -54,6 +54,70 @@ export class QStashManager implements QueueManager { ): Promise { const url = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}/api/queue/${queueName}`; + // For ai-clean queue, use per-account queues to maintain parallelism limits per account + // This ensures each account has its own queue with parallelism=3, preventing one account + // from flooding the global queue and maintaining the per-user parallelism safeguard. 
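+    // Example (with a made-up account id): every job whose data carries
+    // emailAccountId "acct_123" is grouped under the per-account QStash queue
+    // "ai-clean-acct_123", which is upserted with parallelism = DEFAULT_PARALLELISM (3)
+    // before its jobs are sent via queue.enqueueJSON.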
+ if (queueName === "ai-clean") { + // Group jobs by emailAccountId (all jobs should have emailAccountId in their data) + const jobsByAccount = new Map(); + for (const job of options.jobs) { + const emailAccountId = (job.data as { emailAccountId?: string }) + .emailAccountId; + if (!emailAccountId) { + logger.warn( + "Job missing emailAccountId, skipping per-account queue grouping", + { + queueName, + }, + ); + continue; + } + const accountQueueName = `${queueName}-${emailAccountId}`; + if (!jobsByAccount.has(accountQueueName)) { + jobsByAccount.set(accountQueueName, []); + } + jobsByAccount.get(accountQueueName)!.push(job); + } + + // Use publishToQstashQueue for each account's queue with parallelism=3 + // First, ensure each account's queue exists with the correct parallelism + const client = getQstashClient(); + const results: string[] = []; + for (const [accountQueueName, accountJobs] of jobsByAccount) { + // Create/update the queue with parallelism=3 for this account + const queue = client.queue({ queueName: accountQueueName }); + await queue.upsert({ parallelism: DEFAULT_PARALLELISM }); + + // Enqueue all jobs for this account + const accountResults = await Promise.all( + accountJobs.map(async (job) => { + if (options.delay) { + // For delayed jobs, use publishJSON with notBefore + const notBefore = Math.ceil((Date.now() + options.delay) / 1000); + const response = await queue.enqueueJSON({ + url, + body: job.data, + notBefore, + deduplicationId: job.opts?.jobId, + }); + return response?.messageId || "unknown"; + } else { + // For immediate jobs, use enqueueJSON + const response = await queue.enqueueJSON({ + url, + body: job.data, + deduplicationId: job.opts?.jobId, + }); + return response?.messageId || "unknown"; + } + }), + ); + results.push(...accountResults); + } + return results; + } + + // For other queues, use the original batchJSON approach const items = options.jobs.map((job) => { const item: { url: string; diff --git a/apps/web/utils/queue/queue-manager.ts b/apps/web/utils/queue/queue-manager.ts index c105863e0c..33478a62d8 100644 --- a/apps/web/utils/queue/queue-manager.ts +++ b/apps/web/utils/queue/queue-manager.ts @@ -14,6 +14,7 @@ import type { EnqueueOptions, BulkEnqueueOptions, QueueManager, + QueueSystemInfo, } from "./types"; const logger = createScopedLogger("queue"); @@ -110,11 +111,13 @@ export async function closeQueueManager(): Promise { } } -export function getQueueSystemInfo() { +export function getQueueSystemInfo(): QueueSystemInfo { + const isRedis = env.QUEUE_SYSTEM === "redis"; return { system: env.QUEUE_SYSTEM, - isRedis: env.QUEUE_SYSTEM === "redis", - isQStash: env.QUEUE_SYSTEM === "upstash", + supportsWorkers: isRedis, + supportsDelayedJobs: true, + supportsBulkOperations: true, }; } diff --git a/apps/web/utils/queue/queues.ts b/apps/web/utils/queue/queues.ts index 578e4d6725..9079816df6 100644 --- a/apps/web/utils/queue/queues.ts +++ b/apps/web/utils/queue/queues.ts @@ -354,6 +354,17 @@ async function sendDigestEmailForAccount(emailAccountId: string) { if (Object.keys(executedRulesByRule).length === 0) { logger.info("No executed rules found, skipping digest email"); + // Reset digests back to PENDING so they can be picked up again in future runs + await prisma.digest.updateMany({ + where: { + id: { + in: processedDigestIds, + }, + }, + data: { + status: DigestStatus.PENDING, + }, + }); return { success: true, message: "No executed rules found, skipping digest email", @@ -467,6 +478,10 @@ async function handleCleanGmailJob(data: CleanGmailJobData) { 
export const AI_CATEGORIZE_SENDERS_QUEUE_COUNT = 7; const AI_CATEGORIZE_SENDERS_PREFIX = "ai-categorize-senders"; +// Configuration for distributed AI clean queues +export const AI_CLEAN_QUEUE_COUNT = 7; +const AI_CLEAN_PREFIX = "ai-clean"; + // Helper to get the queue index from an AI categorize senders queue name export function getAiCategorizeSendersQueueIndex( queueName: string, @@ -476,6 +491,44 @@ export function getAiCategorizeSendersQueueIndex( return Number.isNaN(index) ? null : index; } +// Shared hashing function for queue distribution +// Uses character code sum to consistently hash emailAccountId to a queue index +// This ensures the same emailAccountId always maps to the same queue index +export function getQueueIndexFromEmailAccountId( + emailAccountId: string, + queueCount: number, +): number { + const characterCodeSum = emailAccountId + .split("") + .reduce((total, character) => total + character.charCodeAt(0), 0); + + return characterCodeSum % queueCount; +} + +// Helper to get the queue name for ai-clean jobs +// For BullMQ: Uses hash-based distribution across fixed queues (ai-clean-0 through ai-clean-6) +// For QStash: This function is not used; QStashManager.bulkEnqueue creates per-account queues (ai-clean-{emailAccountId}) +export function getAiCleanQueueName({ + emailAccountId, +}: { + emailAccountId: string; +}): string { + // Only used for BullMQ (Redis) - hash-based distribution + const targetQueueIndex = getQueueIndexFromEmailAccountId( + emailAccountId, + AI_CLEAN_QUEUE_COUNT, + ); + + return `${AI_CLEAN_PREFIX}-${targetQueueIndex}`; +} + +// Helper to get the queue index from an AI clean queue name +export function getAiCleanQueueIndex(queueName: string): number | null { + if (!queueName.startsWith(`${AI_CLEAN_PREFIX}-`)) return null; + const index = Number.parseInt(queueName.split("-").pop() || "", 10); + return Number.isNaN(index) ? null : index; +} + export const QUEUE_HANDLERS = { "digest-item-summarize": handleDigestJob, "scheduled-actions": handleScheduledActionJob, @@ -491,6 +544,14 @@ export const QUEUE_HANDLERS = { "ai-categorize-senders-4": handleCategorizeSendersJob, "ai-categorize-senders-5": handleCategorizeSendersJob, "ai-categorize-senders-6": handleCategorizeSendersJob, + + "ai-clean-0": handleAiCleanJob, + "ai-clean-1": handleAiCleanJob, + "ai-clean-2": handleAiCleanJob, + "ai-clean-3": handleAiCleanJob, + "ai-clean-4": handleAiCleanJob, + "ai-clean-5": handleAiCleanJob, + "ai-clean-6": handleAiCleanJob, } as const; export type QueueName = keyof typeof QUEUE_HANDLERS; @@ -503,6 +564,26 @@ export function getQueueHandler(queueName: string) { return handleCategorizeSendersJob; } + // Handle ai-clean queues + // For BullMQ: hash-based distribution (ai-clean-0, ai-clean-1, etc.) + // For QStash: per-account queues (ai-clean-{emailAccountId}) + if (queueName.startsWith(`${AI_CLEAN_PREFIX}-`)) { + // For BullMQ: validate queue index (0-6) + if (env.QUEUE_SYSTEM === "redis") { + const queueIndex = getAiCleanQueueIndex(queueName); + if ( + queueIndex !== null && + queueIndex >= 0 && + queueIndex < AI_CLEAN_QUEUE_COUNT + ) { + return handleAiCleanJob; + } + } else { + // For QStash: accept any per-account queue (ai-clean-{emailAccountId}) + return handleAiCleanJob; + } + } + return null; } @@ -520,5 +601,23 @@ export function isValidQueueName(queueName: string): boolean { ); } + // Allow ai-clean queues + // For BullMQ: hash-based distribution (ai-clean-0, ai-clean-1, etc.) 
+ // For QStash: per-account queues (ai-clean-{emailAccountId}) + if (queueName.startsWith(`${AI_CLEAN_PREFIX}-`)) { + if (env.QUEUE_SYSTEM === "redis") { + // For BullMQ: validate queue index (0-6) + const queueIndex = getAiCleanQueueIndex(queueName); + return ( + queueIndex !== null && + queueIndex >= 0 && + queueIndex < AI_CLEAN_QUEUE_COUNT + ); + } else { + // For QStash: accept any per-account queue + return true; + } + } + return false; } diff --git a/apps/web/utils/upstash/categorize-senders.ts b/apps/web/utils/upstash/categorize-senders.ts index ddb391772c..aab368a108 100644 --- a/apps/web/utils/upstash/categorize-senders.ts +++ b/apps/web/utils/upstash/categorize-senders.ts @@ -1,7 +1,10 @@ import chunk from "lodash/chunk"; import { deleteQueue, listQueues } from "@/utils/upstash"; import { enqueueJob } from "@/utils/queue/queue-manager"; -import { AI_CATEGORIZE_SENDERS_QUEUE_COUNT } from "@/utils/queue/queues"; +import { + AI_CATEGORIZE_SENDERS_QUEUE_COUNT, + getQueueIndexFromEmailAccountId, +} from "@/utils/queue/queues"; import { env } from "@/env"; import type { AiCategorizeSenders } from "@/app/api/user/categorize/senders/batch/handle-batch-validation"; import { createScopedLogger } from "@/utils/logger"; @@ -27,12 +30,10 @@ const getCategorizeSendersQueueName = ({ emailAccountId: string; }) => { if (env.QUEUE_SYSTEM === "redis") { - const characterCodeSum = emailAccountId - .split("") - .reduce((total, character) => total + character.charCodeAt(0), 0); - - const targetQueueIndex = - characterCodeSum % AI_CATEGORIZE_SENDERS_QUEUE_COUNT; + const targetQueueIndex = getQueueIndexFromEmailAccountId( + emailAccountId, + AI_CATEGORIZE_SENDERS_QUEUE_COUNT, + ); return `${AI_CATEGORIZE_SENDERS_PREFIX}-${targetQueueIndex}`; } diff --git a/apps/web/worker.js b/apps/web/worker.js index 8fe00dd55d..2a66af65e7 100644 --- a/apps/web/worker.js +++ b/apps/web/worker.js @@ -8,7 +8,8 @@ const path = require('path') const fs = require('fs') // Set up environment similar to server.js -process.env.NODE_ENV = 'production' +// Only set NODE_ENV if not already provided (preserve existing value) +process.env.NODE_ENV = process.env.NODE_ENV || 'production' process.chdir(__dirname) // Load environment variables from .env file (same as Next.js server.js does) diff --git a/biome.json b/biome.json index 4eb372cf90..acd66b3acf 100644 --- a/biome.json +++ b/biome.json @@ -104,8 +104,8 @@ "!.turbo", "!.next", "!sw.js", - "!worker.js", - "!.vscode" + "!.vscode", + "!apps/web/worker.js" ] }, "assist": { From 781a9b3c5cb0713a4911386d95d65ff876634fbd Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Wed, 5 Nov 2025 01:17:14 -0300 Subject: [PATCH 08/17] PR feedback --- .cursor/rules/queues.mdc | 9 ++ apps/web/utils/queue/queue.test.ts | 166 +++++++++++++++-------------- 2 files changed, 94 insertions(+), 81 deletions(-) diff --git a/.cursor/rules/queues.mdc b/.cursor/rules/queues.mdc index 6c72b25abf..8adc47d32f 100644 --- a/.cursor/rules/queues.mdc +++ b/.cursor/rules/queues.mdc @@ -1,3 +1,12 @@ +--- +description: Unified queue system supporting QStash and Redis (BullMQ) with automatic system selection, worker management, and job enqueueing +globs: + - "apps/web/utils/queue/**" + - "apps/web/worker.js" + - "apps/web/instrumentation.ts" + - "docker-compose.yml" +alwaysApply: false +--- # Queue System Unified queue system supporting both QStash and Redis (BullMQ) with automatic system selection based on `QUEUE_SYSTEM` environment variable. 
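The shared hashing helper introduced above is what pins an account to one of the seven fixed `ai-clean-*` (and `ai-categorize-senders-*`) BullMQ queues. Below is a minimal, self-contained sketch of that routing, mirroring `getQueueIndexFromEmailAccountId` and `getAiCleanQueueName` from this patch; the constants follow the diff, and the sample account id is made up for illustration.

```typescript
// Character-code-sum hash: the same emailAccountId always maps to the same queue index.
function getQueueIndexFromEmailAccountId(
  emailAccountId: string,
  queueCount: number,
): number {
  const characterCodeSum = emailAccountId
    .split("")
    .reduce((total, character) => total + character.charCodeAt(0), 0);

  return characterCodeSum % queueCount;
}

const AI_CLEAN_QUEUE_COUNT = 7;
const AI_CLEAN_PREFIX = "ai-clean";

// BullMQ (Redis) path: hash-based distribution across ai-clean-0 .. ai-clean-6.
function getAiCleanQueueName({
  emailAccountId,
}: {
  emailAccountId: string;
}): string {
  const targetQueueIndex = getQueueIndexFromEmailAccountId(
    emailAccountId,
    AI_CLEAN_QUEUE_COUNT,
  );
  return `${AI_CLEAN_PREFIX}-${targetQueueIndex}`;
}

// Made-up account id: "abc" -> 97 + 98 + 99 = 294, and 294 % 7 = 0 -> "ai-clean-0".
console.log(getAiCleanQueueName({ emailAccountId: "abc" }));
```

The same helper replaces the inline hash in `apps/web/utils/upstash/categorize-senders.ts`, keeping both queue families on one distribution scheme; on QStash the manager instead creates per-account queues (`ai-clean-{emailAccountId}`), which is why `isValidQueueName` only range-checks the queue index when `QUEUE_SYSTEM` is `redis`.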
diff --git a/apps/web/utils/queue/queue.test.ts b/apps/web/utils/queue/queue.test.ts index 1c2b17f441..d66deb91fc 100644 --- a/apps/web/utils/queue/queue.test.ts +++ b/apps/web/utils/queue/queue.test.ts @@ -1,5 +1,8 @@ import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"; +// Mock server-only to prevent import errors in tests +vi.mock("server-only", () => ({})); + // Mock BullMQ const mockQueue = { add: vi.fn(), @@ -24,6 +27,31 @@ vi.mock("bullmq", () => ({ QueueEvents: vi.fn().mockImplementation(() => mockQueueEvents), })); +// Mock ioredis to prevent connection parsing errors +// ioredis can be imported as default or named export, so we mock both +const mockRedisInstance = { + connect: vi.fn(), + disconnect: vi.fn(), + on: vi.fn(), + quit: vi.fn(), + get: vi.fn(), + set: vi.fn(), + del: vi.fn(), + expire: vi.fn(), + keys: vi.fn(), + psubscribe: vi.fn(), + punsubscribe: vi.fn(), +}; + +vi.mock("ioredis", () => { + const MockRedis = vi.fn().mockImplementation(() => mockRedisInstance); + MockRedis.prototype = mockRedisInstance; + return { + default: MockRedis, + Redis: MockRedis, + }; +}); + // Mock QStash Client const mockClient = { publishJSON: vi.fn(), @@ -40,16 +68,24 @@ vi.mock("@/utils/upstash", () => ({ publishToQstashQueue: mockPublishToQstashQueue, })); -// Mock environment - default to upstash -vi.mock("@/env", () => ({ +// Helper to create env mock with required encryption vars +const createEnvMock = (overrides: Record = {}) => ({ env: { QUEUE_SYSTEM: "upstash", QSTASH_TOKEN: "test-token", REDIS_URL: "redis://localhost:6379", WEBHOOK_URL: "https://test.com", NEXT_PUBLIC_BASE_URL: "https://test.com", + EMAIL_ENCRYPT_SECRET: "test-encryption-secret-key-for-testing-purposes", + EMAIL_ENCRYPT_SALT: "test-encryption-salt-for-testing", + NODE_ENV: "test", + ...overrides, }, -})); +}); + +// Mock environment - default to upstash +// Include all required env vars to prevent validation/initialization errors +vi.mock("@/env", () => createEnvMock()); describe("Queue System", () => { beforeEach(() => { @@ -69,43 +105,37 @@ describe("Queue System", () => { const info = getQueueSystemInfo(); expect(info.system).toBe("upstash"); - expect(info.isQStash).toBe(true); - expect(info.isRedis).toBe(false); + expect(info.supportsWorkers).toBe(false); + expect(info.supportsDelayedJobs).toBe(true); + expect(info.supportsBulkOperations).toBe(true); }); it("should detect Redis system when configured", async () => { - await vi.doMock("@/env", () => ({ - env: { + await vi.doMock("@/env", () => + createEnvMock({ QUEUE_SYSTEM: "redis", - QSTASH_TOKEN: "test-token", - REDIS_URL: "redis://localhost:6379", - WEBHOOK_URL: "https://test.com", - NEXT_PUBLIC_BASE_URL: "https://test.com", - }, - })); + }), + ); vi.resetModules(); const { getQueueSystemInfo } = await import("./queue-manager"); const info = getQueueSystemInfo(); expect(info.system).toBe("redis"); - expect(info.isRedis).toBe(true); - expect(info.isQStash).toBe(false); + expect(info.supportsWorkers).toBe(true); + expect(info.supportsDelayedJobs).toBe(true); + expect(info.supportsBulkOperations).toBe(true); }); }); describe("Job Enqueueing", () => { it("should enqueue a single job with QStash", async () => { // Ensure we're using QStash environment - await vi.doMock("@/env", () => ({ - env: { + await vi.doMock("@/env", () => + createEnvMock({ QUEUE_SYSTEM: "upstash", - QSTASH_TOKEN: "test-token", - REDIS_URL: "redis://localhost:6379", - WEBHOOK_URL: "https://test.com", - NEXT_PUBLIC_BASE_URL: "https://test.com", - }, - })); + }), + ); 
vi.resetModules(); const { enqueueJob } = await import("./queue-manager"); @@ -128,15 +158,11 @@ describe("Queue System", () => { it("should enqueue a job with options", async () => { // Ensure we're using QStash environment - await vi.doMock("@/env", () => ({ - env: { + await vi.doMock("@/env", () => + createEnvMock({ QUEUE_SYSTEM: "upstash", - QSTASH_TOKEN: "test-token", - REDIS_URL: "redis://localhost:6379", - WEBHOOK_URL: "https://test.com", - NEXT_PUBLIC_BASE_URL: "https://test.com", - }, - })); + }), + ); vi.resetModules(); const { enqueueJob } = await import("./queue-manager"); @@ -160,15 +186,11 @@ describe("Queue System", () => { it("should handle job enqueueing errors", async () => { // Ensure we're using QStash environment - await vi.doMock("@/env", () => ({ - env: { + await vi.doMock("@/env", () => + createEnvMock({ QUEUE_SYSTEM: "upstash", - QSTASH_TOKEN: "test-token", - REDIS_URL: "redis://localhost:6379", - WEBHOOK_URL: "https://test.com", - NEXT_PUBLIC_BASE_URL: "https://test.com", - }, - })); + }), + ); vi.resetModules(); const { enqueueJob } = await import("./queue-manager"); @@ -184,15 +206,11 @@ describe("Queue System", () => { describe("Bulk Job Enqueueing", () => { it("should enqueue multiple jobs", async () => { // Ensure we're using QStash environment - await vi.doMock("@/env", () => ({ - env: { + await vi.doMock("@/env", () => + createEnvMock({ QUEUE_SYSTEM: "upstash", - QSTASH_TOKEN: "test-token", - REDIS_URL: "redis://localhost:6379", - WEBHOOK_URL: "https://test.com", - NEXT_PUBLIC_BASE_URL: "https://test.com", - }, - })); + }), + ); vi.resetModules(); const { bulkEnqueueJobs } = await import("./queue-manager"); @@ -223,15 +241,11 @@ describe("Queue System", () => { it("should handle bulk enqueueing errors", async () => { // Ensure we're using QStash environment - await vi.doMock("@/env", () => ({ - env: { + await vi.doMock("@/env", () => + createEnvMock({ QUEUE_SYSTEM: "upstash", - QSTASH_TOKEN: "test-token", - REDIS_URL: "redis://localhost:6379", - WEBHOOK_URL: "https://test.com", - NEXT_PUBLIC_BASE_URL: "https://test.com", - }, - })); + }), + ); vi.resetModules(); const { bulkEnqueueJobs } = await import("./queue-manager"); @@ -248,15 +262,11 @@ describe("Queue System", () => { describe("Error Handling", () => { it("should handle unsupported queue system", async () => { - await vi.doMock("@/env", () => ({ - env: { + await vi.doMock("@/env", () => + createEnvMock({ QUEUE_SYSTEM: "unsupported" as any, - QSTASH_TOKEN: "test-token", - REDIS_URL: "redis://localhost:6379", - WEBHOOK_URL: "https://test.com", - NEXT_PUBLIC_BASE_URL: "https://test.com", - }, - })); + }), + ); vi.resetModules(); const { createQueueManager } = await import("./queue-manager"); @@ -271,15 +281,11 @@ describe("Queue System", () => { let manager: any; beforeEach(async () => { - await vi.doMock("@/env", () => ({ - env: { + await vi.doMock("@/env", () => + createEnvMock({ QUEUE_SYSTEM: "redis", - QSTASH_TOKEN: "test-token", - REDIS_URL: "redis://localhost:6379", - WEBHOOK_URL: "https://test.com", - NEXT_PUBLIC_BASE_URL: "https://test.com", - }, - })); + }), + ); vi.resetModules(); const { BullMQManager } = await import("./bullmq-manager"); @@ -556,13 +562,12 @@ describe("Queue System", () => { describe("URL Construction", () => { it("should use WEBHOOK_URL when available", async () => { - await vi.doMock("@/env", () => ({ - env: { - QSTASH_TOKEN: "test-token", + await vi.doMock("@/env", () => + createEnvMock({ WEBHOOK_URL: "https://webhook.test.com", NEXT_PUBLIC_BASE_URL: 
"https://fallback.test.com", - }, - })); + }), + ); vi.resetModules(); const { QStashManager: MockedQStashManager } = await import( @@ -582,13 +587,12 @@ describe("Queue System", () => { }); it("should fallback to NEXT_PUBLIC_BASE_URL when WEBHOOK_URL is not available", async () => { - await vi.doMock("@/env", () => ({ - env: { - QSTASH_TOKEN: "test-token", + await vi.doMock("@/env", () => + createEnvMock({ WEBHOOK_URL: undefined, NEXT_PUBLIC_BASE_URL: "https://fallback.test.com", - }, - })); + }), + ); vi.resetModules(); const { QStashManager: MockedQStashManager } = await import( From 3f3213dce8b5c5d7641a8027d6bca086a1b3fb7c Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Wed, 5 Nov 2025 02:18:35 -0300 Subject: [PATCH 09/17] Build fixes and PR feedback --- apps/web/env.ts | 2 +- apps/web/next.config.ts | 2 +- apps/web/utils/ai/rule/diff-rules.ts | 1 + apps/web/utils/queue/bullmq-manager.ts | 11 +++++++---- apps/web/utils/queue/qstash-manager.ts | 21 +++++++++++++-------- apps/web/utils/queue/queue-manager.ts | 2 ++ apps/web/utils/queue/queues.ts | 18 ++++++++++++------ apps/web/utils/queue/worker.ts | 1 + apps/web/worker.js | 26 +++++++++++++------------- docker/Dockerfile.prod | 10 +++++++--- 10 files changed, 58 insertions(+), 36 deletions(-) diff --git a/apps/web/env.ts b/apps/web/env.ts index caf6ebb963..bcfe951b1d 100644 --- a/apps/web/env.ts +++ b/apps/web/env.ts @@ -62,7 +62,7 @@ export const env = createEnv({ UPSTASH_REDIS_TOKEN: z.string().optional(), REDIS_URL: z.string().optional(), QUEUE_SYSTEM: z.enum(["redis", "upstash"]).default("upstash"), - ENABLE_WORKER_QUEUES: z.boolean().default(false), + ENABLE_WORKER_QUEUES: z.coerce.boolean().default(false), QSTASH_TOKEN: z.string().optional(), GOOGLE_PUBSUB_TOPIC_NAME: z.string().min(1), diff --git a/apps/web/next.config.ts b/apps/web/next.config.ts index 63d1a89ff6..bc3dcf84c5 100644 --- a/apps/web/next.config.ts +++ b/apps/web/next.config.ts @@ -14,7 +14,7 @@ const withMDX = nextMdx({ const nextConfig: NextConfig = { ...(process.env.DOCKER_BUILD === "true" && { output: "standalone" }), reactStrictMode: true, - serverExternalPackages: ["@sentry/nextjs", "@sentry/node"], + serverExternalPackages: ["@sentry/nextjs", "@sentry/node", "bullmq", "diff"], turbopack: { rules: { "*.svg": { diff --git a/apps/web/utils/ai/rule/diff-rules.ts b/apps/web/utils/ai/rule/diff-rules.ts index d0a8172f6b..4d963459d9 100644 --- a/apps/web/utils/ai/rule/diff-rules.ts +++ b/apps/web/utils/ai/rule/diff-rules.ts @@ -1,3 +1,4 @@ +import "server-only"; import z from "zod"; import { createPatch } from "diff"; import type { EmailAccountWithAI } from "@/utils/llms/types"; diff --git a/apps/web/utils/queue/bullmq-manager.ts b/apps/web/utils/queue/bullmq-manager.ts index 26ef20ae7f..0128cd6d1f 100644 --- a/apps/web/utils/queue/bullmq-manager.ts +++ b/apps/web/utils/queue/bullmq-manager.ts @@ -1,3 +1,5 @@ +import "server-only"; + import { Queue, Worker, @@ -85,10 +87,12 @@ export class BullMQManager implements QueueManager { continue; } const targetQueueName = getAiCleanQueueName({ emailAccountId }); - if (!jobsByQueue.has(targetQueueName)) { - jobsByQueue.set(targetQueueName, []); + let queueJobs = jobsByQueue.get(targetQueueName); + if (!queueJobs) { + queueJobs = []; + jobsByQueue.set(targetQueueName, queueJobs); } - jobsByQueue.get(targetQueueName)!.push(job); + queueJobs.push(job); } // Enqueue jobs to their respective queues @@ -164,7 +168,6 @@ export class BullMQManager implements QueueManager { logger.info("Processing job", { queueName, jobId: job.id, 
- data: JSON.stringify(job.data), }); try { diff --git a/apps/web/utils/queue/qstash-manager.ts b/apps/web/utils/queue/qstash-manager.ts index 2009fbc233..ac52cdb15c 100644 --- a/apps/web/utils/queue/qstash-manager.ts +++ b/apps/web/utils/queue/qstash-manager.ts @@ -1,7 +1,8 @@ +import "server-only"; + import type { Job, ConnectionOptions } from "bullmq"; import { env } from "@/env"; import { createScopedLogger } from "@/utils/logger"; -import { publishToQstashQueue } from "@/utils/upstash"; import { Client } from "@upstash/qstash"; import type { QueueJobData, @@ -26,10 +27,10 @@ export class QStashManager implements QueueManager { options: EnqueueOptions = {}, ): Promise { const url = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}/api/queue/${queueName}`; + const client = getQstashClient(); if (options.delay) { const notBefore = Math.ceil((Date.now() + options.delay) / 1000); - const client = getQstashClient(); const response = await client.publishJSON({ url, body: data, @@ -38,11 +39,13 @@ export class QStashManager implements QueueManager { }); return response?.messageId || "unknown"; } else { - const response = await publishToQstashQueue({ - queueName, - parallelism: DEFAULT_PARALLELISM, + // Use queue.enqueueJSON to support deduplicationId + const queue = client.queue({ queueName }); + await queue.upsert({ parallelism: DEFAULT_PARALLELISM }); + const response = await queue.enqueueJSON({ url, body: data, + deduplicationId: options.jobId, }); return response?.messageId || "unknown"; } @@ -73,10 +76,12 @@ export class QStashManager implements QueueManager { continue; } const accountQueueName = `${queueName}-${emailAccountId}`; - if (!jobsByAccount.has(accountQueueName)) { - jobsByAccount.set(accountQueueName, []); + let jobs = jobsByAccount.get(accountQueueName); + if (!jobs) { + jobs = []; + jobsByAccount.set(accountQueueName, jobs); } - jobsByAccount.get(accountQueueName)!.push(job); + jobs.push(job); } // Use publishToQstashQueue for each account's queue with parallelism=3 diff --git a/apps/web/utils/queue/queue-manager.ts b/apps/web/utils/queue/queue-manager.ts index 33478a62d8..0e83e1e10d 100644 --- a/apps/web/utils/queue/queue-manager.ts +++ b/apps/web/utils/queue/queue-manager.ts @@ -1,3 +1,5 @@ +import "server-only"; + import type { Queue, Worker, diff --git a/apps/web/utils/queue/queues.ts b/apps/web/utils/queue/queues.ts index 9079816df6..ee8102805d 100644 --- a/apps/web/utils/queue/queues.ts +++ b/apps/web/utils/queue/queues.ts @@ -593,12 +593,18 @@ export function isValidQueueName(queueName: string): boolean { } if (queueName.startsWith(`${AI_CATEGORIZE_SENDERS_PREFIX}-`)) { - const queueIndex = getAiCategorizeSendersQueueIndex(queueName); - return ( - queueIndex !== null && - queueIndex >= 0 && - queueIndex < AI_CATEGORIZE_SENDERS_QUEUE_COUNT - ); + if (env.QUEUE_SYSTEM === "redis") { + // For BullMQ: validate queue index (0-6) + const queueIndex = getAiCategorizeSendersQueueIndex(queueName); + return ( + queueIndex !== null && + queueIndex >= 0 && + queueIndex < AI_CATEGORIZE_SENDERS_QUEUE_COUNT + ); + } else { + // For QStash: accept any per-account queue (ai-categorize-senders-{emailAccountId}) + return true; + } } // Allow ai-clean queues diff --git a/apps/web/utils/queue/worker.ts b/apps/web/utils/queue/worker.ts index 8dc9be896e..3267100cd6 100644 --- a/apps/web/utils/queue/worker.ts +++ b/apps/web/utils/queue/worker.ts @@ -1,3 +1,4 @@ +import "server-only"; import type { Worker } from "bullmq"; import { createScopedLogger } from "@/utils/logger"; import { 
createQueueWorker, closeQueueManager } from "./queue-manager"; diff --git a/apps/web/worker.js b/apps/web/worker.js index 2a66af65e7..f124517918 100644 --- a/apps/web/worker.js +++ b/apps/web/worker.js @@ -12,22 +12,22 @@ const fs = require('fs') process.env.NODE_ENV = process.env.NODE_ENV || 'production' process.chdir(__dirname) -// Load environment variables from .env file (same as Next.js server.js does) -// Try to load .env file from the same directory -const envPath = path.join(__dirname, '.env') -if (fs.existsSync(envPath)) { - // Use dotenv to load the .env file +// Load environment variables (same as Next.js server.js does) +// loadEnvConfig automatically handles .env, .env.local, .env.production, etc. +// based on NODE_ENV, so we call it unconditionally +try { + const { loadEnvConfig } = require('next/dist/server/config-utils') + loadEnvConfig(__dirname) +} catch (err) { + // If loadEnvConfig is not available (shouldn't happen in standalone build), + // fallback to dotenv for plain .env file try { - // Try to use @next/env which Next.js uses - const { loadEnvConfig } = require('next/dist/server/config-utils') - loadEnvConfig(__dirname) - } catch (err) { - // Fallback to dotenv if @next/env is not available - try { + const envPath = path.join(__dirname, '.env') + if (fs.existsSync(envPath)) { require('dotenv').config({ path: envPath }) - } catch (dotenvErr) { - console.warn('Could not load .env file:', dotenvErr.message) } + } catch (dotenvErr) { + console.warn('Could not load environment variables:', dotenvErr.message) } } diff --git a/docker/Dockerfile.prod b/docker/Dockerfile.prod index 2e7e3939ae..81d1cf3fa2 100644 --- a/docker/Dockerfile.prod +++ b/docker/Dockerfile.prod @@ -19,7 +19,8 @@ COPY patches/ patches/ COPY apps/web/prisma/schema.prisma apps/web/prisma/schema.prisma # Install deps -RUN pnpm install --no-frozen-lockfile --prefer-offline +# Use shamefully-hoist to ensure all packages are available at root level for webpack resolution +RUN pnpm install --no-frozen-lockfile --prefer-offline --shamefully-hoist # Copy the full repo COPY . . @@ -52,8 +53,11 @@ ENV QSTASH_NEXT_SIGNING_KEY="dummy_qstash_next_key_for_build" ENV DOCKER_BUILD="true" # Use the package script so worker.js is copied into .next/standalone at root (same level as server.js) -RUN pnpm --filter inbox-zero-ai exec -- prisma generate \ - && pnpm --filter inbox-zero-ai exec -- next build \ +# Run from apps/web directory to ensure proper module resolution +RUN cd apps/web \ + && pnpm exec prisma generate \ + && pnpm exec next build \ + && cd ../.. 
\ && cp apps/web/worker.js apps/web/.next/standalone/worker.js \ && chmod +x apps/web/.next/standalone/worker.js \ && rm -rf apps/web/.next/cache From 40dca133fcdede5fda42635c9dc82e5a24ca7c5c Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Wed, 5 Nov 2025 02:20:57 -0300 Subject: [PATCH 10/17] Remove server-only modifications --- apps/web/utils/ai/rule/diff-rules.ts | 1 - apps/web/utils/queue/bullmq-manager.ts | 2 -- apps/web/utils/queue/qstash-manager.ts | 2 -- apps/web/utils/queue/queue-manager.ts | 2 -- apps/web/utils/queue/worker.ts | 1 - 5 files changed, 8 deletions(-) diff --git a/apps/web/utils/ai/rule/diff-rules.ts b/apps/web/utils/ai/rule/diff-rules.ts index 4d963459d9..d0a8172f6b 100644 --- a/apps/web/utils/ai/rule/diff-rules.ts +++ b/apps/web/utils/ai/rule/diff-rules.ts @@ -1,4 +1,3 @@ -import "server-only"; import z from "zod"; import { createPatch } from "diff"; import type { EmailAccountWithAI } from "@/utils/llms/types"; diff --git a/apps/web/utils/queue/bullmq-manager.ts b/apps/web/utils/queue/bullmq-manager.ts index 0128cd6d1f..0b4e7a804d 100644 --- a/apps/web/utils/queue/bullmq-manager.ts +++ b/apps/web/utils/queue/bullmq-manager.ts @@ -1,5 +1,3 @@ -import "server-only"; - import { Queue, Worker, diff --git a/apps/web/utils/queue/qstash-manager.ts b/apps/web/utils/queue/qstash-manager.ts index ac52cdb15c..874257287a 100644 --- a/apps/web/utils/queue/qstash-manager.ts +++ b/apps/web/utils/queue/qstash-manager.ts @@ -1,5 +1,3 @@ -import "server-only"; - import type { Job, ConnectionOptions } from "bullmq"; import { env } from "@/env"; import { createScopedLogger } from "@/utils/logger"; diff --git a/apps/web/utils/queue/queue-manager.ts b/apps/web/utils/queue/queue-manager.ts index 0e83e1e10d..33478a62d8 100644 --- a/apps/web/utils/queue/queue-manager.ts +++ b/apps/web/utils/queue/queue-manager.ts @@ -1,5 +1,3 @@ -import "server-only"; - import type { Queue, Worker, diff --git a/apps/web/utils/queue/worker.ts b/apps/web/utils/queue/worker.ts index 3267100cd6..8dc9be896e 100644 --- a/apps/web/utils/queue/worker.ts +++ b/apps/web/utils/queue/worker.ts @@ -1,4 +1,3 @@ -import "server-only"; import type { Worker } from "bullmq"; import { createScopedLogger } from "@/utils/logger"; import { createQueueWorker, closeQueueManager } from "./queue-manager"; From 7d8123cdc66a23b9456b5a929ff7c5f60873fe90 Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Wed, 5 Nov 2025 02:29:33 -0300 Subject: [PATCH 11/17] PR Feedback --- .cursor/rules/queues.mdc | 29 ++++++++++-------- apps/web/app/api/queue/[queueName]/route.ts | 34 +++++++++++++++++++-- apps/web/utils/queue/queue.test.ts | 3 ++ 3 files changed, 50 insertions(+), 16 deletions(-) diff --git a/.cursor/rules/queues.mdc b/.cursor/rules/queues.mdc index 8adc47d32f..232f7d4b91 100644 --- a/.cursor/rules/queues.mdc +++ b/.cursor/rules/queues.mdc @@ -54,19 +54,22 @@ console.log("Bulk jobs enqueued:", jobs.length); **How workers run** -- Development: workers auto-start inside the Next.js server via `apps/web/instrumentation.ts` when `NODE_ENV=development`. -- Production: run a separate worker process using the standalone build output: - -```bash -# Build app and copy worker into standalone output -pnpm --filter inbox-zero-ai build - -# Start Next.js (prod) -pnpm --filter inbox-zero-ai start:standalone - -# Start worker (separate process) -pnpm --filter inbox-zero-ai worker -``` +- **Development**: workers can start in two ways: + 1. 
**Within Next.js server**: Auto-start inside the Next.js server via `apps/web/instrumentation.ts` when both `NODE_ENV=development` and `ENABLE_WORKER_QUEUES=true` are set. + 2. **Separate worker container**: Use the `worker` service in `docker-compose.yml` which runs `worker.js` as a separate container (keep `ENABLE_WORKER_QUEUES=false` or unset). +- **Production**: workers always run as a separate process/container (never within Next.js): + - **Standalone build**: Run a separate worker process using the standalone build output: + ```bash + # Build app and copy worker into standalone output + pnpm --filter inbox-zero-ai build + + # Start Next.js (prod) + pnpm --filter inbox-zero-ai start:standalone + + # Start worker (separate process) + pnpm --filter inbox-zero-ai worker + ``` + - **Docker Compose**: Use the `worker` service in `docker-compose.yml` which runs `worker.js` as a separate container. The worker process automatically: - Creates one worker per registered queue diff --git a/apps/web/app/api/queue/[queueName]/route.ts b/apps/web/app/api/queue/[queueName]/route.ts index 208e48da5b..f6b0303b05 100644 --- a/apps/web/app/api/queue/[queueName]/route.ts +++ b/apps/web/app/api/queue/[queueName]/route.ts @@ -86,6 +86,34 @@ async function handleQueueJob( return await handler(body); } -// Export with QStash signature verification for QStash requests -// and withError middleware for consistent error handling -export const POST = verifySignatureAppRouter(withError(handleQueueJob)); +const queueRouteHandler = async ( + request: NextRequest, + context: { params: Promise> }, +): Promise => { + // Internal Redis requests bypass QStash verification + if ( + env.QUEUE_SYSTEM === "redis" && + (await validateInternalRequest(request)) + ) { + return handleQueueJob(request, context); + } + + // QStash requests: apply signature verification + const response = await verifySignatureAppRouter(async (req: Request) => { + const result = await handleQueueJob(req as NextRequest, context); + return new Response(result.body, { + status: result.status, + statusText: result.statusText, + headers: result.headers, + }); + })(request); + + return response instanceof NextResponse + ? 
response + : NextResponse.json(await response.json(), { + status: response.status, + headers: response.headers, + }); +}; + +export const POST = withError(queueRouteHandler); diff --git a/apps/web/utils/queue/queue.test.ts b/apps/web/utils/queue/queue.test.ts index d66deb91fc..ae9d606bfe 100644 --- a/apps/web/utils/queue/queue.test.ts +++ b/apps/web/utils/queue/queue.test.ts @@ -89,6 +89,9 @@ vi.mock("@/env", () => createEnvMock()); describe("Queue System", () => { beforeEach(() => { + // Reset module cache before each test to prevent partially initialized + // modules from hanging subsequent tests + vi.resetModules(); vi.clearAllMocks(); }); From 60a3ba26844e5e2930189b488f4b35ee7dae0ff2 Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Wed, 5 Nov 2025 02:33:00 -0300 Subject: [PATCH 12/17] PR Feedback --- apps/web/next.config.ts | 2 +- apps/web/utils/queue/bullmq-manager.ts | 18 ++++++++++-------- apps/web/utils/queue/worker.ts | 7 ++++++- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/apps/web/next.config.ts b/apps/web/next.config.ts index bc3dcf84c5..b7bcbeba39 100644 --- a/apps/web/next.config.ts +++ b/apps/web/next.config.ts @@ -14,7 +14,7 @@ const withMDX = nextMdx({ const nextConfig: NextConfig = { ...(process.env.DOCKER_BUILD === "true" && { output: "standalone" }), reactStrictMode: true, - serverExternalPackages: ["@sentry/nextjs", "@sentry/node", "bullmq", "diff"], + serverExternalPackages: ["@sentry/nextjs", "@sentry/node", "bullmq"], turbopack: { rules: { "*.svg": { diff --git a/apps/web/utils/queue/bullmq-manager.ts b/apps/web/utils/queue/bullmq-manager.ts index 0b4e7a804d..56e6d0d3c8 100644 --- a/apps/web/utils/queue/bullmq-manager.ts +++ b/apps/web/utils/queue/bullmq-manager.ts @@ -33,7 +33,7 @@ export class BullMQManager implements QueueManager { this.connection = { url: env.REDIS_URL, - } as unknown as ConnectionOptions; + }; } async enqueue( @@ -57,7 +57,6 @@ export class BullMQManager implements QueueManager { logger.info("Job enqueued with BullMQ", { queueName, jobId: job.id, - data: JSON.stringify(data), }); return job as Job; @@ -217,13 +216,16 @@ export class BullMQManager implements QueueManager { } getQueueEvents(queueName: string): QueueEvents { - if (!this.queueEvents.has(queueName)) { - const queueEvents = new QueueEvents(queueName, { - connection: this.connection, - }); - this.queueEvents.set(queueName, queueEvents); + const existing = this.queueEvents.get(queueName); + if (existing) { + return existing; } - return this.queueEvents.get(queueName)!; + + const queueEvents = new QueueEvents(queueName, { + connection: this.connection, + }); + this.queueEvents.set(queueName, queueEvents); + return queueEvents; } private getOrCreateQueue(queueName: string): Queue { diff --git a/apps/web/utils/queue/worker.ts b/apps/web/utils/queue/worker.ts index 8dc9be896e..912137c2d4 100644 --- a/apps/web/utils/queue/worker.ts +++ b/apps/web/utils/queue/worker.ts @@ -17,7 +17,12 @@ class WorkerRegistry { ): Worker | null { if (this.workers.has(queueName)) { logger.warn("Worker already registered for queue", { queueName }); - return this.workers.get(queueName)!; + const existingWorker = this.workers.get(queueName); + if (existingWorker) { + return existingWorker; + } + logger.error("Worker exists in map but was undefined", { queueName }); + return null; } const worker = createQueueWorker(queueName, processor as JobProcessor, { From 0d16445d4ab287714d6d00a9fb35c8a1b3a1de6a Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Wed, 5 Nov 2025 02:45:08 -0300 Subject: 
[PATCH 13/17] Add UPSTASH TOKEN --- docker-compose.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker-compose.yml b/docker-compose.yml index 5721a3b4aa..04e2463cf1 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -62,6 +62,7 @@ services: QUEUE_SYSTEM: "redis" REDIS_URL: "redis://redis:6379" UPSTASH_REDIS_URL: "http://serverless-redis-http:80" + UPSTASH_REDIS_TOKEN: ${UPSTASH_REDIS_TOKEN} worker: image: ghcr.io/elie222/inbox-zero:latest pull_policy: if_not_present @@ -84,6 +85,7 @@ services: QUEUE_SYSTEM: "redis" REDIS_URL: "redis://redis:6379" UPSTASH_REDIS_URL: "http://serverless-redis-http:80" + UPSTASH_REDIS_TOKEN: ${UPSTASH_REDIS_TOKEN} # Override the default command to run the worker instead of the web server command: ["node", "worker.js"] restart: unless-stopped From 837f033b2936fbd9e0b91fd530ee62c7c24c2a69 Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Thu, 13 Nov 2025 08:00:30 -0300 Subject: [PATCH 14/17] Refactor to queue system --- ARCHITECTURE.md | 37 + apps/queue-worker/package.json | 26 + apps/queue-worker/src/env.ts | 31 + apps/queue-worker/src/http.ts | 177 ++++ apps/queue-worker/src/processor.ts | 47 ++ apps/queue-worker/src/queue.ts | 100 +++ apps/queue-worker/src/server.ts | 19 + apps/queue-worker/tsconfig.json | 16 + apps/web/app/api/ai/digest/route.ts | 8 +- apps/web/app/api/clean/gmail/route.ts | 9 +- apps/web/app/api/clean/route.ts | 9 +- apps/web/app/api/queue/[queueName]/route.ts | 24 + apps/web/app/api/resend/digest/route.ts | 7 +- .../api/scheduled-actions/execute/route.ts | 14 +- .../user/categorize/senders/batch/route.ts | 6 +- apps/web/env.ts | 2 + apps/web/instrumentation.ts | 12 - apps/web/utils/queue-signature.ts | 30 + apps/web/utils/queue/bullmq-manager.ts | 261 ------ .../queue/providers/bullmq-manager.test.ts | 129 +++ .../utils/queue/providers/bullmq-manager.ts | 172 ++++ .../queue/providers/qstash-manager.test.ts | 141 ++++ .../queue/{ => providers}/qstash-manager.ts | 41 +- apps/web/utils/queue/queue-manager.test.ts | 58 ++ apps/web/utils/queue/queue-manager.ts | 49 +- apps/web/utils/queue/queue.test.ts | 782 ------------------ apps/web/utils/queue/queues.ts | 50 +- apps/web/utils/queue/types.ts | 16 +- .../utils/scheduled-actions/scheduler.test.ts | 8 + apps/web/utils/worker-signature.ts | 73 ++ apps/web/worker.js | 105 --- docker-compose.yml | 28 +- docker/Dockerfile.prod | 6 +- docker/Dockerfile.worker | 44 + docs/hosting/docker.md | 69 ++ pnpm-lock.yaml | 90 +- 36 files changed, 1365 insertions(+), 1331 deletions(-) create mode 100644 apps/queue-worker/package.json create mode 100644 apps/queue-worker/src/env.ts create mode 100644 apps/queue-worker/src/http.ts create mode 100644 apps/queue-worker/src/processor.ts create mode 100644 apps/queue-worker/src/queue.ts create mode 100644 apps/queue-worker/src/server.ts create mode 100644 apps/queue-worker/tsconfig.json create mode 100644 apps/web/utils/queue-signature.ts delete mode 100644 apps/web/utils/queue/bullmq-manager.ts create mode 100644 apps/web/utils/queue/providers/bullmq-manager.test.ts create mode 100644 apps/web/utils/queue/providers/bullmq-manager.ts create mode 100644 apps/web/utils/queue/providers/qstash-manager.test.ts rename apps/web/utils/queue/{ => providers}/qstash-manager.ts (79%) create mode 100644 apps/web/utils/queue/queue-manager.test.ts delete mode 100644 apps/web/utils/queue/queue.test.ts create mode 100644 apps/web/utils/worker-signature.ts delete mode 100644 apps/web/worker.js create mode 100644 docker/Dockerfile.worker diff --git 
a/ARCHITECTURE.md b/ARCHITECTURE.md index 66e7bff1cc..9a827905d9 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -104,6 +104,43 @@ The Inbox Zero repository is structured as a monorepo, consisting of two main ap - `rule/`: Rule-related utilities (prompt file parsing, rule fixing, etc.). - `scripts/`: Scripts for database migrations, data manipulation, and other maintenance tasks. +### Queue System and Worker Service + +Inbox Zero supports two queue backends: +- QStash (Upstash) — managed HTTP queues +- Redis (BullMQ) — via a dedicated, external Queue Worker service + +High-level flow: +- The web app enqueues jobs through a unified manager (`apps/web/utils/queue/queue-manager.ts`) + - When `QUEUE_SYSTEM=upstash`, it publishes directly to QStash + - When `QUEUE_SYSTEM=redis`, it makes an HTTP POST to the Queue Worker (`apps/queue-worker`) +- The Queue Worker persists jobs in Redis using BullMQ and executes them by making HTTP callbacks to the URL provided in the enqueue request (QStash-style). The `url` is required; there is no implicit fallback. +- Callbacks are authenticated with `Authorization: Bearer ${CRON_SECRET}` and can be HMAC-signed with `WORKER_SIGNING_SECRET` + +Key files: +- Web: + - `apps/web/utils/queue/queue-manager.ts` + - `apps/web/utils/queue/providers/qstash-manager.ts` + - `apps/web/utils/queue/providers/bullmq-manager.ts` (HTTP client to the worker) + - `apps/web/app/api/queue/[queueName]/route.ts` (receives callbacks from either system) +- Worker: + - `apps/queue-worker/src/http.ts` (enqueue endpoints, health) + - `apps/queue-worker/src/queue.ts` (BullMQ setup and worker registry) + - `apps/queue-worker/src/processor.ts` (HTTP callback processor with optional HMAC) + - `apps/queue-worker/src/env.ts` (environment validation) + +Environment variables (essential): +- Web: + - `QUEUE_SYSTEM=redis|upstash` + - `WORKER_BASE_URL` (when `redis`) + - `CRON_SECRET` (shared with worker) + - `WORKER_SIGNING_SECRET` (optional, for HMAC verification) +- Worker: + - `REDIS_URL` + - `WEB_BASE_URL` + - `CRON_SECRET` (shared with web) + - `WORKER_SIGNING_SECRET` (optional) + ### 8. `docker` - Docker Configuration - **Purpose:** Contains Dockerfile for containerizing the web application. 
diff --git a/apps/queue-worker/package.json b/apps/queue-worker/package.json new file mode 100644 index 0000000000..8ee4da809c --- /dev/null +++ b/apps/queue-worker/package.json @@ -0,0 +1,26 @@ +{ + "name": "@inbox-zero/queue-worker", + "version": "0.1.0", + "private": true, + "type": "module", + "scripts": { + "start": "tsx --tsconfig tsconfig.json src/server.ts", + "dev": "tsx watch --tsconfig tsconfig.json src/server.ts", + "build": "tsc --project tsconfig.json", + "typecheck": "tsc --noEmit --project tsconfig.json", + "lint": "echo \"No lint configured for queue-worker yet\" && exit 0" + }, + "dependencies": { + "@fastify/cors": "11.1.0", + "@t3-oss/env-core": "0.13.8", + "bullmq": "5.62.0", + "dotenv": "17.2.3", + "fastify": "5.6.1", + "zod": "3.25.46" + }, + "devDependencies": { + "@types/node": "24.9.1", + "tsx": "4.20.6", + "typescript": "5.9.3" + } +} diff --git a/apps/queue-worker/src/env.ts b/apps/queue-worker/src/env.ts new file mode 100644 index 0000000000..4442286ff7 --- /dev/null +++ b/apps/queue-worker/src/env.ts @@ -0,0 +1,31 @@ +import { createEnv } from "@t3-oss/env-core"; +import { z } from "zod"; +import "dotenv/config"; + +export const env = createEnv({ + server: { + NODE_ENV: z + .enum(["development", "production", "test"]) + .default("development"), + PORT: z.number().default(5070), + + // Redis/BullMQ + REDIS_URL: z.string().url(), + + // Auth for web -> worker (reuse CRON_SECRET) + CRON_SECRET: z.string(), + + // Callback target for worker -> web + WEB_BASE_URL: z.string().url(), + // Optional signing secret for worker -> web + WORKER_SIGNING_SECRET: z.string().optional(), + + // Tuning + DEFAULT_CONCURRENCY: z.number().default(3), + LOG_LEVEL: z + .enum(["fatal", "error", "warn", "info", "debug", "trace", "silent"]) + .default("info"), + }, + runtimeEnv: process.env, + emptyStringAsUndefined: true, +}); diff --git a/apps/queue-worker/src/http.ts b/apps/queue-worker/src/http.ts new file mode 100644 index 0000000000..6557ec5906 --- /dev/null +++ b/apps/queue-worker/src/http.ts @@ -0,0 +1,177 @@ +import fastify, { + type FastifyInstance, + type FastifyReply, + type FastifyRequest, +} from "fastify"; +import cors from "@fastify/cors"; +import { z } from "zod"; +import { env } from "./env"; +import { enqueue, bulkEnqueue } from "./queue"; + +function isAuthorized(request: FastifyRequest): boolean { + const auth = request.headers.authorization; + if (typeof auth === "string" && auth.startsWith("Bearer ")) { + const token = auth.slice("Bearer ".length).trim(); + return token === env.CRON_SECRET; + } + const apiKey = request.headers["x-api-key"]; + if (typeof apiKey === "string" && apiKey.length > 0) { + return apiKey === env.CRON_SECRET; + } + return false; +} + +async function authGuard(request: FastifyRequest, reply: FastifyReply) { + if (!isAuthorized(request)) { + return reply.code(401).send({ error: "Unauthorized" }); + } +} + +const enqueueOptionsSchema = z + .object({ + deduplicationId: z.string().min(1).optional(), + notBefore: z.number().int().optional(), // seconds since epoch + attempts: z.number().int().min(1).max(25).optional(), + priority: z.number().int().min(1).max(10).optional(), + parallelism: z.number().int().min(1).max(100).optional(), + removeOnComplete: z + .union([z.boolean(), z.number().int().min(0)]) + .optional(), + removeOnFail: z.union([z.boolean(), z.number().int().min(0)]).optional(), + }) + .optional(); + +const enqueueRequestSchema = z.object({ + queueName: z.string().min(1), + // QStash-style (required) + url: z + .string() + .min(1) + 
.refine((u) => u.startsWith("/") || /^https?:\/\//i.test(u), { + message: "url must be absolute http(s) or start with '/'", + }), + body: z.unknown().optional(), + options: enqueueOptionsSchema, + headers: z.record(z.string()).optional(), +}); + +const bulkEnqueueRequestSchema = z.object({ + queueName: z.string().min(1), + items: z + .array( + z.object({ + url: z + .string() + .min(1) + .refine((u) => u.startsWith("/") || /^https?:\/\//i.test(u), { + message: "url must be absolute http(s) or start with '/'", + }), + body: z.unknown().optional(), + options: enqueueOptionsSchema, + headers: z.record(z.string()).optional(), + }), + ) + .min(1), +}); + +export function buildServer(): FastifyInstance { + const server = fastify({ + logger: { level: env.LOG_LEVEL }, + }); + + server.register(cors, { + origin: true, + methods: ["GET", "POST"], + }); + + server.get("/health", async () => { + return { status: "ok" }; + }); + + server.post( + "/v1/jobs", + { preHandler: authGuard }, + async (request: FastifyRequest, reply: FastifyReply) => { + const parsed = enqueueRequestSchema.safeParse(request.body); + if (!parsed.success) { + return reply + .code(400) + .send({ error: "Invalid request", details: parsed.error.flatten() }); + } + const body = parsed.data; + try { + const targetPath = body.url; + const data = body.body; + // Normalize options + const dedupId = body.options?.deduplicationId; + const delayMs = body.options?.notBefore + ? Math.max(0, body.options.notBefore * 1000 - Date.now()) + : undefined; + + const jobId = await enqueue( + body.queueName, + { targetPath, payload: data, headers: body.headers }, + { + delay: delayMs, + parallelism: body.options?.parallelism, + jobId: dedupId, + }, + ); + return reply.code(200).send({ jobId }); + } catch (error) { + return reply + .code(500) + .send({ + error: error instanceof Error ? error.message : String(error), + }); + } + }, + ); + + server.post( + "/v1/jobs/bulk", + { preHandler: authGuard }, + async (request: FastifyRequest, reply: FastifyReply) => { + const parsed = bulkEnqueueRequestSchema.safeParse(request.body); + if (!parsed.success) { + return reply + .code(400) + .send({ error: "Invalid request", details: parsed.error.flatten() }); + } + const body = parsed.data; + try { + const jobIds = await bulkEnqueue( + body.queueName, + body.items.map((item) => { + const targetPath = item.url; + const data = item.body; + const dedupId = item.options?.deduplicationId; + const delayMs = item.options?.notBefore + ? Math.max(0, item.options.notBefore * 1000 - Date.now()) + : undefined; + return { + data: { targetPath, payload: data, headers: item.headers }, + options: { + delay: delayMs, + jobId: dedupId, + }, + }; + }), + { + delay: undefined, + parallelism: body.items[0]?.options?.parallelism, + }, + ); + return reply.code(200).send({ jobIds }); + } catch (error) { + return reply + .code(500) + .send({ + error: error instanceof Error ? 
error.message : String(error), + }); + } + }, + ); + + return server; +} diff --git a/apps/queue-worker/src/processor.ts b/apps/queue-worker/src/processor.ts new file mode 100644 index 0000000000..64f730d7af --- /dev/null +++ b/apps/queue-worker/src/processor.ts @@ -0,0 +1,47 @@ +import { env } from "./env"; +import { createHmac } from "node:crypto"; + +export interface WorkerJobData { + targetPath: string; + payload: unknown; + headers?: Record; +} + +function buildSignatureHeaders(bodyString: string): Record { + const timestamp = new Date().toISOString(); + const secret = env.WORKER_SIGNING_SECRET; + if (!secret) { + return {}; + } + const payload = `${timestamp}.${bodyString}`; + const signature = createHmac("sha256", secret).update(payload).digest("hex"); + return { + "x-worker-signature": signature, + "x-worker-timestamp": timestamp, + }; +} + +export async function processJob(data: WorkerJobData): Promise { + const url = new URL(data.targetPath, env.WEB_BASE_URL).toString(); + const bodyString = JSON.stringify(data.payload ?? {}); + + const headers: Record = { + "content-type": "application/json", + authorization: `Bearer ${env.CRON_SECRET}`, + ...data.headers, + ...buildSignatureHeaders(bodyString), + }; + + const response = await fetch(url, { + method: "POST", + headers, + body: bodyString, + }); + + if (!response.ok) { + const text = await response.text().catch(() => ""); + throw new Error( + `Callback failed: ${response.status} ${response.statusText} ${text}`, + ); + } +} diff --git a/apps/queue-worker/src/queue.ts b/apps/queue-worker/src/queue.ts new file mode 100644 index 0000000000..dfb68392ba --- /dev/null +++ b/apps/queue-worker/src/queue.ts @@ -0,0 +1,100 @@ +import { + Queue, + Worker, + type JobsOptions, + type ConnectionOptions, +} from "bullmq"; +import { env } from "./env"; +import { processJob, type WorkerJobData } from "./processor"; + +type QueueRecord = { + queue: Queue; + worker: Worker; + concurrency: number; +}; + +const queues = new Map(); + +const connection: ConnectionOptions = { + url: env.REDIS_URL, +}; + +export function getOrCreateQueue( + queueName: string, + concurrency?: number, +): Queue { + let record = queues.get(queueName); + if (record) { + return record.queue; + } + const workerConcurrency = concurrency ?? env.DEFAULT_CONCURRENCY; + const queue = new Queue(queueName, { + connection, + defaultJobOptions: { + removeOnComplete: { count: 10 }, + removeOnFail: { count: 5 }, + attempts: 5, + } as JobsOptions, + }); + const worker = new Worker( + queueName, + async (job) => { + await processJob(job.data); + }, + { + connection, + concurrency: workerConcurrency, + }, + ); + record = { queue, worker, concurrency: workerConcurrency }; + queues.set(queueName, record); + return queue; +} + +export async function enqueue( + queueName: string, + data: WorkerJobData, + options?: JobsOptions & { parallelism?: number }, +) { + const queue = getOrCreateQueue(queueName, options?.parallelism); + const job = await queue.add(queueName, data, { + delay: options?.delay, + attempts: options?.attempts ?? 5, + priority: options?.priority, + removeOnComplete: options?.removeOnComplete ?? { count: 10 }, + removeOnFail: options?.removeOnFail ?? 
{ count: 5 }, + jobId: options?.jobId as string | undefined, + }); + return job.id; +} + +export async function bulkEnqueue( + queueName: string, + items: Array<{ data: WorkerJobData; options?: JobsOptions }>, + options?: { + delay?: number; + attempts?: number; + priority?: number; + parallelism?: number; + removeOnComplete?: number | boolean; + removeOnFail?: number | boolean; + }, +) { + const queue = getOrCreateQueue(queueName, options?.parallelism); + const jobs = items.map((item) => ({ + name: queueName, + data: item.data, + opts: { + delay: item.options?.delay ?? options?.delay, + attempts: item.options?.attempts ?? options?.attempts ?? 5, + priority: item.options?.priority ?? options?.priority, + removeOnComplete: item.options?.removeOnComplete ?? + options?.removeOnComplete ?? { count: 10 }, + removeOnFail: item.options?.removeOnFail ?? + options?.removeOnFail ?? { count: 5 }, + jobId: item.options?.jobId, + } as JobsOptions, + })); + const added = await queue.addBulk(jobs); + return added.map((j) => j.id as string); +} diff --git a/apps/queue-worker/src/server.ts b/apps/queue-worker/src/server.ts new file mode 100644 index 0000000000..52cb74309e --- /dev/null +++ b/apps/queue-worker/src/server.ts @@ -0,0 +1,19 @@ +import { env } from "./env"; +import { buildServer } from "./http"; + +async function start() { + const server = buildServer(); + try { + await server.listen({ port: env.PORT, host: "0.0.0.0" }); + server.log.info(`Queue Worker listening on :${env.PORT}`); + } catch (err) { + server.log.error(err); + process.exit(1); + } +} + +start().catch((err) => { + /* biome-ignore lint/suspicious/noConsole: fallback error log on boot failure */ + console.error(err); + process.exit(1); +}); diff --git a/apps/queue-worker/tsconfig.json b/apps/queue-worker/tsconfig.json new file mode 100644 index 0000000000..ed2a62124c --- /dev/null +++ b/apps/queue-worker/tsconfig.json @@ -0,0 +1,16 @@ +{ + "extends": "../../packages/tsconfig/base.json", + "compilerOptions": { + "target": "ES2022", + "module": "ESNext", + "outDir": "dist", + "rootDir": "src", + "moduleResolution": "Bundler", + "types": ["node"], + "resolveJsonModule": true, + "verbatimModuleSyntax": true + }, + "include": ["src/**/*.ts"] +} + + diff --git a/apps/web/app/api/ai/digest/route.ts b/apps/web/app/api/ai/digest/route.ts index 358a290889..9442bb1896 100644 --- a/apps/web/app/api/ai/digest/route.ts +++ b/apps/web/app/api/ai/digest/route.ts @@ -1,5 +1,5 @@ -import { NextResponse } from "next/server"; -import { verifySignatureAppRouter } from "@upstash/qstash/nextjs"; +import { type NextRequest, NextResponse } from "next/server"; +import { verifyQueueSignatureAppRouter } from "@/utils/queue-signature"; import { digestBody } from "./validation"; import { DigestStatus } from "@prisma/client"; import { createScopedLogger } from "@/utils/logger"; @@ -12,11 +12,11 @@ import { isAssistantEmail } from "@/utils/assistant/is-assistant-email"; import { env } from "@/env"; export const POST = withError( - verifySignatureAppRouter(async (request: Request) => { + verifyQueueSignatureAppRouter(async (req: NextRequest) => { const logger = createScopedLogger("digest"); try { - const body = digestBody.parse(await request.json()); + const body = digestBody.parse(await req.json()); const { emailAccountId, coldEmailId, actionId, message } = body; logger.with({ emailAccountId, messageId: message.id }); diff --git a/apps/web/app/api/clean/gmail/route.ts b/apps/web/app/api/clean/gmail/route.ts index af6af1d8a0..d9e5bf2e9f 100644 --- 
a/apps/web/app/api/clean/gmail/route.ts +++ b/apps/web/app/api/clean/gmail/route.ts @@ -1,5 +1,6 @@ import { type NextRequest, NextResponse } from "next/server"; -import { verifySignatureAppRouter } from "@upstash/qstash/nextjs"; +import { verifyQueueSignatureAppRouter } from "@/utils/queue-signature"; +import { verifyWorkerSignatureAppRouter } from "@/utils/worker-signature"; import { z } from "zod"; import { withError } from "@/utils/middleware"; import { getGmailClientWithRefresh } from "@/utils/gmail/client"; @@ -138,12 +139,10 @@ async function saveToDatabase({ } export const POST = withError( - verifySignatureAppRouter(async (request: NextRequest) => { - const json = await request.json(); + verifyQueueSignatureAppRouter(async (req: NextRequest) => { + const json = await req.json(); const body = cleanGmailSchema.parse(json); - await performGmailAction(body); - return NextResponse.json({ success: true }); }), ); diff --git a/apps/web/app/api/clean/route.ts b/apps/web/app/api/clean/route.ts index 1788c72393..d0aa61a9b5 100644 --- a/apps/web/app/api/clean/route.ts +++ b/apps/web/app/api/clean/route.ts @@ -1,4 +1,5 @@ -import { verifySignatureAppRouter } from "@upstash/qstash/nextjs"; +import { verifyQueueSignatureAppRouter } from "@/utils/queue-signature"; +import { verifyWorkerSignatureAppRouter } from "@/utils/worker-signature"; import { z } from "zod"; import { NextResponse } from "next/server"; import { withError } from "@/utils/middleware"; @@ -289,12 +290,10 @@ function getPublish({ } export const POST = withError( - verifySignatureAppRouter(async (request: Request) => { - const json = await request.json(); + verifyQueueSignatureAppRouter(async (req: Request) => { + const json = await req.json(); const body = cleanThreadBody.parse(json); - await cleanThread(body); - return NextResponse.json({ success: true }); }), ); diff --git a/apps/web/app/api/queue/[queueName]/route.ts b/apps/web/app/api/queue/[queueName]/route.ts index f6b0303b05..759bee508f 100644 --- a/apps/web/app/api/queue/[queueName]/route.ts +++ b/apps/web/app/api/queue/[queueName]/route.ts @@ -17,6 +17,7 @@ import { verifySignatureAppRouter } from "@upstash/qstash/nextjs"; import { withError } from "@/utils/middleware"; import { isValidInternalApiKey } from "@/utils/internal-api"; import { env } from "@/env"; +import { verifyWorkerSignatureAppRouter } from "@/utils/worker-signature"; const logger = createScopedLogger("queue-api"); @@ -95,6 +96,29 @@ const queueRouteHandler = async ( env.QUEUE_SYSTEM === "redis" && (await validateInternalRequest(request)) ) { + // If worker signature headers are present and secret configured, verify HMAC + if ( + request.headers.has("x-worker-signature") && + request.headers.has("x-worker-timestamp") + ) { + const response = await verifyWorkerSignatureAppRouter( + async (req: Request): Promise => { + const result = await handleQueueJob(req as NextRequest, context); + return new Response(result.body, { + status: result.status, + statusText: result.statusText, + headers: result.headers, + }); + }, + )(request); + + return response instanceof NextResponse + ? 
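+        // pass a NextResponse through as-is; re-wrap a plain Response so the route always returns a NextResponse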
response + : NextResponse.json(await response.json(), { + status: response.status, + headers: response.headers, + }); + } return handleQueueJob(request, context); } diff --git a/apps/web/app/api/resend/digest/route.ts b/apps/web/app/api/resend/digest/route.ts index ccf1088904..e362a4e4e5 100644 --- a/apps/web/app/api/resend/digest/route.ts +++ b/apps/web/app/api/resend/digest/route.ts @@ -17,7 +17,7 @@ import { DigestStatus } from "@prisma/client"; import { extractNameFromEmail } from "../../../../utils/email"; import { getRuleName } from "@/utils/rule/consts"; import { SystemType } from "@prisma/client"; -import { verifySignatureAppRouter } from "@upstash/qstash/nextjs"; +import { verifyQueueSignatureAppRouter } from "@/utils/queue-signature"; import { camelCase } from "lodash"; import { createEmailProvider } from "@/utils/email/provider"; import { sleep } from "@/utils/sleep"; @@ -46,12 +46,11 @@ export const GET = withEmailAccount(async (request) => { }); export const POST = withError( - verifySignatureAppRouter(async (request: NextRequest) => { - const json = await request.json(); + verifyQueueSignatureAppRouter(async (req: NextRequest) => { + const json = await req.json(); const { success, data, error } = sendDigestEmailBody.safeParse(json); let logger = createScopedLogger("resend/digest"); - if (!success) { logger.error("Invalid request body", { error }); return NextResponse.json( diff --git a/apps/web/app/api/scheduled-actions/execute/route.ts b/apps/web/app/api/scheduled-actions/execute/route.ts index 6499c9366c..dca5fcb0ed 100644 --- a/apps/web/app/api/scheduled-actions/execute/route.ts +++ b/apps/web/app/api/scheduled-actions/execute/route.ts @@ -1,4 +1,4 @@ -import { verifySignatureAppRouter } from "@upstash/qstash/nextjs"; +import { verifyQueueSignatureAppRouter } from "@/utils/queue-signature"; import type { NextRequest } from "next/server"; import { z } from "zod"; import { withError } from "@/utils/middleware"; @@ -17,16 +17,16 @@ const scheduledActionBody = z.object({ scheduledActionId: z.string().min(1, "Scheduled action ID is required"), }); -export const POST = verifySignatureAppRouter( - withError(async (request: NextRequest) => { +export const POST = withError( + verifyQueueSignatureAppRouter(async (req: NextRequest) => { try { logger.info("QStash request received", { - url: request.url, - method: request.method, - headers: Object.fromEntries(request.headers.entries()), + url: req.url, + method: req.method, + headers: Object.fromEntries(req.headers.entries()), }); - const rawPayload = await request.json(); + const rawPayload = await req.json(); const validationResult = scheduledActionBody.safeParse(rawPayload); if (!validationResult.success) { diff --git a/apps/web/app/api/user/categorize/senders/batch/route.ts b/apps/web/app/api/user/categorize/senders/batch/route.ts index 81a5d7d388..9982e38e39 100644 --- a/apps/web/app/api/user/categorize/senders/batch/route.ts +++ b/apps/web/app/api/user/categorize/senders/batch/route.ts @@ -1,7 +1,9 @@ -import { verifySignatureAppRouter } from "@upstash/qstash/nextjs"; +import { verifyQueueSignatureAppRouter } from "@/utils/queue-signature"; import { withError } from "@/utils/middleware"; import { handleBatchRequest } from "@/app/api/user/categorize/senders/batch/handle-batch"; export const maxDuration = 300; -export const POST = withError(verifySignatureAppRouter(handleBatchRequest)); +export const POST = withError( + verifyQueueSignatureAppRouter(handleBatchRequest), +); diff --git a/apps/web/env.ts b/apps/web/env.ts index 
bcfe951b1d..1578c0fe37 100644 --- a/apps/web/env.ts +++ b/apps/web/env.ts @@ -64,6 +64,7 @@ export const env = createEnv({ QUEUE_SYSTEM: z.enum(["redis", "upstash"]).default("upstash"), ENABLE_WORKER_QUEUES: z.coerce.boolean().default(false), QSTASH_TOKEN: z.string().optional(), + WORKER_BASE_URL: z.string().optional(), GOOGLE_PUBSUB_TOPIC_NAME: z.string().min(1), GOOGLE_PUBSUB_VERIFICATION_TOKEN: z.string().optional(), @@ -111,6 +112,7 @@ export const env = createEnv({ .transform((value) => value?.split(",")), WEBHOOK_URL: z.string().optional(), INTERNAL_API_KEY: z.string(), + WORKER_SIGNING_SECRET: z.string().optional(), WHITELIST_FROM: z.string().optional(), USE_BACKUP_MODEL: z.coerce.boolean().optional().default(false), HEALTH_API_KEY: z.string().optional(), diff --git a/apps/web/instrumentation.ts b/apps/web/instrumentation.ts index 6d8f9b72d6..ff915bcc18 100644 --- a/apps/web/instrumentation.ts +++ b/apps/web/instrumentation.ts @@ -8,11 +8,9 @@ declare global { } export function startBullMQWorkers() { - // Avoid duplicate starts during hot reloads if (!globalThis.__inboxZeroWorkersStarted) { globalThis.__inboxZeroWorkersStarted = true; - // Defer heavy imports until after env is available import("@/env") .then(async ({ env }) => { if (env.QUEUE_SYSTEM !== "redis") return; @@ -60,19 +58,12 @@ export function startBullMQWorkers() { export function register() { if (process.env.NEXT_RUNTIME === "nodejs") { - // this is your Sentry.init call from `sentry.server.config.js|ts` Sentry.init({ dsn: process.env.NEXT_PUBLIC_SENTRY_DSN, - // Adjust this value in production, or use tracesSampler for greater control tracesSampleRate: 1, - // Setting this option to true will print useful information to the console while you're setting up Sentry. debug: false, - // uncomment the line below to enable Spotlight (https://spotlightjs.com) - // spotlight: process.env.NODE_ENV === 'development', }); - // Start BullMQ workers inside the Next.js server process when enabled - // Can be enabled via ENABLE_WORKER_QUEUES=true or automatically in development mode if ( process.env.NODE_ENV === "development" && process.env.ENABLE_WORKER_QUEUES === "true" @@ -81,13 +72,10 @@ export function register() { } } - // This is your Sentry.init call from `sentry.edge.config.js|ts` if (process.env.NEXT_RUNTIME === "edge") { Sentry.init({ dsn: process.env.NEXT_PUBLIC_SENTRY_DSN, - // Adjust this value in production, or use tracesSampler for greater control tracesSampleRate: 1, - // Setting this option to true will print useful information to the console while you're setting up Sentry. debug: false, }); } diff --git a/apps/web/utils/queue-signature.ts b/apps/web/utils/queue-signature.ts new file mode 100644 index 0000000000..852ff5f741 --- /dev/null +++ b/apps/web/utils/queue-signature.ts @@ -0,0 +1,30 @@ +import { verifySignatureAppRouter } from "@upstash/qstash/nextjs"; +import { type NextRequest, NextResponse } from "next/server"; +import { verifyWorkerSignatureAppRouter } from "./worker-signature"; + +export function verifyQueueSignatureAppRouter< + TReq extends Request | NextRequest, + TRes extends Response, +>(handler: (req: TReq) => Promise | TRes) { + return async (request: NextRequest): Promise => { + const hasWorkerSig = + request.headers.has("x-worker-signature") && + request.headers.has("x-worker-timestamp"); + + const adapter = async (req: Request): Promise => { + const result = await handler(req as TReq); + return result as TRes as Response; + }; + + const response = hasWorkerSig + ? 
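+      // x-worker-* headers mean the request came from the BullMQ worker service; otherwise fall back to QStash signature verification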
await verifyWorkerSignatureAppRouter(adapter)(request) + : await verifySignatureAppRouter(adapter)(request); + + return response instanceof NextResponse + ? response + : NextResponse.json(await response.json(), { + status: response.status, + headers: response.headers, + }); + }; +} diff --git a/apps/web/utils/queue/bullmq-manager.ts b/apps/web/utils/queue/bullmq-manager.ts deleted file mode 100644 index 56e6d0d3c8..0000000000 --- a/apps/web/utils/queue/bullmq-manager.ts +++ /dev/null @@ -1,261 +0,0 @@ -import { - Queue, - Worker, - QueueEvents, - type Job, - type ConnectionOptions, -} from "bullmq"; -import { env } from "@/env"; -import { createScopedLogger } from "@/utils/logger"; -import { getAiCleanQueueName } from "./queues"; -import type { - QueueJobData, - EnqueueOptions, - BulkEnqueueOptions, - QueueManager, -} from "./types"; - -export const DEFAULT_CONCURRENCY = 3; -export const DEFAULT_ATTEMPTS = 5; - -const logger = createScopedLogger("queue-bullmq"); - -export class BullMQManager implements QueueManager { - private readonly queues: Map = new Map(); - private readonly workers: Map = new Map(); - private readonly queueEvents: Map = new Map(); - private readonly connection: ConnectionOptions; - - constructor() { - if (!env.REDIS_URL) { - throw new Error("REDIS_URL is required for BullMQ"); - } - - this.connection = { - url: env.REDIS_URL, - }; - } - - async enqueue( - queueName: string, - data: T, - options: EnqueueOptions = {}, - ): Promise> { - const queue = this.getOrCreateQueue(queueName); - - const jobOptions = { - delay: options.delay, - attempts: options.attempts ?? DEFAULT_ATTEMPTS, - priority: options.priority, - removeOnComplete: options.removeOnComplete ?? 10, - removeOnFail: options.removeOnFail ?? 5, - jobId: options.jobId, - }; - - const job = await queue.add(queueName, data, jobOptions); - - logger.info("Job enqueued with BullMQ", { - queueName, - jobId: job.id, - }); - - return job as Job; - } - - async bulkEnqueue( - queueName: string, - options: BulkEnqueueOptions, - ): Promise[]> { - // For ai-clean queue, use hash-based distribution across multiple queues - // This ensures per-account parallelism limits similar to ai-categorize-senders - if (queueName === "ai-clean") { - // Group jobs by their target queue (based on emailAccountId hash) - const jobsByQueue = new Map(); - for (const job of options.jobs) { - const emailAccountId = (job.data as { emailAccountId?: string }) - .emailAccountId; - if (!emailAccountId) { - logger.warn( - "Job missing emailAccountId, skipping per-account queue grouping", - { - queueName, - }, - ); - continue; - } - const targetQueueName = getAiCleanQueueName({ emailAccountId }); - let queueJobs = jobsByQueue.get(targetQueueName); - if (!queueJobs) { - queueJobs = []; - jobsByQueue.set(targetQueueName, queueJobs); - } - queueJobs.push(job); - } - - // Enqueue jobs to their respective queues - const allJobs: Job[] = []; - for (const [targetQueueName, queueJobs] of jobsByQueue) { - const queue = this.getOrCreateQueue(targetQueueName); - - const jobs = queueJobs.map((jobData) => ({ - name: jobData.name ?? targetQueueName, - data: jobData.data, - opts: { - delay: options.delay, - attempts: options.attempts ?? DEFAULT_ATTEMPTS, - priority: options.priority, - removeOnComplete: options.removeOnComplete ?? 10, - removeOnFail: options.removeOnFail ?? 
5, - jobId: jobData.opts?.jobId, - ...jobData.opts, - }, - })); - - const addedJobs = await queue.addBulk(jobs); - allJobs.push(...(addedJobs as Job[])); - } - - logger.info("Bulk jobs enqueued with BullMQ (distributed)", { - queueName, - jobCount: allJobs.length, - queuesUsed: Array.from(jobsByQueue.keys()), - }); - - return allJobs; - } - - // For other queues, use the original single-queue approach - const queue = this.getOrCreateQueue(queueName); - - const jobs = options.jobs.map((jobData) => ({ - name: jobData.name ?? queueName, - data: jobData.data, - opts: { - delay: options.delay, - attempts: options.attempts ?? DEFAULT_ATTEMPTS, - priority: options.priority, - removeOnComplete: options.removeOnComplete ?? 10, - removeOnFail: options.removeOnFail ?? 5, - jobId: jobData.opts?.jobId, - ...jobData.opts, - }, - })); - - const addedJobs = await queue.addBulk(jobs); - - logger.info("Bulk jobs enqueued with BullMQ", { - queueName, - jobCount: addedJobs.length, - }); - - return addedJobs as Job[]; - } - - createWorker( - queueName: string, - processor: (job: Job) => Promise, - options: { - concurrency?: number; - connection?: ConnectionOptions; - } = {}, - ): Worker { - const worker = new Worker( - queueName, - async (job) => { - logger.info("Processing job", { - queueName, - jobId: job.id, - }); - - try { - await processor(job); - logger.info("Job completed successfully", { - queueName, - jobId: job.id, - }); - } catch (error) { - logger.error("Job failed", { - queueName, - jobId: job.id, - error: error instanceof Error ? error.message : String(error), - }); - throw error; - } - }, - { - connection: options.connection || this.connection, - concurrency: options.concurrency || DEFAULT_CONCURRENCY, - removeOnComplete: { count: 10 }, - removeOnFail: { count: 5 }, - }, - ); - - this.workers.set(queueName, worker); - return worker; - } - - createQueue( - queueName: string, - options: { - connection?: ConnectionOptions; - defaultJobOptions?: Record; - } = {}, - ): Queue { - const queue = new Queue(queueName, { - connection: options.connection || this.connection, - defaultJobOptions: { - removeOnComplete: { count: 10 }, - removeOnFail: { count: 5 }, - attempts: DEFAULT_ATTEMPTS, - ...options.defaultJobOptions, - }, - }); - - this.queues.set(queueName, queue); - return queue; - } - - getQueueEvents(queueName: string): QueueEvents { - const existing = this.queueEvents.get(queueName); - if (existing) { - return existing; - } - - const queueEvents = new QueueEvents(queueName, { - connection: this.connection, - }); - this.queueEvents.set(queueName, queueEvents); - return queueEvents; - } - - private getOrCreateQueue(queueName: string): Queue { - if (!this.queues.has(queueName)) { - this.createQueue(queueName); - } - return this.queues.get(queueName)!; - } - - async close(): Promise { - // Close all workers - for (const [name, worker] of this.workers) { - logger.info("Closing worker", { queueName: name }); - await worker.close(); - } - - // Close all queues - for (const [name, queue] of this.queues) { - logger.info("Closing queue", { queueName: name }); - await queue.close(); - } - - // Close all queue events - for (const [name, queueEvents] of this.queueEvents) { - logger.info("Closing queue events", { queueName: name }); - await queueEvents.close(); - } - - this.queues.clear(); - this.workers.clear(); - this.queueEvents.clear(); - } -} diff --git a/apps/web/utils/queue/providers/bullmq-manager.test.ts b/apps/web/utils/queue/providers/bullmq-manager.test.ts new file mode 100644 index 
0000000000..bc0f445a4a --- /dev/null +++ b/apps/web/utils/queue/providers/bullmq-manager.test.ts @@ -0,0 +1,129 @@ +import { + describe, + it, + expect, + beforeEach, + afterEach, + afterAll, + vi, +} from "vitest"; + +// Mock server-only to prevent import errors in tests +vi.mock("server-only", () => ({})); + +// Mock fetch for HTTP worker (Redis) path +const originalFetch = global.fetch; +const mockFetch = vi.fn(); +// @ts-expect-error override for tests +global.fetch = mockFetch; + +// Helper to create env mock +const createEnvMock = (overrides: Record = {}) => ({ + env: { + QUEUE_SYSTEM: "redis", + WORKER_BASE_URL: "http://queue-worker:5070", + CRON_SECRET: "test-cron", + NODE_ENV: "test", + EMAIL_ENCRYPT_SECRET: "test-encryption-secret-key-for-testing-purposes", + EMAIL_ENCRYPT_SALT: "test-encryption-salt-for-testing", + ...overrides, + }, +}); + +describe("HTTP Worker Manager (Redis)", () => { + let manager: any; + + beforeEach(async () => { + await vi.doMock("@/env", () => createEnvMock()); + vi.resetModules(); + mockFetch.mockReset(); + const { BullMQManager } = await import("./bullmq-manager"); + manager = new BullMQManager(); + }); + + afterEach(async () => { + if (manager) { + await manager.close(); + } + }); + + describe("Job Enqueueing (HTTP)", () => { + it("should enqueue a single job via worker service", async () => { + mockFetch.mockResolvedValueOnce( + new Response(JSON.stringify({ jobId: "job-123" }), { status: 200 }), + ); + const jobData = { message: "Test job", userId: "user-123" }; + + const result = await manager.enqueue("test-queue", jobData); + + expect(mockFetch).toHaveBeenCalledWith( + "http://queue-worker:5070/v1/jobs", + expect.objectContaining({ + method: "POST", + headers: expect.objectContaining({ + "content-type": "application/json", + authorization: "Bearer test-cron", + }), + }), + ); + expect(result).toBe("job-123"); + }); + + it("should enqueue a job with options", async () => { + mockFetch.mockResolvedValueOnce( + new Response(JSON.stringify({ jobId: "job-456" }), { status: 200 }), + ); + const jobData = { message: "Delayed job", userId: "user-456" }; + const options = { + notBefore: Math.ceil((Date.now() + 5000) / 1000), + deduplicationId: "job-456", + }; + + const result = await manager.enqueue("test-queue", jobData, options); + + const [, init] = mockFetch.mock.calls[0]; + const body = JSON.parse((init as RequestInit).body as string); + expect(body.options).toEqual( + expect.objectContaining({ + notBefore: expect.any(Number), + deduplicationId: "job-456", + }), + ); + expect(result).toBe("job-456"); + }); + + it("should handle enqueue errors", async () => { + mockFetch.mockResolvedValueOnce(new Response("oops", { status: 500 })); + await expect( + manager.enqueue("test-queue", { message: "Test" }), + ).rejects.toThrow("Worker enqueue failed (500): "); + }); + }); + + describe("Bulk Job Enqueueing (HTTP)", () => { + it("should enqueue multiple jobs via worker service", async () => { + mockFetch.mockResolvedValueOnce( + new Response(JSON.stringify({ jobIds: ["bulk-1", "bulk-2"] }), { + status: 200, + }), + ); + const jobs = [ + { data: { message: "Bulk job 1" } }, + { data: { message: "Bulk job 2" } }, + ]; + + const result = await manager.bulkEnqueue("test-queue", { jobs }); + + const [, init] = mockFetch.mock.calls[0]; + const body = JSON.parse((init as RequestInit).body as string); + expect(body.items).toHaveLength(2); + expect(result).toEqual(["bulk-1", "bulk-2"]); + }); + }); +}); + +// Restore fetch after all tests in this file +afterAll(() => { 
+ // @ts-expect-error restore + global.fetch = originalFetch; +}); diff --git a/apps/web/utils/queue/providers/bullmq-manager.ts b/apps/web/utils/queue/providers/bullmq-manager.ts new file mode 100644 index 0000000000..4d59e9dfc9 --- /dev/null +++ b/apps/web/utils/queue/providers/bullmq-manager.ts @@ -0,0 +1,172 @@ +import type { + Job, + Queue, + QueueEvents, + Worker, + ConnectionOptions, +} from "bullmq"; +import { env } from "@/env"; +import { createScopedLogger } from "@/utils/logger"; +import type { + BulkEnqueueOptions, + EnqueueOptions, + QueueJobData, + QueueManager, +} from "../types"; + +const logger = createScopedLogger("queue-http-worker"); + +function getWorkerBaseUrl(): string { + const base = env.WORKER_BASE_URL; + if (!base) { + throw new Error( + "WORKER_BASE_URL is required when using redis worker service", + ); + } + return base.replace(/\/+$/, ""); +} + +function getAuthHeaders(): Record { + if (!env.CRON_SECRET) { + throw new Error( + "CRON_SECRET is required to authenticate with worker service", + ); + } + return { + authorization: `Bearer ${env.CRON_SECRET}`, + }; +} + +export class BullMQManager implements QueueManager { + async enqueue( + queueName: string, + data: T, + options: EnqueueOptions = {}, + ): Promise | string> { + const url = `${getWorkerBaseUrl()}/v1/jobs`; + const base = env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL || ""; + const callbackPath = options.targetPath ?? `/api/queue/${queueName}`; + const callbackUrl = + callbackPath.startsWith("http://") || callbackPath.startsWith("https://") + ? callbackPath + : `${base}${callbackPath}`; + const body = { + queueName, + url: callbackUrl, + body: data, + options: { + notBefore: options.notBefore, + deduplicationId: options.deduplicationId, + parallelism: undefined, + }, + headers: options.headers, + }; + + const res = await fetch(url, { + method: "POST", + headers: { + "content-type": "application/json", + ...getAuthHeaders(), + }, + body: JSON.stringify(body), + }); + + if (!res.ok) { + const text = await res.text().catch(() => ""); + logger.error("Failed to enqueue via worker", { + status: res.status, + statusText: res.statusText, + body: text, + }); + throw new Error( + `Worker enqueue failed (${res.status}): ${res.statusText}`, + ); + } + + const json = (await res.json()) as { jobId: string }; + return json.jobId; + } + + async bulkEnqueue( + queueName: string, + options: BulkEnqueueOptions, + ): Promise[] | string[]> { + const url = `${getWorkerBaseUrl()}/v1/jobs/bulk`; + const base = env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL || ""; + const body = { + queueName, + items: options.jobs.map((j) => ({ + url: (() => { + const p = + j.opts?.targetPath ?? + options.targetPath ?? + `/api/queue/${queueName}`; + return p.startsWith("http://") || p.startsWith("https://") + ? p + : `${base}${p}`; + })(), + body: j.data, + options: { + notBefore: j.opts?.notBefore ?? options.notBefore, + deduplicationId: j.opts?.deduplicationId ?? options.deduplicationId, + parallelism: undefined, + }, + headers: j.opts?.headers ?? 
options.headers, + })), + }; + + const res = await fetch(url, { + method: "POST", + headers: { + "content-type": "application/json", + ...getAuthHeaders(), + }, + body: JSON.stringify(body), + }); + + if (!res.ok) { + const text = await res.text().catch(() => ""); + logger.error("Failed to bulk enqueue via worker", { + status: res.status, + statusText: res.statusText, + body: text, + }); + throw new Error( + `Worker bulk enqueue failed (${res.status}): ${res.statusText}`, + ); + } + + const json = (await res.json()) as { jobIds: string[] }; + return json.jobIds; + } + + createWorker( + _queueName: string, + _processor: (job: Job) => Promise, + _options?: { concurrency?: number; connection?: ConnectionOptions }, + ): Worker { + throw new Error( + "createWorker is not supported when using HTTP worker service", + ); + } + + createQueue( + _queueName: string, + _options?: { + connection?: ConnectionOptions; + defaultJobOptions?: Record; + }, + ): Queue { + throw new Error( + "createQueue is not supported when using HTTP worker service", + ); + } + + getQueueEvents(_queueName: string): QueueEvents { + throw new Error( + "getQueueEvents is not supported when using HTTP worker service", + ); + } + + async close(): Promise {} +} diff --git a/apps/web/utils/queue/providers/qstash-manager.test.ts b/apps/web/utils/queue/providers/qstash-manager.test.ts new file mode 100644 index 0000000000..dd383c2bd9 --- /dev/null +++ b/apps/web/utils/queue/providers/qstash-manager.test.ts @@ -0,0 +1,141 @@ +import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"; + +// Mock server-only to prevent import errors in tests +vi.mock("server-only", () => ({})); + +// QStash Mocks +const q = { + upsert: vi.fn(), + enqueueJSON: vi.fn(), +}; +const mockClient = { + publishJSON: vi.fn(), + batchJSON: vi.fn(), + queue: vi.fn().mockImplementation(() => q), +}; + +vi.mock("@upstash/qstash", () => ({ + Client: vi.fn().mockImplementation(() => mockClient), +})); + +// Helper to create env mock +const createEnvMock = (overrides: Record = {}) => ({ + env: { + QUEUE_SYSTEM: "upstash", + WEBHOOK_URL: "https://webhook.test.com", + NEXT_PUBLIC_BASE_URL: "https://fallback.test.com", + NODE_ENV: "test", + EMAIL_ENCRYPT_SECRET: "test-encryption-secret-key-for-testing-purposes", + EMAIL_ENCRYPT_SALT: "test-encryption-salt-for-testing", + QSTASH_TOKEN: "test-token", + ...overrides, + }, +}); + +describe("QStash Manager", () => { + let manager: any; + + beforeEach(async () => { + await vi.doMock("@/env", () => createEnvMock()); + vi.resetModules(); + const { QStashManager } = await import("./qstash-manager"); + manager = new QStashManager(); + }); + + afterEach(async () => { + if (manager) { + await manager.close(); + } + vi.clearAllMocks(); + }); + + describe("Job Enqueueing", () => { + it("should enqueue a single job (queue.enqueueJSON path)", async () => { + q.upsert.mockResolvedValueOnce(undefined); + q.enqueueJSON.mockResolvedValueOnce({ messageId: "qstash-message-123" }); + + const jobData = { message: "Test job", userId: "user-123" }; + const result = await manager.enqueue("test-queue", jobData); + + expect(mockClient.queue).toHaveBeenCalledWith({ + queueName: "test-queue", + }); + expect(q.upsert).toHaveBeenCalledWith({ parallelism: 3 }); + expect(q.enqueueJSON).toHaveBeenCalledWith({ + url: "https://webhook.test.com/api/queue/test-queue", + body: jobData, + deduplicationId: undefined, + headers: undefined, + }); + expect(result).toBe("qstash-message-123"); + }); + + it("should enqueue a job with notBefore 
(publishJSON path)", async () => { + mockClient.publishJSON.mockResolvedValueOnce({ + messageId: "qstash-delayed-123", + }); + const jobData = { message: "Delayed job", userId: "user-456" }; + const result = await manager.enqueue("test-queue", jobData, { + notBefore: Math.ceil((Date.now() + 5000) / 1000), + deduplicationId: "custom-job-id", + headers: { "x-test": "1" }, + }); + + expect(mockClient.publishJSON).toHaveBeenCalledWith({ + url: "https://webhook.test.com/api/queue/test-queue", + body: jobData, + notBefore: expect.any(Number), + deduplicationId: "custom-job-id", + headers: { "x-test": "1" }, + }); + expect(result).toBe("qstash-delayed-123"); + }); + + it("should handle enqueue errors", async () => { + mockClient.publishJSON.mockRejectedValueOnce(new Error("Enqueue failed")); + await expect( + manager.enqueue( + "test-queue", + { message: "Test" }, + { notBefore: Math.ceil((Date.now() + 1) / 1000) }, + ), + ).rejects.toThrow("Enqueue failed"); + }); + + it("should return 'unknown' when messageId is missing", async () => { + q.upsert.mockResolvedValueOnce(undefined); + q.enqueueJSON.mockResolvedValueOnce({ messageId: undefined }); + + const result = await manager.enqueue("test-queue", { message: "Test" }); + expect(result).toBe("unknown"); + }); + }); + + describe("Bulk Job Enqueueing", () => { + it("should enqueue multiple jobs with batchJSON", async () => { + mockClient.batchJSON.mockResolvedValueOnce([ + { messageId: "qstash-bulk-1" }, + { messageId: "qstash-bulk-2" }, + ]); + + const result = await manager.bulkEnqueue("test-queue", { + jobs: [ + { data: { message: "Bulk job 1" } }, + { data: { message: "Bulk job 2" } }, + ], + }); + + expect(mockClient.batchJSON).toHaveBeenCalledWith([ + { + url: "https://webhook.test.com/api/queue/test-queue", + body: { message: "Bulk job 1" }, + }, + { + url: "https://webhook.test.com/api/queue/test-queue", + body: { message: "Bulk job 2" }, + }, + ]); + expect(result).toEqual(["qstash-bulk-1", "qstash-bulk-2"]); + }); + }); +}); diff --git a/apps/web/utils/queue/qstash-manager.ts b/apps/web/utils/queue/providers/qstash-manager.ts similarity index 79% rename from apps/web/utils/queue/qstash-manager.ts rename to apps/web/utils/queue/providers/qstash-manager.ts index 874257287a..8d2ee0384a 100644 --- a/apps/web/utils/queue/qstash-manager.ts +++ b/apps/web/utils/queue/providers/qstash-manager.ts @@ -7,7 +7,7 @@ import type { EnqueueOptions, BulkEnqueueOptions, QueueManager, -} from "./types"; +} from "../types"; const logger = createScopedLogger("queue-qstash"); @@ -24,16 +24,18 @@ export class QStashManager implements QueueManager { data: T, options: EnqueueOptions = {}, ): Promise { - const url = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}/api/queue/${queueName}`; + const callbackPath = options.targetPath ?? 
`/api/queue/${queueName}`; + const url = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}${callbackPath}`; const client = getQstashClient(); - if (options.delay) { - const notBefore = Math.ceil((Date.now() + options.delay) / 1000); + if (options.notBefore) { + const notBefore = options.notBefore; const response = await client.publishJSON({ url, body: data, notBefore, - deduplicationId: options.jobId, + deduplicationId: options.deduplicationId, + headers: options.headers, }); return response?.messageId || "unknown"; } else { @@ -43,7 +45,8 @@ export class QStashManager implements QueueManager { const response = await queue.enqueueJSON({ url, body: data, - deduplicationId: options.jobId, + deduplicationId: options.deduplicationId, + headers: options.headers, }); return response?.messageId || "unknown"; } @@ -53,7 +56,8 @@ export class QStashManager implements QueueManager { queueName: string, options: BulkEnqueueOptions, ): Promise { - const url = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}/api/queue/${queueName}`; + const base = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}`; + const defaultPath = options.targetPath ?? `/api/queue/${queueName}`; // For ai-clean queue, use per-account queues to maintain parallelism limits per account // This ensures each account has its own queue with parallelism=3, preventing one account @@ -94,6 +98,8 @@ export class QStashManager implements QueueManager { // Enqueue all jobs for this account const accountResults = await Promise.all( accountJobs.map(async (job) => { + const targetPath = job.opts?.targetPath ?? defaultPath; + const url = `${base}${targetPath}`; if (options.delay) { // For delayed jobs, use publishJSON with notBefore const notBefore = Math.ceil((Date.now() + options.delay) / 1000); @@ -101,7 +107,8 @@ export class QStashManager implements QueueManager { url, body: job.data, notBefore, - deduplicationId: job.opts?.jobId, + deduplicationId: job.opts?.deduplicationId, + headers: job.opts?.headers ?? options.headers, }); return response?.messageId || "unknown"; } else { @@ -109,7 +116,8 @@ export class QStashManager implements QueueManager { const response = await queue.enqueueJSON({ url, body: job.data, - deduplicationId: job.opts?.jobId, + deduplicationId: job.opts?.deduplicationId, + headers: job.opts?.headers ?? options.headers, }); return response?.messageId || "unknown"; } @@ -122,22 +130,29 @@ export class QStashManager implements QueueManager { // For other queues, use the original batchJSON approach const items = options.jobs.map((job) => { + const targetPath = job.opts?.targetPath ?? defaultPath; + const url = `${base}${targetPath}`; const item: { url: string; body: QueueJobData; notBefore?: number; deduplicationId?: string; + headers?: Record; } = { url, body: job.data, }; - if (options.delay) { - item.notBefore = Math.ceil((Date.now() + options.delay) / 1000); + if (options.notBefore) { + item.notBefore = options.notBefore; } - if (job.opts?.jobId) { - item.deduplicationId = job.opts.jobId; + if (job.opts?.deduplicationId) { + item.deduplicationId = job.opts.deduplicationId; + } + + if (job.opts?.headers ?? options.headers) { + item.headers = job.opts?.headers ?? 
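+        // per-job headers take precedence over the batch-level headers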
options.headers; } return item; diff --git a/apps/web/utils/queue/queue-manager.test.ts b/apps/web/utils/queue/queue-manager.test.ts new file mode 100644 index 0000000000..48a16401bf --- /dev/null +++ b/apps/web/utils/queue/queue-manager.test.ts @@ -0,0 +1,58 @@ +import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"; + +// Mock server-only to prevent import errors in tests +vi.mock("server-only", () => ({})); + +const createEnvMock = (overrides: Record = {}) => ({ + env: { + QUEUE_SYSTEM: "upstash", + QSTASH_TOKEN: "test-token", + REDIS_URL: "redis://localhost:6379", + WEBHOOK_URL: "https://test.com", + NEXT_PUBLIC_BASE_URL: "https://test.com", + EMAIL_ENCRYPT_SECRET: "test-encryption-secret-key-for-testing-purposes", + EMAIL_ENCRYPT_SALT: "test-encryption-salt-for-testing", + NODE_ENV: "test", + ...overrides, + }, +}); + +describe("Queue Manager - System Detection", () => { + beforeEach(() => { + vi.resetModules(); + vi.clearAllMocks(); + }); + + afterEach(async () => { + const { closeQueueManager } = await import("./queue-manager"); + await closeQueueManager(); + }); + + it("should detect QStash system by default", async () => { + await vi.doMock("@/env", () => createEnvMock()); + vi.resetModules(); + const { getQueueSystemInfo } = await import("./queue-manager"); + const info = getQueueSystemInfo(); + expect(info.system).toBe("upstash"); + expect(info.supportsWorkers).toBe(false); + expect(info.supportsDelayedJobs).toBe(true); + expect(info.supportsBulkOperations).toBe(true); + }); + + it("should detect Redis system when configured", async () => { + await vi.doMock("@/env", () => + createEnvMock({ + QUEUE_SYSTEM: "redis", + WORKER_BASE_URL: "http://queue-worker:5070", + CRON_SECRET: "test-cron", + }), + ); + vi.resetModules(); + const { getQueueSystemInfo } = await import("./queue-manager"); + const info = getQueueSystemInfo(); + expect(info.system).toBe("redis"); + expect(info.supportsWorkers).toBe(true); + expect(info.supportsDelayedJobs).toBe(true); + expect(info.supportsBulkOperations).toBe(true); + }); +}); diff --git a/apps/web/utils/queue/queue-manager.ts b/apps/web/utils/queue/queue-manager.ts index 33478a62d8..fccc7dfd12 100644 --- a/apps/web/utils/queue/queue-manager.ts +++ b/apps/web/utils/queue/queue-manager.ts @@ -7,8 +7,8 @@ import type { } from "bullmq"; import { env } from "@/env"; import { createScopedLogger } from "@/utils/logger"; -import { BullMQManager } from "./bullmq-manager"; -import { QStashManager } from "./qstash-manager"; +import { QStashManager } from "./providers/qstash-manager"; +import { BullMQManager } from "./providers/bullmq-manager"; import type { QueueJobData, EnqueueOptions, @@ -62,46 +62,35 @@ export async function bulkEnqueueJobs( } export function createQueueWorker( - queueName: string, - processor: (job: Job) => Promise, - options?: { + _queueName: string, + _processor: (job: Job) => Promise, + _options?: { concurrency?: number; connection?: ConnectionOptions; }, ): Worker | null { - const manager = getQueueManager(); - - // Only BullMQ supports workers; QStash uses HTTP endpoints - if (env.QUEUE_SYSTEM !== "redis") { - logger.warn("Workers not supported for queue system", { - queueSystem: env.QUEUE_SYSTEM, - queueName, - }); - return null; - } - - return manager.createWorker(queueName, processor, options); + logger.warn("createQueueWorker is disabled; using external worker service", { + queueSystem: env.QUEUE_SYSTEM, + queueName: _queueName, + }); + return null; } export function createQueue( - queueName: string, - options?: 
{ + _queueName: string, + _options?: { connection?: ConnectionOptions; defaultJobOptions?: Record; }, ): Queue | null { - const manager = getQueueManager(); - - // Only BullMQ supports queue creation; QStash uses HTTP endpoints - if (env.QUEUE_SYSTEM !== "redis") { - logger.warn("Queue creation not supported for queue system", { + logger.warn( + "createQueue is disabled; queues are managed by the worker service", + { queueSystem: env.QUEUE_SYSTEM, - queueName, - }); - return null; - } - - return manager.createQueue(queueName, options); + queueName: _queueName, + }, + ); + return null; } export async function closeQueueManager(): Promise { diff --git a/apps/web/utils/queue/queue.test.ts b/apps/web/utils/queue/queue.test.ts deleted file mode 100644 index ae9d606bfe..0000000000 --- a/apps/web/utils/queue/queue.test.ts +++ /dev/null @@ -1,782 +0,0 @@ -import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"; - -// Mock server-only to prevent import errors in tests -vi.mock("server-only", () => ({})); - -// Mock BullMQ -const mockQueue = { - add: vi.fn(), - addBulk: vi.fn(), - close: vi.fn(), -}; - -const mockWorker = { - on: vi.fn(), - close: vi.fn(), - isRunning: vi.fn().mockReturnValue(true), - opts: { concurrency: 3 }, -}; - -const mockQueueEvents = { - close: vi.fn(), -}; - -vi.mock("bullmq", () => ({ - Queue: vi.fn().mockImplementation(() => mockQueue), - Worker: vi.fn().mockImplementation(() => mockWorker), - QueueEvents: vi.fn().mockImplementation(() => mockQueueEvents), -})); - -// Mock ioredis to prevent connection parsing errors -// ioredis can be imported as default or named export, so we mock both -const mockRedisInstance = { - connect: vi.fn(), - disconnect: vi.fn(), - on: vi.fn(), - quit: vi.fn(), - get: vi.fn(), - set: vi.fn(), - del: vi.fn(), - expire: vi.fn(), - keys: vi.fn(), - psubscribe: vi.fn(), - punsubscribe: vi.fn(), -}; - -vi.mock("ioredis", () => { - const MockRedis = vi.fn().mockImplementation(() => mockRedisInstance); - MockRedis.prototype = mockRedisInstance; - return { - default: MockRedis, - Redis: MockRedis, - }; -}); - -// Mock QStash Client -const mockClient = { - publishJSON: vi.fn(), - batchJSON: vi.fn(), -}; - -vi.mock("@upstash/qstash", () => ({ - Client: vi.fn().mockImplementation(() => mockClient), -})); - -// Mock publishToQstashQueue -const mockPublishToQstashQueue = vi.fn(); -vi.mock("@/utils/upstash", () => ({ - publishToQstashQueue: mockPublishToQstashQueue, -})); - -// Helper to create env mock with required encryption vars -const createEnvMock = (overrides: Record = {}) => ({ - env: { - QUEUE_SYSTEM: "upstash", - QSTASH_TOKEN: "test-token", - REDIS_URL: "redis://localhost:6379", - WEBHOOK_URL: "https://test.com", - NEXT_PUBLIC_BASE_URL: "https://test.com", - EMAIL_ENCRYPT_SECRET: "test-encryption-secret-key-for-testing-purposes", - EMAIL_ENCRYPT_SALT: "test-encryption-salt-for-testing", - NODE_ENV: "test", - ...overrides, - }, -}); - -// Mock environment - default to upstash -// Include all required env vars to prevent validation/initialization errors -vi.mock("@/env", () => createEnvMock()); - -describe("Queue System", () => { - beforeEach(() => { - // Reset module cache before each test to prevent partially initialized - // modules from hanging subsequent tests - vi.resetModules(); - vi.clearAllMocks(); - }); - - afterEach(async () => { - const { closeQueueManager } = await import("./queue-manager"); - await closeQueueManager(); - vi.resetModules(); - }); - - describe("Queue Manager", () => { - describe("System Detection", () => 
{ - it("should detect QStash system by default", async () => { - const { getQueueSystemInfo } = await import("./queue-manager"); - const info = getQueueSystemInfo(); - - expect(info.system).toBe("upstash"); - expect(info.supportsWorkers).toBe(false); - expect(info.supportsDelayedJobs).toBe(true); - expect(info.supportsBulkOperations).toBe(true); - }); - - it("should detect Redis system when configured", async () => { - await vi.doMock("@/env", () => - createEnvMock({ - QUEUE_SYSTEM: "redis", - }), - ); - vi.resetModules(); - - const { getQueueSystemInfo } = await import("./queue-manager"); - const info = getQueueSystemInfo(); - - expect(info.system).toBe("redis"); - expect(info.supportsWorkers).toBe(true); - expect(info.supportsDelayedJobs).toBe(true); - expect(info.supportsBulkOperations).toBe(true); - }); - }); - - describe("Job Enqueueing", () => { - it("should enqueue a single job with QStash", async () => { - // Ensure we're using QStash environment - await vi.doMock("@/env", () => - createEnvMock({ - QUEUE_SYSTEM: "upstash", - }), - ); - vi.resetModules(); - - const { enqueueJob } = await import("./queue-manager"); - const jobData = { message: "Test job", userId: "user-123" }; - - mockPublishToQstashQueue.mockResolvedValueOnce({ - messageId: "qstash-message-123", - }); - - const result = await enqueueJob("test-queue", jobData); - - expect(mockPublishToQstashQueue).toHaveBeenCalledWith({ - queueName: "test-queue", - parallelism: 3, - url: "https://test.com/api/queue/test-queue", - body: jobData, - }); - expect(result).toBe("qstash-message-123"); - }); - - it("should enqueue a job with options", async () => { - // Ensure we're using QStash environment - await vi.doMock("@/env", () => - createEnvMock({ - QUEUE_SYSTEM: "upstash", - }), - ); - vi.resetModules(); - - const { enqueueJob } = await import("./queue-manager"); - const jobData = { message: "Delayed job", userId: "user-456" }; - const options = { delay: 5000, priority: 1, jobId: "custom-job-id" }; - - mockClient.publishJSON.mockResolvedValueOnce({ - messageId: "qstash-delayed-123", - }); - - const result = await enqueueJob("test-queue", jobData, options); - - expect(mockClient.publishJSON).toHaveBeenCalledWith({ - url: "https://test.com/api/queue/test-queue", - body: jobData, - notBefore: expect.any(Number), - deduplicationId: "custom-job-id", - }); - expect(result).toBe("qstash-delayed-123"); - }); - - it("should handle job enqueueing errors", async () => { - // Ensure we're using QStash environment - await vi.doMock("@/env", () => - createEnvMock({ - QUEUE_SYSTEM: "upstash", - }), - ); - vi.resetModules(); - - const { enqueueJob } = await import("./queue-manager"); - const error = new Error("Enqueue failed"); - mockPublishToQstashQueue.mockRejectedValueOnce(error); - - await expect( - enqueueJob("test-queue", { message: "Test" }), - ).rejects.toThrow("Enqueue failed"); - }); - }); - - describe("Bulk Job Enqueueing", () => { - it("should enqueue multiple jobs", async () => { - // Ensure we're using QStash environment - await vi.doMock("@/env", () => - createEnvMock({ - QUEUE_SYSTEM: "upstash", - }), - ); - vi.resetModules(); - - const { bulkEnqueueJobs } = await import("./queue-manager"); - const jobs = [ - { data: { message: "Bulk job 1" } }, - { data: { message: "Bulk job 2" } }, - ]; - - mockClient.batchJSON.mockResolvedValueOnce([ - { messageId: "qstash-bulk-1" }, - { messageId: "qstash-bulk-2" }, - ]); - - const result = await bulkEnqueueJobs("test-queue", { jobs }); - - expect(mockClient.batchJSON).toHaveBeenCalledWith([ - 
{ - url: "https://test.com/api/queue/test-queue", - body: { message: "Bulk job 1" }, - }, - { - url: "https://test.com/api/queue/test-queue", - body: { message: "Bulk job 2" }, - }, - ]); - expect(result).toEqual(["qstash-bulk-1", "qstash-bulk-2"]); - }); - - it("should handle bulk enqueueing errors", async () => { - // Ensure we're using QStash environment - await vi.doMock("@/env", () => - createEnvMock({ - QUEUE_SYSTEM: "upstash", - }), - ); - vi.resetModules(); - - const { bulkEnqueueJobs } = await import("./queue-manager"); - const error = new Error("Bulk enqueue failed"); - mockClient.batchJSON.mockRejectedValueOnce(error); - - await expect( - bulkEnqueueJobs("test-queue", { - jobs: [{ data: { message: "Test" } }], - }), - ).rejects.toThrow("Bulk enqueue failed"); - }); - }); - - describe("Error Handling", () => { - it("should handle unsupported queue system", async () => { - await vi.doMock("@/env", () => - createEnvMock({ - QUEUE_SYSTEM: "unsupported" as any, - }), - ); - vi.resetModules(); - - const { createQueueManager } = await import("./queue-manager"); - expect(() => createQueueManager()).toThrow( - "Unsupported queue system: unsupported", - ); - }); - }); - }); - - describe("BullMQ Manager", () => { - let manager: any; - - beforeEach(async () => { - await vi.doMock("@/env", () => - createEnvMock({ - QUEUE_SYSTEM: "redis", - }), - ); - vi.resetModules(); - - const { BullMQManager } = await import("./bullmq-manager"); - manager = new BullMQManager(); - }); - - afterEach(async () => { - if (manager) { - await manager.close(); - } - }); - - describe("Job Enqueueing", () => { - it("should enqueue a single job", async () => { - const jobData = { message: "Test job", userId: "user-123" }; - mockQueue.add.mockResolvedValueOnce({ id: "job-123" }); - - const result = await manager.enqueue("test-queue", jobData); - - expect(mockQueue.add).toHaveBeenCalledWith("test-queue", jobData, { - delay: undefined, - attempts: 5, - priority: undefined, - removeOnComplete: 10, - removeOnFail: 5, - jobId: undefined, - }); - expect(result).toEqual({ id: "job-123" }); - }); - - it("should enqueue a job with options", async () => { - const jobData = { message: "Delayed job", userId: "user-456" }; - const options = { delay: 5000, priority: 1, jobId: "job-456" }; - mockQueue.add.mockResolvedValueOnce({ id: "job-456" }); - - const result = await manager.enqueue("test-queue", jobData, options); - - expect(mockQueue.add).toHaveBeenCalledWith("test-queue", jobData, { - delay: 5000, - attempts: 5, - priority: 1, - removeOnComplete: 10, - removeOnFail: 5, - jobId: "job-456", - }); - expect(result).toEqual({ id: "job-456" }); - }); - - it("should handle enqueue errors", async () => { - const error = new Error("Enqueue failed"); - mockQueue.add.mockRejectedValueOnce(error); - - await expect( - manager.enqueue("test-queue", { message: "Test" }), - ).rejects.toThrow("Enqueue failed"); - }); - }); - - describe("Bulk Job Enqueueing", () => { - it("should enqueue multiple jobs", async () => { - const jobs = [ - { data: { message: "Bulk job 1" } }, - { data: { message: "Bulk job 2" } }, - ]; - const mockJobs = [{ id: "bulk-job-1" }, { id: "bulk-job-2" }]; - mockQueue.addBulk.mockResolvedValueOnce(mockJobs); - - const result = await manager.bulkEnqueue("test-queue", { jobs }); - - expect(mockQueue.addBulk).toHaveBeenCalledWith([ - { - name: "test-queue", - data: { message: "Bulk job 1" }, - opts: { - delay: undefined, - attempts: 5, - priority: undefined, - removeOnComplete: 10, - removeOnFail: 5, - jobId: undefined, - 
}, - }, - { - name: "test-queue", - data: { message: "Bulk job 2" }, - opts: { - delay: undefined, - attempts: 5, - priority: undefined, - removeOnComplete: 10, - removeOnFail: 5, - jobId: undefined, - }, - }, - ]); - expect(result).toBe(mockJobs); - }); - }); - - describe("Worker Management", () => { - it("should create a worker", () => { - const processor = vi.fn(); - const worker = manager.createWorker("test-queue", processor); - - expect(worker).toBe(mockWorker); - }); - - it("should create a worker with concurrency", () => { - const processor = vi.fn(); - const worker = manager.createWorker("test-queue", processor, { - concurrency: 5, - }); - - expect(worker).toBe(mockWorker); - }); - }); - - describe("Queue Management", () => { - it("should create a queue", () => { - const queue = manager.createQueue("test-queue"); - expect(queue).toBe(mockQueue); - }); - - it("should get queue events", () => { - const events = manager.getQueueEvents("test-queue"); - expect(events).toBe(mockQueueEvents); - }); - }); - - describe("Cleanup", () => { - it("should close all workers and queues", async () => { - manager.createWorker("test-queue-1", vi.fn()); - manager.createWorker("test-queue-2", vi.fn()); - manager.createQueue("test-queue-1"); - manager.createQueue("test-queue-2"); - manager.getQueueEvents("test-queue-1"); - manager.getQueueEvents("test-queue-2"); - - await manager.close(); - - expect(mockWorker.close).toHaveBeenCalledTimes(2); - expect(mockQueue.close).toHaveBeenCalledTimes(2); - expect(mockQueueEvents.close).toHaveBeenCalledTimes(2); - }); - }); - }); - - describe("QStash Manager", () => { - let manager: any; - - beforeEach(async () => { - const { QStashManager } = await import("./qstash-manager"); - manager = new QStashManager(); - }); - - afterEach(async () => { - if (manager) { - await manager.close(); - } - }); - - describe("Job Enqueueing", () => { - it("should enqueue a single job", async () => { - const jobData = { message: "Test job", userId: "user-123" }; - mockPublishToQstashQueue.mockResolvedValueOnce({ - messageId: "qstash-message-123", - }); - - const result = await manager.enqueue("test-queue", jobData); - - expect(mockPublishToQstashQueue).toHaveBeenCalledWith({ - queueName: "test-queue", - parallelism: 3, - url: "https://test.com/api/queue/test-queue", - body: jobData, - }); - expect(result).toBe("qstash-message-123"); - }); - - it("should enqueue a job with delay", async () => { - const jobData = { message: "Delayed job", userId: "user-456" }; - const options = { delay: 5000, jobId: "delayed-job-123" }; - mockClient.publishJSON.mockResolvedValueOnce({ - messageId: "qstash-delayed-123", - }); - - const result = await manager.enqueue("test-queue", jobData, options); - - expect(mockClient.publishJSON).toHaveBeenCalledWith({ - url: "https://test.com/api/queue/test-queue", - body: jobData, - notBefore: expect.any(Number), - deduplicationId: "delayed-job-123", - }); - expect(result).toBe("qstash-delayed-123"); - }); - - it("should handle enqueue errors", async () => { - const error = new Error("Enqueue failed"); - mockPublishToQstashQueue.mockRejectedValueOnce(error); - - await expect( - manager.enqueue("test-queue", { message: "Test" }), - ).rejects.toThrow("Enqueue failed"); - }); - - it("should return 'unknown' when messageId is missing", async () => { - mockPublishToQstashQueue.mockResolvedValueOnce({ - messageId: undefined, - }); // No messageId - - const result = await manager.enqueue("test-queue", { message: "Test" }); - expect(result).toBe("unknown"); - }); - }); - - 
describe("Bulk Job Enqueueing", () => { - it("should enqueue multiple jobs", async () => { - const jobs = [ - { data: { message: "Bulk job 1" } }, - { data: { message: "Bulk job 2" } }, - ]; - mockClient.batchJSON.mockResolvedValueOnce([ - { messageId: "qstash-bulk-1" }, - { messageId: "qstash-bulk-2" }, - ]); - - const result = await manager.bulkEnqueue("test-queue", { jobs }); - - expect(mockClient.batchJSON).toHaveBeenCalledWith([ - { - url: "https://test.com/api/queue/test-queue", - body: { message: "Bulk job 1" }, - }, - { - url: "https://test.com/api/queue/test-queue", - body: { message: "Bulk job 2" }, - }, - ]); - expect(result).toEqual(["qstash-bulk-1", "qstash-bulk-2"]); - }); - - it("should handle bulk enqueue errors", async () => { - const error = new Error("Bulk enqueue failed"); - mockClient.batchJSON.mockRejectedValueOnce(error); - - await expect( - manager.bulkEnqueue("test-queue", { - jobs: [{ data: { message: "Test" } }], - }), - ).rejects.toThrow("Bulk enqueue failed"); - }); - }); - - describe("Unsupported Operations", () => { - it("should throw error for createWorker", () => { - expect(() => manager.createWorker("test-queue", vi.fn())).toThrow( - "QStash workers are handled via HTTP endpoints, not BullMQ workers", - ); - }); - - it("should throw error for createQueue", () => { - expect(() => manager.createQueue("test-queue")).toThrow( - "QStash queues are managed by QStash, not BullMQ", - ); - }); - - it("should throw error for getQueueEvents", () => { - expect(() => manager.getQueueEvents("test-queue")).toThrow( - "QStash queue events are not available through BullMQ", - ); - }); - }); - - describe("URL Construction", () => { - it("should use WEBHOOK_URL when available", async () => { - await vi.doMock("@/env", () => - createEnvMock({ - WEBHOOK_URL: "https://webhook.test.com", - NEXT_PUBLIC_BASE_URL: "https://fallback.test.com", - }), - ); - vi.resetModules(); - - const { QStashManager: MockedQStashManager } = await import( - "./qstash-manager" - ); - const mockedManager = new MockedQStashManager(); - - mockPublishToQstashQueue.mockResolvedValue({ messageId: "test-123" }); - - await mockedManager.enqueue("test-queue", { message: "Test" }); - - expect(mockPublishToQstashQueue).toHaveBeenCalledWith( - expect.objectContaining({ - url: "https://webhook.test.com/api/queue/test-queue", - }), - ); - }); - - it("should fallback to NEXT_PUBLIC_BASE_URL when WEBHOOK_URL is not available", async () => { - await vi.doMock("@/env", () => - createEnvMock({ - WEBHOOK_URL: undefined, - NEXT_PUBLIC_BASE_URL: "https://fallback.test.com", - }), - ); - vi.resetModules(); - - const { QStashManager: MockedQStashManager } = await import( - "./qstash-manager" - ); - const mockedManager = new MockedQStashManager(); - - mockPublishToQstashQueue.mockResolvedValue({ messageId: "test-123" }); - - await mockedManager.enqueue("test-queue", { message: "Test" }); - - expect(mockPublishToQstashQueue).toHaveBeenCalledWith( - expect.objectContaining({ - url: "https://fallback.test.com/api/queue/test-queue", - }), - ); - }); - }); - }); - - describe("Worker Management", () => { - const mockCreateQueueWorker = vi.fn(); - const mockCloseQueueManager = vi.fn(); - - beforeEach(() => { - vi.doMock("./queue-manager", () => ({ - createQueueWorker: mockCreateQueueWorker, - closeQueueManager: mockCloseQueueManager, - })); - }); - - describe("Worker Registration", () => { - it("should register a worker", async () => { - const { registerWorker } = await import("./worker"); - const processor = vi.fn(); - - 
mockCreateQueueWorker.mockReturnValueOnce(mockWorker); - - const worker = registerWorker("test-queue", processor); - - expect(mockCreateQueueWorker).toHaveBeenCalledWith( - "test-queue", - processor, - { - concurrency: 3, - }, - ); - expect(worker).toBe(mockWorker); - }); - - it("should register a worker with configuration", async () => { - const { registerWorker } = await import("./worker"); - const processor = vi.fn(); - - mockCreateQueueWorker.mockReturnValueOnce(mockWorker); - - registerWorker("test-queue", processor, { concurrency: 5 }); - - expect(mockCreateQueueWorker).toHaveBeenCalledWith( - "test-queue", - processor, - { - concurrency: 5, - }, - ); - }); - - it("should return existing worker if already registered", async () => { - const { registerWorker } = await import("./worker"); - const processor = vi.fn(); - - mockCreateQueueWorker.mockReturnValueOnce(mockWorker); - registerWorker("test-queue", processor); - const worker = registerWorker("test-queue", vi.fn()); // Try to register again - - expect(mockCreateQueueWorker).toHaveBeenCalledTimes(1); // Should only be called once - expect(worker).toBe(mockWorker); - }); - - it("should handle worker creation failure", async () => { - const { registerWorker } = await import("./worker"); - const processor = vi.fn(); - - mockCreateQueueWorker.mockReturnValueOnce(null); - - const worker = registerWorker("test-queue", processor); - - expect(worker).toBeNull(); - }); - }); - - describe("Worker Events", () => { - it("should set up worker event listeners", async () => { - const { registerWorker } = await import("./worker"); - const processor = vi.fn(); - - mockCreateQueueWorker.mockReturnValueOnce(mockWorker); - registerWorker("test-queue", processor); - - expect(mockWorker.on).toHaveBeenCalledWith( - "completed", - expect.any(Function), - ); - expect(mockWorker.on).toHaveBeenCalledWith( - "failed", - expect.any(Function), - ); - expect(mockWorker.on).toHaveBeenCalledWith( - "stalled", - expect.any(Function), - ); - expect(mockWorker.on).toHaveBeenCalledWith( - "error", - expect.any(Function), - ); - }); - }); - - describe("Worker Management", () => { - it("should unregister a worker", async () => { - const { registerWorker, unregisterWorker } = await import("./worker"); - const processor = vi.fn(); - - mockCreateQueueWorker.mockReturnValueOnce(mockWorker); - registerWorker("test-queue", processor); - - await unregisterWorker("test-queue"); - - expect(mockWorker.close).toHaveBeenCalledTimes(1); - }); - - it("should handle unregistering non-existent worker", async () => { - const { unregisterWorker } = await import("./worker"); - - await unregisterWorker("non-existent-queue"); - expect(mockWorker.close).not.toHaveBeenCalled(); - }); - - it("should get a specific worker", async () => { - const { registerWorker, getWorker } = await import("./worker"); - const processor = vi.fn(); - - mockCreateQueueWorker.mockReturnValueOnce(mockWorker); - registerWorker("test-queue", processor); - - const worker = getWorker("test-queue"); - expect(worker).toBe(mockWorker); - }); - - it("should return undefined for non-existent worker", async () => { - const { getWorker } = await import("./worker"); - - const worker = getWorker("non-existent-queue"); - expect(worker).toBeUndefined(); - }); - }); - - describe("Shutdown", () => { - it("should shutdown all workers", async () => { - const { registerWorker, shutdownAllWorkers } = await import("./worker"); - - mockCreateQueueWorker.mockReturnValue(mockWorker); - registerWorker("test-queue-1", vi.fn()); - 
registerWorker("test-queue-2", vi.fn()); - - await shutdownAllWorkers(); - - expect(mockWorker.close).toHaveBeenCalledTimes(2); - }); - - it("should handle shutdown errors gracefully", async () => { - const { registerWorker, shutdownAllWorkers } = await import("./worker"); - - mockCreateQueueWorker.mockReturnValueOnce(mockWorker); - registerWorker("test-queue", vi.fn()); - mockWorker.close.mockRejectedValueOnce(new Error("Close failed")); - - await shutdownAllWorkers(); - - expect(mockWorker.close).toHaveBeenCalledTimes(1); - }); - }); - }); -}); diff --git a/apps/web/utils/queue/queues.ts b/apps/web/utils/queue/queues.ts index ee8102805d..e70f5f5f1b 100644 --- a/apps/web/utils/queue/queues.ts +++ b/apps/web/utils/queue/queues.ts @@ -536,22 +536,6 @@ export const QUEUE_HANDLERS = { "email-digest-all": handleEmailDigestAllJob, "email-summary-all": handleEmailSummaryAllJob, "clean-gmail": handleCleanGmailJob, - - "ai-categorize-senders-0": handleCategorizeSendersJob, - "ai-categorize-senders-1": handleCategorizeSendersJob, - "ai-categorize-senders-2": handleCategorizeSendersJob, - "ai-categorize-senders-3": handleCategorizeSendersJob, - "ai-categorize-senders-4": handleCategorizeSendersJob, - "ai-categorize-senders-5": handleCategorizeSendersJob, - "ai-categorize-senders-6": handleCategorizeSendersJob, - - "ai-clean-0": handleAiCleanJob, - "ai-clean-1": handleAiCleanJob, - "ai-clean-2": handleAiCleanJob, - "ai-clean-3": handleAiCleanJob, - "ai-clean-4": handleAiCleanJob, - "ai-clean-5": handleAiCleanJob, - "ai-clean-6": handleAiCleanJob, } as const; export type QueueName = keyof typeof QUEUE_HANDLERS; @@ -592,38 +576,12 @@ export function isValidQueueName(queueName: string): boolean { return true; } - if (queueName.startsWith(`${AI_CATEGORIZE_SENDERS_PREFIX}-`)) { - if (env.QUEUE_SYSTEM === "redis") { - // For BullMQ: validate queue index (0-6) - const queueIndex = getAiCategorizeSendersQueueIndex(queueName); - return ( - queueIndex !== null && - queueIndex >= 0 && - queueIndex < AI_CATEGORIZE_SENDERS_QUEUE_COUNT - ); - } else { - // For QStash: accept any per-account queue (ai-categorize-senders-{emailAccountId}) - return true; - } - } + // Accept any ai-categorize-senders-* queue (dynamic naming) + if (queueName.startsWith(`${AI_CATEGORIZE_SENDERS_PREFIX}-`)) return true; // Allow ai-clean queues - // For BullMQ: hash-based distribution (ai-clean-0, ai-clean-1, etc.) 
- // For QStash: per-account queues (ai-clean-{emailAccountId}) - if (queueName.startsWith(`${AI_CLEAN_PREFIX}-`)) { - if (env.QUEUE_SYSTEM === "redis") { - // For BullMQ: validate queue index (0-6) - const queueIndex = getAiCleanQueueIndex(queueName); - return ( - queueIndex !== null && - queueIndex >= 0 && - queueIndex < AI_CLEAN_QUEUE_COUNT - ); - } else { - // For QStash: accept any per-account queue - return true; - } - } + // Accept any ai-clean-* queue (dynamic naming) + if (queueName.startsWith(`${AI_CLEAN_PREFIX}-`)) return true; return false; } diff --git a/apps/web/utils/queue/types.ts b/apps/web/utils/queue/types.ts index 314363856b..773845ab83 100644 --- a/apps/web/utils/queue/types.ts +++ b/apps/web/utils/queue/types.ts @@ -22,8 +22,6 @@ export interface QueueSystemConfig { export interface QueueConfig { name: string; parallelism?: number; - delay?: number; - attempts?: number; backoff?: { type: "fixed" | "exponential"; delay: number; @@ -31,12 +29,14 @@ export interface QueueConfig { } export interface EnqueueOptions { - delay?: number; - attempts?: number; - priority?: number; - removeOnComplete?: number; - removeOnFail?: number; - jobId?: string; + // seconds since epoch (QStash style). If provided, takes precedence over delay at worker. + notBefore?: number; + // QStash style name + deduplicationId?: string; + // Optional explicit callback path (e.g., "/api/clean/gmail"); defaults to "/api/queue/{queueName}" + targetPath?: string; + // Optional extra headers to include when the worker calls back + headers?: Record; } export interface BulkEnqueueOptions extends EnqueueOptions { diff --git a/apps/web/utils/scheduled-actions/scheduler.test.ts b/apps/web/utils/scheduled-actions/scheduler.test.ts index a5a625fa1f..ea7633addb 100644 --- a/apps/web/utils/scheduled-actions/scheduler.test.ts +++ b/apps/web/utils/scheduled-actions/scheduler.test.ts @@ -6,6 +6,14 @@ import prisma from "@/utils/__mocks__/prisma"; vi.mock("server-only", () => ({})); vi.mock("@/utils/prisma"); +// Mock QStash Client to avoid real HTTP in tests +vi.mock("@upstash/qstash", () => ({ + Client: vi.fn().mockImplementation(() => ({ + http: { + request: vi.fn().mockResolvedValue({}), + }, + })), +})); vi.mock("@/utils/upstash", () => ({ qstash: { messages: { diff --git a/apps/web/utils/worker-signature.ts b/apps/web/utils/worker-signature.ts new file mode 100644 index 0000000000..9cd7749e66 --- /dev/null +++ b/apps/web/utils/worker-signature.ts @@ -0,0 +1,73 @@ +import { createHmac, timingSafeEqual } from "node:crypto"; +import { env } from "@/env"; +import { createScopedLogger } from "@/utils/logger"; +import type { NextRequest } from "next/server"; + +const logger = createScopedLogger("worker-verify"); + +function buildExpectedSignature( + timestamp: string, + body: string, + secret: string, +): string { + const payload = `${timestamp}.${body}`; + return createHmac("sha256", secret).update(payload).digest("hex"); +} + +function isFresh(timestampHeader: string, toleranceSeconds = 300): boolean { + const ts = Date.parse(timestampHeader); + if (Number.isNaN(ts)) return false; + const now = Date.now(); + return Math.abs(now - ts) <= toleranceSeconds * 1000; +} + +export function verifyWorkerSignatureAppRouter( + handler: (req: Request) => Promise | Response, +) { + return async (req: NextRequest): Promise => { + const signature = req.headers.get("x-worker-signature") || ""; + const timestamp = req.headers.get("x-worker-timestamp") || ""; + const secret = env.WORKER_SIGNING_SECRET; + + if (!secret) { + 
logger.warn("Missing WORKER_SIGNING_SECRET; rejecting signed request"); + return new Response( + JSON.stringify({ error: "Signature verification not configured" }), + { + status: 500, + headers: { "content-type": "application/json" }, + }, + ); + } + + // Basic freshness check (default 5 minutes) + if (!isFresh(timestamp)) { + logger.warn("Stale or invalid timestamp on worker request", { + timestamp, + }); + return new Response(JSON.stringify({ error: "Invalid timestamp" }), { + status: 401, + headers: { "content-type": "application/json" }, + }); + } + + // Use clone to avoid consuming the original body + const rawBody = await req.clone().text(); + const expected = buildExpectedSignature(timestamp, rawBody, secret); + + // Constant-time compare when lengths match + const ok = + signature.length === expected.length && + timingSafeEqual(Buffer.from(signature), Buffer.from(expected)); + + if (!ok) { + logger.warn("Worker signature mismatch"); + return new Response(JSON.stringify({ error: "Invalid signature" }), { + status: 401, + headers: { "content-type": "application/json" }, + }); + } + + return handler(req); + }; +} diff --git a/apps/web/worker.js b/apps/web/worker.js deleted file mode 100644 index f124517918..0000000000 --- a/apps/web/worker.js +++ /dev/null @@ -1,105 +0,0 @@ -/** - * DISCLAIMER: This is a precompiled file that gets copied to the standalone build. - * This file is used to start BullMQ workers in production/standalone mode. - * It loads environment variables from .env and calls startBullMQWorkers() from instrumentation.js - */ - -const path = require('path') -const fs = require('fs') - -// Set up environment similar to server.js -// Only set NODE_ENV if not already provided (preserve existing value) -process.env.NODE_ENV = process.env.NODE_ENV || 'production' -process.chdir(__dirname) - -// Load environment variables (same as Next.js server.js does) -// loadEnvConfig automatically handles .env, .env.local, .env.production, etc. 
-// based on NODE_ENV, so we call it unconditionally -try { - const { loadEnvConfig } = require('next/dist/server/config-utils') - loadEnvConfig(__dirname) -} catch (err) { - // If loadEnvConfig is not available (shouldn't happen in standalone build), - // fallback to dotenv for plain .env file - try { - const envPath = path.join(__dirname, '.env') - if (fs.existsSync(envPath)) { - require('dotenv').config({ path: envPath }) - } - } catch (dotenvErr) { - console.warn('Could not load environment variables:', dotenvErr.message) - } -} - -// In Docker standalone build, worker.js is at /app/worker.js -// and instrumentation.js is at /app/apps/web/.next/server/instrumentation.js -const instrumentationPath = path.join(__dirname, 'apps/web/.next/server/instrumentation.js') - -if (!fs.existsSync(instrumentationPath)) { - console.error('Could not find instrumentation.js at:', instrumentationPath) - console.error('Current __dirname:', __dirname) - process.exit(1) -} - -// Start workers and keep process alive -async function startWorkers() { - try { - // Try ES module import first (Next.js compiles TS to ESM) - const instrumentation = await import(instrumentationPath) - - if (instrumentation.startBullMQWorkers) { - instrumentation.startBullMQWorkers() - console.log('BullMQ workers started successfully') - } else { - throw new Error('startBullMQWorkers not found in instrumentation module') - } - } catch (importErr) { - // Fallback: try CommonJS require - try { - // Clear require cache if needed - delete require.cache[require.resolve(instrumentationPath)] - const instrumentation = require(instrumentationPath) - - if (instrumentation.startBullMQWorkers) { - instrumentation.startBullMQWorkers() - console.log('BullMQ workers started successfully') - } else { - throw new Error('startBullMQWorkers not found in instrumentation module') - } - } catch (requireErr) { - console.error('Failed to load instrumentation module') - console.error('Import error:', importErr.message) - console.error('Require error:', requireErr.message) - console.error('Attempted path:', instrumentationPath) - process.exit(1) - } - } -} - -startWorkers().catch((err) => { - console.error('Failed to start workers:', err) - process.exit(1) -}) - -// Handle graceful shutdown -process.on('SIGTERM', () => { - console.log('Received SIGTERM, shutting down gracefully...') - process.exit(0) -}) - -process.on('SIGINT', () => { - console.log('Received SIGINT, shutting down gracefully...') - process.exit(0) -}) - -// Keep the process running - don't exit on errors -process.on('uncaughtException', (err) => { - console.error('Uncaught exception:', err) - // Log but don't exit to keep workers running -}) - -process.on('unhandledRejection', (reason, promise) => { - console.error('Unhandled rejection at:', promise, 'reason:', reason) - // Log but don't exit to keep workers running -}) - diff --git a/docker-compose.yml b/docker-compose.yml index 04e2463cf1..23ad470216 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -23,6 +23,7 @@ services: - database-data:/data networks: - inbox-zero-network + restart: unless-stopped serverless-redis-http: ports: @@ -36,7 +37,7 @@ services: SRH_CONNECTION_STRING: "redis://redis:6379" # Using `redis` hostname since they're in the same Docker network. 
networks: - inbox-zero-network - + restart: unless-stopped web: image: ghcr.io/elie222/inbox-zero:latest pull_policy: if_not_present @@ -52,6 +53,7 @@ services: depends_on: - db - redis + - queue-worker ports: - ${WEB_PORT:-3000}:3000 networks: @@ -63,31 +65,23 @@ services: REDIS_URL: "redis://redis:6379" UPSTASH_REDIS_URL: "http://serverless-redis-http:80" UPSTASH_REDIS_TOKEN: ${UPSTASH_REDIS_TOKEN} + WORKER_BASE_URL: "http://queue-worker:5070" + CRON_SECRET: ${CRON_SECRET} + restart: unless-stopped worker: - image: ghcr.io/elie222/inbox-zero:latest - pull_policy: if_not_present - # Use the same build context as web service build: context: . - dockerfile: ./docker/Dockerfile.prod - args: - NEXT_PUBLIC_BASE_URL: ${NEXT_PUBLIC_BASE_URL:-http://localhost:3000} - env_file: - - ./apps/web/.env + dockerfile: ./docker/Dockerfile.worker depends_on: - - db - redis networks: - inbox-zero-network environment: - DATABASE_URL: "postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-password}@db:5432/${POSTGRES_DB:-inboxzero}?schema=public" - DIRECT_URL: "postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-password}@db:5432/${POSTGRES_DB:-inboxzero}?schema=public" - QUEUE_SYSTEM: "redis" + PORT: 5070 REDIS_URL: "redis://redis:6379" - UPSTASH_REDIS_URL: "http://serverless-redis-http:80" - UPSTASH_REDIS_TOKEN: ${UPSTASH_REDIS_TOKEN} - # Override the default command to run the worker instead of the web server - command: ["node", "worker.js"] + WEB_BASE_URL: "http://web:3000" + CRON_SECRET: ${CRON_SECRET} + WORKER_SIGNING_SECRET: ${WORKER_SIGNING_SECRET} restart: unless-stopped volumes: diff --git a/docker/Dockerfile.prod b/docker/Dockerfile.prod index 81d1cf3fa2..237ea8bdab 100644 --- a/docker/Dockerfile.prod +++ b/docker/Dockerfile.prod @@ -25,7 +25,7 @@ RUN pnpm install --no-frozen-lockfile --prefer-offline --shamefully-hoist # Copy the full repo COPY . . - # Build app (runs Next build and copies worker.js into .next/standalone) + # Build app (Next.js standalone) ENV NODE_ENV=production # Increase V8 heap for Next build to avoid OOM in builder ENV NODE_OPTIONS=--max_old_space_size=16384 @@ -52,14 +52,10 @@ ENV QSTASH_CURRENT_SIGNING_KEY="dummy_qstash_curr_key_for_build" ENV QSTASH_NEXT_SIGNING_KEY="dummy_qstash_next_key_for_build" ENV DOCKER_BUILD="true" -# Use the package script so worker.js is copied into .next/standalone at root (same level as server.js) -# Run from apps/web directory to ensure proper module resolution RUN cd apps/web \ && pnpm exec prisma generate \ && pnpm exec next build \ && cd ../.. 
\ - && cp apps/web/worker.js apps/web/.next/standalone/worker.js \ - && chmod +x apps/web/.next/standalone/worker.js \ && rm -rf apps/web/.next/cache FROM node:22-alpine AS runner diff --git a/docker/Dockerfile.worker b/docker/Dockerfile.worker new file mode 100644 index 0000000000..75473dcac6 --- /dev/null +++ b/docker/Dockerfile.worker @@ -0,0 +1,44 @@ +FROM node:22-alpine AS builder + +WORKDIR /app + +RUN apk add --no-cache openssl +RUN npm install -g pnpm@10.15.0 + +# Copy lockfiles/workspace manifests for better caching +COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc* ./ +COPY apps/web/package.json apps/web/package.json +COPY apps/unsubscriber/package.json apps/unsubscriber/package.json +COPY apps/queue-worker/package.json apps/queue-worker/package.json +COPY packages/loops/package.json packages/loops/package.json +COPY packages/resend/package.json packages/resend/package.json +COPY packages/tinybird/package.json packages/tinybird/package.json +COPY packages/tinybird-ai-analytics/package.json packages/tinybird-ai-analytics/package.json +COPY packages/tsconfig/package.json packages/tsconfig/package.json +COPY patches/ patches/ + +# Install dependencies +RUN pnpm install --no-frozen-lockfile --prefer-offline + +# Copy the full repo +COPY . . + +# Build the worker app +RUN pnpm --filter @inbox-zero/queue-worker build + +FROM node:22-alpine AS runner + +WORKDIR /app + +# Copy built worker app and node_modules +COPY --from=builder /app/apps/queue-worker/dist ./apps/queue-worker/dist +COPY --from=builder /app/apps/queue-worker/package.json ./apps/queue-worker/package.json +COPY --from=builder /app/node_modules ./node_modules +COPY --from=builder /app/package.json ./package.json + +ENV NODE_ENV=production +EXPOSE 5070 + +CMD ["node", "apps/queue-worker/dist/server.js"] + + diff --git a/docs/hosting/docker.md b/docs/hosting/docker.md index 627a1c5ab4..2a17b296fa 100644 --- a/docs/hosting/docker.md +++ b/docs/hosting/docker.md @@ -52,6 +52,75 @@ Start the services: docker compose --env-file ./apps/web/.env up -d ``` +### Queue Worker (Redis) setup + +Inbox Zero supports two queue backends: +- QStash (Upstash) - managed HTTP queues +- Redis (BullMQ) - via a dedicated Queue Worker service + +For self-hosting with Redis, enable and configure the Queue Worker service: + +- Web app (`apps/web`) runtime env (in `apps/web/.env`): + - `QUEUE_SYSTEM=redis` + - `WORKER_BASE_URL=http://queue-worker:5070` (internal URL the web app uses to enqueue) + - `CRON_SECRET=...` (shared secret used by web → worker and worker → web) + - `WORKER_SIGNING_SECRET=...` (optional; enables HMAC verification on callbacks) + - Keep your existing DB/OAuth/LLM envs as usual + +- Queue Worker service (`apps/queue-worker`) runtime env: + - `PORT=5070` + - `REDIS_URL=redis://redis:6379` + - `WEB_BASE_URL=http://web:3000` (internal URL the worker uses to callback the web) + - `CRON_SECRET=...` (must match web’s) + - `WORKER_SIGNING_SECRET=...` (optional; must match web’s if used) + +Example Docker Compose services (excerpt): + +```yaml +services: + web: + # ... 
+ environment: + DATABASE_URL: "postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-password}@db:5432/${POSTGRES_DB:-inboxzero}?schema=public" + DIRECT_URL: "postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-password}@db:5432/${POSTGRES_DB:-inboxzero}?schema=public" + QUEUE_SYSTEM: "redis" + REDIS_URL: "redis://redis:6379" + WORKER_BASE_URL: "http://queue-worker:5070" + CRON_SECRET: ${CRON_SECRET} + WORKER_SIGNING_SECRET: ${WORKER_SIGNING_SECRET} # optional + depends_on: + - db + - redis + - queue-worker + + queue-worker: + build: + context: . + dockerfile: ./docker/Dockerfile.worker + environment: + PORT: 5070 + REDIS_URL: "redis://redis:6379" + WEB_BASE_URL: "http://web:3000" + CRON_SECRET: ${CRON_SECRET} + WORKER_SIGNING_SECRET: ${WORKER_SIGNING_SECRET} # optional + depends_on: + - redis +``` + +Auth and callbacks: +- Web → Worker: `Authorization: Bearer ${CRON_SECRET}` (enqueuing) +- Worker → Web (callbacks): + - Calls the URL you provide in the enqueue request (QStash-style `url`); `url` is required + - `Authorization: Bearer ${CRON_SECRET}` + - Optional HMAC headers if `WORKER_SIGNING_SECRET` is set: `x-worker-signature`, `x-worker-timestamp` + +Health checks: +- Worker: `GET http://queue-worker:5070/health` should return 200 when ready + +Using QStash instead: +- Set `QUEUE_SYSTEM=upstash` and configure `QSTASH_TOKEN` +- No worker service is required + ### 5. Run Database Migrations In another terminal, run the database migrations : diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index eaafc6cf12..64dc57eef5 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -45,6 +45,37 @@ importers: specifier: 5.3.3 version: 5.3.3(@inquirer/prompts@7.9.0(@types/node@24.9.1))(@types/debug@4.1.12)(@types/node@24.9.1)(@vitest/ui@3.2.4)(jiti@2.6.1)(jsdom@27.0.1(postcss@8.5.6))(terser@5.44.0)(tsx@4.20.6)(typescript@5.9.3)(yaml@2.8.1) + apps/queue-worker: + dependencies: + '@fastify/cors': + specifier: 11.1.0 + version: 11.1.0 + '@t3-oss/env-core': + specifier: 0.13.8 + version: 0.13.8(typescript@5.9.3)(valibot@1.1.0(typescript@5.9.3))(zod@3.25.46) + bullmq: + specifier: 5.62.0 + version: 5.62.0 + dotenv: + specifier: 17.2.3 + version: 17.2.3 + fastify: + specifier: 5.6.1 + version: 5.6.1 + zod: + specifier: 3.25.46 + version: 3.25.46 + devDependencies: + '@types/node': + specifier: 24.9.1 + version: 24.9.1 + tsx: + specifier: 4.20.6 + version: 4.20.6 + typescript: + specifier: 5.9.3 + version: 5.9.3 + apps/unsubscriber: dependencies: '@ai-sdk/amazon-bedrock': @@ -136,7 +167,7 @@ importers: specifier: 0.9.0 version: 0.9.0 '@googleapis/calendar': - specifier: ^12.0.0 + specifier: 12.0.0 version: 12.0.0 '@googleapis/gmail': specifier: 15.0.0 @@ -175,7 +206,7 @@ importers: specifier: 3.0.7 version: 3.0.7 '@modelcontextprotocol/sdk': - specifier: ^1.20.1 + specifier: 1.20.1 version: 1.20.1 '@mux/mux-player-react': specifier: 3.6.1 @@ -331,8 +362,8 @@ importers: specifier: 0.4.6 version: 0.4.6(@aws-sdk/credential-provider-web-identity@3.911.0)(zod@3.25.46) bullmq: - specifier: ^5.61.0 - version: 5.62.0 + specifier: 5.61.0 + version: 5.61.0 capital-case: specifier: 2.0.0 version: 2.0.0 @@ -355,8 +386,8 @@ importers: specifier: 4.1.0 version: 4.1.0 diff: - specifier: ^8.0.0 - version: 8.0.2 + specifier: 8.0.0 + version: 8.0.0 dompurify: specifier: 3.3.0 version: 3.3.0 @@ -364,7 +395,7 @@ importers: specifier: 0.67.0 version: 0.67.0 easymde: - specifier: ^2.20.0 + specifier: 2.20.0 version: 2.20.0 email-reply-parser: specifier: 1.9.4 @@ -557,7 +588,7 @@ importers: specifier: 0.2.2 version: 
0.2.2(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@24.9.1)(typescript@5.9.3))) '@microsoft/microsoft-graph-types': - specifier: ^2.43.1 + specifier: 2.43.1 version: 2.43.1 '@testing-library/react': specifier: 16.3.0 @@ -625,18 +656,12 @@ importers: serwist: specifier: 9.2.1 version: 9.2.1(typescript@5.9.3) - shx: - specifier: ^0.3.4 - version: 0.3.4 tailwindcss: specifier: 3.4.17 version: 3.4.17(ts-node@10.9.2(@types/node@24.9.1)(typescript@5.9.3)) tsconfig: specifier: workspace:* version: link:../../packages/tsconfig - tsx: - specifier: ^4.20.0 - version: 4.20.6 vite-tsconfig-paths: specifier: 5.1.4 version: 5.1.4(typescript@5.9.3)(vite@7.1.11(@types/node@24.9.1)(jiti@2.6.1)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1)) @@ -6208,6 +6233,9 @@ packages: buffer@6.0.3: resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + bullmq@5.61.0: + resolution: {integrity: sha512-khaTjc1JnzaYFl4FrUtsSsqugAW/urRrcZ9Q0ZE+REAw8W+gkHFqxbGlutOu6q7j7n91wibVaaNlOUMdiEvoSQ==} + bullmq@5.62.0: resolution: {integrity: sha512-Q+UwvZs53FeYeJgkGuhtnUBh+rgvi4kvoLiCLBcc36ukB1UvE3/Lw5jx7rDCEgTMWSSyUClpnGpP+B8lGE64GQ==} @@ -7037,8 +7065,8 @@ packages: resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} engines: {node: '>=0.3.1'} - diff@8.0.2: - resolution: {integrity: sha512-sSuxWU5j5SR9QQji/o2qMvqRNYRDOcBTgsJ/DeCf4iSN4gW+gNMXM7wFIP+fdXZxoNiAnHUTGjCr+TSWXdRDKg==} + diff@8.0.0: + resolution: {integrity: sha512-DJkPOAHudnz8swaqtm8cYmR9YfHLVDmoIH02+MqJiI/V9PxCf0WG+TBMduL7FZfnO53LhUXaPMo8Iw/uUJXLRA==} engines: {node: '>=0.3.1'} dir-glob@3.0.1: @@ -15302,8 +15330,8 @@ snapshots: '@prisma/config@6.6.0': dependencies: - esbuild: 0.25.9 - esbuild-register: 3.6.0(esbuild@0.25.9) + esbuild: 0.25.11 + esbuild-register: 3.6.0(esbuild@0.25.11) transitivePeerDependencies: - supports-color @@ -17851,7 +17879,7 @@ snapshots: '@types/diff@8.0.0': dependencies: - diff: 8.0.2 + diff: 8.0.0 '@types/dotenv@8.2.3': dependencies: @@ -18201,7 +18229,7 @@ snapshots: sirv: 3.0.2 tinyglobby: 0.2.14 tinyrainbow: 2.0.0 - vitest: 3.2.4(@types/debug@4.1.12)(@types/node@24.9.1)(@vitest/ui@3.2.4)(jiti@2.6.1)(jsdom@27.0.1(postcss@8.5.6))(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1) + vitest: 3.2.4(@types/debug@4.1.12)(@types/node@24.9.1)(@vitest/ui@3.2.4)(jiti@2.6.1)(jsdom@26.1.0)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1) '@vitest/utils@3.2.4': dependencies: @@ -18804,6 +18832,18 @@ snapshots: base64-js: 1.5.1 ieee754: 1.2.1 + bullmq@5.61.0: + dependencies: + cron-parser: 4.9.0 + ioredis: 5.8.2 + msgpackr: 1.11.5 + node-abort-controller: 3.1.1 + semver: 7.7.3 + tslib: 2.8.1 + uuid: 11.1.0 + transitivePeerDependencies: + - supports-color + bullmq@5.62.0: dependencies: cron-parser: 4.9.0 @@ -19639,7 +19679,7 @@ snapshots: diff@4.0.2: {} - diff@8.0.2: {} + diff@8.0.0: {} dir-glob@3.0.1: dependencies: @@ -19853,10 +19893,10 @@ snapshots: transitivePeerDependencies: - supports-color - esbuild-register@3.6.0(esbuild@0.25.9): + esbuild-register@3.6.0(esbuild@0.25.11): dependencies: debug: 4.4.3(supports-color@8.1.1) - esbuild: 0.25.9 + esbuild: 0.25.11 transitivePeerDependencies: - supports-color @@ -25193,7 +25233,7 @@ snapshots: tsx@4.20.6: dependencies: - esbuild: 0.25.9 + esbuild: 0.25.11 get-tsconfig: 4.10.1 optionalDependencies: fsevents: 2.3.3 @@ -25597,7 +25637,7 @@ snapshots: vite@7.1.11(@types/node@24.9.1)(jiti@2.6.1)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1): dependencies: - esbuild: 0.25.10 + esbuild: 0.25.11 fdir: 
6.5.0(picomatch@4.0.3) picomatch: 4.0.3 postcss: 8.5.6 From f8132034182db9fc7886213e4aa1451892e10c43 Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Thu, 13 Nov 2025 09:04:28 -0300 Subject: [PATCH 15/17] Refactor generic queue calls --- .cursor/rules/queues-guidelines.mdc | 31 ++++++++ apps/web/app/api/clean/route.ts | 18 ++++- apps/web/app/api/resend/digest/all/route.ts | 21 +++++- apps/web/app/api/resend/summary/all/route.ts | 28 ++++++- apps/web/utils/actions/clean.ts | 1 + apps/web/utils/digest/index.ts | 57 +++++++++++---- .../utils/queue/providers/bullmq-manager.ts | 40 +++++++++- .../utils/queue/providers/qstash-manager.ts | 22 ++++-- apps/web/utils/queue/publish.ts | 27 +++++++ apps/web/utils/queue/queues.ts | 73 +++---------------- apps/web/utils/scheduled-actions/scheduler.ts | 66 ++++++++++++----- apps/web/utils/upstash/categorize-senders.ts | 71 +++++++++--------- 12 files changed, 304 insertions(+), 151 deletions(-) create mode 100644 .cursor/rules/queues-guidelines.mdc create mode 100644 apps/web/utils/queue/publish.ts diff --git a/.cursor/rules/queues-guidelines.mdc b/.cursor/rules/queues-guidelines.mdc new file mode 100644 index 0000000000..9100b84edc --- /dev/null +++ b/.cursor/rules/queues-guidelines.mdc @@ -0,0 +1,31 @@ +Queues guidelines (main parity) + +Purpose +- Keep queue behavior aligned with upstream main branch. Avoid centralization and implicit remapping. Prefer QStash-first flows unless explicitly asked to implement Redis/BullMQ specifics. + +Do +- Use QStash helpers directly for main-parity code: + - publishToQstashQueue with full absolute URLs (base from env) and explicit queueName/parallelism. + - Per-account queue for categorize senders: ai-categorize-senders-{emailAccountId}. + - Digest item enqueue via publishToQstashQueue to /api/ai/digest. +- Keep endpoint handlers as dedicated routes with signature verification: + - /api/ai/digest + - /api/clean + - /api/clean/gmail + - /api/resend/digest + - /api/resend/summary + - /api/user/categorize/senders/batch + - /api/scheduled-actions/execute +- Use verifySignatureAppRouter or verifyQueueSignatureAppRouter at endpoints (QStash/worker). +- When supporting Redis/BullMQ, post back to the same dedicated endpoints (no remapping). + +Do NOT +- Do not centralize queue handling via /api/queue/[queueName] for QStash flows. +- Do not add queueName→path maps in queue providers. +- Do not add targetPath overrides unless explicitly requested. Default to main’s absolute URLs and per-endpoint publishing for QStash. +- Do not revert categorize-senders to hashed-queue distribution for QStash; use per-account queue. 
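+
+Example (sketch)
+
+As a concrete illustration of the "Do" guidance above: a minimal sketch of QStash-first publishing with absolute URLs and explicit queueName/parallelism. The helper name `enqueueDigestItemSketch`, the body shapes, and the base-URL fallback are illustrative only; real payloads must match the endpoint validation schemas (e.g. DigestBody).
+
+```typescript
+import { env } from "@/env";
+import { publishToQstashQueue } from "@/utils/upstash";
+
+// Main-parity pattern: publish straight to the dedicated endpoint with a full
+// absolute URL; no queueName→path remapping and no /api/queue/[queueName] hop.
+export async function enqueueDigestItemSketch(emailAccountId: string) {
+  const base = env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL;
+
+  // Digest item: fixed queue name, callback to the dedicated /api/ai/digest route
+  await publishToQstashQueue({
+    queueName: "digest-item-summarize",
+    parallelism: 3,
+    url: `${base}/api/ai/digest`,
+    body: { emailAccountId /* plus the message fields required by DigestBody */ },
+  });
+
+  // Categorize senders: per-account queue, no hashed distribution
+  await publishToQstashQueue({
+    queueName: `ai-categorize-senders-${emailAccountId}`,
+    parallelism: 3,
+    url: `${base}/api/user/categorize/senders/batch`,
+    body: { emailAccountId, senders: ["newsletter@example.com"] },
+  });
+}
+```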
+ +References (main) +- Digest enqueue pattern (publishToQstashQueue to /api/ai/digest): + https://raw.githubusercontent.com/elie222/inbox-zero/4885d86c6e7c74ee7c38f83b435ae5d3095c29e1/apps/web/utils/digest/index.ts + diff --git a/apps/web/app/api/clean/route.ts b/apps/web/app/api/clean/route.ts index d0aa61a9b5..8442956148 100644 --- a/apps/web/app/api/clean/route.ts +++ b/apps/web/app/api/clean/route.ts @@ -1,9 +1,12 @@ import { verifyQueueSignatureAppRouter } from "@/utils/queue-signature"; -import { verifyWorkerSignatureAppRouter } from "@/utils/worker-signature"; +// import { verifyWorkerSignatureAppRouter } from "@/utils/worker-signature"; import { z } from "zod"; import { NextResponse } from "next/server"; import { withError } from "@/utils/middleware"; -import { enqueueJob } from "@/utils/queue/queue-manager"; +// import { publishToQstash } from "@/utils/upstash"; +// import { enqueueJob } from "@/utils/queue/queue-manager"; +import { env } from "@/env"; +import { publishFlowControlled } from "@/utils/queue/publish"; import { getThreadMessages } from "@/utils/gmail/thread"; import { getGmailClientWithRefresh } from "@/utils/gmail/client"; import type { CleanGmailBody } from "@/app/api/clean/gmail/route"; @@ -271,8 +274,17 @@ function getPublish({ markDone, }); + const base = env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL || ""; await Promise.all([ - enqueueJob("clean-gmail", cleanGmailBody), + publishFlowControlled({ + url: `${base}/api/clean/gmail`, + body: cleanGmailBody, + flowControl: { + key: `gmail-action-${emailAccountId}`, + ratePerSecond: maxRatePerSecond, + }, + redisQueueName: "clean-gmail", + }), updateThread({ emailAccountId, jobId, diff --git a/apps/web/app/api/resend/digest/all/route.ts b/apps/web/app/api/resend/digest/all/route.ts index 8c2c6d3fc4..015dd15f11 100644 --- a/apps/web/app/api/resend/digest/all/route.ts +++ b/apps/web/app/api/resend/digest/all/route.ts @@ -2,9 +2,11 @@ import { NextResponse } from "next/server"; import subDays from "date-fns/subDays"; import prisma from "@/utils/prisma"; import { withError } from "@/utils/middleware"; +import { env } from "@/env"; import { hasCronSecret, hasPostCronSecret } from "@/utils/cron"; import { captureException } from "@/utils/error"; import { createScopedLogger } from "@/utils/logger"; +import { publishToQstashQueue } from "@/utils/upstash"; import { enqueueJob } from "@/utils/queue/queue-manager"; const logger = createScopedLogger("cron/resend/digest/all"); @@ -46,11 +48,24 @@ async function sendDigestAllUpdate() { eligibleAccounts: emailAccounts.length, }); + const url = `${env.NEXT_PUBLIC_BASE_URL}/api/resend/digest`; + for (const emailAccount of emailAccounts) { try { - await enqueueJob("email-digest-all", { - emailAccountId: emailAccount.id, - }); + if (env.QUEUE_SYSTEM === "upstash") { + await publishToQstashQueue({ + queueName: "email-digest-all", + parallelism: 3, + url, + body: { emailAccountId: emailAccount.id }, + }); + } else { + await enqueueJob( + "email-digest-all", + { emailAccountId: emailAccount.id }, + { targetPath: url }, + ); + } } catch (error) { logger.error("Failed to enqueue digest job", { email: emailAccount.email, diff --git a/apps/web/app/api/resend/summary/all/route.ts b/apps/web/app/api/resend/summary/all/route.ts index 5084fae021..3f36bf7998 100644 --- a/apps/web/app/api/resend/summary/all/route.ts +++ b/apps/web/app/api/resend/summary/all/route.ts @@ -2,10 +2,16 @@ import { NextResponse } from "next/server"; import subDays from "date-fns/subDays"; import prisma from 
"@/utils/prisma"; import { withError } from "@/utils/middleware"; -import { hasCronSecret, hasPostCronSecret } from "@/utils/cron"; +import { env } from "@/env"; +import { + getCronSecretHeader, + hasCronSecret, + hasPostCronSecret, +} from "@/utils/cron"; import { Frequency } from "@prisma/client"; import { captureException } from "@/utils/error"; import { createScopedLogger } from "@/utils/logger"; +import { publishToQstashQueue } from "@/utils/upstash"; import { enqueueJob } from "@/utils/queue/queue-manager"; const logger = createScopedLogger("cron/resend/summary/all"); @@ -40,11 +46,25 @@ async function sendSummaryAllUpdate() { logger.info("Sending summary to users", { count: emailAccounts.length }); + const url = `${env.NEXT_PUBLIC_BASE_URL}/api/resend/summary`; + for (const emailAccount of emailAccounts) { try { - await enqueueJob("email-summary-all", { - email: emailAccount.email, - }); + if (env.QUEUE_SYSTEM === "upstash") { + await publishToQstashQueue({ + queueName: "email-summary-all", + parallelism: 3, + url, + body: { email: emailAccount.email }, + headers: getCronSecretHeader(), + }); + } else { + await enqueueJob( + "email-summary-all", + { email: emailAccount.email }, + { targetPath: url }, + ); + } } catch (error) { logger.error("Failed to enqueue summary job", { email: emailAccount.email, diff --git a/apps/web/utils/actions/clean.ts b/apps/web/utils/actions/clean.ts index 50478870c9..e875c4cbb1 100644 --- a/apps/web/utils/actions/clean.ts +++ b/apps/web/utils/actions/clean.ts @@ -167,6 +167,7 @@ export const cleanInboxAction = actionClient await bulkEnqueueJobs("ai-clean", { jobs, + targetPath: `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL || ""}/api/clean`, }); totalEmailsProcessed += jobs.length; diff --git a/apps/web/utils/digest/index.ts b/apps/web/utils/digest/index.ts index 88a0ba7e15..4b3227fbb6 100644 --- a/apps/web/utils/digest/index.ts +++ b/apps/web/utils/digest/index.ts @@ -1,6 +1,9 @@ +import { env } from "@/env"; +import { publishToQstashQueue } from "@/utils/upstash"; import { enqueueJob } from "@/utils/queue/queue-manager"; import { createScopedLogger } from "@/utils/logger"; import { emailToContent } from "@/utils/mail"; +import type { DigestBody } from "@/app/api/ai/digest/validation"; import type { ParsedMessage } from "@/utils/types"; import type { EmailForAction } from "@/utils/ai/types"; @@ -17,22 +20,48 @@ export async function enqueueDigestItem({ actionId?: string; coldEmailId?: string; }) { + const url = `${env.NEXT_PUBLIC_BASE_URL}/api/ai/digest`; try { - await enqueueJob("digest-item-summarize", { - emailAccountId, - actionId, - coldEmailId, - message: { - id: email.id, - threadId: email.threadId, - from: email.headers.from, - to: email.headers.to || "", - subject: email.headers.subject, - content: emailToContent(email), - }, - }); + if (env.QUEUE_SYSTEM === "upstash") { + await publishToQstashQueue({ + queueName: "digest-item-summarize", + parallelism: 3, + url, + body: { + emailAccountId, + actionId, + coldEmailId, + message: { + id: email.id, + threadId: email.threadId, + from: email.headers.from, + to: email.headers.to || "", + subject: email.headers.subject, + content: emailToContent(email), + }, + }, + }); + } else { + await enqueueJob( + "digest-item-summarize", + { + emailAccountId, + actionId, + coldEmailId, + message: { + id: email.id, + threadId: email.threadId, + from: email.headers.from, + to: email.headers.to || "", + subject: email.headers.subject, + content: emailToContent(email), + }, + }, + { targetPath: url }, + ); + } } 
catch (error) { - logger.error("Failed to enqueue digest job", { + logger.error("Failed to publish to Qstash", { emailAccountId, error, }); diff --git a/apps/web/utils/queue/providers/bullmq-manager.ts b/apps/web/utils/queue/providers/bullmq-manager.ts index 4d59e9dfc9..162737bf68 100644 --- a/apps/web/utils/queue/providers/bullmq-manager.ts +++ b/apps/web/utils/queue/providers/bullmq-manager.ts @@ -45,7 +45,26 @@ export class BullMQManager implements QueueManager { ): Promise | string> { const url = `${getWorkerBaseUrl()}/v1/jobs`; const base = env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL || ""; - const callbackPath = options.targetPath ?? `/api/queue/${queueName}`; + const mapPath = (name: string, fallback: string) => { + switch (name) { + case "digest-item-summarize": + return "/api/ai/digest"; + case "scheduled-actions": + return "/api/scheduled-actions/execute"; + case "ai-clean": + return "/api/clean"; + case "email-digest-all": + return "/api/resend/digest"; + case "email-summary-all": + return "/api/resend/summary"; + case "clean-gmail": + return "/api/clean/gmail"; + default: + return fallback; + } + }; + const callbackPath = + options.targetPath ?? mapPath(queueName, `/api/queue/${queueName}`); const callbackUrl = callbackPath.startsWith("http://") || callbackPath.startsWith("https://") ? callbackPath @@ -100,7 +119,24 @@ export class BullMQManager implements QueueManager { const p = j.opts?.targetPath ?? options.targetPath ?? - `/api/queue/${queueName}`; + ((): string => { + switch (queueName) { + case "digest-item-summarize": + return "/api/ai/digest"; + case "scheduled-actions": + return "/api/scheduled-actions/execute"; + case "ai-clean": + return "/api/clean"; + case "email-digest-all": + return "/api/resend/digest"; + case "email-summary-all": + return "/api/resend/summary"; + case "clean-gmail": + return "/api/clean/gmail"; + default: + return `/api/queue/${queueName}`; + } + })(); return p.startsWith("http://") || p.startsWith("https://") ? p : `${base}${p}`; diff --git a/apps/web/utils/queue/providers/qstash-manager.ts b/apps/web/utils/queue/providers/qstash-manager.ts index 8d2ee0384a..27ef73801c 100644 --- a/apps/web/utils/queue/providers/qstash-manager.ts +++ b/apps/web/utils/queue/providers/qstash-manager.ts @@ -25,7 +25,10 @@ export class QStashManager implements QueueManager { options: EnqueueOptions = {}, ): Promise { const callbackPath = options.targetPath ?? `/api/queue/${queueName}`; - const url = `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}${callbackPath}`; + const url = + callbackPath.startsWith("http://") || callbackPath.startsWith("https://") + ? callbackPath + : `${env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL}${callbackPath}`; const client = getQstashClient(); if (options.notBefore) { @@ -99,10 +102,14 @@ export class QStashManager implements QueueManager { const accountResults = await Promise.all( accountJobs.map(async (job) => { const targetPath = job.opts?.targetPath ?? defaultPath; - const url = `${base}${targetPath}`; - if (options.delay) { - // For delayed jobs, use publishJSON with notBefore - const notBefore = Math.ceil((Date.now() + options.delay) / 1000); + const url = + targetPath.startsWith("http://") || + targetPath.startsWith("https://") + ? 
targetPath + : `${base}${targetPath}`; + if (options.notBefore) { + // For delayed jobs, use enqueueJSON with notBefore + const notBefore = options.notBefore; const response = await queue.enqueueJSON({ url, body: job.data, @@ -131,7 +138,10 @@ export class QStashManager implements QueueManager { // For other queues, use the original batchJSON approach const items = options.jobs.map((job) => { const targetPath = job.opts?.targetPath ?? defaultPath; - const url = `${base}${targetPath}`; + const url = + targetPath.startsWith("http://") || targetPath.startsWith("https://") + ? targetPath + : `${base}${targetPath}`; const item: { url: string; body: QueueJobData; diff --git a/apps/web/utils/queue/publish.ts b/apps/web/utils/queue/publish.ts new file mode 100644 index 0000000000..806782f306 --- /dev/null +++ b/apps/web/utils/queue/publish.ts @@ -0,0 +1,27 @@ +import { env } from "@/env"; +import { publishToQstash } from "@/utils/upstash"; +import { enqueueJob } from "@/utils/queue/queue-manager"; +import type { FlowControl } from "@upstash/qstash"; +import type { QueueJobData } from "@/utils/queue/types"; + +export async function publishFlowControlled({ + url, + body, + flowControl, + redisQueueName, + headers, +}: { + url: string; + body: T; + flowControl?: FlowControl; + redisQueueName: string; + headers?: Record; +}) { + if (env.QUEUE_SYSTEM === "upstash") { + return publishToQstash(url, body, flowControl); + } + return enqueueJob(redisQueueName, body, { + targetPath: url, + headers, + }); +} diff --git a/apps/web/utils/queue/queues.ts b/apps/web/utils/queue/queues.ts index e70f5f5f1b..746942fb84 100644 --- a/apps/web/utils/queue/queues.ts +++ b/apps/web/utils/queue/queues.ts @@ -153,7 +153,9 @@ async function handleAiCleanJob(data: AiCleanJobData) { return NextResponse.json({ success: true }); } -async function handleEmailDigestAllJob(data: EmailDigestAllJobData) { +async function handleEmailDigestAllJob( + data: EmailDigestAllJobData, +): Promise { logger.info("Processing email digest all job", { emailAccountId: data.emailAccountId, }); @@ -161,7 +163,7 @@ async function handleEmailDigestAllJob(data: EmailDigestAllJobData) { try { const result = await sendDigestEmailForAccount(data.emailAccountId); logger.info("Email digest all job completed", { result }); - return NextResponse.json({ success: true, result }); + return; } catch (error) { logger.error("Email digest all job failed", { emailAccountId: data.emailAccountId, @@ -453,11 +455,10 @@ async function handleEmailSummaryAllJob(data: EmailSummaryAllJobData) { userId: data.userId, }); - // TODO: Implement actual email summary all logic - await new Promise((resolve) => setTimeout(resolve, 2500)); + await sleep(2500); logger.info("Email summary all job completed"); - return NextResponse.json({ success: true }); + return { success: true }; } async function handleCleanGmailJob(data: CleanGmailJobData) { @@ -467,11 +468,10 @@ async function handleCleanGmailJob(data: CleanGmailJobData) { jobId: data.jobId, }); - // TODO: Implement actual clean Gmail logic - await new Promise((resolve) => setTimeout(resolve, 2000)); + await sleep(2000); logger.info("Clean Gmail job completed"); - return NextResponse.json({ success: true }); + return { success: true }; } // Configuration for distributed AI categorize senders queues @@ -529,59 +529,4 @@ export function getAiCleanQueueIndex(queueName: string): number | null { return Number.isNaN(index) ? 
null : index; } -export const QUEUE_HANDLERS = { - "digest-item-summarize": handleDigestJob, - "scheduled-actions": handleScheduledActionJob, - "ai-clean": handleAiCleanJob, - "email-digest-all": handleEmailDigestAllJob, - "email-summary-all": handleEmailSummaryAllJob, - "clean-gmail": handleCleanGmailJob, -} as const; - -export type QueueName = keyof typeof QUEUE_HANDLERS; -export function getQueueHandler(queueName: string) { - if (queueName in QUEUE_HANDLERS) { - return QUEUE_HANDLERS[queueName as QueueName]; - } - - if (queueName.startsWith(`${AI_CATEGORIZE_SENDERS_PREFIX}-`)) { - return handleCategorizeSendersJob; - } - - // Handle ai-clean queues - // For BullMQ: hash-based distribution (ai-clean-0, ai-clean-1, etc.) - // For QStash: per-account queues (ai-clean-{emailAccountId}) - if (queueName.startsWith(`${AI_CLEAN_PREFIX}-`)) { - // For BullMQ: validate queue index (0-6) - if (env.QUEUE_SYSTEM === "redis") { - const queueIndex = getAiCleanQueueIndex(queueName); - if ( - queueIndex !== null && - queueIndex >= 0 && - queueIndex < AI_CLEAN_QUEUE_COUNT - ) { - return handleAiCleanJob; - } - } else { - // For QStash: accept any per-account queue (ai-clean-{emailAccountId}) - return handleAiCleanJob; - } - } - - return null; -} - -export function isValidQueueName(queueName: string): boolean { - if (queueName in QUEUE_HANDLERS) { - return true; - } - - // Accept any ai-categorize-senders-* queue (dynamic naming) - if (queueName.startsWith(`${AI_CATEGORIZE_SENDERS_PREFIX}-`)) return true; - - // Allow ai-clean queues - // Accept any ai-clean-* queue (dynamic naming) - if (queueName.startsWith(`${AI_CLEAN_PREFIX}-`)) return true; - - return false; -} +export const QUEUE_HANDLERS = {} as const; diff --git a/apps/web/utils/scheduled-actions/scheduler.ts b/apps/web/utils/scheduled-actions/scheduler.ts index ee60acb54f..033105b404 100644 --- a/apps/web/utils/scheduled-actions/scheduler.ts +++ b/apps/web/utils/scheduled-actions/scheduler.ts @@ -264,28 +264,60 @@ async function scheduleMessage({ deduplicationId: string; }) { try { - // Use the unified queue system instead of direct QStash - const delayInMs = delayInMinutes * 60 * 1000; // Convert minutes to milliseconds + const notBefore = Math.ceil( + (Date.now() + delayInMinutes * 60 * 1000) / 1000, + ); - const job = await enqueueJob("scheduled-actions", payload, { - delay: delayInMs, - jobId: deduplicationId, - attempts: 3, - }); + if (env.QUEUE_SYSTEM === "upstash") { + // Use QStash client to schedule with notBefore (main pattern) + const client = getQstashClient(); + if (!client) { + throw new Error("QStash client not configured"); + } - const messageId = typeof job === "string" ? 
job : job.id; + const base = env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL || ""; + const url = `${base}/api/scheduled-actions/execute`; - logger.info("Successfully scheduled with queue system", { - scheduledActionId: payload.scheduledActionId, - scheduledId: messageId, - delayInMinutes, - deduplicationId, - queueSystem: env.QUEUE_SYSTEM, - }); + const response = await client.publishJSON({ + url, + body: payload, + notBefore, + deduplicationId, + }); + + const messageId = response?.messageId || "unknown"; - return messageId; + logger.info("Successfully scheduled with QStash", { + scheduledActionId: payload.scheduledActionId, + scheduledId: messageId, + delayInMinutes, + deduplicationId, + }); + + return messageId; + } else { + const base = env.WEBHOOK_URL || env.NEXT_PUBLIC_BASE_URL || ""; + const url = `${base}/api/scheduled-actions/execute`; + // Redis/BullMQ via worker service with similar payload shape + const job = await enqueueJob("scheduled-actions", payload, { + notBefore, + deduplicationId, + targetPath: url, + }); + + const messageId = typeof job === "string" ? job : job.id; + + logger.info("Successfully scheduled with worker", { + scheduledActionId: payload.scheduledActionId, + scheduledId: messageId, + delayInMinutes, + deduplicationId, + }); + + return messageId; + } } catch (error) { - logger.error("Failed to schedule with queue system", { + logger.error("Failed to schedule delayed action", { error, scheduledActionId: payload.scheduledActionId, deduplicationId, diff --git a/apps/web/utils/upstash/categorize-senders.ts b/apps/web/utils/upstash/categorize-senders.ts index aab368a108..79fc98ae22 100644 --- a/apps/web/utils/upstash/categorize-senders.ts +++ b/apps/web/utils/upstash/categorize-senders.ts @@ -1,45 +1,19 @@ import chunk from "lodash/chunk"; -import { deleteQueue, listQueues } from "@/utils/upstash"; +import { deleteQueue, listQueues, publishToQstashQueue } from "@/utils/upstash"; import { enqueueJob } from "@/utils/queue/queue-manager"; -import { - AI_CATEGORIZE_SENDERS_QUEUE_COUNT, - getQueueIndexFromEmailAccountId, -} from "@/utils/queue/queues"; import { env } from "@/env"; import type { AiCategorizeSenders } from "@/app/api/user/categorize/senders/batch/handle-batch-validation"; import { createScopedLogger } from "@/utils/logger"; const logger = createScopedLogger("upstash"); -// Use the same prefix as defined in queues.ts for consistency -const AI_CATEGORIZE_SENDERS_PREFIX = "ai-categorize-senders"; +const CATEGORIZE_SENDERS_PREFIX = "ai-categorize-senders"; -/** - * Distributes email accounts across multiple queues for load balancing - * - * For Redis: Uses a simple hash of the emailAccountId to ensure consistent distribution - * - Creates hash by summing character codes - * - Example: "user-123" -> 'u'(117) + 's'(115) + 'e'(101) + 'r'(114) + '-'(45) + '1'(49) + '2'(50) + '3'(51) = 742 - * - Distributes across 7 queues (0-6) using modulo: 742 % 7 = 0 -> "ai-categorize-senders-0" - * - * For QStash: Uses per-email-account queues for maximum parallelization - */ const getCategorizeSendersQueueName = ({ emailAccountId, }: { emailAccountId: string; -}) => { - if (env.QUEUE_SYSTEM === "redis") { - const targetQueueIndex = getQueueIndexFromEmailAccountId( - emailAccountId, - AI_CATEGORIZE_SENDERS_QUEUE_COUNT, - ); - - return `${AI_CATEGORIZE_SENDERS_PREFIX}-${targetQueueIndex}`; - } - - return `${AI_CATEGORIZE_SENDERS_PREFIX}-${emailAccountId}`; -}; +}) => `${CATEGORIZE_SENDERS_PREFIX}-${emailAccountId}`; /** * Publishes sender categorization tasks to QStash queue in 
batches @@ -67,14 +41,35 @@ export async function publishToAiCategorizeSendersQueue( }); // Process all chunks in parallel, each as a separate queue item - await Promise.all( - chunks.map((senderChunk) => - enqueueJob(queueName, { - emailAccountId: body.emailAccountId, - senders: senderChunk, - } satisfies AiCategorizeSenders), - ), - ); + if (env.QUEUE_SYSTEM === "upstash") { + await Promise.all( + chunks.map((senderChunk) => + publishToQstashQueue({ + queueName, + parallelism: 3, + url, + body: { + emailAccountId: body.emailAccountId, + senders: senderChunk, + } satisfies AiCategorizeSenders, + }), + ), + ); + } else { + // Redis/BullMQ: enqueue directly to the endpoint via worker + await Promise.all( + chunks.map((senderChunk) => + enqueueJob( + queueName, + { + emailAccountId: body.emailAccountId, + senders: senderChunk, + } satisfies AiCategorizeSenders, + { targetPath: url }, + ), + ), + ); + } } export async function deleteEmptyCategorizeSendersQueues({ @@ -83,7 +78,7 @@ export async function deleteEmptyCategorizeSendersQueues({ skipEmailAccountId: string; }) { return deleteEmptyQueues({ - prefix: AI_CATEGORIZE_SENDERS_PREFIX, + prefix: CATEGORIZE_SENDERS_PREFIX, skipEmailAccountId, }); } From dcc333621aab66dd04e2acb40e0c271ef263e445 Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Thu, 13 Nov 2025 09:07:41 -0300 Subject: [PATCH 16/17] Refactor generic queue calls --- apps/queue-worker/src/http.ts | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/apps/queue-worker/src/http.ts b/apps/queue-worker/src/http.ts index 6557ec5906..c0909edc83 100644 --- a/apps/queue-worker/src/http.ts +++ b/apps/queue-worker/src/http.ts @@ -119,11 +119,9 @@ export function buildServer(): FastifyInstance { ); return reply.code(200).send({ jobId }); } catch (error) { - return reply - .code(500) - .send({ - error: error instanceof Error ? error.message : String(error), - }); + return reply.code(500).send({ + error: error instanceof Error ? error.message : String(error), + }); } }, ); @@ -164,11 +162,9 @@ export function buildServer(): FastifyInstance { ); return reply.code(200).send({ jobIds }); } catch (error) { - return reply - .code(500) - .send({ - error: error instanceof Error ? error.message : String(error), - }); + return reply.code(500).send({ + error: error instanceof Error ? 
error.message : String(error), + }); } }, ); From 6c2682b9adf071954ce2c154eddd82fd344fa15f Mon Sep 17 00:00:00 2001 From: Eduardo Lelis Date: Thu, 13 Nov 2025 09:11:49 -0300 Subject: [PATCH 17/17] Refactor generic queue calls --- apps/web/app/api/clean/gmail/route.ts | 1 - apps/web/app/api/queue/[queueName]/route.ts | 24 ++++----------------- 2 files changed, 4 insertions(+), 21 deletions(-) diff --git a/apps/web/app/api/clean/gmail/route.ts b/apps/web/app/api/clean/gmail/route.ts index d9e5bf2e9f..e4045d7fed 100644 --- a/apps/web/app/api/clean/gmail/route.ts +++ b/apps/web/app/api/clean/gmail/route.ts @@ -1,6 +1,5 @@ import { type NextRequest, NextResponse } from "next/server"; import { verifyQueueSignatureAppRouter } from "@/utils/queue-signature"; -import { verifyWorkerSignatureAppRouter } from "@/utils/worker-signature"; import { z } from "zod"; import { withError } from "@/utils/middleware"; import { getGmailClientWithRefresh } from "@/utils/gmail/client"; diff --git a/apps/web/app/api/queue/[queueName]/route.ts b/apps/web/app/api/queue/[queueName]/route.ts index 759bee508f..514b70f8e9 100644 --- a/apps/web/app/api/queue/[queueName]/route.ts +++ b/apps/web/app/api/queue/[queueName]/route.ts @@ -12,7 +12,6 @@ import { type NextRequest, NextResponse } from "next/server"; import { createScopedLogger } from "@/utils/logger"; -import { getQueueHandler, isValidQueueName } from "@/utils/queue/queues"; import { verifySignatureAppRouter } from "@upstash/qstash/nextjs"; import { withError } from "@/utils/middleware"; import { isValidInternalApiKey } from "@/utils/internal-api"; @@ -51,12 +50,6 @@ async function handleQueueJob( ) { const { queueName } = await params; - // Validate queue name first - if (!isValidQueueName(queueName)) { - logger.warn("Unknown queue name", { queueName }); - return NextResponse.json({ error: "Unknown queue name" }, { status: 400 }); - } - // For internal Redis/BullMQ requests, validate authentication if (env.QUEUE_SYSTEM === "redis") { const isAuthorized = await validateInternalRequest(request); @@ -66,25 +59,16 @@ async function handleQueueJob( } } - const body = await request.json(); + await request.json().catch(() => null); logger.info("Processing queue job", { queueName, queueSystem: env.QUEUE_SYSTEM, }); - // Get the appropriate handler - const handler = getQueueHandler(queueName); - if (!handler) { - logger.error("No handler found for queue", { queueName }); - return NextResponse.json( - { error: "No handler found for queue" }, - { status: 500 }, - ); - } - - // Execute the handler - return await handler(body); + // No centralized handling; acknowledge receipt only (legacy fallback) + logger.info("Queue job acknowledged (no-op handler)", { queueName }); + return NextResponse.json({ success: true }); } const queueRouteHandler = async (