
How I ship AI chat features safely: Clerk-gated access, OpenAI ChatKit sessions, prompt/response guardrails, and performance-minded client loading in a Next.js 16 App Router codebase.
When you add AI chat to a public site, the #1 risk is turning it into an unbounded cost center. My approach: gate access with auth, keep sessions explicit, and load heavy client code only when the user actually opens the chat.
- Clerk for authentication (UI localized per user language)
- OpenAI ChatKit for the chat UI/session control
- Next.js 16 App Router for routing + server actions
- Guardrails: validation, rate limiting, and careful prompt construction
- Tooling: Cursor/Claude Code for faster iteration; evaluations before rollout
- Keep the page static: render content at build time.
- Lazy-load the chat bundle only when the sidebar is opened.
- Avoid mounting expensive animations/charts/maps above the fold.
- Treat AI as a feature module, not part of your base layout bundle.
This implementation shows how to ship a production-ready, authenticated AI chat feature in a Next.js app using Clerk for auth and OpenAI ChatKit for the chat experience.
The key pattern is: gate the dashboard with Clerk, create a ChatKit session on the server using the Clerk userId, pass a clientSecret to the client, and enforce guardrails + rate limiting via server routes and Redis.
'use client';

import { ChatKit } from '@openai/chatkit-react';
import { useAuth } from '@clerk/nextjs';
import { useEffect, useState } from 'react';

/**
 * Authenticated AI chat widget.
 *
 * Waits for Clerk to finish loading, fetches a session token for the
 * signed-in user, and only then mounts the ChatKit UI. Anonymous
 * visitors get a sign-in prompt; a spinner is shown while the token
 * request is in flight.
 */
export function AIChat() {
  const { getToken, isLoaded, isSignedIn } = useAuth();
  const [authToken, setAuthToken] = useState<string | null>(null);

  useEffect(() => {
    // Guard against setState-after-unmount if the widget is closed
    // while the token request is still in flight.
    let cancelled = false;

    async function loadToken() {
      if (isLoaded && isSignedIn) {
        const token = await getToken();
        if (!cancelled) {
          setAuthToken(token);
        }
      }
    }

    void loadToken();

    return () => {
      cancelled = true;
    };
  }, [isLoaded, isSignedIn, getToken]);

  // NOTE(review): Clerk session tokens are short-lived; confirm ChatKit
  // re-requests a token on expiry, otherwise refresh it periodically here.

  if (!isLoaded || !isSignedIn) {
    return (
      <div className="flex items-center justify-center h-96">
        <p className="text-muted-foreground">Please sign in to use the AI chat</p>
      </div>
    );
  }

  if (!authToken) {
    return (
      <div className="flex items-center justify-center h-96">
        <div className="animate-spin h-8 w-8 border-4 border-primary border-t-transparent rounded-full" />
      </div>
    );
  }

  return (
    <ChatKit
      workflowId={process.env.NEXT_PUBLIC_CHATKIT_WORKFLOW_ID!}
      authToken={authToken}
      className="w-full h-full"
      theme={{
        primaryColor: '#0ea5e9',
        backgroundColor: '#ffffff',
        textColor: '#1f2937',
      }}
    />
  );
}

import { auth } from '@clerk/nextjs/server';
2import { NextRequest, NextResponse } from 'next/server';
3import OpenAI from 'openai';
4
5const openai = new OpenAI({
6 apiKey: process.env.OPENAI_API_KEY!,
7});
8
9export async function POST(req: NextRequest) {
10 try {
11 // β
Verify authentication with Clerk
12 const { userId } = await auth();
13
14 if (!userId) {
15 return NextResponse.json(
16 { error: 'Unauthorized' },
17 { status: 401 }
18 );
19 }
20
21 const { messages } = await req.json();
22
23 // β
Add system prompt with user context
24 const systemMessage = {
25 role: 'system',
26 content: `You are a helpful AI assistant for dvirbi.com portfolio website.
27User ID: ${userId}. Provide concise, technical answers about web development,
28TypeScript, Next.js, and modern architecture patterns.`,
29 };
30
31 // β
Stream response with GPT-4
32 const stream = await openai.chat.completions.create({
33 model: 'gpt-4-turbo-preview',
34 messages: [systemMessage, ...messages],
35 stream: true,
36 temperature: 0.7,
37 max_tokens: 1000,
38 });
39
40 // β
Return streaming response
41 const encoder = new TextEncoder();
42 const readable = new ReadableStream({
43 async start(controller) {
44 for await (const chunk of stream) {
45 const content = chunk.choices[0]?.delta?.content || '';
46 controller.enqueue(encoder.encode(`data: ${JSON.stringify({ content })}\n\n`));
47 }
48 controller.enqueue(encoder.encode('data: [DONE]\n\n'));
49 controller.close();
50 },
51 });
52
53 return new Response(readable, {
54 headers: {
55 'Content-Type': 'text/event-stream',
56 'Cache-Control': 'no-cache',
57 'Connection': 'keep-alive',
58 },
59 });
60
61 } catch (error) {
62 console.error('Chat API error:', error);
63 return NextResponse.json(
64 { error: 'Internal server error' },
65 { status: 500 }
66 );
67 }
68}1import { auth } from '@clerk/nextjs/server';
2import { Redis } from '@upstash/redis';
3import { NextRequest, NextResponse } from 'next/server';
4
5const redis = new Redis({
6 url: process.env.UPSTASH_REDIS_REST_URL!,
7 token: process.env.UPSTASH_REDIS_REST_TOKEN!,
8});
9
10interface RateLimitConfig {
11 maxRequests: number;
12 windowMs: number;
13}
14
15const RATE_LIMITS: Record<string, RateLimitConfig> = {
16 free: { maxRequests: 10, windowMs: 60 * 60 * 1000 }, // 10 requests/hour
17 pro: { maxRequests: 100, windowMs: 60 * 60 * 1000 }, // 100 requests/hour
18};
19
20export async function checkRateLimit(
21 userId: string,
22 tier: 'free' | 'pro' = 'free'
23): Promise<{ allowed: boolean; remaining: number; resetAt: number }> {
24 const config = RATE_LIMITS[tier];
25 const key = `ratelimit:chat:${userId}`;
26 const now = Date.now();
27 const windowStart = now - config.windowMs;
28
29 // Remove old entries
30 await redis.zremrangebyscore(key, 0, windowStart);
31
32 // Count requests in current window
33 const requestCount = await redis.zcard(key);
34
35 if (requestCount >= config.maxRequests) {
36 const oldestRequest = await redis.zrange(key, 0, 0, { withScores: true });
37 const resetAt = oldestRequest[0]?.score ? oldestRequest[0].score + config.windowMs : now;
38
39 return {
40 allowed: false,
41 remaining: 0,
42 resetAt,
43 };
44 }
45
46 // Add current request
47 await redis.zadd(key, { score: now, member: `${now}-${Math.random()}` });
48 await redis.expire(key, Math.ceil(config.windowMs / 1000));
49
50 return {
51 allowed: true,
52 remaining: config.maxRequests - requestCount - 1,
53 resetAt: now + config.windowMs,
54 };
55}
56
57// Middleware for API routes
58export async function withRateLimit(
59 req: NextRequest,
60 handler: (req: NextRequest) => Promise<Response>
61) {
62 const { userId } = await auth();
63
64 if (!userId) {
65 return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
66 }
67
68 const rateLimit = await checkRateLimit(userId, 'free');
69
70 if (!rateLimit.allowed) {
71 return NextResponse.json(
72 {
73 error: 'Rate limit exceeded',
74 resetAt: new Date(rateLimit.resetAt).toISOString(),
75 },
76 {
77 status: 429,
78 headers: {
79 'X-RateLimit-Limit': RATE_LIMITS.free.maxRequests.toString(),
80 'X-RateLimit-Remaining': rateLimit.remaining.toString(),
81 'X-RateLimit-Reset': rateLimit.resetAt.toString(),
82 },
83 }
84 );
85 }
86
87 return handler(req);
88}1import OpenAI from 'openai';
2
3const openai = new OpenAI({
4 apiKey: process.env.OPENAI_API_KEY!,
5});
6
7export async function moderateContent(text: string): Promise<{
8 flagged: boolean;
9 categories: string[];
10 scores: Record<string, number>;
11}> {
12 const moderation = await openai.moderations.create({
13 input: text,
14 });
15
16 const result = moderation.results[0];
17
18 const flaggedCategories = Object.entries(result.categories)
19 .filter(([_, flagged]) => flagged)
20 .map(([category]) => category);
21
22 return {
23 flagged: result.flagged,
24 categories: flaggedCategories,
25 scores: result.category_scores,
26 };
27}
28
29// Use in chat API route
30export async function POST(req: NextRequest) {
31 const { userId } = await auth();
32 const { messages } = await req.json();
33
34 // Get user's latest message
35 const userMessage = messages[messages.length - 1]?.content;
36
37 if (!userMessage) {
38 return NextResponse.json({ error: 'No message provided' }, { status: 400 });
39 }
40
41 // β
Check for inappropriate content
42 const moderation = await moderateContent(userMessage);
43
44 if (moderation.flagged) {
45 console.warn(`Flagged message from user ${userId}:`, moderation.categories);
46
47 return NextResponse.json(
48 {
49 error: 'Your message was flagged for inappropriate content',
50 categories: moderation.categories,
51 },
52 { status: 400 }
53 );
54 }
55
56 // Continue with OpenAI request...
57}1import { auth } from '@clerk/nextjs/server';
import { sql } from '@/lib/db';
import { NextRequest, NextResponse } from 'next/server';
import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY!,
});

/** One completed chat session, as persisted to chat_analytics. */
interface ChatEvent {
  userId: string;
  sessionId: string;
  messageCount: number;
  tokensUsed: number;
  model: string;
  duration: number;
  satisfactionScore?: number;
}

/**
 * Persist a chat session record. Failures are logged, never thrown —
 * analytics must not break the chat response path.
 */
export async function logChatSession(event: ChatEvent) {
  try {
    await sql`
      INSERT INTO chat_analytics (
        user_id,
        session_id,
        message_count,
        tokens_used,
        model,
        duration,
        satisfaction_score,
        created_at
      ) VALUES (
        ${event.userId},
        ${event.sessionId},
        ${event.messageCount},
        ${event.tokensUsed},
        ${event.model},
        ${event.duration},
        ${event.satisfactionScore ?? null},
        NOW()
      )
    `;
    // Note: `?? null` (not `|| null`) so a legitimate score of 0 is kept.
  } catch (error) {
    console.error('Failed to log chat session:', error);
  }
}

/** Aggregate a user's chat usage over the last 30 days. */
export async function getChatAnalytics(userId: string) {
  const stats = await sql`
    SELECT
      COUNT(*) as total_sessions,
      SUM(message_count) as total_messages,
      SUM(tokens_used) as total_tokens,
      AVG(duration) as avg_duration,
      AVG(satisfaction_score) as avg_satisfaction
    FROM chat_analytics
    WHERE user_id = ${userId}
      AND created_at > NOW() - INTERVAL '30 days'
  `;

  return stats[0];
}

// Usage in chat completion.
export async function POST(req: NextRequest) {
  const startTime = Date.now();
  const { userId } = await auth();

  // Reject anonymous callers up front — the original relied on a
  // `userId!` non-null assertion later, hiding a missing auth check.
  if (!userId) {
    return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
  }

  const { messages, sessionId } = await req.json();

  const response = await openai.chat.completions.create({
    model: 'gpt-4-turbo-preview',
    messages,
  });

  const duration = Date.now() - startTime;

  // Log analytics (safe to await: logChatSession swallows its own errors).
  await logChatSession({
    userId,
    sessionId,
    messageCount: messages.length,
    tokensUsed: response.usage?.total_tokens || 0,
    model: 'gpt-4-turbo-preview',
    duration,
  });

  return NextResponse.json(response);
}

Want a consultant? Click here to schedule a call.
Schedule a call
A behind-the-scenes breakdown of this portfolio: Next.js 16.1.3 App Router, next-intl (he default + /en), Sanity CMS, Clerk auth for AI chat, Resend email, and full SEO with sitemap/robots/JSON-LD — optimized for static builds.

Patterns I use to keep a Next.js App Router project maintainable: strict TypeScript, CMS-driven content with Sanity, stable GROQ queries, and practical trade-offs when the CMS evolves faster than the code.

A practical playbook for improving day-to-day delivery: connect Slack, Jira, Monday.com, and GitHub using MCP-based AI agents for notifications, triage, status sync, code review feedback loops, and automated follow-ups.
Want to see how AI chat can build you automation workflows?
Try AI Dashboard →
Wherever you are in the world, let's work together on your next project.
Israel
Prefer to talk directly? Schedule a call and we can discuss your project live.
Schedule a call