
Integrating GauGau AI with Popular Frameworks: React, Next.js, and More
Step-by-step guide to integrating GauGau AI into your favorite frameworks. Learn best practices for React, Next.js, Vue, Express, and FastAPI applications.

Integrating GauGau AI with Popular Frameworks
This comprehensive guide shows you how to integrate GauGau AI into the most popular web frameworks. We'll cover React, Next.js, Vue, Express, and FastAPI with production-ready examples.
React Integration
Setup
First, install the OpenAI SDK:
npm install openai
Create an API Service
// src/services/ai.js
// Thin wrapper around the OpenAI SDK, pointed at the GauGau AI endpoint.
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: process.env.REACT_APP_GAUGAU_API_KEY,
  baseURL: 'https://api.gaugauai.com/v1',
  dangerouslyAllowBrowser: true // Only for development!
});

// One-shot completion: resolves with the assistant's full reply text.
async function chat(messages, model = 'gpt-4o-mini') {
  const completion = await client.chat.completions.create({ model, messages });
  return completion.choices[0].message.content;
}

// Streaming completion: invokes onChunk(delta, textSoFar) for each chunk
// and resolves with the complete reply once the stream ends.
async function streamChat(messages, model = 'gpt-4o-mini', onChunk) {
  const stream = await client.chat.completions.create({
    model,
    messages,
    stream: true,
  });
  let fullResponse = '';
  for await (const chunk of stream) {
    const content = chunk.choices[0]?.delta?.content || '';
    fullResponse += content;
    onChunk(content, fullResponse);
  }
  return fullResponse;
}

export const aiService = { chat, streamChat };
Create a Chat Component
// src/components/ChatBox.jsx
import { useState } from 'react';
import { aiService } from '../services/ai';
export default function ChatBox() {
const [messages, setMessages] = useState([]);
const [input, setInput] = useState('');
const [loading, setLoading] = useState(false);
const handleSubmit = async (e) => {
e.preventDefault();
if (!input.trim()) return;
const userMessage = { role: 'user', content: input };
setMessages((prev) => [...prev, userMessage]);
setInput('');
setLoading(true);
try {
const response = await aiService.chat([...messages, userMessage]);
setMessages((prev) => [
...prev,
{ role: 'assistant', content: response },
]);
} catch (error) {
console.error('AI Error:', error);
setMessages((prev) => [
...prev,
{ role: 'assistant', content: 'Sorry, an error occurred.' },
]);
} finally {
setLoading(false);
}
};
return (
<div className="chat-container">
<div className="messages">
{messages.map((msg, idx) => (
<div key={idx} className={`message ${msg.role}`}>
<strong>{msg.role === 'user' ? 'You' : 'AI'}:</strong>
<p>{msg.content}</p>
</div>
))}
{loading && <div className="loading">AI is thinking...</div>}
</div>
<form onSubmit={handleSubmit}>
<input
type="text"
value={input}
onChange={(e) => setInput(e.target.value)}
placeholder="Type your message..."
disabled={loading}
/>
<button type="submit" disabled={loading}>
Send
</button>
</form>
</div>
);
}
Streaming Chat Component
// src/components/StreamingChat.jsx
import { useState } from 'react';
import { aiService } from '../services/ai';
export default function StreamingChat() {
const [messages, setMessages] = useState([]);
const [input, setInput] = useState('');
const [streaming, setStreaming] = useState(false);
const handleSubmit = async (e) => {
e.preventDefault();
if (!input.trim()) return;
const userMessage = { role: 'user', content: input };
setMessages((prev) => [...prev, userMessage]);
setInput('');
setStreaming(true);
// Add placeholder for AI response
setMessages((prev) => [...prev, { role: 'assistant', content: '' }]);
try {
await aiService.streamChat(
[...messages, userMessage],
'gpt-4o-mini',
(chunk, fullResponse) => {
setMessages((prev) => {
const newMessages = [...prev];
newMessages[newMessages.length - 1].content = fullResponse;
return newMessages;
});
}
);
} catch (error) {
console.error('Streaming error:', error);
} finally {
setStreaming(false);
}
};
return (
<div className="chat-container">
<div className="messages">
{messages.map((msg, idx) => (
<div key={idx} className={`message ${msg.role}`}>
<strong>{msg.role === 'user' ? 'You' : 'AI'}:</strong>
<p>{msg.content}</p>
</div>
))}
</div>
<form onSubmit={handleSubmit}>
<input
type="text"
value={input}
onChange={(e) => setInput(e.target.value)}
placeholder="Type your message..."
disabled={streaming}
/>
<button type="submit" disabled={streaming}>
{streaming ? 'Streaming...' : 'Send'}
</button>
</form>
</div>
);
}
Next.js Integration
API Route (Recommended for Security)
// app/api/chat/route.js
// Server-side proxy route so the GauGau API key never reaches the browser.
import OpenAI from 'openai';
import { NextResponse } from 'next/server';

const client = new OpenAI({
  apiKey: process.env.GAUGAU_API_KEY,
  baseURL: 'https://api.gaugauai.com/v1',
});

// POST /api/chat — body: { messages, model? }; returns { content, usage }.
export async function POST(request) {
  try {
    const { messages, model = 'gpt-4o-mini' } = await request.json();
    const completion = await client.chat.completions.create({ model, messages });
    const [firstChoice] = completion.choices;
    return NextResponse.json({
      content: firstChoice.message.content,
      usage: completion.usage,
    });
  } catch (error) {
    console.error('AI API Error:', error);
    return NextResponse.json(
      { error: 'Failed to generate response' },
      { status: 500 }
    );
  }
}
Streaming API Route
// app/api/chat/stream/route.js
// Streams raw completion text to the client as it arrives from the model.
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: process.env.GAUGAU_API_KEY,
  baseURL: 'https://api.gaugauai.com/v1',
});

// POST /api/chat/stream — body: { messages, model? }; responds with a
// text/event-stream whose body is the raw token text.
export async function POST(request) {
  try {
    const { messages, model = 'gpt-4o-mini' } = await request.json();
    const stream = await client.chat.completions.create({
      model,
      messages,
      stream: true,
    });
    const encoder = new TextEncoder();
    const readableStream = new ReadableStream({
      async start(controller) {
        try {
          for await (const chunk of stream) {
            const content = chunk.choices[0]?.delta?.content || '';
            if (content) {
              controller.enqueue(encoder.encode(content));
            }
          }
          controller.close();
        } catch (err) {
          // FIX: propagate mid-stream failures to the consumer instead of
          // leaving the response hanging open forever.
          controller.error(err);
        }
      },
    });
    return new Response(readableStream, {
      headers: {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        Connection: 'keep-alive',
      },
    });
  } catch (error) {
    // FIX: the original had no error handling at all. Parsing or stream
    // creation failed before any bytes were sent, so a clean 500 is possible.
    console.error('Streaming error:', error);
    return new Response(JSON.stringify({ error: 'Failed to start stream' }), {
      status: 500,
      headers: { 'Content-Type': 'application/json' },
    });
  }
}
Client Component
// app/components/Chat.jsx
'use client';
import { useState } from 'react';
export default function Chat() {
const [messages, setMessages] = useState([]);
const [input, setInput] = useState('');
const [loading, setLoading] = useState(false);
const handleSubmit = async (e) => {
e.preventDefault();
if (!input.trim()) return;
const userMessage = { role: 'user', content: input };
setMessages((prev) => [...prev, userMessage]);
setInput('');
setLoading(true);
try {
const response = await fetch('/api/chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
messages: [...messages, userMessage],
model: 'gpt-4o-mini',
}),
});
const data = await response.json();
setMessages((prev) => [
...prev,
{ role: 'assistant', content: data.content },
]);
} catch (error) {
console.error('Error:', error);
} finally {
setLoading(false);
}
};
return (
<div className="max-w-2xl mx-auto p-4">
<div className="space-y-4 mb-4">
{messages.map((msg, idx) => (
<div
key={idx}
className={`p-4 rounded-lg ${
msg.role === 'user'
? 'bg-blue-100 ml-auto max-w-[80%]'
: 'bg-gray-100 mr-auto max-w-[80%]'
}`}
>
{msg.content}
</div>
))}
</div>
<form onSubmit={handleSubmit} className="flex gap-2">
<input
type="text"
value={input}
onChange={(e) => setInput(e.target.value)}
className="flex-1 p-2 border rounded"
placeholder="Type your message..."
disabled={loading}
/>
<button
type="submit"
disabled={loading}
className="px-4 py-2 bg-blue-600 text-white rounded disabled:opacity-50"
>
{loading ? 'Sending...' : 'Send'}
</button>
</form>
</div>
);
}
Vue 3 Integration
Composable
// src/composables/useAI.js
// Vue composable exposing chat helpers plus shared loading/error state.
import { ref } from 'vue';
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: import.meta.env.VITE_GAUGAU_API_KEY,
  baseURL: 'https://api.gaugauai.com/v1',
  dangerouslyAllowBrowser: true,
});

export function useAI() {
  const loading = ref(false);
  const error = ref(null);

  // One-shot completion; resolves with the assistant's reply text.
  // Sets `error` and rethrows on failure.
  const chat = async (messages, model = 'gpt-4o-mini') => {
    loading.value = true;
    error.value = null;
    try {
      const response = await client.chat.completions.create({
        model,
        messages,
      });
      return response.choices[0].message.content;
    } catch (err) {
      error.value = err.message;
      throw err;
    } finally {
      loading.value = false;
    }
  };

  // Streaming completion; calls onChunk(delta, textSoFar) per chunk and
  // resolves with the full reply.
  // FIX: default the model like chat() does — previously a missing model
  // argument sent `model: undefined` to the API and failed the request.
  const streamChat = async (messages, model = 'gpt-4o-mini', onChunk) => {
    loading.value = true;
    error.value = null;
    try {
      const stream = await client.chat.completions.create({
        model,
        messages,
        stream: true,
      });
      let fullResponse = '';
      for await (const chunk of stream) {
        const content = chunk.choices[0]?.delta?.content || '';
        fullResponse += content;
        onChunk(content, fullResponse);
      }
      return fullResponse;
    } catch (err) {
      error.value = err.message;
      throw err;
    } finally {
      loading.value = false;
    }
  };

  return {
    loading,
    error,
    chat,
    streamChat,
  };
}
Chat Component
<!-- src/components/ChatBox.vue -->
<!--
  Request/response chat box built on the useAI composable.
  NOTE(review): useAI calls the GauGau API directly from the browser
  (dangerouslyAllowBrowser) — fine for demos, but move behind a backend
  route before production.
-->
<template>
  <div class="chat-container">
    <!-- Transcript: one bubble per message, styled by role -->
    <div class="messages">
      <div
        v-for="(msg, idx) in messages"
        :key="idx"
        :class="['message', msg.role]"
      >
        <strong>{{ msg.role === 'user' ? 'You' : 'AI' }}:</strong>
        <p>{{ msg.content }}</p>
      </div>
      <div v-if="loading" class="loading">AI is thinking...</div>
    </div>
    <!-- .prevent stops the native page reload on form submission -->
    <form @submit.prevent="handleSubmit">
      <input
        v-model="input"
        type="text"
        placeholder="Type your message..."
        :disabled="loading"
      />
      <button type="submit" :disabled="loading">Send</button>
    </form>
  </div>
</template>
<script setup>
import { ref } from 'vue';
import { useAI } from '../composables/useAI';

// `loading` is reactive state owned by the composable; `chat` performs
// the completion request.
const { loading, chat } = useAI();
const messages = ref([]);
const input = ref('');

// Append the user's message, send the full transcript, and append the
// assistant's reply — or a fallback message if the call fails.
const handleSubmit = async () => {
  if (!input.value.trim()) return;
  const userMessage = { role: 'user', content: input.value };
  messages.value.push(userMessage);
  input.value = '';
  try {
    const response = await chat(messages.value);
    messages.value.push({ role: 'assistant', content: response });
  } catch (error) {
    console.error('AI Error:', error);
    messages.value.push({
      role: 'assistant',
      content: 'Sorry, an error occurred.',
    });
  }
};
</script>
<style scoped>
/* Layout: fixed-height scrollable transcript above a flex input row. */
.chat-container {
  max-width: 600px;
  margin: 0 auto;
  padding: 20px;
}
.messages {
  height: 400px;
  overflow-y: auto;
  margin-bottom: 20px;
  border: 1px solid #ddd;
  padding: 10px;
  border-radius: 8px;
}
.message {
  margin-bottom: 15px;
  padding: 10px;
  border-radius: 8px;
}
/* User bubbles sit to the right, assistant bubbles to the left. */
.message.user {
  background: #e3f2fd;
  margin-left: 20%;
}
.message.assistant {
  background: #f5f5f5;
  margin-right: 20%;
}
form {
  display: flex;
  gap: 10px;
}
input {
  flex: 1;
  padding: 10px;
  border: 1px solid #ddd;
  border-radius: 4px;
}
button {
  padding: 10px 20px;
  background: #2196f3;
  color: white;
  border: none;
  border-radius: 4px;
  cursor: pointer;
}
button:disabled {
  opacity: 0.5;
  cursor: not-allowed;
}
</style>
Express.js Backend
// server.js
// Express backend that proxies chat requests to the GauGau AI API so the
// key stays server-side.
import express from 'express';
import OpenAI from 'openai';
import cors from 'cors';

const app = express();
app.use(cors());
app.use(express.json());

const client = new OpenAI({
  apiKey: process.env.GAUGAU_API_KEY,
  baseURL: 'https://api.gaugauai.com/v1',
});

// Chat endpoint — body: { messages, model? }; returns { content, usage }.
app.post('/api/chat', async (req, res) => {
  try {
    const { messages, model = 'gpt-4o-mini' } = req.body;
    const response = await client.chat.completions.create({
      model,
      messages,
    });
    res.json({
      content: response.choices[0].message.content,
      usage: response.usage,
    });
  } catch (error) {
    console.error('AI Error:', error);
    res.status(500).json({ error: 'Failed to generate response' });
  }
});

// Streaming endpoint — emits SSE `data:` frames, terminated by [DONE].
app.post('/api/chat/stream', async (req, res) => {
  try {
    const { messages, model = 'gpt-4o-mini' } = req.body;
    res.setHeader('Content-Type', 'text/event-stream');
    res.setHeader('Cache-Control', 'no-cache');
    res.setHeader('Connection', 'keep-alive');
    const stream = await client.chat.completions.create({
      model,
      messages,
      stream: true,
    });
    for await (const chunk of stream) {
      const content = chunk.choices[0]?.delta?.content || '';
      if (content) {
        res.write(`data: ${JSON.stringify({ content })}\n\n`);
      }
    }
    res.write('data: [DONE]\n\n');
    res.end();
  } catch (error) {
    console.error('Streaming error:', error);
    // FIX: once SSE headers have been flushed, res.status(...).json(...)
    // throws ERR_HTTP_HEADERS_SENT — report mid-stream failures in-band.
    if (res.headersSent) {
      res.write(`data: ${JSON.stringify({ error: 'Streaming failed' })}\n\n`);
      res.end();
    } else {
      res.status(500).json({ error: 'Streaming failed' });
    }
  }
});

const PORT = process.env.PORT || 3000;
app.listen(PORT, () => {
  console.log(`Server running on port ${PORT}`);
});
FastAPI Backend (Python)
# main.py
# FastAPI backend proxying chat requests to the GauGau AI API so the key
# stays server-side.
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from openai import OpenAI
import os
import json

app = FastAPI()

# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers for credentialed requests — lock allow_origins down
# to the real frontend origin(s) in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

client = OpenAI(
    api_key=os.getenv("GAUGAU_API_KEY"),
    base_url="https://api.gaugauai.com/v1"
)


class ChatRequest(BaseModel):
    # Transcript in OpenAI message format ({"role", "content"} dicts).
    messages: list
    model: str = "gpt-4o-mini"


@app.post("/api/chat")
def chat(request: ChatRequest):
    """One-shot completion; returns the reply text plus token usage.

    FIX: declared as a plain ``def`` (not ``async def``) because this OpenAI
    client is synchronous — a blocking call inside an ``async`` endpoint
    stalls the whole event loop, whereas FastAPI runs sync endpoints in its
    threadpool.
    """
    try:
        response = client.chat.completions.create(
            model=request.model,
            messages=request.messages
        )
        return {
            "content": response.choices[0].message.content,
            "usage": {
                "prompt_tokens": response.usage.prompt_tokens,
                "completion_tokens": response.usage.completion_tokens,
                "total_tokens": response.usage.total_tokens
            }
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/api/chat/stream")
def chat_stream(request: ChatRequest):
    """Server-sent-events stream of completion deltas, terminated by [DONE].

    FIX: uses a *sync* generator for the same reason ``chat`` is sync — the
    client blocks, and StreamingResponse iterates sync generators in a
    threadpool without blocking the event loop.
    """
    def generate():
        try:
            stream = client.chat.completions.create(
                model=request.model,
                messages=request.messages,
                stream=True
            )
            for chunk in stream:
                content = chunk.choices[0].delta.content or ""
                if content:
                    yield f"data: {json.dumps({'content': content})}\n\n"
            yield "data: [DONE]\n\n"
        except Exception as e:
            # Headers are already sent, so failures can only be reported
            # in-band as an SSE frame.
            yield f"data: {json.dumps({'error': str(e)})}\n\n"

    return StreamingResponse(generate(), media_type="text/event-stream")


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
Best Practices
1. Never Expose API Keys in Frontend
❌ Bad:
const client = new OpenAI({
apiKey: 'gaugau_xxx', // Exposed in browser!
baseURL: 'https://api.gaugauai.com/v1',
dangerouslyAllowBrowser: true,
});
✅ Good:
// Use backend API route
const response = await fetch('/api/chat', {
method: 'POST',
body: JSON.stringify({ messages }),
});
2. Implement Rate Limiting
// Express middleware
import rateLimit from 'express-rate-limit';
const limiter = rateLimit({
windowMs: 15 * 60 * 1000, // 15 minutes
max: 100, // Limit each IP to 100 requests per windowMs
});
app.use('/api/chat', limiter);
3. Add Error Handling
try {
const response = await client.chat.completions.create({
model: 'gpt-4o-mini',
messages,
});
return response.choices[0].message.content;
} catch (error) {
if (error.status === 429) {
// Rate limit exceeded
return 'Too many requests. Please try again later.';
} else if (error.status === 401) {
// Invalid API key
console.error('Invalid API key');
return 'Authentication error.';
} else {
// Other errors
console.error('AI Error:', error);
return 'An error occurred. Please try again.';
}
}
4. Use Environment Variables
# .env
GAUGAU_API_KEY=your_api_key_here
// Load with dotenv
import 'dotenv/config';
const apiKey = process.env.GAUGAU_API_KEY;
Conclusion
Integrating GauGau AI into your application is straightforward regardless of your framework choice. Key takeaways:
- Always use backend API routes to protect your API key
- Implement streaming for better UX
- Add proper error handling and rate limiting
- Use environment variables for configuration
- Monitor usage and costs
Start building AI-powered features into your applications today!
Resources
Questions? Contact us at @gaugauai or support@gaugauai.com.
Related Posts

Getting Started with GauGau AI: Your Complete Guide
Learn how to integrate GauGau AI into your applications in minutes. This comprehensive guide covers API setup, authentication, and your first API call.

How to Build a Multi-Model AI Application with GauGau AI
Learn how to create intelligent applications that leverage multiple AI models for different tasks. This tutorial covers architecture patterns, model selection strategies, and practical implementation.