# Vercel AI SDK
The AI framework Sonamu uses. Key features:

- Text Generation
- Streaming
- Tool Calling
- Structured Output
- Transcription
## Text Generation

### generateText()

Generates plain text.
```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Tell me how to build an Express server with TypeScript',
});

console.log(result.text);
// => "To build an Express server..."
```
### Message-based conversation
```ts
const result = await generateText({
  model: openai('gpt-4o'),
  messages: [
    { role: 'system', content: 'You are a friendly programming assistant.' },
    { role: 'user', content: 'What is TypeScript?' },
    { role: 'assistant', content: 'TypeScript adds static types to JavaScript...' },
    { role: 'user', content: 'What are its advantages?' },
  ],
});
```
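To keep a multi-turn conversation going, maintain the message array yourself: append the assistant's reply after each call, then push the next user turn. A minimal sketch of that loop (the messages and the follow-up question are illustrative, and the `CoreMessage` type is assumed to be the one exported by the SDK version in use):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText, type CoreMessage } from 'ai';

// Conversation history maintained by the caller
const messages: CoreMessage[] = [
  { role: 'user', content: 'What is TypeScript?' },
];

const first = await generateText({ model: openai('gpt-4o'), messages });

// Append the reply, then the next user turn, and call again
messages.push({ role: 'assistant', content: first.text });
messages.push({ role: 'user', content: 'What are its main advantages?' });

const second = await generateText({ model: openai('gpt-4o'), messages });
console.log(second.text);
```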
## Streaming Responses

### streamText()

Streams text in real time.
```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai('gpt-4o'),
  prompt: 'Tell me a long story',
});

// Process the stream
for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
```
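Besides `textStream`, the object returned by `streamText` exposes promise-valued fields that resolve once the stream has finished; a minimal sketch, assuming the `text` and `usage` promises available in recent SDK versions:

```ts
// Resolve aggregate values after the stream has been fully consumed
const fullText = await result.text; // the complete generated text
const usage = await result.usage;   // token usage for the request

console.log(fullText.length, usage.totalTokens);
```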
### Integration with SSE
```ts
import { BaseModel, stream, api } from "sonamu";
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';
import { z } from "zod";

class ChatModelClass extends BaseModel {
  @stream({
    type: 'sse',
    events: z.object({
      chunk: z.object({
        text: z.string(),
      }),
      complete: z.object({
        totalTokens: z.number(),
      }),
    }),
  })
  @api({ compress: false })
  async *streamChat(message: string, ctx: Context) {
    const sse = ctx.createSSE(
      z.object({
        chunk: z.object({
          text: z.string(),
        }),
        complete: z.object({
          totalTokens: z.number(),
        }),
      })
    );

    try {
      const result = streamText({
        model: openai('gpt-4o'),
        messages: [
          { role: 'user', content: message },
        ],
      });

      // Publish chunks in real time
      for await (const chunk of result.textStream) {
        sse.publish('chunk', { text: chunk });
      }

      // Completion statistics
      const usage = await result.usage;
      sse.publish('complete', {
        totalTokens: usage.totalTokens,
      });
    } finally {
      await sse.end();
    }
  }
}
```
## Tool Calling

### Single tool
```ts
import { openai } from '@ai-sdk/openai';
import { generateText, tool } from 'ai';
import { z } from 'zod';

const result = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Tell me the current weather in Seoul',
  tools: {
    getWeather: tool({
      description: 'Looks up the current weather for a given city',
      parameters: z.object({
        city: z.string().describe('City name'),
      }),
      execute: async ({ city }) => {
        // Call a weather API
        const weather = await fetchWeather(city);
        return {
          temperature: weather.temp,
          condition: weather.condition,
        };
      },
    }),
  },
  maxSteps: 5, // maximum number of tool-call steps
});

console.log(result.text);
// => "The weather in Seoul is currently clear and 15°C."
```
### Multiple tools
```ts
const result = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Check the weather in Seoul and tell me whether I need an umbrella',
  tools: {
    getWeather: tool({
      description: 'Look up the weather',
      parameters: z.object({
        city: z.string(),
      }),
      execute: async ({ city }) => {
        return await fetchWeather(city);
      },
    }),
    checkUmbrella: tool({
      description: 'Decide whether an umbrella is needed based on the weather',
      parameters: z.object({
        condition: z.string().describe('Weather condition'),
      }),
      execute: async ({ condition }) => {
        return {
          needUmbrella: ['rain', 'snow'].includes(condition),
        };
      },
    }),
  },
  maxSteps: 10,
});
```
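To see which tools the model actually invoked and what they returned, the result object can be examined after the call; a minimal sketch, assuming the `toolCalls` and `toolResults` fields exposed by `generateText` in recent SDK versions:

```ts
// Tool invocations from the final step, with the arguments the model chose
for (const call of result.toolCalls) {
  console.log(call.toolName, call.args);
}

// Values returned by the corresponding execute() functions
console.log(result.toolResults);
```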
## Structured Output

### generateObject()

Generates structured data in JSON form.
```ts
import { openai } from '@ai-sdk/openai';
import { generateObject } from 'ai';
import { z } from 'zod';

const result = await generateObject({
  model: openai('gpt-4o'),
  schema: z.object({
    name: z.string(),
    age: z.number(),
    hobbies: z.array(z.string()),
    address: z.object({
      city: z.string(),
      country: z.string(),
    }),
  }),
  prompt: 'Generate information about Hong Gil-dong',
});

console.log(result.object);
// {
//   name: "Hong Gil-dong",
//   age: 30,
//   hobbies: ["reading", "travel"],
//   address: {
//     city: "Seoul",
//     country: "South Korea"
//   }
// }
```
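If the model's answer cannot be parsed into the requested schema, `generateObject` rejects rather than returning a partial object. A minimal sketch of handling that case, assuming the `NoObjectGeneratedError` class exported by recent versions of the `ai` package (`personSchema` is a placeholder for the `z.object(...)` schema shown above):

```ts
import { generateObject, NoObjectGeneratedError } from 'ai';

try {
  const { object } = await generateObject({
    model: openai('gpt-4o'),
    schema: personSchema, // the schema from the example above
    prompt: 'Generate information about Hong Gil-dong',
  });
  console.log(object);
} catch (error) {
  if (NoObjectGeneratedError.isInstance(error)) {
    // The model did not produce a schema-conforming object
    console.error('No valid object generated:', error.message);
  } else {
    throw error;
  }
}
```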
### Practical example
```ts
import { BaseModel, api } from "sonamu";
import { openai } from '@ai-sdk/openai';
import { generateObject } from 'ai';
import { z } from 'zod';

class ProductModelClass extends BaseModel {
  @api({ httpMethod: 'POST' })
  async generateProductDescription(productName: string) {
    const result = await generateObject({
      model: openai('gpt-4o'),
      schema: z.object({
        title: z.string(),
        description: z.string(),
        features: z.array(z.string()),
        price: z.number(),
        tags: z.array(z.string()),
      }),
      prompt: `Write a product description for ${productName}`,
    });

    // Save to DB
    const product = await this.saveOne({
      name: result.object.title,
      description: result.object.description,
      features: result.object.features,
      price: result.object.price,
      tags: result.object.tags,
    });

    return product;
  }
}
```
## Rtzr Provider (Speech Recognition)

Sonamu ships with a built-in provider for Rtzr, a Korean speech-to-text service.

### Configuration

```
RTZR_CLIENT_ID=your_client_id
RTZR_CLIENT_SECRET=your_client_secret
```
### Basic usage
```ts
import { rtzr } from 'sonamu/ai/providers/rtzr';

const model = rtzr.transcription('whisper');

const result = await model.doGenerate({
  audio: audioBuffer, // Uint8Array or Base64
  mediaType: 'audio/wav',
});

console.log(result.text);
// => "Hello, the weather is nice today"

console.log(result.segments);
// [
//   { text: "Hello", startSecond: 0, endSecond: 1 },
//   { text: "the weather is nice today", startSecond: 1, endSecond: 3 }
// ]
```
### File upload + speech recognition
```ts
import { BaseModel, upload, api } from "sonamu";
import { rtzr } from 'sonamu/ai/providers/rtzr';

class TranscriptionModelClass extends BaseModel {
  @upload({ mode: 'single' })
  @api({ httpMethod: 'POST' })
  async transcribeAudio() {
    const { file } = Sonamu.getUploadContext();
    if (!file) {
      throw new Error('No audio file was uploaded');
    }

    // Transcribe the audio
    const model = rtzr.transcription('whisper');
    const buffer = await file.toBuffer();
    const result = await model.doGenerate({
      audio: buffer,
      mediaType: file.mimetype,
    });

    // Save to DB
    await this.saveOne({
      audio_url: file.url,
      transcription: result.text,
      segments: result.segments,
      language: result.language,
      duration: result.durationInSeconds,
    });

    return {
      text: result.text,
      segments: result.segments,
    };
  }
}
```
### Rtzr options
```ts
const result = await model.doGenerate({
  audio: audioBuffer,
  mediaType: 'audio/wav',
  providerOptions: {
    rtzr: {
      domain: 'GENERAL',      // 'CALL' | 'GENERAL'
      language: 'ko',
      diarization: true,      // speaker diarization
      wordTimestamp: true,    // per-word timestamps
      profanityFilter: false, // profanity filtering
    },
  },
});
```
## Multimodal (Image Processing)

GPT-4o can accept images as input.
```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai('gpt-4o'),
  messages: [
    {
      role: 'user',
      content: [
        { type: 'text', text: 'What is in this image?' },
        {
          type: 'image',
          image: imageBuffer, // Uint8Array or URL
        },
      ],
    },
  ],
});

console.log(result.text);
// => "The image shows a cat..."
```
### Image upload + analysis
```ts
import { BaseModel, upload, api } from "sonamu";
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

class ImageAnalysisModelClass extends BaseModel {
  @upload({ mode: 'single' })
  @api({ httpMethod: 'POST' })
  async analyzeImage() {
    const { file } = Sonamu.getUploadContext();
    if (!file || !file.mimetype.startsWith('image/')) {
      throw new Error('An image file is required');
    }

    const buffer = await file.toBuffer();
    const result = await generateText({
      model: openai('gpt-4o'),
      messages: [
        {
          role: 'user',
          content: [
            { type: 'text', text: 'Analyze this image in detail' },
            { type: 'image', image: buffer },
          ],
        },
      ],
    });

    return {
      analysis: result.text,
      imageUrl: file.url,
    };
  }
}
```
## Error Handling
```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

try {
  const result = await generateText({
    model: openai('gpt-4o'),
    prompt: '...',
  });
  return result.text;
} catch (error) {
  if (error.name === 'AI_APICallError') {
    // The API call failed
    console.error('API Error:', error.message);
    console.error('Status:', error.statusCode);
  } else if (error.name === 'AI_InvalidPromptError') {
    // The prompt was invalid
    console.error('Invalid Prompt:', error.message);
  } else {
    // Anything else
    console.error('Unknown Error:', error);
  }
  throw error;
}
```
## Cost Tracking
```ts
const result = await generateText({
  model: openai('gpt-4o'),
  prompt: '...',
});

// Token usage
console.log('Prompt Tokens:', result.usage.promptTokens);
console.log('Completion Tokens:', result.usage.completionTokens);
console.log('Total Tokens:', result.usage.totalTokens);

// Cost calculation (example)
const costPerToken = 0.00003; // GPT-4o price per token
const cost = result.usage.totalTokens * costPerToken;
console.log('Cost:', cost);
```
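Prompt and completion tokens are usually billed at different rates, so splitting the estimate by direction is more accurate than a single flat rate. A sketch with placeholder prices (the dollar values below are assumptions for illustration only; check the provider's current price list):

```ts
// Hypothetical per-token prices; substitute the provider's current rates
const INPUT_PRICE_PER_TOKEN = 0.0000025; // prompt (input) tokens
const OUTPUT_PRICE_PER_TOKEN = 0.00001;  // completion (output) tokens

const estimatedCost =
  result.usage.promptTokens * INPUT_PRICE_PER_TOKEN +
  result.usage.completionTokens * OUTPUT_PRICE_PER_TOKEN;

console.log('Estimated cost (USD):', estimatedCost.toFixed(6));
```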
## Full Integration Example

### AI Chat API
```ts
import { BaseModel, api } from "sonamu";
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';
import { z } from 'zod';

class ChatModelClass extends BaseModel {
  @api({ httpMethod: 'POST' })
  async chat(
    message: string,
    conversationId: number | null,
    ctx: Context
  ) {
    // Load the conversation history
    const history = conversationId
      ? await ConversationModel.findById(conversationId)
      : null;

    const messages = history?.messages || [];
    messages.push({
      role: 'user',
      content: message,
    });

    // Generate the AI response
    const result = await generateText({
      model: openai('gpt-4o'),
      messages: [
        {
          role: 'system',
          content: 'You are a friendly customer support chatbot.',
        },
        ...messages,
      ],
      temperature: 0.7,
      maxTokens: 500,
    });

    // Persist the response
    messages.push({
      role: 'assistant',
      content: result.text,
    });

    const conversation = await ConversationModel.saveOne({
      id: conversationId,
      user_id: ctx.user.id,
      messages,
      token_usage: result.usage.totalTokens,
    });

    return {
      conversationId: conversation.id,
      message: result.text,
      usage: result.usage,
    };
  }
}
```
## Caveats

Things to keep in mind when using the AI SDK:

- **API key security**: use environment variables, never hard-coded keys.

  ```ts
  // ❌ Hard-coded
  const model = openai('gpt-4o', { apiKey: 'sk-...' });

  // ✅ Environment variable
  const model = openai('gpt-4o'); // OPENAI_API_KEY is picked up automatically
  ```

- **Error handling**: always wrap calls in try-catch.

  ```ts
  try {
    const result = await generateText({ ... });
  } catch (error) {
    console.error(error);
  }
  ```

- **Token limits**: set maxTokens.

  ```ts
  generateText({
    model: openai('gpt-4o'),
    prompt: '...',
    maxTokens: 1000, // cost control
  });
  ```

- **Streaming cleanup**: close the stream even when an error occurs.

  ```ts
  try {
    for await (const chunk of result.textStream) {
      // ...
    }
  } finally {
    // cleanup work
  }
  ```

- **Rtzr file size**: large files need to be chunked.

  ```ts
  if (file.size > 10 * 1024 * 1024) {
    throw new Error('File size must be 10MB or less');
  }
  ```

- **Image size**: check GPT-4o's image limits.

  ```ts
  // Image size limit (20MB)
  if (imageBuffer.length > 20 * 1024 * 1024) {
    throw new Error('Image is too large');
  }
  ```