Initial commit

This commit is contained in:
Sebastian
2025-11-26 19:00:04 +00:00
commit 0ba05b6483
27 changed files with 2517 additions and 0 deletions

View File

@@ -0,0 +1,154 @@
import { NextRequest, NextResponse } from 'next/server';
import { type ImageGenerationConfig, getSafetySettings } from '@/lib/google-ai';
import { vertexAI } from '@/lib/google-ai-server';
export const runtime = 'nodejs';
export const maxDuration = 60;

/**
 * POST /api/generate-image
 *
 * Generates one or more images with a Vertex AI generative model.
 * Request body: ImageGenerationConfig (model and prompt are required).
 * Responds with { images, model, prompt, aspectRatio, count } on success,
 * or { error, details, ... } with status 400/500 on failure.
 */
export async function POST(req: NextRequest) {
  try {
    const body: ImageGenerationConfig = await req.json();
    const {
      model,
      prompt,
      aspectRatio = '1:1',
      negativePrompt,
      temperature = 1,
      safetyFilterLevel = 'block_some',
      referenceImages,
    } = body;
    // NOTE(review): body.numberOfImages is accepted by the client but was never
    // forwarded to the model (generateContent drives candidates itself).
    // TODO: map it to generationConfig.candidateCount once confirmed supported.

    if (!prompt || !model) {
      return NextResponse.json(
        { error: 'Prompt y modelo son requeridos' },
        { status: 400 }
      );
    }

    // Obtain the generative model handle from Vertex AI.
    const generativeModel = vertexAI.getGenerativeModel({
      model: model,
      safetySettings: getSafetySettings(safetyFilterLevel) as any,
      generationConfig: {
        temperature: temperature,
        maxOutputTokens: 8192,
      },
    });

    // Build the full prompt: negative prompt and aspect-ratio hints are
    // appended as plain-text instructions.
    let fullPrompt = prompt;
    if (negativePrompt) {
      fullPrompt += `\n\nNo incluir: ${negativePrompt}`;
    }
    if (aspectRatio !== '1:1') {
      fullPrompt += `\n\nGenera la imagen en formato ${aspectRatio}.`;
    }

    // Assemble request parts: reference images first, then the text prompt.
    const parts: any[] = [];
    if (referenceImages && referenceImages.length > 0) {
      for (const refImg of referenceImages) {
        parts.push({
          inlineData: {
            data: refImg.data,
            mimeType: refImg.mimeType,
          },
        });
      }
    }
    parts.push({ text: fullPrompt });

    const result = await generativeModel.generateContent({
      contents: [
        {
          role: 'user',
          parts: parts,
        },
      ],
    });
    const response = result.response;

    // Collect every inline-data (image) part across all candidates.
    const images: string[] = [];
    if (response.candidates && response.candidates.length > 0) {
      for (const candidate of response.candidates) {
        if (candidate.content?.parts) {
          for (const part of candidate.content.parts) {
            if ('inlineData' in part && part.inlineData) {
              const imageData = part.inlineData.data;
              if (imageData) {
                images.push(imageData);
              }
            }
          }
        }
      }
    }

    // The model answered but produced no image data: surface debug info.
    if (images.length === 0) {
      console.error('No se encontraron imágenes en la respuesta');
      return NextResponse.json(
        {
          error: 'El modelo no generó imágenes',
          details: 'El modelo respondió pero no incluyó datos de imagen',
          response: response,
          suggestion: 'Prueba con un prompt más descriptivo o cambia el modelo',
        },
        { status: 500 }
      );
    }

    return NextResponse.json({
      images: images,
      model: model,
      prompt: prompt,
      aspectRatio: aspectRatio,
      count: images.length,
    });
  } catch (error: unknown) {
    console.error('Error generando imagen:', error);
    // FIX: narrow the unknown error before touching .message (strict-mode
    // safe; the original `error: any` could crash on non-Error throws).
    const message = error instanceof Error ? error.message : String(error);
    let errorMessage = 'Error generando imagen';
    let errorDetails = message;
    if (message.includes('credentials') || message.includes('authentication')) {
      errorMessage = 'Error de autenticación';
      errorDetails = 'Verifica las credenciales del Service Account';
    } else if (message.includes('quota')) {
      errorMessage = 'Límite de cuota excedido';
      errorDetails = 'Has alcanzado el límite de uso de la API';
    } else if (message.includes('safety')) {
      errorMessage = 'Contenido bloqueado por seguridad';
      errorDetails = 'El prompt fue bloqueado por políticas de seguridad';
    } else if (message.includes('permission')) {
      errorMessage = 'Error de permisos';
      errorDetails = 'El Service Account no tiene permisos para Vertex AI';
    }
    return NextResponse.json(
      {
        error: errorMessage,
        details: errorDetails,
        // Only expose stack traces outside production builds.
        stack:
          process.env.NODE_ENV === 'development' && error instanceof Error
            ? error.stack
            : undefined,
      },
      { status: 500 }
    );
  }
}

View File

@@ -0,0 +1,163 @@
import { NextRequest, NextResponse } from 'next/server';
import { type VideoGenerationConfig } from '@/lib/google-ai';
import { projectId, location, credentials } from '@/lib/google-ai-server';
import { GoogleAuth } from 'google-auth-library';
export const runtime = 'nodejs';
export const maxDuration = 300; // Video generation can take much longer.

/**
 * POST /api/generate-video
 *
 * Generates a video with a Vertex AI Veo model via the raw REST :predict
 * endpoint (authenticated with a Service Account access token).
 * Request body: VideoGenerationConfig (model and prompt are required).
 * Responds with { videoData, model, prompt, aspectRatio, duration } on
 * success, or { error, details, ... } with status 400/500 on failure.
 */
export async function POST(req: NextRequest) {
  try {
    const body: VideoGenerationConfig = await req.json();
    const {
      model,
      prompt,
      aspectRatio = '16:9',
      duration = 5,
      negativePrompt,
    } = body;
    if (!prompt || !model) {
      return NextResponse.json(
        { error: 'Prompt y modelo son requeridos' },
        { status: 400 }
      );
    }

    // Authenticate with the Service Account credentials.
    const auth = new GoogleAuth({
      credentials: credentials,
      scopes: ['https://www.googleapis.com/auth/cloud-platform'],
    });
    const client = await auth.getClient();
    const accessToken = await client.getAccessToken();
    if (!accessToken.token) {
      throw new Error('No se pudo obtener el token de acceso');
    }

    // Regional REST endpoint for the publisher model.
    const endpoint = `https://${location}-aiplatform.googleapis.com/v1/projects/${projectId}/locations/${location}/publishers/google/models/${model}:predict`;

    // Negative prompt is appended as a plain-text instruction.
    let fullPrompt = prompt;
    if (negativePrompt) {
      fullPrompt += `\n\nNegative prompt: ${negativePrompt}`;
    }

    const payload = {
      instances: [
        {
          prompt: fullPrompt,
        },
      ],
      parameters: {
        aspectRatio: aspectRatio,
        videoDuration: `${duration}s`,
      },
    };
    console.log(`Endpoint: ${endpoint}`);

    // Call the Vertex AI REST API directly.
    const response = await fetch(endpoint, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${accessToken.token}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(payload),
    });
    const data = await response.json();
    if (!response.ok) {
      console.error('Error de la API:', data);
      throw new Error(
        data.error?.message ||
        `Error ${response.status}: ${response.statusText}`
      );
    }

    // The base64 video may arrive under several field names depending on the
    // model version; check each known variant in priority order.
    let videoData: string | null = null;
    if (data.predictions && data.predictions.length > 0) {
      const prediction = data.predictions[0];
      if (prediction.bytesBase64Encoded) {
        videoData = prediction.bytesBase64Encoded;
      } else if (prediction.videoBase64) {
        videoData = prediction.videoBase64;
      } else if (prediction.video) {
        videoData = prediction.video;
      }
    }

    // The model answered but produced no video data: surface debug info.
    if (!videoData) {
      console.error('No se encontró video en la respuesta');
      return NextResponse.json(
        {
          error: 'El modelo no generó un video',
          details: 'El modelo respondió pero no incluyó datos de video en el formato esperado',
          response: data,
          suggestion: 'Los modelos Veo pueden necesitar acceso especial o estar en preview. Verifica que tu proyecto tenga acceso a estos modelos.',
        },
        { status: 500 }
      );
    }

    return NextResponse.json({
      videoData: videoData,
      model: model,
      prompt: prompt,
      aspectRatio: aspectRatio,
      duration: duration,
    });
  } catch (error: unknown) {
    console.error('Error generando video:', error);
    // FIX: narrow the unknown error before touching .message (strict-mode
    // safe; the original `error: any` could crash on non-Error throws).
    const message = error instanceof Error ? error.message : String(error);
    let errorMessage = 'Error generando video';
    let errorDetails = message;
    if (message.includes('credentials') || message.includes('authentication')) {
      errorMessage = 'Error de autenticación';
      errorDetails = 'Verifica las credenciales del Service Account';
    } else if (message.includes('Quota exceeded') || message.includes('quota')) {
      errorMessage = 'Cuota de uso excedida';
      errorDetails = 'Necesitas solicitar un aumento de cuota para los modelos Veo en Google Cloud Console. Ve a: https://console.cloud.google.com/iam-admin/quotas y busca "Online prediction requests per base model"';
    } else if (message.includes('safety')) {
      errorMessage = 'Contenido bloqueado por seguridad';
      errorDetails = 'El prompt fue bloqueado por políticas de seguridad';
    } else if (message.includes('permission') || message.includes('403')) {
      errorMessage = 'Error de permisos';
      errorDetails = 'El Service Account no tiene permisos para Vertex AI o los modelos Veo no están habilitados en tu proyecto';
    } else if (message.includes('not found') || message.includes('404')) {
      errorMessage = 'Modelo no encontrado';
      errorDetails = 'El modelo Veo puede no estar disponible en tu región (us-central1). Prueba solicitar acceso a los modelos Veo en Google Cloud Console.';
    } else if (message.includes('Internal')) {
      errorMessage = 'Error interno del servidor';
      errorDetails = 'Los modelos Veo están en preview y pueden tener disponibilidad limitada. Intenta con otro modelo o más tarde.';
    }
    return NextResponse.json(
      {
        error: errorMessage,
        details: errorDetails,
        fullError: message,
        // Only expose stack traces outside production builds.
        stack:
          process.env.NODE_ENV === 'development' && error instanceof Error
            ? error.stack
            : undefined,
      },
      { status: 500 }
    );
  }
}

55
frontend/app/globals.css Normal file
View File

@@ -0,0 +1,55 @@
@tailwind base;
@tailwind components;
@tailwind utilities;

/*
 * Design tokens for the shadcn/ui-style theme.
 * Each variable holds a bare HSL triplet (hue saturation% lightness%)
 * consumed by Tailwind as hsl(var(--token)), so colors can be swapped
 * per theme without touching component classes.
 */
@layer base {
  /* Light theme (default). */
  :root {
    --background: 0 0% 100%;
    --foreground: 222.2 84% 4.9%;
    --card: 0 0% 100%;
    --card-foreground: 222.2 84% 4.9%;
    --popover: 0 0% 100%;
    --popover-foreground: 222.2 84% 4.9%;
    --primary: 221.2 83.2% 53.3%;
    --primary-foreground: 210 40% 98%;
    --secondary: 210 40% 96.1%;
    --secondary-foreground: 222.2 47.4% 11.2%;
    --muted: 210 40% 96.1%;
    --muted-foreground: 215.4 16.3% 46.9%;
    --accent: 210 40% 96.1%;
    --accent-foreground: 222.2 47.4% 11.2%;
    --border: 214.3 31.8% 91.4%;
    --input: 214.3 31.8% 91.4%;
    --ring: 221.2 83.2% 53.3%;
    /* Base corner radius shared by cards, inputs and buttons. */
    --radius: 0.5rem;
  }

  /* Dark theme: activated by adding the `dark` class to an ancestor. */
  .dark {
    --background: 222.2 84% 4.9%;
    --foreground: 210 40% 98%;
    --card: 222.2 84% 4.9%;
    --card-foreground: 210 40% 98%;
    --popover: 222.2 84% 4.9%;
    --popover-foreground: 210 40% 98%;
    --primary: 217.2 91.2% 59.8%;
    --primary-foreground: 222.2 47.4% 11.2%;
    --secondary: 217.2 32.6% 17.5%;
    --secondary-foreground: 210 40% 98%;
    --muted: 217.2 32.6% 17.5%;
    --muted-foreground: 215 20.2% 65.1%;
    --accent: 217.2 32.6% 17.5%;
    --accent-foreground: 210 40% 98%;
    --border: 217.2 32.6% 17.5%;
    --input: 217.2 32.6% 17.5%;
    --ring: 224.3 76.3% 48%;
  }
}

/* Global element defaults mapped onto the tokens above. */
@layer base {
  * {
    @apply border-border;
  }
  body {
    @apply bg-background text-foreground;
  }
}
22
frontend/app/layout.tsx Normal file
View File

@@ -0,0 +1,22 @@
import type { Metadata } from 'next'
import { Inter } from 'next/font/google'
import './globals.css'
/** Latin-subset Inter font, applied app-wide via the <body> class. */
const inter = Inter({ subsets: ['latin'] });

/** Static page metadata consumed by the Next.js App Router. */
export const metadata: Metadata = {
  title: 'Image Playground - Google Generative AI',
  description: 'Genera imágenes con modelos de Google Generative AI',
};

interface RootLayoutProps {
  children: React.ReactNode;
}

/** Root layout: wraps every page in the shared <html>/<body> shell. */
export default function RootLayout(props: RootLayoutProps) {
  const { children } = props;
  return (
    <html lang="es">
      <body className={inter.className}>{children}</body>
    </html>
  );
}

426
frontend/app/page.tsx Normal file
View File

@@ -0,0 +1,426 @@
'use client';
import { useState } from 'react';
import { Sparkles, Loader2, ImageIcon, Video } from 'lucide-react';
import { ModelSelector } from '@/components/model-selector';
import { ImageConfig } from '@/components/image-config';
import { VideoConfig } from '@/components/video-config';
import { ImageCard } from '@/components/image-card';
import { VideoCard } from '@/components/video-card';
import { ReferenceImages } from '@/components/reference-images';
// Which generation mode the UI is in.
type ContentType = 'image' | 'video';

// One generated image entry in the gallery history.
interface GeneratedImage {
  id: string;          // unique per generation (timestamp-index)
  imageData: string;   // base64-encoded image payload from the API
  prompt: string;      // prompt that produced this image
  model: string;       // model identifier used
  aspectRatio: string; // e.g. '1:1', '16:9'
  timestamp: number;   // epoch millis of generation
}

// One generated video entry in the history.
interface GeneratedVideo {
  id: string;          // unique per generation (timestamp)
  videoData: string;   // base64-encoded video payload from the API
  prompt: string;      // prompt that produced this video
  model: string;       // model identifier used
  aspectRatio: string; // e.g. '16:9'
  duration: number;    // requested duration in seconds
  timestamp: number;   // epoch millis of generation
}

// A user-uploaded reference image sent alongside the prompt.
interface ReferenceImage {
  id: string;       // client-side identifier
  data: string;     // base64 payload forwarded to the API
  mimeType: string; // e.g. 'image/png'
  preview: string;  // data/object URL for thumbnail display
  name: string;     // original file name
}
/**
 * Main playground page: lets the user pick a model, configure image/video
 * options, submit a prompt and browse the generated results.
 */
export default function Home() {
  // Which kind of content is being generated.
  const [contentType, setContentType] = useState<ContentType>('image');

  // Model and generation configuration.
  const [selectedModel, setSelectedModel] = useState('gemini-2.5-flash-image');
  const [aspectRatio, setAspectRatio] = useState('1:1');
  const [numberOfImages, setNumberOfImages] = useState(1);
  const [videoDuration, setVideoDuration] = useState(5);
  const [negativePrompt, setNegativePrompt] = useState('');
  const [safetyLevel, setSafetyLevel] = useState<'block_none' | 'block_some' | 'block_most'>('block_some');
  const [referenceImages, setReferenceImages] = useState<ReferenceImage[]>([]);

  // Prompt / request state.
  const [prompt, setPrompt] = useState('');
  const [isGenerating, setIsGenerating] = useState(false);
  const [generatedImages, setGeneratedImages] = useState<GeneratedImage[]>([]);
  const [generatedVideos, setGeneratedVideos] = useState<GeneratedVideo[]>([]);
  const [error, setError] = useState<string | null>(null);

  // Switch content type and reset model/aspect-ratio to that type's defaults.
  const handleContentTypeChange = (type: ContentType) => {
    setContentType(type);
    if (type === 'image') {
      setSelectedModel('gemini-2.5-flash-image');
      setAspectRatio('1:1');
    } else {
      setSelectedModel('veo-3.0-generate-001');
      setAspectRatio('16:9');
    }
  };

  // Submit the current prompt to the image endpoint and prepend results.
  const handleGenerateImage = async () => {
    if (!prompt.trim()) return;
    setIsGenerating(true);
    setError(null);
    try {
      const response = await fetch('/api/generate-image', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model: selectedModel,
          prompt: prompt.trim(),
          aspectRatio,
          numberOfImages,
          negativePrompt: negativePrompt.trim() || undefined,
          safetyFilterLevel: safetyLevel,
          temperature: 1,
          referenceImages: referenceImages.length > 0
            ? referenceImages.map(img => ({
                data: img.data,
                mimeType: img.mimeType,
              }))
            : undefined,
        }),
      });
      const data = await response.json();
      if (!response.ok) {
        throw new Error(data.details || data.error || 'Error generando imagen');
      }
      const newImages: GeneratedImage[] = data.images.map((imageData: string, index: number) => ({
        id: `${Date.now()}-${index}`,
        imageData,
        prompt: prompt.trim(),
        model: selectedModel,
        aspectRatio,
        timestamp: Date.now(),
      }));
      // FIX: functional update avoids dropping results when state changed
      // while the request was in flight (stale-closure bug in the original).
      setGeneratedImages((prev) => [...newImages, ...prev]);
      setPrompt(''); // Clear prompt after a successful generation.
    } catch (err: unknown) {
      // FIX: narrow unknown before reading .message (strict-mode safe).
      setError(err instanceof Error ? err.message : String(err));
    } finally {
      setIsGenerating(false);
    }
  };

  // Submit the current prompt to the video endpoint and prepend the result.
  const handleGenerateVideo = async () => {
    if (!prompt.trim()) return;
    setIsGenerating(true);
    setError(null);
    try {
      const response = await fetch('/api/generate-video', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model: selectedModel,
          prompt: prompt.trim(),
          aspectRatio,
          duration: videoDuration,
          negativePrompt: negativePrompt.trim() || undefined,
          safetyFilterLevel: safetyLevel,
          temperature: 1,
        }),
      });
      const data = await response.json();
      if (!response.ok) {
        throw new Error(data.details || data.error || 'Error generando video');
      }
      const newVideo: GeneratedVideo = {
        id: `${Date.now()}`,
        videoData: data.videoData,
        prompt: prompt.trim(),
        model: selectedModel,
        aspectRatio,
        duration: videoDuration,
        timestamp: Date.now(),
      };
      // FIX: functional update for the same stale-closure reason as above.
      setGeneratedVideos((prev) => [newVideo, ...prev]);
      setPrompt(''); // Clear prompt after a successful generation.
    } catch (err: unknown) {
      setError(err instanceof Error ? err.message : String(err));
    } finally {
      setIsGenerating(false);
    }
  };

  // Route the submit action to the generator for the active content type.
  const handleGenerate = () => {
    if (isGenerating) return; // FIX: ignore re-submits while a request is in flight.
    if (contentType === 'image') {
      handleGenerateImage();
    } else {
      handleGenerateVideo();
    }
  };

  // Enter (without Shift) submits; Shift+Enter inserts a newline.
  // FIX: onKeyDown replaces the deprecated React onKeyPress event.
  const handleKeyDown = (e: React.KeyboardEvent) => {
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault();
      handleGenerate();
    }
  };

  return (
    <div className="min-h-screen bg-gradient-to-br from-background via-background to-secondary">
      <div className="container mx-auto px-4 py-8">
        {/* Header */}
        <div className="mb-8 text-center">
          <div className="flex items-center justify-center gap-3 mb-3">
            <Sparkles className="w-8 h-8 text-primary" />
            <h1 className="text-4xl font-bold bg-gradient-to-r from-primary to-purple-600 bg-clip-text text-transparent">
              AI Playground
            </h1>
          </div>
          <p className="text-muted-foreground">
            Genera imágenes y videos con Google Generative AI
          </p>
        </div>

        {/* Content-type selector (image / video) */}
        <div className="mb-6 flex justify-center">
          <div className="inline-flex bg-card border border-border rounded-lg p-1">
            <button
              onClick={() => handleContentTypeChange('image')}
              className={`
                flex items-center gap-2 px-6 py-2 rounded-md font-medium transition-all
                ${contentType === 'image'
                  ? 'bg-primary text-primary-foreground shadow-sm'
                  : 'text-muted-foreground hover:text-foreground'
                }
              `}
            >
              <ImageIcon className="w-4 h-4" />
              Imágenes
            </button>
            <button
              onClick={() => handleContentTypeChange('video')}
              className={`
                flex items-center gap-2 px-6 py-2 rounded-md font-medium transition-all
                ${contentType === 'video'
                  ? 'bg-primary text-primary-foreground shadow-sm'
                  : 'text-muted-foreground hover:text-foreground'
                }
              `}
            >
              <Video className="w-4 h-4" />
              Videos
            </button>
          </div>
        </div>

        <div className="grid lg:grid-cols-[350px,1fr] gap-6">
          {/* Configuration panel */}
          <div className="space-y-6">
            <div className="bg-card border border-border rounded-xl p-6 shadow-lg space-y-6">
              <ModelSelector
                selectedModel={selectedModel}
                onModelChange={setSelectedModel}
                contentType={contentType}
              />
              <div className="border-t border-border pt-6">
                {contentType === 'image' ? (
                  <ImageConfig
                    model={selectedModel}
                    aspectRatio={aspectRatio}
                    onAspectRatioChange={setAspectRatio}
                    numberOfImages={numberOfImages}
                    onNumberOfImagesChange={setNumberOfImages}
                    negativePrompt={negativePrompt}
                    onNegativePromptChange={setNegativePrompt}
                    safetyLevel={safetyLevel}
                    onSafetyLevelChange={setSafetyLevel}
                  />
                ) : (
                  <VideoConfig
                    model={selectedModel}
                    aspectRatio={aspectRatio}
                    onAspectRatioChange={setAspectRatio}
                    duration={videoDuration}
                    onDurationChange={setVideoDuration}
                    negativePrompt={negativePrompt}
                    onNegativePromptChange={setNegativePrompt}
                    safetyLevel={safetyLevel}
                    onSafetyLevelChange={setSafetyLevel}
                  />
                )}
              </div>
              {/* Reference images only make sense for image generation. */}
              {contentType === 'image' && (
                <div className="border-t border-border pt-6">
                  <ReferenceImages
                    images={referenceImages}
                    onImagesChange={setReferenceImages}
                    maxImages={3}
                  />
                </div>
              )}
            </div>

            {/* Stats */}
            <div className="bg-card border border-border rounded-xl p-4">
              <div className="flex items-center gap-2 text-sm text-muted-foreground mb-2">
                {contentType === 'image' ? (
                  <ImageIcon className="w-4 h-4" />
                ) : (
                  <Video className="w-4 h-4" />
                )}
                <span className="font-medium">Estadísticas</span>
              </div>
              <div className="space-y-1 text-sm">
                {contentType === 'image' ? (
                  <p>Imágenes generadas: <strong>{generatedImages.length}</strong></p>
                ) : (
                  <p>Videos generados: <strong>{generatedVideos.length}</strong></p>
                )}
                <p>Modelo actual: <strong>{selectedModel}</strong></p>
              </div>
            </div>
          </div>

          {/* Main area */}
          <div className="space-y-6">
            {/* Prompt input */}
            <div className="bg-card border border-border rounded-xl p-6 shadow-lg">
              <label className="text-sm font-medium text-foreground mb-2 block">
                Describe tu {contentType === 'image' ? 'imagen' : 'video'}
              </label>
              <div className="space-y-3">
                <textarea
                  value={prompt}
                  onChange={(e) => setPrompt(e.target.value)}
                  onKeyDown={handleKeyDown}
                  placeholder={
                    contentType === 'image'
                      ? 'Ejemplo: Un atardecer en una playa tropical con palmeras...'
                      : 'Ejemplo: Un timelapse de una ciudad con tráfico fluido al atardecer...'
                  }
                  className="w-full px-4 py-3 bg-secondary border border-border rounded-lg text-sm focus:outline-none focus:ring-2 focus:ring-primary resize-none"
                  rows={4}
                  disabled={isGenerating}
                />
                <button
                  onClick={handleGenerate}
                  disabled={isGenerating || !prompt.trim()}
                  className="w-full px-6 py-3 bg-primary text-primary-foreground rounded-lg font-medium hover:opacity-90 disabled:opacity-50 disabled:cursor-not-allowed transition-all flex items-center justify-center gap-2"
                >
                  {isGenerating ? (
                    <>
                      <Loader2 className="w-5 h-5 animate-spin" />
                      Generando...
                    </>
                  ) : (
                    <>
                      <Sparkles className="w-5 h-5" />
                      Generar {contentType === 'image' ? 'Imagen' : 'Video'}
                    </>
                  )}
                </button>
              </div>
              {/* Error message */}
              {error && (
                <div className="mt-4 p-4 bg-red-500/10 border border-red-500/20 rounded-lg">
                  <p className="text-sm text-red-500">
                    <strong>Error:</strong> {error}
                  </p>
                </div>
              )}
            </div>

            {/* Gallery */}
            <div>
              <h2 className="text-lg font-semibold mb-4 flex items-center gap-2">
                {contentType === 'image' ? (
                  <>
                    <ImageIcon className="w-5 h-5" />
                    Galería {generatedImages.length > 0 && `(${generatedImages.length})`}
                  </>
                ) : (
                  <>
                    <Video className="w-5 h-5" />
                    Videos {generatedVideos.length > 0 && `(${generatedVideos.length})`}
                  </>
                )}
              </h2>
              {contentType === 'image' && generatedImages.length === 0 && !isGenerating && (
                <div className="bg-card border border-dashed border-border rounded-xl p-12 text-center">
                  <ImageIcon className="w-16 h-16 text-muted-foreground mx-auto mb-4 opacity-50" />
                  <p className="text-muted-foreground">
                    Aún no has generado ninguna imagen.
                  </p>
                  <p className="text-sm text-muted-foreground mt-2">
                    Escribe un prompt y haz clic en &quot;Generar Imagen&quot; para comenzar.
                  </p>
                </div>
              )}
              {contentType === 'video' && generatedVideos.length === 0 && !isGenerating && (
                <div className="bg-card border border-dashed border-border rounded-xl p-12 text-center">
                  <Video className="w-16 h-16 text-muted-foreground mx-auto mb-4 opacity-50" />
                  <p className="text-muted-foreground">
                    Aún no has generado ningún video.
                  </p>
                  <p className="text-sm text-muted-foreground mt-2">
                    Escribe un prompt y haz clic en &quot;Generar Video&quot; para comenzar.
                  </p>
                </div>
              )}
              <div className="grid grid-cols-1 md:grid-cols-2 gap-4">
                {contentType === 'image' && generatedImages.map((image) => (
                  <ImageCard
                    key={image.id}
                    imageData={image.imageData}
                    prompt={image.prompt}
                    model={image.model}
                  />
                ))}
                {contentType === 'video' && generatedVideos.map((video) => (
                  <VideoCard
                    key={video.id}
                    videoData={video.videoData}
                    prompt={video.prompt}
                    model={video.model}
                  />
                ))}
              </div>
            </div>
          </div>
        </div>
      </div>
    </div>
  );
}