This starter example shows how to use Next.js, the Vercel AI SDK, Ollama, and ModelFusion to create a ChatGPT-like, AI-powered streaming chatbot.
- Install Ollama on your machine.
- Clone the repository: `git clone https://github.com/lgrammel/modelfusion-ollama-nextjs-starter.git`
- Install dependencies: `npm install`
- Start the development server: `npm run dev`
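Once the development server is running, you can optionally verify that the Ollama server itself is reachable. By default it listens on port 11434, and its `/api/tags` endpoint lists the models you have pulled locally (a quick sanity check; this is not part of the starter itself):

```sh
# The Ollama server listens on localhost:11434 by default;
# /api/tags returns the models that have been pulled locally.
curl http://localhost:11434/api/tags
```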
For each example, you also need to pull the corresponding AI model with Ollama.
- **Llama 2**
  - Pull the model: `ollama pull llama2:chat`
  - Go to http://localhost:3000/llama2
  - Code: `app/api/llama2/route.ts`
- **Mistral**
  - Pull the model: `ollama pull mistral:text`
  - Go to http://localhost:3000/mistral
  - Code: `app/api/mistral/route.ts`
- **OpenHermes 2.5 Mistral**
  - Pull the model: `ollama pull openhermes2.5-mistral`
  - Go to http://localhost:3000/openhermes
  - Code: `app/api/openhermes/route.ts`
- **Neural Chat**
  - Pull the model: `ollama pull neural-chat`
  - Go to http://localhost:3000/neural-chat
  - Code: `app/api/neural-chat/route.ts`
- **Vicuna**
  - Pull the model: `ollama pull vicuna`
  - Go to http://localhost:3000/vicuna
  - Code: `app/api/vicuna/route.ts`
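Each example has its own API route under `app/api/`. The route for the Llama 2 example (`app/api/llama2/route.ts`) looks like this: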
```ts
import { ModelFusionTextStream } from "@modelfusion/vercel-ai";
import { Message, StreamingTextResponse } from "ai";
import { Llama2Prompt, TextChatMessage, ollama, streamText } from "modelfusion";

export const runtime = "edge";

export async function POST(req: Request) {
  const { messages }: { messages: Message[] } = await req.json();

  // Use ModelFusion to call Ollama:
  const textStream = await streamText(
    ollama
      .TextGenerator({
        model: "llama2:chat",
        maxCompletionTokens: -1, // infinite generation
        temperature: 0,
        raw: true, // use raw inputs and map to prompt template below
      })
      .withPromptTemplate(Llama2Prompt.chat()),
    {
      system:
        "You are an AI chat bot. " +
        "Follow the user's instructions carefully.",

      // map Vercel AI SDK Message to ModelFusion TextChatMessage:
      messages: messages.filter(
        // only user and assistant roles are supported:
        (message) => message.role === "user" || message.role === "assistant"
      ) as TextChatMessage[],
    }
  );

  // Return the result using the Vercel AI SDK:
  return new StreamingTextResponse(
    ModelFusionTextStream(
      textStream,
      // optional callbacks:
      {
        onStart() {
          console.log("onStart");
        },
        onToken(token) {
          console.log("onToken", token);
        },
        onCompletion() {
          console.log("onCompletion");
        },
        onFinal(completion) {
          console.log("onFinal", completion);
        },
      }
    )
  );
}
```
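On the client side, pages built with the Vercel AI SDK typically use its `useChat` hook to call such a route. The following is a minimal sketch, not the starter's actual page (the real `app/llama2/page.tsx` may differ in markup and styling); it assumes the hook is pointed at the Llama 2 route via its `api` option:

```tsx
"use client";

// Minimal chat page sketch using the Vercel AI SDK's useChat hook.
// The markup here is illustrative; the starter's actual page may differ.
import { useChat } from "ai/react";

export default function Chat() {
  // useChat POSTs the conversation to the API route and streams the
  // response tokens into `messages` as they arrive:
  const { messages, input, handleInputChange, handleSubmit } = useChat({
    api: "/api/llama2", // the route shown above
  });

  return (
    <div>
      {messages.map((message) => (
        <div key={message.id}>
          {message.role === "user" ? "User: " : "AI: "}
          {message.content}
        </div>
      ))}
      <form onSubmit={handleSubmit}>
        <input
          value={input}
          onChange={handleInputChange}
          placeholder="Say something..."
        />
      </form>
    </div>
  );
}
```

Because the route returns a `StreamingTextResponse`, tokens are streamed to the browser as they are generated, which is what produces the ChatGPT-like incremental typing effect.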