Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions .changeset/xsai-voice-provider.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
---
"@voltagent/voice": minor
---

feat(xsAI): add xsAI voice provider

This adds support for the xsAI voice provider, including:

- Core provider implementation
- Support for API key authentication and custom headers
- Base URL configuration for API endpoints
4 changes: 4 additions & 0 deletions examples/with-voice-xsai/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
node_modules
dist
.DS_Store
.env
53 changes: 53 additions & 0 deletions examples/with-voice-xsai/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
<div align="center">
<a href="https://voltagent.dev/">
<img width="1800" alt="435380213-b6253409-8741-462b-a346-834cd18565a9" src="https://github.com/user-attachments/assets/452a03e7-eeda-4394-9ee7-0ffbcf37245c" />
</a>

<br/>
<br/>

<div align="center">
<a href="https://voltagent.dev">Home Page</a> |
<a href="https://voltagent.dev/docs/">Documentation</a> |
<a href="https://github.com/voltagent/voltagent/tree/main/examples">Examples</a> |
<a href="https://s.voltagent.dev/discord">Discord</a> |
<a href="https://voltagent.dev/blog/">Blog</a>
</div>
</div>

<br/>

<div align="center">
<strong>VoltAgent is an open source TypeScript framework for building and orchestrating AI agents.</strong><br>
Escape the limitations of no-code builders and the complexity of starting from scratch.
<br />
<br />
</div>

<div align="center">

[![npm version](https://img.shields.io/npm/v/@voltagent/core.svg)](https://www.npmjs.com/package/@voltagent/core)
[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg)](CODE_OF_CONDUCT.md)
[![Discord](https://img.shields.io/discord/1361559153780195478.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://s.voltagent.dev/discord)
[![Twitter Follow](https://img.shields.io/twitter/follow/voltagent_dev?style=social)](https://twitter.com/voltagent_dev)

</div>

<br/>

<div align="center">
<a href="https://voltagent.dev/">
<img width="896" alt="VoltAgent Schema" src="https://github.com/user-attachments/assets/f0627868-6153-4f63-ba7f-bdfcc5dd603d" />
</a>

</div>

## VoltAgent: Build AI Agents Fast and Flexibly

VoltAgent is an open-source TypeScript framework for creating and managing AI agents. It provides modular components to build, customize, and scale agents with ease. From connecting to APIs and memory management to supporting multiple LLMs, VoltAgent simplifies the process of creating sophisticated AI systems. It enables fast development, maintains clean code, and offers flexibility to switch between models and tools without vendor lock-in.

## Try Example

```bash
npm create voltagent-app@latest -- --example with-voice-xsai
```
Binary file added examples/with-voice-xsai/output.mp3
Binary file not shown.
35 changes: 35 additions & 0 deletions examples/with-voice-xsai/package.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
{
"name": "voltagent-example-with-voice-xsai",
"private": true,
"keywords": [
"voltagent",
"ai",
"agent",
"voice",
"xsai"
],
"license": "MIT",
"author": "",
"type": "module",
"scripts": {
"build": "tsc",
"dev": "tsx watch --env-file=.env ./src ",
"start": "node dist/index.js",
"volt": "volt"
},
"dependencies": {
"@ai-sdk/openai": "^1.3.10",
"@voltagent/cli": "^0.1.4",
"@voltagent/core": "^0.1.12",
"@voltagent/vercel-ai": "^0.1.5",
"@voltagent/voice": "^0.1.4",
"dotenv": "^16.4.5",
"openai": "^4.91.0",
"zod": "^3.24.2"
},
"devDependencies": {
"@types/node": "^22.13.5",
"tsx": "^4.19.3",
"typescript": "^5.8.2"
}
}
65 changes: 65 additions & 0 deletions examples/with-voice-xsai/src/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
import { join } from "path";
import { createReadStream, createWriteStream } from "fs";
import { VercelAIProvider } from "@voltagent/vercel-ai";
import { VoltAgent, Agent } from "@voltagent/core";
import { XsAIVoiceProvider } from "@voltagent/voice";
import { openai } from "@ai-sdk/openai";

// xsAI voice provider; reads the API key from the environment.
// NOTE(review): the env var is OPENAI_API_KEY because the provider defaults to
// the OpenAI-compatible base URL — confirm this matches your credentials.
const voiceProvider = new XsAIVoiceProvider({
  apiKey: process.env.OPENAI_API_KEY!,
});

// Voice-enabled agent: Vercel AI provider for LLM calls, xsAI for speech.
const agent = new Agent({
  name: "Voice Assistant",
  description: "Speaks & listens via xsAI",
  llm: new VercelAIProvider(),
  model: openai("gpt-4o-mini"),
  voice: voiceProvider,
});

// Register the agent with VoltAgent so it is served/managed by the runtime.
new VoltAgent({ agents: { agent } });

// Demo flow: list voices, synthesize speech to a file, then transcribe it back.
(async () => {
  const voices = await agent.voice?.getVoices();
  console.log("Available voices:", voices);

  const audioStream = await agent.voice?.speak(
    "Hello, VoltAgent is best framework for building voice agents! Yeah!",
    {
      speed: 1.0,
    },
  );

  console.log("audioStream", audioStream);

  // Save the audio stream to a file (for demonstration).
  const outputPath = join(process.cwd(), "output.mp3");
  const writeStream = createWriteStream(outputPath);

  // BUG FIX: wait for the write to finish before reading the file back.
  // The original code piped and immediately opened a read stream on the same
  // path, racing the writer and transcribing a truncated (possibly empty) file.
  await new Promise<void>((resolve, reject) => {
    if (!audioStream) {
      reject(new Error("No audio stream returned from speak()"));
      return;
    }
    writeStream.on("finish", resolve);
    writeStream.on("error", reject);
    audioStream.on("error", reject);
    audioStream.pipe(writeStream);
  });
  console.log("Audio saved to:", outputPath);

  // Round-trip: transcribe the file we just wrote.
  const audioFile = createReadStream(outputPath);
  const transcribedText = await agent.voice?.listen(audioFile, {
    language: "en",
    stream: false,
  });
  console.log("Transcribed text:", transcribedText);
})().catch((err) => {
  // Surface failures instead of leaving an unhandled promise rejection.
  console.error("Voice demo failed:", err);
});

// Log provider lifecycle events (speaking / listening / error) to the console.
voiceProvider.on("speaking", (e: { text: string }) => {
  console.log(`Speaking: ${e.text.substring(0, 50)}...`);
});

voiceProvider.on("listening", () => {
  console.log("Listening to audio input...");
});

voiceProvider.on("error", (e: { message: string }) => {
  console.error("Voice error:", e.message);
});
14 changes: 14 additions & 0 deletions examples/with-voice-xsai/tsconfig.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "NodeNext",
"moduleResolution": "NodeNext",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": true,
"outDir": "dist",
"skipLibCheck": true
},
"include": ["src"],
"exclude": ["node_modules", "dist"]
}
2 changes: 2 additions & 0 deletions packages/voice/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@
},
"dependencies": {
"@voltagent/core": "^0.1.8",
"@xsai/generate-speech": "^0.2.0",
"@xsai/generate-transcription": "^0.2.0",
"elevenlabs": "^1.55.0",
"openai": "^4.91.0"
},
Expand Down
1 change: 1 addition & 0 deletions packages/voice/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,3 +2,4 @@ export * from "./types";
export * from "./providers/base";
export * from "./providers/openai";
export * from "./providers/elevenlabs";
export * from "./providers/xsai";
149 changes: 149 additions & 0 deletions packages/voice/src/providers/xsai/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
import { PassThrough } from "node:stream";
import type { VoiceMetadata, ReadableStreamType } from "@voltagent/core";
import { BaseVoiceProvider } from "../base";
import { XsaiVoiceOptions, XsaiSpeakOptions, XsaiListenOptions } from "./types";
import { generateSpeech, GenerateSpeechOptions } from "@xsai/generate-speech";
import { generateTranscription, GenerateTranscriptionOptions } from "@xsai/generate-transcription";

/* ------------------------------------------------------------------ */
/* Helper: bufferise a Node stream */
/* ------------------------------------------------------------------ */
/**
 * Drain a Node readable stream into a single Buffer.
 *
 * String chunks (streams with an encoding set) are converted to Buffers
 * before concatenation; binary chunks pass through unchanged.
 */
async function collectChunks(stream: NodeJS.ReadableStream): Promise<Buffer> {
  const parts: Buffer[] = [];
  for await (const chunk of stream) {
    parts.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
  }
  return Buffer.concat(parts);
}

/* ------------------------------------------------------------------ */
/* xsAI provider */
/* ------------------------------------------------------------------ */
/**
 * Voice provider backed by the xsAI speech/transcription SDK
 * (`@xsai/generate-speech`, `@xsai/generate-transcription`).
 *
 * Targets an OpenAI-compatible HTTP API: the base URL defaults to
 * `https://api.openai.com/v1` and the default models are `tts-1` /
 * `whisper-1`. Real-time streaming (connect/send) is not supported.
 */
export class XsAIVoiceProvider extends BaseVoiceProvider {
  private readonly apiKey: string;
  private readonly baseURL: string;
  // Text-to-speech model id, used by speak().
  private readonly ttsModel: string;
  // Transcription model id, used by listen().
  private readonly speechModel: string;
  // Default voice id when speak() is not given one explicitly.
  private readonly voice: string;
  // Extra HTTP headers forwarded on every request, if configured.
  private readonly headers?: Record<string, string>;

  constructor(options: XsaiVoiceOptions) {
    super(options);

    this.apiKey = options.apiKey;
    this.baseURL = options.baseURL ?? "https://api.openai.com/v1";
    this.ttsModel = options.ttsModel ?? "tts-1";
    this.speechModel = options.speechModel ?? "whisper-1";
    this.voice = options.voice ?? "alloy";
    this.headers = options.options?.headers;
  }

  /* ------------------------------------------------------------------ */
  /*                          TEXT ➜ SPEECH                             */
  /* ------------------------------------------------------------------ */
  /**
   * Synthesize speech for the given text (or text stream).
   *
   * Emits "speaking" before the request and "error" (code SPEAK_ERROR)
   * on failure; the error is rethrown after being emitted.
   *
   * @param input - Text to speak, or a readable stream of UTF-8 text.
   * @param opts - Per-call overrides for voice, format, and speed.
   * @returns A readable stream containing the encoded audio (mp3 by default).
   * @throws If the input is empty or the upstream request fails.
   */
  async speak(
    input: string | NodeJS.ReadableStream,
    opts: XsaiSpeakOptions = {},
  ): Promise<NodeJS.ReadableStream> {
    try {
      const text =
        typeof input === "string" ? input : (await collectChunks(input)).toString("utf8");

      if (!text.trim()) throw new Error("Input text is empty");
      this.emit("speaking", { text });

      // Note: removed a stale "Dynamically import the module" comment —
      // the SDK is statically imported at the top of this file.
      const generateSpeechOptions: GenerateSpeechOptions = {
        input: text,
        // this.voice is always set in the constructor, so no extra fallback.
        voice: opts.voice ?? this.voice,
        responseFormat: opts.format ?? "mp3",
        speed: opts.speed ?? 1.0,
        /* CommonRequestOptions */
        apiKey: this.apiKey,
        baseURL: this.baseURL,
        model: this.ttsModel,
        headers: this.headers,
      };

      const arrayBuf = await generateSpeech(generateSpeechOptions);

      // Wrap the returned ArrayBuffer in a stream to satisfy the interface.
      const stream = new PassThrough();
      stream.end(Buffer.from(arrayBuf));
      return stream;
    } catch (err) {
      this.emit("error", {
        message: err instanceof Error ? err.message : "Unknown error",
        code: "SPEAK_ERROR",
        details: err,
      });
      throw err;
    }
  }

  /* ------------------------------------------------------------------ */
  /*                          SPEECH ➜ TEXT                             */
  /* ------------------------------------------------------------------ */
  /**
   * Transcribe an audio stream to text.
   *
   * The whole stream is buffered in memory before upload. Emits
   * "listening" before the request and "error" (code LISTEN_ERROR)
   * on failure; the error is rethrown after being emitted.
   *
   * @param audio - Readable stream of encoded audio.
   * @param opts - Optional file name, language, prompt, and temperature.
   * @returns The transcribed text.
   */
  async listen(
    audio: NodeJS.ReadableStream,
    opts: XsaiListenOptions = {},
  ): Promise<string | ReadableStreamType> {
    try {
      this.emit("listening", { audio });
      const buf = await collectChunks(audio);

      const blob = new Blob([buf]);

      const generateTranscriptionOptions: GenerateTranscriptionOptions = {
        file: blob,
        fileName: opts.fileName ?? "audio.wav",
        language: opts.language,
        prompt: opts.prompt,
        temperature: opts.temperature,
        /* CommonRequestOptions */
        apiKey: this.apiKey,
        baseURL: this.baseURL,
        model: this.speechModel,
        headers: this.headers,
      };

      const { text } = await generateTranscription(generateTranscriptionOptions);

      return text;
    } catch (err) {
      this.emit("error", {
        message: err instanceof Error ? err.message : "Unknown error",
        code: "LISTEN_ERROR",
        details: err,
      });
      throw err;
    }
  }

  /* ------------------------------------------------------------------ */
  /*               Real‑time streaming not yet available                */
  /* ------------------------------------------------------------------ */
  /** Always throws: xsAI has no real-time streaming transport. */
  async connect(): Promise<void> {
    throw new Error("Real‑time streaming not supported by xsAI");
  }

  /** No-op: there is never an open connection to tear down. */
  disconnect(): void {
    /* noop */
  }

  /** Always throws: xsAI has no real-time streaming transport. */
  async send(): Promise<void> {
    throw new Error("Real‑time streaming not supported by xsAI");
  }

  /* ------------------------------------------------------------------ */
  /*      xsAI hasn't published a voice list API – stub with default    */
  /* ------------------------------------------------------------------ */
  /**
   * Return the configured voice as a single-item list.
   *
   * NOTE(review): language/gender are hard-coded placeholders, not
   * metadata from the API — confirm before relying on them.
   */
  async getVoices(): Promise<VoiceMetadata[]> {
    return [
      {
        // this.voice is always assigned in the constructor; the original
        // `?? "default"` fallback here was dead code.
        id: this.voice,
        name: "xsAI default",
        language: "en",
        gender: "neutral",
      },
    ];
  }
}
Loading