diff --git a/.changeset/xsai-voice-provider.md b/.changeset/xsai-voice-provider.md
new file mode 100644
index 000000000..90afeb67e
--- /dev/null
+++ b/.changeset/xsai-voice-provider.md
@@ -0,0 +1,11 @@
+---
+"@voltagent/voice": minor
+---
+
+feat(xsAI): add xsAI voice provider
+
+This adds support for the xsAI voice provider, including:
+
+- Core provider implementation support
+- Support for API key authentication and custom headers
+- Base URL configuration for API endpoints
diff --git a/examples/with-voice-xsai/.gitignore b/examples/with-voice-xsai/.gitignore
new file mode 100644
index 000000000..99f7bea53
--- /dev/null
+++ b/examples/with-voice-xsai/.gitignore
@@ -0,0 +1,4 @@
+node_modules
+dist
+.DS_Store
+.env
\ No newline at end of file
diff --git a/examples/with-voice-xsai/README.md b/examples/with-voice-xsai/README.md
new file mode 100644
index 000000000..a81ead075
--- /dev/null
+++ b/examples/with-voice-xsai/README.md
@@ -0,0 +1,53 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ VoltAgent is an open source TypeScript framework for building and orchestrating AI agents.
+Escape the limitations of no-code builders and the complexity of starting from scratch.
+
+
+
+
+
+
+[](https://www.npmjs.com/package/@voltagent/core)
+[](CODE_OF_CONDUCT.md)
+[](https://s.voltagent.dev/discord)
+[](https://twitter.com/voltagent_dev)
+
+
+
+
+
+
+
+## VoltAgent: Build AI Agents Fast and Flexibly
+
+VoltAgent is an open-source TypeScript framework for creating and managing AI agents. It provides modular components to build, customize, and scale agents with ease. From connecting to APIs and memory management to supporting multiple LLMs, VoltAgent simplifies the process of creating sophisticated AI systems. It enables fast development, maintains clean code, and offers flexibility to switch between models and tools without vendor lock-in.
+
+## Try Example
+
+```bash
+npm create voltagent-app@latest -- --example with-voice-xsai
+```
diff --git a/examples/with-voice-xsai/output.mp3 b/examples/with-voice-xsai/output.mp3
new file mode 100644
index 000000000..878026b6c
Binary files /dev/null and b/examples/with-voice-xsai/output.mp3 differ
diff --git a/examples/with-voice-xsai/package.json b/examples/with-voice-xsai/package.json
new file mode 100644
index 000000000..78469b72d
--- /dev/null
+++ b/examples/with-voice-xsai/package.json
@@ -0,0 +1,35 @@
+{
+ "name": "voltagent-example-with-voice-xsai",
+ "private": true,
+ "keywords": [
+ "voltagent",
+ "ai",
+ "agent",
+ "voice",
+ "xsai"
+ ],
+ "license": "MIT",
+ "author": "",
+ "type": "module",
+ "scripts": {
+ "build": "tsc",
+ "dev": "tsx watch --env-file=.env ./src ",
+ "start": "node dist/index.js",
+ "volt": "volt"
+ },
+ "dependencies": {
+ "@ai-sdk/openai": "^1.3.10",
+ "@voltagent/cli": "^0.1.4",
+ "@voltagent/core": "^0.1.12",
+ "@voltagent/vercel-ai": "^0.1.5",
+ "@voltagent/voice": "^0.1.4",
+ "dotenv": "^16.4.5",
+ "openai": "^4.91.0",
+ "zod": "^3.24.2"
+ },
+ "devDependencies": {
+ "@types/node": "^22.13.5",
+ "tsx": "^4.19.3",
+ "typescript": "^5.8.2"
+ }
+}
diff --git a/examples/with-voice-xsai/src/index.ts b/examples/with-voice-xsai/src/index.ts
new file mode 100644
index 000000000..62b75c5ab
--- /dev/null
+++ b/examples/with-voice-xsai/src/index.ts
@@ -0,0 +1,65 @@
+import { join } from "path";
+import { createReadStream, createWriteStream } from "fs";
+import { VercelAIProvider } from "@voltagent/vercel-ai";
+import { VoltAgent, Agent } from "@voltagent/core";
+import { XsAIVoiceProvider } from "@voltagent/voice";
+import { openai } from "@ai-sdk/openai";
+
+const voiceProvider = new XsAIVoiceProvider({
+ apiKey: process.env.OPENAI_API_KEY!,
+});
+
+const agent = new Agent({
+ name: "Voice Assistant",
+ description: "Speaks & listens via xsAI",
+ llm: new VercelAIProvider(),
+ model: openai("gpt-4o-mini"),
+ voice: voiceProvider,
+});
+
+// Create the VoltAgent with our voice-enabled agent
+new VoltAgent({
+ agents: {
+ agent,
+ },
+});
+
+(async () => {
+ const voices = await agent.voice?.getVoices();
+ console.log("Available voices:", voices);
+
+ const audioStream = await agent.voice?.speak(
+ "Hello, VoltAgent is best framework for building voice agents! Yeah!",
+ {
+ speed: 1.0,
+ },
+ );
+
+ console.log("audioStream", audioStream);
+
+ // Save the audio stream to a file (for demonstration)
+ const outputPath = join(process.cwd(), "output.mp3");
+ const writeStream = createWriteStream(outputPath);
+  if (audioStream) await new Promise((resolve) => audioStream.pipe(writeStream).on("finish", resolve));
+ console.log("Audio saved to:", outputPath);
+
+ const audioFile = createReadStream(outputPath);
+ const transcribedText = await agent.voice?.listen(audioFile, {
+ language: "en",
+ stream: false,
+ });
+ console.log("Transcribed text:", transcribedText);
+})();
+
+// Event listeners for voice interactions
+voiceProvider.on("speaking", (event: { text: string }) => {
+ console.log(`Speaking: ${event.text.substring(0, 50)}...`);
+});
+
+voiceProvider.on("listening", () => {
+ console.log("Listening to audio input...");
+});
+
+voiceProvider.on("error", (error: { message: string }) => {
+ console.error("Voice error:", error.message);
+});
diff --git a/examples/with-voice-xsai/tsconfig.json b/examples/with-voice-xsai/tsconfig.json
new file mode 100644
index 000000000..cee90c6f3
--- /dev/null
+++ b/examples/with-voice-xsai/tsconfig.json
@@ -0,0 +1,14 @@
+{
+ "compilerOptions": {
+ "target": "ES2022",
+ "module": "NodeNext",
+ "moduleResolution": "NodeNext",
+ "esModuleInterop": true,
+ "forceConsistentCasingInFileNames": true,
+ "strict": true,
+ "outDir": "dist",
+ "skipLibCheck": true
+ },
+ "include": ["src"],
+ "exclude": ["node_modules", "dist"]
+}
diff --git a/packages/voice/package.json b/packages/voice/package.json
index f103aec66..df7767ed4 100644
--- a/packages/voice/package.json
+++ b/packages/voice/package.json
@@ -24,6 +24,8 @@
},
"dependencies": {
"@voltagent/core": "^0.1.8",
+ "@xsai/generate-speech": "^0.2.0",
+ "@xsai/generate-transcription": "^0.2.0",
"elevenlabs": "^1.55.0",
"openai": "^4.91.0"
},
diff --git a/packages/voice/src/index.ts b/packages/voice/src/index.ts
index 3b6762ddf..72644e5e5 100644
--- a/packages/voice/src/index.ts
+++ b/packages/voice/src/index.ts
@@ -2,3 +2,4 @@ export * from "./types";
export * from "./providers/base";
export * from "./providers/openai";
export * from "./providers/elevenlabs";
+export * from "./providers/xsai";
diff --git a/packages/voice/src/providers/xsai/index.ts b/packages/voice/src/providers/xsai/index.ts
new file mode 100644
index 000000000..863c8167a
--- /dev/null
+++ b/packages/voice/src/providers/xsai/index.ts
@@ -0,0 +1,149 @@
+import { PassThrough } from "node:stream";
+import type { VoiceMetadata, ReadableStreamType } from "@voltagent/core";
+import { BaseVoiceProvider } from "../base";
+import { XsaiVoiceOptions, XsaiSpeakOptions, XsaiListenOptions } from "./types";
+import { generateSpeech, GenerateSpeechOptions } from "@xsai/generate-speech";
+import { generateTranscription, GenerateTranscriptionOptions } from "@xsai/generate-transcription";
+
+/* ------------------------------------------------------------------ */
+/* Helper: bufferise a Node stream */
+/* ------------------------------------------------------------------ */
+async function collectChunks(stream: NodeJS.ReadableStream): Promise<Buffer> {
+ const chunks: Buffer[] = [];
+ for await (const c of stream) {
+ chunks.push(typeof c === "string" ? Buffer.from(c) : c);
+ }
+ return Buffer.concat(chunks);
+}
+
+/* ------------------------------------------------------------------ */
+/* xsAI provider */
+/* ------------------------------------------------------------------ */
+export class XsAIVoiceProvider extends BaseVoiceProvider {
+ private readonly apiKey: string;
+ private readonly baseURL: string;
+ private readonly ttsModel: string;
+ private readonly speechModel: string;
+ private readonly voice: string;
+  private readonly headers?: Record<string, string>;
+
+ constructor(options: XsaiVoiceOptions) {
+ super(options);
+
+ this.apiKey = options.apiKey;
+ this.baseURL = options.baseURL ?? "https://api.openai.com/v1";
+ this.ttsModel = options.ttsModel ?? "tts-1";
+ this.speechModel = options.speechModel ?? "whisper-1";
+ this.voice = options.voice ?? "alloy";
+ this.headers = options.options?.headers;
+ }
+
+ /* ------------------------------------------------------------------ */
+ /* TEXT ➜ SPEECH */
+ /* ------------------------------------------------------------------ */
+ async speak(
+ input: string | NodeJS.ReadableStream,
+ opts: XsaiSpeakOptions = {},
+  ): Promise<ReadableStreamType> {
+ try {
+ const text =
+ typeof input === "string" ? input : (await collectChunks(input)).toString("utf8");
+
+ if (!text.trim()) throw new Error("Input text is empty");
+ this.emit("speaking", { text });
+
+      // Assemble xsAI request options (includes CommonRequestOptions: apiKey, baseURL, model, headers)
+
+ const generateSpeechOptions: GenerateSpeechOptions = {
+ input: text,
+ voice: opts.voice ?? this.voice ?? "default",
+ responseFormat: opts.format ?? "mp3",
+ speed: opts.speed ?? 1.0,
+ /* CommonRequestOptions */
+ apiKey: this.apiKey,
+ baseURL: this.baseURL,
+ model: this.ttsModel,
+ headers: this.headers,
+ };
+
+ const arrayBuf = await generateSpeech(generateSpeechOptions);
+
+ const stream = new PassThrough();
+ stream.end(Buffer.from(arrayBuf));
+ return stream;
+ } catch (err) {
+ this.emit("error", {
+ message: err instanceof Error ? err.message : "Unknown error",
+ code: "SPEAK_ERROR",
+ details: err,
+ });
+ throw err;
+ }
+ }
+
+ /* ------------------------------------------------------------------ */
+ /* SPEECH ➜ TEXT */
+ /* ------------------------------------------------------------------ */
+ async listen(
+ audio: NodeJS.ReadableStream,
+ opts: XsaiListenOptions = {},
+  ): Promise<string> {
+ try {
+ this.emit("listening", { audio });
+ const buf = await collectChunks(audio);
+
+ const blob = new Blob([buf]);
+
+ const generateTranscriptionOptions: GenerateTranscriptionOptions = {
+ file: blob,
+ fileName: opts.fileName ?? "audio.wav",
+ language: opts.language,
+ prompt: opts.prompt,
+ temperature: opts.temperature,
+ /* CommonRequestOptions */
+ apiKey: this.apiKey,
+ baseURL: this.baseURL,
+ model: this.speechModel,
+ headers: this.headers,
+ };
+
+ const { text } = await generateTranscription(generateTranscriptionOptions);
+
+ return text;
+ } catch (err) {
+ this.emit("error", {
+ message: err instanceof Error ? err.message : "Unknown error",
+ code: "LISTEN_ERROR",
+ details: err,
+ });
+ throw err;
+ }
+ }
+
+ /* ------------------------------------------------------------------ */
+ /* Real‑time streaming not yet available */
+ /* ------------------------------------------------------------------ */
+  async connect(): Promise<void> {
+ throw new Error("Real‑time streaming not supported by xsAI");
+ }
+ disconnect(): void {
+ /* noop */
+ }
+  async send(): Promise<void> {
+ throw new Error("Real‑time streaming not supported by xsAI");
+ }
+
+ /* ------------------------------------------------------------------ */
+ /* xsAI hasn't published a voice list API – stub with default */
+ /* ------------------------------------------------------------------ */
+  async getVoices(): Promise<VoiceMetadata[]> {
+ return [
+ {
+ id: this.voice ?? "default",
+ name: "xsAI default",
+ language: "en",
+ gender: "neutral",
+ },
+ ];
+ }
+}
diff --git a/packages/voice/src/providers/xsai/types.ts b/packages/voice/src/providers/xsai/types.ts
new file mode 100644
index 000000000..5d1b0cc3b
--- /dev/null
+++ b/packages/voice/src/providers/xsai/types.ts
@@ -0,0 +1,49 @@
+import type { BaseVoiceProviderOptions } from "../base/types";
+
+/* ------------------------------------------------------------------ */
+/* xsAI model & voice constants */
+/* ------------------------------------------------------------------ */
+
+/* ------------------------------------------------------------------ */
+/* Provider‑level options */
+/* ------------------------------------------------------------------ */
+export type XsaiVoiceOptions = BaseVoiceProviderOptions & {
+ /** xsAI dashboard key */
+ apiKey: string;
+
+ /** xsAI base URL – defaults to `"https://api.openai.com/v1"` */
+ baseURL?: string;
+
+ /** Model *id* for TTS (required by xsAI) – default `"tts-1"` */
+ ttsModel?: string;
+
+ /** Model *id* for STT (required by xsAI) – default `"whisper-1"` */
+ speechModel?: string;
+
+ /** Voice ID (library‑specific) – defaults to `"alloy"` */
+ voice?: string;
+
+ /** Extra per‑provider knobs */
+ options?: {
+    headers?: Record<string, string>;
+ };
+};
+
+/* ------------------------------------------------------------------ */
+/* speak & listen option helpers */
+/* ------------------------------------------------------------------ */
+export type XsaiSpeakOptions = {
+ voice?: string;
+ /** @default `"mp3"` */
+ format?: "aac" | "flac" | "mp3" | "opus" | "pcm" | "wav";
+ /** @default `1.0` */
+ speed?: number;
+};
+
+export type XsaiListenOptions = {
+ language?: string;
+ prompt?: string;
+  temperature?: number;
+ /** custom filename hint for the Blob sent to xsAI */
+ fileName?: string;
+};
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 315ac3af5..740ab6db0 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -812,6 +812,43 @@ importers:
specifier: ^5.8.2
version: 5.8.2
+ examples/with-voice-xsai:
+ dependencies:
+ '@ai-sdk/openai':
+ specifier: ^1.3.10
+ version: 1.3.10(zod@3.24.3)
+ '@voltagent/cli':
+ specifier: ^0.1.4
+ version: link:../../packages/cli
+ '@voltagent/core':
+ specifier: ^0.1.12
+ version: link:../../packages/core
+ '@voltagent/vercel-ai':
+ specifier: ^0.1.5
+ version: link:../../packages/vercel-ai
+ '@voltagent/voice':
+ specifier: ^0.1.4
+ version: link:../../packages/voice
+ dotenv:
+ specifier: ^16.4.5
+ version: 16.4.7
+ openai:
+ specifier: ^4.91.0
+ version: 4.96.0(zod@3.24.3)
+ zod:
+ specifier: ^3.24.2
+ version: 3.24.3
+ devDependencies:
+ '@types/node':
+ specifier: ^22.13.5
+ version: 22.15.3
+ tsx:
+ specifier: ^4.19.3
+ version: 4.19.3
+ typescript:
+ specifier: ^5.8.2
+ version: 5.8.2
+
examples/with-xsai:
dependencies:
'@voltagent/cli':
@@ -1274,12 +1311,18 @@ importers:
'@voltagent/core':
specifier: ^0.1.8
version: link:../core
+ '@xsai/generate-speech':
+ specifier: ^0.2.0
+ version: 0.2.0
+ '@xsai/generate-transcription':
+ specifier: ^0.2.0
+ version: 0.2.0
elevenlabs:
specifier: ^1.55.0
version: 1.55.0
openai:
specifier: ^4.91.0
- version: 4.91.0(zod@3.24.3)
+ version: 4.91.0(zod@3.24.2)
devDependencies:
'@types/jest':
specifier: ^29.5.0
@@ -11153,7 +11196,7 @@ packages:
optional: true
dependencies:
dotenv: 16.4.7
- openai: 4.91.0(zod@3.24.3)
+ openai: 4.96.0(zod@3.24.3)
zod: 3.24.3
zod-validation-error: 3.4.0(zod@3.24.3)
transitivePeerDependencies:
@@ -12998,30 +13041,6 @@ packages:
- encoding
dev: false
- /openai@4.91.0(zod@3.24.3):
- resolution: {integrity: sha512-zdDg6eyvUmCP58QAW7/aPb+XdeavJ51pK6AcwZOWG5QNSLIovVz0XonRL9vARGJRmw8iImmvf2A31Q7hoh544w==}
- hasBin: true
- peerDependencies:
- ws: ^8.18.0
- zod: ^3.23.8
- peerDependenciesMeta:
- ws:
- optional: true
- zod:
- optional: true
- dependencies:
- '@types/node': 18.19.79
- '@types/node-fetch': 2.6.12
- abort-controller: 3.0.0
- agentkeepalive: 4.6.0
- form-data-encoder: 1.7.2
- formdata-node: 4.4.1
- node-fetch: 2.7.0
- zod: 3.24.3
- transitivePeerDependencies:
- - encoding
- dev: false
-
/openai@4.96.0(zod@3.24.2):
resolution: {integrity: sha512-dKoW56i02Prv2XQolJ9Rl9Svqubqkzg3QpwEOBuSVZLk05Shelu7s+ErRTwFc1Bs3JZ2qBqBfVpXQiJhwOGG8A==}
hasBin: true
diff --git a/website/docs/agents/voice.md b/website/docs/agents/voice.md
index ef66acbc4..ba472c988 100644
--- a/website/docs/agents/voice.md
+++ b/website/docs/agents/voice.md
@@ -42,6 +42,7 @@ pnpm add @voltagent/voice
- **OpenAI**: High-quality voices and transcription.
- **ElevenLabs**: Realistic, customizable voices.
+- **xsAI**: Lightweight OpenAI-compatible voice API.
## Basic Usage
@@ -67,6 +68,16 @@ const elevenLabsVoice = new ElevenLabsVoiceProvider({
ttsModel: "eleven_multilingual_v2",
voice: "Rachel", // Example voice ID
});
+
+// Or initialize with xsAI
+import { XsAIVoiceProvider } from "@voltagent/voice";
+
+const xsAIVoice = new XsAIVoiceProvider({
+ apiKey: process.env.OPENAI_API_KEY!,
+ ttsModel: "tts-1",
+ voice: "alloy",
+ // If you are not using OpenAI, simply specify the `baseURL`
+});
```
**Note:** It's recommended to manage API keys securely, for example, using environment variables.