Skip to content
This repository was archived by the owner on Mar 28, 2025. It is now read-only.

Commit 79c0dcc

Browse files
authored
Update TypeScript examples and READMEs (#35)
* update examples and readmes * update readme
1 parent fedd5ca commit 79c0dcc

File tree

11 files changed

+1815
-12
lines changed

11 files changed

+1815
-12
lines changed

README.md

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,9 @@ const client = await IndexifyClient.createClient();
2121

2222
## Usage
2323

24-
See the [getting started](https://getindexify.com/getting_started/) guide for examples of how to use the client.
24+
See the [getting started](https://docs.getindexify.ai/getting_started/) guide on how to use Indexify.
2525

2626

27+
## Examples
28+
29+
You can find various examples in our repo [located here](https://github.com/tensorlakeai/indexify-typescript-client/tree/main/examples).

examples/.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
node_modules

examples/README.md

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
# Indexify TypeScript Client Examples
2+
This directory contains various examples using the Indexify TypeScript Client
3+
4+
Install dependencies
5+
```bash
6+
npm install
7+
```
8+
9+
#### [OpenAI RAG](./openaiRag.ts)
10+
Use OpenAI to answer questions about Kevin Durant based on context from Wikipedia in this RAG example.
11+
```bash
12+
npm run openai-rag
13+
```
14+
15+
#### [LangChain RAG](./langchainRag.ts)
16+
LangChain retriever example for RAG, answering a basic question from a piece of content.
17+
```bash
18+
npm run langchain-rag
19+
```
20+
21+
#### [Extracting Content](./extractContent.ts)
22+
Example of extracting content directly from an extractor using the Indexify Client.
23+
```bash
24+
npm run extract-content
25+
```

examples/extractContent.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import { IndexifyClient } from "../src";
1+
import { IndexifyClient } from "getindexify";
22
const fs = require("fs");
33

44
// Extract wikipedia article directly from client

examples/langchainRag.ts

Lines changed: 13 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -5,20 +5,25 @@ import {
55
} from "@langchain/core/runnables";
66
import { PromptTemplate } from "@langchain/core/prompts";
77
import { StringOutputParser } from "@langchain/core/output_parsers";
8-
import { IndexifyClient } from "getindexify";
8+
import { ExtractionGraph, IndexifyClient } from "getindexify";
99
import { IndexifyRetriever } from "@getindexify/langchain";
1010
import { formatDocumentsAsString } from "langchain/util/document";
1111

1212
(async () => {
1313
// setup client
14-
const client = await IndexifyClient.createNamespace("testlangchain");
15-
client.addExtractionPolicy({
16-
extractor: "tensorlake/minilm-l6",
17-
name: "minilml6",
14+
const graph = ExtractionGraph.fromYaml(`
15+
name: 'knowledgebase'
16+
extraction_policies:
17+
- extractor: 'tensorlake/minilm-l6'
18+
name: 'minilml6'
19+
`);
20+
const client = await IndexifyClient.createNamespace({
21+
name: "testlangchain",
22+
extractionGraphs: [graph],
1823
});
1924

2025
// add documents
21-
client.addDocuments("Lucas is from Los Angeles, California");
26+
await client.addDocuments("knowledgebase", "Lucas is from Los Angeles, California");
2227

2328
await new Promise((r) => setTimeout(r, 5000));
2429

@@ -48,7 +53,7 @@ import { formatDocumentsAsString } from "langchain/util/document";
4853
]);
4954

5055
const question = "Where is Lucas From?";
51-
console.log(`Question: ${question}`)
56+
console.log(`Question: ${question}`);
5257
const result = await chain.invoke(question);
53-
console.log(result)
58+
console.log(result);
5459
})();

examples/openaiRag.ts

Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,78 @@
1+
2+
import { OpenAI } from "openai";
3+
import wiki from "wikijs";
4+
import { IndexifyClient, ExtractionGraph } from "getindexify";
5+
6+
// RAG example with OpenAI and indexify
7+
(async () => {
8+
const client = await IndexifyClient.createClient();
9+
const graph = ExtractionGraph.fromYaml(`
10+
name: 'nbakb'
11+
extraction_policies:
12+
- extractor: 'tensorlake/minilm-l6'
13+
name: 'wikipediaembedding'
14+
`);
15+
await client.createExtractionGraph(graph);
16+
17+
// Function to load wikipedia article from query
18+
async function loadWikipediaArticle(query: string) {
19+
const page = await wiki().page(query);
20+
const wikipediaContent = await page.rawContent();
21+
// chunk text
22+
const splitText = require("split-text");
23+
const chunks = splitText(wikipediaContent, { length: 500 });
24+
chunks.forEach(async (chunk: string) => {
25+
await client.addDocuments("nbakb", chunk);
26+
});
27+
}
28+
29+
// Function to get context
30+
// This will search index
31+
async function getContext(
32+
question: string,
33+
index: string,
34+
topK: number = 3
35+
): Promise<string> {
36+
const results = await client.searchIndex(index, question, topK);
37+
let context = "";
38+
results.forEach((result) => {
39+
context += `content id: ${result.content_id} \n\n passage: ${result.text}\n`;
40+
});
41+
return context;
42+
}
43+
44+
// Create prompt from question and context
45+
function createPrompt(question: string, context: string): string {
46+
return `Answer the question, based on the context.\n question: ${question} \n context: ${context}`;
47+
}
48+
49+
// Load wikipedia article
50+
await loadWikipediaArticle("kevin durant");
51+
52+
// Setup OpenAI Client, Prompts and Context
53+
const clientOpenAI = new OpenAI();
54+
const question = "When and where did Kevin Durant win NBA championships?";
55+
const context = await getContext(
56+
question,
57+
"nbakb.wikipediaembedding.embedding"
58+
);
59+
const prompt = createPrompt(question, context);
60+
61+
// Perform rag with prompt
62+
clientOpenAI.chat.completions
63+
.create({
64+
messages: [
65+
{
66+
role: "user",
67+
content: prompt,
68+
},
69+
],
70+
model: "gpt-3.5-turbo",
71+
})
72+
.then((chatCompletion) => {
73+
console.log(chatCompletion.choices[0].message.content);
74+
})
75+
.catch((error) => {
76+
console.error(error);
77+
});
78+
})();

0 commit comments

Comments
 (0)