Skip to content

Commit 1db964a

Browse files
authored
Merge pull request #6 from golivecosmos/support-hf
Support HuggingFace Models
2 parents b4887d9 + 48ebf85 commit 1db964a

File tree

8 files changed

+111
-28
lines changed

8 files changed

+111
-28
lines changed

.SAMPLE_env

+7-1
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,8 @@
1+
# Set which model provider you want to use: HUGGING_FACE or OPEN_AI
2+
ENABLED_MODEL_STORE=HUGGING_FACE
3+
4+
# Hugging Face API key
5+
HUGGINGFACEHUB_API_KEY=
6+
17
# OpenAI API key
2-
OPENAI_API_KEY=""
8+
OPENAI_API_KEY=

README.md

+19-4
Original file line numberDiff line numberDiff line change
@@ -23,18 +23,22 @@ This template is an example project for a simple Large Language Model (LLM) appl
2323

2424
To get started, follow the below steps:
2525

26-
1. Create an `.env` file by copying the `SAMPLE_env` file and add API keys for the models you are going to use
26+
1. Create an `.env` file by copying the `SAMPLE_env` file and add the model store provider you'll be using (e.g. HuggingFace or OpenAI) and the API keys for the models you are going to use
2727
1. Install packages
2828
1. Run the backend server that will start with a default port of `3100`
29+
2930
```bash
3031
yarn start-server
3132
```
33+
3234
1. Run the frontend server that will start with a default port of `5173`.
35+
3336
```bash
3437
yarn start
3538
```
36-
39+
3740
_Note:_ You can use the `-p` flag to specify a port for the frontend server. To do this, you can either run `yarn start` with an additional flag, like so:
41+
3842
```bash
3943
yarn start -- --port 3000
4044
```
@@ -44,15 +48,26 @@ To get started, follow the below steps:
4448
```bash
4549
vite --port 3000
4650
```
47-
51+
4852
Additional scripts are provided to prepare the app for production
53+
4954
- `yarn build` — This will output a production build of the frontend app in the `dist` directory.
5055
- `yarn preview` — This will run the production build of the frontend app locally with a default port of `5173` (_note_: this will not work if you haven't generated the production build yet).
5156
57+
### Tutorials
5258
5359
👽 If you're looking for more thorough instructions follow [this tutorial on running an LLM React Node app](https://blog.golivecosmos.com/build-an-llm-app-with-node-react-and-langchain-js/). 📚
5460

5561
-------------
5662

57-
## Shout out to the ⭐star gazers⭐ supporting the project
63+
## How to Contribute
64+
65+
Feel free to try out the template and open any issues if there's something you'd like to see added or fixed, or open a pull request to contribute.
66+
67+
### Shout out to the ⭐star gazers⭐ supporting the project
68+
5869
[![Stargazers repo roster for @golivecosmos/llm-react-node-app-template](https://reporoster.com/stars/golivecosmos/llm-react-node-app-template)](https://github.com/golivecosmos/llm-react-node-app-template/stargazers)
70+
71+
### Thanks for the forks🍴
72+
73+
[![Forkers repo roster for @golivecosmos/llm-react-node-app-template](https://reporoster.com/forks/golivecosmos/llm-react-node-app-template)](https://github.com/golivecosmos/llm-react-node-app-template/network/members)

package.json

+2-1
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,12 @@
88
"start": "vite",
99
"preview": "vite preview",
1010
"build": "vite build",
11-
"start-server": "node ./server/index.js"
11+
"start-server": "nodemon ./server/index.js"
1212
},
1313
"dependencies": {
1414
"@emotion/react": "^11.11.0",
1515
"@emotion/styled": "^11.11.0",
16+
"@huggingface/inference": "^2.5.0",
1617
"@koa/cors": "^4.0.0",
1718
"@koa/router": "^12.0.0",
1819
"@mui/icons-material": "^5.11.16",
+12
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
import { HuggingFaceService } from '../services/hf.js';
import { OpenAiService } from '../services/openai.js';

// Registry of supported model providers, keyed by the value expected in the
// ENABLED_MODEL_STORE environment variable (see .SAMPLE_env).
export const MODEL_STORES = {
  HUGGING_FACE: HuggingFaceService,
  OPEN_AI: OpenAiService,
};

export const { ENABLED_MODEL_STORE } = process.env;
export const DEFAULT_ENABLED_MODEL_STORE = 'HUGGING_FACE';

// Fall back to the default when the env var is unset, empty, or names a
// provider that is not registered above — otherwise a typo in .env would
// later crash with "MODEL_STORES[...] is not a constructor".
export const enabledModel = Object.hasOwn(MODEL_STORES, ENABLED_MODEL_STORE ?? '')
  ? ENABLED_MODEL_STORE
  : DEFAULT_ENABLED_MODEL_STORE;

server/handlers/chat_handler.js

+3-22
Original file line numberDiff line numberDiff line change
@@ -1,32 +1,13 @@
1-
import { ConversationChain } from 'langchain/chains';
2-
import { ChatOpenAI } from 'langchain/chat_models/openai';
3-
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts';
4-
import { ConversationSummaryMemory } from 'langchain/memory';
1+
import { MODEL_STORES, enabledModel } from '../config/model_store_constants.js';
52

63
class ChatService {
74
constructor () {
8-
this.chat = new ChatOpenAI({ temperature: 0, verbose: true });
9-
this.chatPrompt = ChatPromptTemplate.fromPromptMessages([
10-
SystemMessagePromptTemplate.fromTemplate('The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.'),
11-
new MessagesPlaceholder('history'),
12-
HumanMessagePromptTemplate.fromTemplate('{input}'),
13-
]);
14-
15-
this.memory = new ConversationSummaryMemory({ llm: this.chat, returnMessages: true });
5+
this.model = new MODEL_STORES[enabledModel]
166
}
177

188
async startChat(data) {
199
const { body: { userInput } } = data;
20-
21-
const chain = new ConversationChain({
22-
memory: this.memory,
23-
prompt: this.chatPrompt,
24-
llm: this.chat,
25-
});
26-
27-
const response = await chain.call({
28-
input: userInput,
29-
});
10+
const response = await this.model.call(userInput);
3011

3112
return response;
3213
}

server/services/hf.js

+25
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
import { HfInference } from "@huggingface/inference";

const { HUGGINGFACEHUB_API_KEY } = process.env;

/**
 * Chat backend powered by the HuggingFace Inference API
 * (conversational task, microsoft/DialoGPT-large).
 */
class HuggingFaceService {
  constructor () {
    this.modelName = 'microsoft/DialoGPT-large';
    this.model = new HfInference(HUGGINGFACEHUB_API_KEY);
  }

  /**
   * Sends one user message to the conversational endpoint.
   * @param {string} userInput - the user's chat message
   * @returns {Promise<{ response: string | undefined }>} the model's reply text
   */
  async call(userInput) {
    // TO DO: pass in past_user_inputs for context
    const response = await this.model.conversational({
      model: this.modelName,
      inputs: {
        text: userInput,
      },
      // Generation options belong under `parameters`; at the top level of the
      // args object the inference client ignores them.
      parameters: {
        temperature: 0,
      },
    });

    return { response: response?.generated_text };
  }
}

export { HuggingFaceService }

server/services/openai.js

+38
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
import { ConversationChain } from 'langchain/chains';
import { ChatOpenAI } from 'langchain/chat_models/openai';
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts';
import { ConversationSummaryMemory } from 'langchain/memory';

/**
 * Chat backend powered by OpenAI via LangChain, with a running
 * conversation-summary memory carrying context between turns.
 */
class OpenAiService {
  constructor () {
    // ChatOpenAI reads OPENAI_API_KEY from the environment.
    this.model = new ChatOpenAI({ temperature: 0, verbose: true });

    this.chatPrompt = ChatPromptTemplate.fromPromptMessages([
      SystemMessagePromptTemplate.fromTemplate('The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.'),
      new MessagesPlaceholder('history'),
      HumanMessagePromptTemplate.fromTemplate('{input}'),
    ]);

    // Summarizes past turns with the same model; returned as messages so the
    // 'history' placeholder in the prompt can consume them.
    this.memory = new ConversationSummaryMemory({ llm: this.model, returnMessages: true });
  }

  /**
   * Returns the conversation chain, building it lazily on first use.
   * The chain only holds references to the shared model/prompt/memory,
   * so there is no need to rebuild it on every call.
   * @returns {ConversationChain}
   */
  assembleChain () {
    this.chain ??= new ConversationChain({
      memory: this.memory,
      prompt: this.chatPrompt,
      llm: this.model,
    });
    return this.chain;
  }

  /**
   * Sends one user message through the chain.
   * @param {string} userInput - the user's chat message
   * @returns {Promise<object>} the chain's result (e.g. { response: string })
   */
  call = async (userInput) => {
    const chain = this.assembleChain();

    const response = await chain.call({
      input: userInput,
    });
    return response;
  }
}

export { OpenAiService };

yarn.lock

+5
Original file line numberDiff line numberDiff line change
@@ -520,6 +520,11 @@
520520
resolved "https://registry.yarnpkg.com/@fortaine/fetch-event-source/-/fetch-event-source-3.0.6.tgz#b8552a2ca2c5202f5699b93a92be0188d422b06e"
521521
integrity sha512-621GAuLMvKtyZQ3IA6nlDWhV1V/7PGOTNIGLUifxt0KzM+dZIweJ6F3XvQF3QnqeNfS1N7WQ0Kil1Di/lhChEw==
522522

523+
"@huggingface/inference@^2.5.0":
524+
version "2.5.0"
525+
resolved "https://registry.yarnpkg.com/@huggingface/inference/-/inference-2.5.0.tgz#8e14ee6696e91aecb132c90d3b07be8373e70338"
526+
integrity sha512-X3NSdrWAKNTLAsEKabH48Wc+Osys+S7ilRcH1bf9trSDmJlzPVXDseXMRBHCFPCYd5AAAIakhENO4zCqstVg8g==
527+
523528
"@jridgewell/gen-mapping@^0.3.0", "@jridgewell/gen-mapping@^0.3.2":
524529
version "0.3.3"
525530
resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz#7e02e6eb5df901aaedb08514203b096614024098"

0 commit comments

Comments
 (0)