// page-assist/src/hooks/useMessage.tsx
import React from "react"
import { cleanUrl } from "~libs/clean-url"
import { getOllamaURL, systemPromptForNonRag } from "~services/ollama"
import { useStoreMessage, type ChatHistory, type Message } from "~store"
import { ChatOllama } from "@langchain/community/chat_models/ollama"
import {
HumanMessage,
AIMessage,
type MessageContent
} from "@langchain/core/messages"
import { getHtmlOfCurrentTab } from "~libs/get-html"
import { PageAssistHtmlLoader } from "~loader/html"
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama"
import { createChatWithWebsiteChain } from "~chain/chat-with-website"
import { MemoryVectorStore } from "langchain/vectorstores/memory"
// Shape of a completed bot reply persisted alongside its conversation.
export type BotResponse = {
bot: {
// The reply text produced by the model.
text: string
// NOTE(review): element type is any[]; presumably the retriever's source
// documents — confirm against the chain implementation.
sourceDocuments: any[]
}
// Full conversation history after this exchange.
history: ChatHistory
// Identifier of the stored conversation this response belongs to.
history_id: string
}
const generateHistory = (
messages: {
role: "user" | "assistant" | "system"
content: string
image?: string
2024-02-01 13:40:44 +05:30
}[]
) => {
let history = []
for (const message of messages) {
if (message.role === "user") {
let content: MessageContent = [
{
type: "text",
text: message.content
}
]
if (message.image) {
content = [
{
type: "image_url",
image_url: message.image
},
{
type: "text",
text: message.content
}
]
}
2024-02-01 13:40:44 +05:30
history.push(
new HumanMessage({
content: content
2024-02-01 13:40:44 +05:30
})
)
} else if (message.role === "assistant") {
history.push(
new AIMessage({
content: [
{
type: "text",
text: message.content
}
]
})
)
}
}
return history
}
export const useMessage = () => {
const {
history,
messages,
setHistory,
setMessages,
setStreaming,
streaming,
setIsFirstMessage,
historyId,
setHistoryId,
isLoading,
setIsLoading,
isProcessing,
setIsProcessing,
selectedModel,
setSelectedModel,
chatMode,
setChatMode,
setIsEmbedding,
isEmbedding
2024-02-01 13:40:44 +05:30
} = useStoreMessage()
const abortControllerRef = React.useRef<AbortController | null>(null)
2024-02-02 22:01:16 +05:30
const [keepTrackOfEmbedding, setKeepTrackOfEmbedding] = React.useState<{
[key: string]: MemoryVectorStore
}>({})
2024-02-01 13:40:44 +05:30
const clearChat = () => {
stopStreamingRequest()
setMessages([])
setHistory([])
setHistoryId(null)
setIsFirstMessage(true)
setIsLoading(false)
setIsProcessing(false)
setStreaming(false)
2024-02-01 13:40:44 +05:30
}
const memoryEmbedding = async (
2024-02-02 22:01:16 +05:30
url: string,
html: string,
ollamaEmbedding: OllamaEmbeddings
) => {
const loader = new PageAssistHtmlLoader({
html,
url
})
const docs = await loader.load()
const textSplitter = new RecursiveCharacterTextSplitter({
chunkSize: 1000,
chunkOverlap: 200
})
const chunks = await textSplitter.splitDocuments(docs)
const store = new MemoryVectorStore(ollamaEmbedding)
setIsEmbedding(true)
2024-02-02 22:01:16 +05:30
await store.addDocuments(chunks)
2024-02-02 22:01:16 +05:30
setKeepTrackOfEmbedding({
...keepTrackOfEmbedding,
[url]: store
})
setIsEmbedding(false)
2024-02-02 22:01:16 +05:30
return store
}
const chatWithWebsiteMode = async (message: string) => {
const ollamaUrl = await getOllamaURL()
const { html, url } = await getHtmlOfCurrentTab()
const isAlreadyExistEmbedding = keepTrackOfEmbedding[url]
let newMessage: Message[] = [
...messages,
{
isBot: false,
name: "You",
message,
sources: []
},
{
isBot: true,
name: selectedModel,
message: "▋",
sources: []
}
]
const appendingIndex = newMessage.length - 1
setMessages(newMessage)
const ollamaEmbedding = new OllamaEmbeddings({
model: selectedModel,
baseUrl: cleanUrl(ollamaUrl)
})
const ollamaChat = new ChatOllama({
model: selectedModel,
baseUrl: cleanUrl(ollamaUrl)
})
let vectorstore: MemoryVectorStore
if (isAlreadyExistEmbedding) {
vectorstore = isAlreadyExistEmbedding
} else {
vectorstore = await memoryEmbedding(url, html, ollamaEmbedding)
2024-02-02 22:01:16 +05:30
}
const questionPrompt =
"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. Chat History: {chat_history} Follow Up Input: {question} Standalone question:"
const systemPrompt = `You are a helpful AI assistant. Use the following pieces of context to answer the question at the end. If you don't know the answer, just say you don't know. DO NOT try to make up an answer. If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context. {context} Question: {question} Helpful answer in markdown:`
const sanitizedQuestion = message.trim().replaceAll("\n", " ")
const chain = createChatWithWebsiteChain({
llm: ollamaChat,
question_llm: ollamaChat,
question_template: questionPrompt,
response_template: systemPrompt,
retriever: vectorstore.asRetriever()
})
try {
const chunks = await chain.stream({
question: sanitizedQuestion
})
let count = 0
for await (const chunk of chunks) {
if (count === 0) {
setIsProcessing(true)
newMessage[appendingIndex].message = chunk + "▋"
setMessages(newMessage)
} else {
newMessage[appendingIndex].message =
newMessage[appendingIndex].message.slice(0, -1) + chunk + "▋"
setMessages(newMessage)
}
count++
}
newMessage[appendingIndex].message = newMessage[
appendingIndex
].message.slice(0, -1)
setHistory([
...history,
{
role: "user",
content: message
},
{
role: "assistant",
content: newMessage[appendingIndex].message
}
])
setIsProcessing(false)
} catch (e) {
console.log(e)
setIsProcessing(false)
setStreaming(false)
setMessages([
...messages,
{
isBot: true,
name: selectedModel,
message: `Something went wrong. Check out the following logs:
\`\`\`
${e?.message}
\`\`\`
`,
sources: []
}
])
}
}
const normalChatMode = async (message: string, image: string) => {
2024-02-01 13:40:44 +05:30
const url = await getOllamaURL()
if (image.length > 0) {
image = `data:image/jpeg;base64,${image.split(",")[1]}`
}
2024-02-01 13:40:44 +05:30
abortControllerRef.current = new AbortController()
const ollama = new ChatOllama({
model: selectedModel,
baseUrl: cleanUrl(url)
})
2024-02-01 23:48:40 +05:30
let newMessage: Message[] = [
2024-02-01 13:40:44 +05:30
...messages,
{
isBot: false,
2024-02-01 23:48:40 +05:30
name: "You",
2024-02-01 13:40:44 +05:30
message,
sources: [],
images: [image]
2024-02-01 13:40:44 +05:30
},
{
isBot: true,
2024-02-01 23:48:40 +05:30
name: selectedModel,
2024-02-01 13:40:44 +05:30
message: "▋",
sources: [],
2024-02-01 13:40:44 +05:30
}
]
const appendingIndex = newMessage.length - 1
setMessages(newMessage)
try {
2024-02-01 23:48:40 +05:30
const prompt = await systemPromptForNonRag()
let humanMessage = new HumanMessage({
content: [
{
text: message,
type: "text"
}
]
})
if (image.length > 0) {
humanMessage = new HumanMessage({
content: [
{
text: message,
type: "text"
},
{
image_url: image,
type: "image_url"
}
]
})
}
console.log("humanMessage", humanMessage)
2024-02-01 13:40:44 +05:30
const chunks = await ollama.stream(
[
...generateHistory(history),
humanMessage
2024-02-01 13:40:44 +05:30
],
{
signal: abortControllerRef.current.signal
}
)
let count = 0
for await (const chunk of chunks) {
if (count === 0) {
setIsProcessing(true)
newMessage[appendingIndex].message = chunk.content + "▋"
setMessages(newMessage)
} else {
newMessage[appendingIndex].message =
newMessage[appendingIndex].message.slice(0, -1) +
chunk.content +
"▋"
setMessages(newMessage)
}
count++
}
newMessage[appendingIndex].message = newMessage[
appendingIndex
].message.slice(0, -1)
setHistory([
...history,
{
role: "user",
content: message,
image
2024-02-01 13:40:44 +05:30
},
{
role: "assistant",
content: newMessage[appendingIndex].message
}
])
setIsProcessing(false)
} catch (e) {
console.log(e)
setIsProcessing(false)
setStreaming(false)
setMessages([
...messages,
{
isBot: true,
2024-02-01 23:48:40 +05:30
name: selectedModel,
2024-02-01 13:40:44 +05:30
message: `Something went wrong. Check out the following logs:
\`\`\`
${e?.message}
\`\`\`
`,
sources: []
}
])
}
}
const onSubmit = async ({
message,
image
}: {
message: string
image: string
}) => {
if (chatMode === "normal") {
await normalChatMode(message, image)
} else {
await chatWithWebsiteMode(message)
}
2024-02-01 13:40:44 +05:30
}
const stopStreamingRequest = () => {
if (abortControllerRef.current) {
abortControllerRef.current.abort()
abortControllerRef.current = null
}
}
return {
messages,
setMessages,
onSubmit,
setStreaming,
streaming,
setHistory,
historyId,
setHistoryId,
setIsFirstMessage,
isLoading,
setIsLoading,
isProcessing,
stopStreamingRequest,
clearChat,
selectedModel,
setSelectedModel,
chatMode,
setChatMode,
isEmbedding
2024-02-01 13:40:44 +05:30
}
}