feat: support for GPU layers

n4ze3m
2024-08-20 16:11:50 +05:30
parent 00735cddad
commit 4ef17ff479
16 changed files with 108 additions and 25 deletions
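
This change threads a new numGpu model setting through each spot in useMessageOption where Ollama call options are assembled: the per-chat value takes precedence, and the user-level default fills in via nullish coalescing when it is unset. A minimal sketch of that resolution pattern (the type and helper names below are illustrative, not identifiers from the repository):

```ts
// Illustrative sketch of the settings-resolution pattern this commit extends.
// Type and helper names are assumptions, not taken from the codebase.
type ModelSettings = {
  topP?: number
  numCtx?: number
  seed?: number
  numGpu?: number // layers to offload to the GPU (Ollama's num_gpu)
}

const resolveSettings = (
  current?: ModelSettings,
  defaults?: ModelSettings
): ModelSettings => ({
  topP: current?.topP ?? defaults?.topP,
  numCtx: current?.numCtx ?? defaults?.numCtx,
  seed: current?.seed, // seed has no user-default fallback in the diff below
  numGpu: current?.numGpu ?? defaults?.numGpu
})
```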


@@ -117,7 +117,9 @@ export const useMessageOption = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })
     let newMessage: Message[] = []
@@ -190,7 +192,9 @@ export const useMessageOption = () => {
           numCtx:
             currentChatModelSettings?.numCtx ??
             userDefaultModelSettings?.numCtx,
-          seed: currentChatModelSettings?.seed
+          seed: currentChatModelSettings?.seed,
+          numGpu:
+            currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
         })
         const response = await questionOllama.invoke(promptForQuestion)
         query = response.content.toString()
@@ -360,7 +364,9 @@ export const useMessageOption = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })
     let newMessage: Message[] = []
@@ -576,7 +582,9 @@ export const useMessageOption = () => {
       topP: currentChatModelSettings?.topP ?? userDefaultModelSettings?.topP,
       numCtx:
         currentChatModelSettings?.numCtx ?? userDefaultModelSettings?.numCtx,
-      seed: currentChatModelSettings?.seed
+      seed: currentChatModelSettings?.seed,
+      numGpu:
+        currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
     })
     let newMessage: Message[] = []
@@ -665,7 +673,9 @@ export const useMessageOption = () => {
           numCtx:
             currentChatModelSettings?.numCtx ??
             userDefaultModelSettings?.numCtx,
-          seed: currentChatModelSettings?.seed
+          seed: currentChatModelSettings?.seed,
+          numGpu:
+            currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
         })
         const response = await questionOllama.invoke(promptForQuestion)
         query = response.content.toString()
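
For context, numGpu maps to Ollama's num_gpu option, the number of model layers to offload to the GPU; 0 forces CPU-only inference. A hedged usage sketch of the wiring, assuming the @langchain/community ChatOllama wrapper (which accepts numGpu and forwards it to Ollama as num_gpu); the model name and values are examples, not taken from the commit:

```ts
import { ChatOllama } from "@langchain/community/chat_models/ollama"

// Example settings standing in for the hook's state; not from the commit.
const currentChatModelSettings = { numGpu: undefined as number | undefined }
const userDefaultModelSettings = { numGpu: 33 } // e.g. offload 33 layers

// Forward the resolved setting; ChatOllama sends numGpu to Ollama as num_gpu.
const model = new ChatOllama({
  baseUrl: "http://localhost:11434",
  model: "llama3.1",
  numGpu: currentChatModelSettings?.numGpu ?? userDefaultModelSettings?.numGpu
})
```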