From 0bc2c71b0c906c1e70f5e557e2f742bcabb8ef17 Mon Sep 17 00:00:00 2001
From: Yidadaa
Date: Tue, 4 Jul 2023 00:14:57 +0800
Subject: [PATCH 1/4] fix: #2230 hide chat actions for context prompts

---
 app/components/chat.tsx | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/app/components/chat.tsx b/app/components/chat.tsx
index ff0bc5b347d..a0b0a297a0b 100644
--- a/app/components/chat.tsx
+++ b/app/components/chat.tsx
@@ -888,7 +888,8 @@ export function Chat() {
             const showActions =
               !isUser &&
               i > 0 &&
-              !(message.preview || message.content.length === 0);
+              !(message.preview || message.content.length === 0) &&
+              i >= context.length; // do not show actions for context prompts
             const showTyping = message.preview || message.streaming;
 
             const shouldShowClearContextDivider = i === clearContextIndex - 1;

From 59634594994bfc00facf4ea7b6160a4e2ed1f49e Mon Sep 17 00:00:00 2001
From: Yidadaa
Date: Tue, 4 Jul 2023 00:22:30 +0800
Subject: [PATCH 2/4] fix: #2221 user prompts in front of all prompts

---
 app/store/prompt.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/app/store/prompt.ts b/app/store/prompt.ts
index 98d4193bec9..4e370161948 100644
--- a/app/store/prompt.ts
+++ b/app/store/prompt.ts
@@ -127,7 +127,7 @@ export const usePromptStore = create()(
       search(text) {
         if (text.length === 0) {
           // return all prompts
-          return SearchService.allPrompts.concat([...get().getUserPrompts()]);
+          return get().getUserPrompts().concat(SearchService.builtinPrompts);
         }
         return SearchService.search(text) as Prompt[];
       },

From 823032617dfd9928544f38c928085b9b41ba8691 Mon Sep 17 00:00:00 2001
From: Yidadaa
Date: Tue, 4 Jul 2023 00:39:54 +0800
Subject: [PATCH 3/4] feat: add top p config

---
 app/components/model-config.tsx | 19 +++++++++++++++++++
 app/locales/cn.ts               |  4 ++++
 app/locales/en.ts               |  6 +++++-
 app/store/config.ts             |  9 +++++++--
 4 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/app/components/model-config.tsx b/app/components/model-config.tsx
index f79e0e8f6f1..9fd4677e72b 100644
--- a/app/components/model-config.tsx
+++ b/app/components/model-config.tsx
@@ -48,6 +48,25 @@ export function ModelConfigList(props: {
           }}
         ></InputRange>
       </ListItem>
+      <ListItem
+        title={Locale.Settings.TopP.Title}
+        subTitle={Locale.Settings.TopP.SubTitle}
+      >
+        <InputRange
+          value={(props.modelConfig.top_p ?? 1).toFixed(1)}
+          min="0"
+          max="1"
+          step="0.1"
+          onChange={(e) => {
+            props.updateConfig(
+              (config) =>
+                (config.top_p = ModalConfigValidator.top_p(
+                  e.currentTarget.valueAsNumber,
+                )),
+            );
+          }}
+        ></InputRange>
+      </ListItem>
       <ListItem
         title={Locale.Settings.MaxTokens.Title}
         subTitle={Locale.Settings.MaxTokens.SubTitle}
diff --git a/app/locales/en.ts b/app/locales/en.ts
--- a/app/locales/en.ts
+++ b/app/locales/en.ts
@@ ... @@
       Toast: (x: any) => `With ${x} contextual prompts`,
-      Edit: "Contextual and Memory Prompts",
+      Edit: "Current Chat Settings",
       Add: "Add a Prompt",
       Clear: "Context Cleared",
       Revert: "Revert",
diff --git a/app/store/config.ts b/app/store/config.ts
index 945e1be7c45..68e299150ef 100644
--- a/app/store/config.ts
+++ b/app/store/config.ts
@@ -33,6 +33,7 @@ export const DEFAULT_CONFIG = {
   modelConfig: {
     model: "gpt-3.5-turbo" as ModelType,
     temperature: 0.5,
+    top_p: 1,
     max_tokens: 2000,
     presence_penalty: 0,
     frequency_penalty: 0,
@@ -158,6 +159,9 @@ export const ModalConfigValidator = {
   temperature(x: number) {
     return limitNumber(x, 0, 1, 1);
   },
+  top_p(x: number) {
+    return limitNumber(x, 0, 1, 1);
+  },
 };
 
 export const useAppConfig = create()(
@@ -177,15 +181,16 @@ export const useAppConfig = create()(
     }),
     {
       name: StoreKey.Config,
-      version: 3.2,
+      version: 3.3,
       migrate(persistedState, version) {
-        if (version === 3.2) return persistedState as any;
+        if (version === 3.3) return persistedState as any;
 
         const state = persistedState as ChatConfig;
         state.modelConfig.sendMemory = true;
         state.modelConfig.historyMessageCount = 4;
         state.modelConfig.compressMessageLengthThreshold = 1000;
         state.modelConfig.frequency_penalty = 0;
+        state.modelConfig.top_p = 1;
         state.modelConfig.template = DEFAULT_INPUT_TEMPLATE;
         state.dontShowMaskSplashScreen = false;

From cda074fe243183a3b6e3801f21c954007e221157 Mon Sep 17 00:00:00 2001
From: Yidadaa
Date: Tue, 4 Jul 2023 00:40:57 +0800
Subject: [PATCH 4/4] fixup

---
 app/client/platforms/openai.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 79d485562bb..bbd14d61337 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -50,6 +50,7 @@ export class ChatGPTApi implements LLMApi {
       temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
       frequency_penalty: modelConfig.frequency_penalty,
+      top_p: modelConfig.top_p,
     };
 
     console.log("[Request] openai payload: ", requestPayload);
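
Note: the ModalConfigValidator.top_p added in PATCH 3/4 delegates to a
limitNumber helper that none of these diffs include. The sketch below is an
assumption about that helper inferred from its call sites (clamp into
[min, max], fall back to the default for non-numeric input); it is not the
repository's actual implementation.

// Hypothetical sketch of the limitNumber helper referenced by
// ModalConfigValidator; behavior inferred from usage, not the real source.
function limitNumber(
  x: number,
  min: number,
  max: number,
  defaultValue: number,
): number {
  // Guard against NaN, e.g. valueAsNumber of an empty input field.
  if (typeof x !== "number" || isNaN(x)) {
    return defaultValue;
  }
  // Clamp into the inclusive [min, max] range.
  return Math.min(max, Math.max(min, x));
}

// Under this assumption the new validator behaves as:
//   ModalConfigValidator.top_p(0.9); // => 0.9 (in range, unchanged)
//   ModalConfigValidator.top_p(1.7); // => 1   (clamped to max)
//   ModalConfigValidator.top_p(NaN); // => 1   (falls back to default 1)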
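
With PATCH 4/4 applied, top_p rides along with the other sampling parameters
in the chat request. A minimal sketch of the payload shape assembled in
ChatGPTApi; only the fields shown in the diffs above are confirmed, and the
concrete values here are illustrative:

// Illustrative request payload after this series; the messages/model fields
// and all values are examples, not taken from the diffs.
const requestPayload = {
  messages: [{ role: "user" as const, content: "Hello" }],
  model: "gpt-3.5-turbo",
  temperature: 0.5,
  presence_penalty: 0,
  frequency_penalty: 0,
  top_p: 1, // newly forwarded from modelConfig by PATCH 4/4
};
console.log("[Request] openai payload: ", requestPayload);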