From b09f458aeb1e204410fc8188b04347c18d991535 Mon Sep 17 00:00:00 2001 From: "Q.A.zh" <40236765+QAbot-zh@users.noreply.github.com> Date: Sat, 28 Dec 2024 04:17:22 +0000 Subject: [PATCH 1/7] Introducing the ability to remove specified Providers. --- README.md | 5 ++++- app/utils/model.ts | 7 +++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9168480c5e2..f93e1530efb 100644 --- a/README.md +++ b/README.md @@ -343,11 +343,14 @@ If you want to disable parse settings from url, set this to 1. To control custom models, use `+` to add a custom model, use `-` to hide a model, use `name=displayName` to customize model name, separated by comma. -User `-all` to disable all default models, `+all` to enable all default models. +Use `-all` to disable all default models, `+all` to enable all default models. +Use `-*provider` to disable all models from the specified provider. +Current valid providers: `openai,azure,google,anthropic,baidu,bytedance,alibaba,tencent,moonshot,iflytek,xai,chatglm` and more to come. For Azure: use `modelName@Azure=deploymentName` to customize model name and deployment name. > Example: `+gpt-3.5-turbo@Azure=gpt35` will show option `gpt35(Azure)` in model list. > If you only can use Azure model, `-all,+gpt-3.5-turbo@Azure=gpt35` will `gpt35(Azure)` the only option in model list. +> If you don't want to use Azure models, using `-*azure` will prevent Azure models from appearing in the model list. For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name. > Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list. 
diff --git a/app/utils/model.ts b/app/utils/model.ts index a1b7df1b61e..a8ce93f4de8 100644 --- a/app/utils/model.ts +++ b/app/utils/model.ts @@ -88,6 +88,13 @@ export function collectModelTable( Object.values(modelTable).forEach( (model) => (model.available = available), ); + } else if (name.startsWith("*")) { + const modelId = name.substring(1).toLowerCase(); + Object.values(modelTable).forEach((model) => { + if (model?.provider?.id === modelId) { + model.available = available; + } + }); } else { // 1. find model by name, and set available value const [customModelName, customProviderName] = getModelProvider(name); From 4d6c82deb91dd3dce83431012968f153442ba8fd Mon Sep 17 00:00:00 2001 From: "Q.A.zh" <40236765+QAbot-zh@users.noreply.github.com> Date: Sat, 28 Dec 2024 09:09:41 +0000 Subject: [PATCH 2/7] add collectModelTable unit test --- test/model.test.ts | 143 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 test/model.test.ts diff --git a/test/model.test.ts b/test/model.test.ts new file mode 100644 index 00000000000..36f9e34728b --- /dev/null +++ b/test/model.test.ts @@ -0,0 +1,143 @@ +import { collectModelTable } from "@/app/utils/model" +import { LLMModel,LLMModelProvider } from "@/app/client/api"; + +describe('collectModelTable', () => { + const mockModels: readonly LLMModel[] = [ + { + name: 'gpt-3.5-turbo', + available: true, + provider: { + id: 'openai', + providerName: 'OpenAI', + providerType: 'openai', + } as LLMModelProvider, + sorted: 1, + }, + { + name: 'gpt-4', + available: true, + provider: { + id: 'openai', + providerName: 'OpenAI', + providerType: 'openai', + } as LLMModelProvider, + sorted: 1, + }, + { + name: 'gpt-3.5-turbo', + available: true, + provider: { + id: 'azure', + providerName: 'Azure', + providerType: 'azure', + } as LLMModelProvider, + sorted: 2, + }, + { + name: 'gpt-4', + available: true, + provider: { + id: 'azure', + providerName: 'Azure', + providerType: 'azure', + } as 
LLMModelProvider, + sorted: 2, + }, + { + name: 'gemini-pro', + available: true, + provider: { + id: 'google', + providerName: 'Google', + providerType: 'google', + } as LLMModelProvider, + sorted: 3, + }, + , + { + name: 'claude-3-haiku-20240307', + available: true, + provider: { + id: 'anthropic', + providerName: 'Anthropic', + providerType: 'anthropic', + } as LLMModelProvider, + sorted: 4, + }, + { + name: 'grok-beta', + available: true, + provider: { + id: 'xai', + providerName: 'XAI', + providerType: 'xai', + } as LLMModelProvider, + sorted: 11, + }, + ]; + + test('all models should be available', () => { + const customModels = ''; + const result = collectModelTable(mockModels, customModels); + + expect(result['gpt-3.5@openai'].available).toBe(true); + expect(result['gpt-4@openai'].available).toBe(true); + expect(result['gpt-3.5@azure'].available).toBe(true); + expect(result['gpt-4@azure'].available).toBe(true); + expect(result['gemini-pro@google'].available).toBe(true); + expect(result['claude-3-haiku-20240307@claude'].available).toBe(true); + expect(result['grok-beta@xai'].available).toBe(true); + }); + test('should exclude all models when custom is "-all"', () => { + const customModels = '-all'; + const result = collectModelTable(mockModels, customModels); + + expect(result['gpt-3.5@openai'].available).toBe(false); + expect(result['gpt-4@openai'].available).toBe(false); + expect(result['gpt-3.5@azure'].available).toBe(false); + expect(result['gpt-4@azure'].available).toBe(false); + expect(result['gemini-pro@google'].available).toBe(false); + expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false); + expect(result['grok-beta@xai'].available).toBe(false); + }); + + test('should exclude all Azure models when custom is "-*azure"', () => { + const customModels = '-*azure'; + const result = collectModelTable(mockModels, customModels); + + expect(result['gpt-3.5@openai'].available).toBe(true); + 
expect(result['gpt-4@openai'].available).toBe(true); + expect(result['gpt-3.5@azure'].available).toBe(false); + expect(result['gpt-4@azure'].available).toBe(false); + expect(result['gemini-pro@google'].available).toBe(true); + expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true); + expect(result['grok-beta@xai'].available).toBe(true); + }); + + test('should exclude Google and XAI models when custom is "-*google,-*xai"', () => { + const customModels = '-*google,-*xai'; + const result = collectModelTable(mockModels, customModels); + + expect(result['gpt-3.5@openai'].available).toBe(true); + expect(result['gpt-4@openai'].available).toBe(true); + expect(result['gpt-3.5@azure'].available).toBe(true); + expect(result['gpt-4@azure'].available).toBe(true); + expect(result['gemini-pro@google'].available).toBe(false); + expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true); + expect(result['grok-beta@xai'].available).toBe(false); + }); + + test('should exclude all models except OpenAI when custom is "-all, +*openai"', () => { + const customModels = '-all, +*openai,gpt-4o@azure'; + const result = collectModelTable(mockModels, customModels); + + expect(result['gpt-3.5@openai'].available).toBe(true); + expect(result['gpt-4@openai'].available).toBe(true); + expect(result['gpt-3.5@azure'].available).toBe(false); + expect(result['gpt-4@azure'].available).toBe(false); + expect(result['gemini-pro@google'].available).toBe(false); + expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false); + expect(result['grok-beta@xai'].available).toBe(false); + expect(result['gpt-4o@azure'].available).toBe(true); + }); +}); \ No newline at end of file From c41c2b538a2101c7519a610fd64b0f45eee023d8 Mon Sep 17 00:00:00 2001 From: "Q.A.zh" <40236765+QAbot-zh@users.noreply.github.com> Date: Sat, 28 Dec 2024 09:14:27 +0000 Subject: [PATCH 3/7] Remove the empty array slot --- test/model.test.ts | 1 - 1 file changed, 1 deletion(-) diff --git 
a/test/model.test.ts b/test/model.test.ts index 36f9e34728b..a9b6c78efa9 100644 --- a/test/model.test.ts +++ b/test/model.test.ts @@ -53,7 +53,6 @@ describe('collectModelTable', () => { } as LLMModelProvider, sorted: 3, }, - , { name: 'claude-3-haiku-20240307', available: true, From fd8ad636552c75c2e68154c7125e502d412b4ac8 Mon Sep 17 00:00:00 2001 From: "Q.A.zh" <40236765+QAbot-zh@users.noreply.github.com> Date: Sat, 28 Dec 2024 15:38:01 +0000 Subject: [PATCH 4/7] fix error model name --- test/model.test.ts | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/model.test.ts b/test/model.test.ts index a9b6c78efa9..4a19e3f4110 100644 --- a/test/model.test.ts +++ b/test/model.test.ts @@ -79,7 +79,7 @@ describe('collectModelTable', () => { const customModels = ''; const result = collectModelTable(mockModels, customModels); - expect(result['gpt-3.5@openai'].available).toBe(true); + expect(result['gpt-3.5-turbo@openai'].available).toBe(true); expect(result['gpt-4@openai'].available).toBe(true); expect(result['gpt-3.5@azure'].available).toBe(true); expect(result['gpt-4@azure'].available).toBe(true); @@ -91,7 +91,7 @@ describe('collectModelTable', () => { const customModels = '-all'; const result = collectModelTable(mockModels, customModels); - expect(result['gpt-3.5@openai'].available).toBe(false); + expect(result['gpt-3.5-turbo@openai'].available).toBe(false); expect(result['gpt-4@openai'].available).toBe(false); expect(result['gpt-3.5@azure'].available).toBe(false); expect(result['gpt-4@azure'].available).toBe(false); @@ -104,7 +104,7 @@ describe('collectModelTable', () => { const customModels = '-*azure'; const result = collectModelTable(mockModels, customModels); - expect(result['gpt-3.5@openai'].available).toBe(true); + expect(result['gpt-3.5-turbo@openai'].available).toBe(true); expect(result['gpt-4@openai'].available).toBe(true); expect(result['gpt-3.5@azure'].available).toBe(false); expect(result['gpt-4@azure'].available).toBe(false); 
@@ -117,7 +117,7 @@ describe('collectModelTable', () => { const customModels = '-*google,-*xai'; const result = collectModelTable(mockModels, customModels); - expect(result['gpt-3.5@openai'].available).toBe(true); + expect(result['gpt-3.5-turbo@openai'].available).toBe(true); expect(result['gpt-4@openai'].available).toBe(true); expect(result['gpt-3.5@azure'].available).toBe(true); expect(result['gpt-4@azure'].available).toBe(true); @@ -130,7 +130,7 @@ describe('collectModelTable', () => { const customModels = '-all, +*openai,gpt-4o@azure'; const result = collectModelTable(mockModels, customModels); - expect(result['gpt-3.5@openai'].available).toBe(true); + expect(result['gpt-3.5-turbo@openai'].available).toBe(true); expect(result['gpt-4@openai'].available).toBe(true); expect(result['gpt-3.5@azure'].available).toBe(false); expect(result['gpt-4@azure'].available).toBe(false); From 2db4caace44b001d36f6b578797e6f35f91930f9 Mon Sep 17 00:00:00 2001 From: "Q.A.zh" <40236765+QAbot-zh@users.noreply.github.com> Date: Sat, 28 Dec 2024 15:43:27 +0000 Subject: [PATCH 5/7] fix model name --- test/model.test.ts | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/model.test.ts b/test/model.test.ts index 4a19e3f4110..f3757fac2fc 100644 --- a/test/model.test.ts +++ b/test/model.test.ts @@ -81,10 +81,10 @@ describe('collectModelTable', () => { expect(result['gpt-3.5-turbo@openai'].available).toBe(true); expect(result['gpt-4@openai'].available).toBe(true); - expect(result['gpt-3.5@azure'].available).toBe(true); + expect(result['gpt-3.5-turbo@azure'].available).toBe(true); expect(result['gpt-4@azure'].available).toBe(true); expect(result['gemini-pro@google'].available).toBe(true); - expect(result['claude-3-haiku-20240307@claude'].available).toBe(true); + expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true); expect(result['grok-beta@xai'].available).toBe(true); }); test('should exclude all models when custom is "-all"', () => { @@ 
-93,7 +93,7 @@ describe('collectModelTable', () => { expect(result['gpt-3.5-turbo@openai'].available).toBe(false); expect(result['gpt-4@openai'].available).toBe(false); - expect(result['gpt-3.5@azure'].available).toBe(false); + expect(result['gpt-3.5-turbo@azure'].available).toBe(false); expect(result['gpt-4@azure'].available).toBe(false); expect(result['gemini-pro@google'].available).toBe(false); expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false); @@ -106,7 +106,7 @@ describe('collectModelTable', () => { expect(result['gpt-3.5-turbo@openai'].available).toBe(true); expect(result['gpt-4@openai'].available).toBe(true); - expect(result['gpt-3.5@azure'].available).toBe(false); + expect(result['gpt-3.5-turbo@azure'].available).toBe(false); expect(result['gpt-4@azure'].available).toBe(false); expect(result['gemini-pro@google'].available).toBe(true); expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true); @@ -119,7 +119,7 @@ describe('collectModelTable', () => { expect(result['gpt-3.5-turbo@openai'].available).toBe(true); expect(result['gpt-4@openai'].available).toBe(true); - expect(result['gpt-3.5@azure'].available).toBe(true); + expect(result['gpt-3.5-turbo@azure'].available).toBe(true); expect(result['gpt-4@azure'].available).toBe(true); expect(result['gemini-pro@google'].available).toBe(false); expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true); @@ -132,7 +132,7 @@ describe('collectModelTable', () => { expect(result['gpt-3.5-turbo@openai'].available).toBe(true); expect(result['gpt-4@openai'].available).toBe(true); - expect(result['gpt-3.5@azure'].available).toBe(false); + expect(result['gpt-3.5-turbo@azure'].available).toBe(false); expect(result['gpt-4@azure'].available).toBe(false); expect(result['gemini-pro@google'].available).toBe(false); expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false); From 3b5b4965995ed9593a0ce9a84abadaa4476601fc Mon Sep 17 00:00:00 2001 From: "Q.A.zh" 
<40236765+QAbot-zh@users.noreply.github.com> Date: Sat, 28 Dec 2024 15:53:35 +0000 Subject: [PATCH 6/7] modify test comment --- test/model.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/model.test.ts b/test/model.test.ts index f3757fac2fc..e9bc986f32a 100644 --- a/test/model.test.ts +++ b/test/model.test.ts @@ -126,7 +126,7 @@ describe('collectModelTable', () => { expect(result['grok-beta@xai'].available).toBe(false); }); - test('should exclude all models except OpenAI when custom is "-all, +*openai"', () => { + test('All models except OpenAI should be excluded, and additional models should be added when customized as "-all, +*openai,gpt-4o@azure"', () => { const customModels = '-all, +*openai,gpt-4o@azure'; const result = collectModelTable(mockModels, customModels); From 69fcb92a3b1c9417ebe8be88ebc88a8dd9192446 Mon Sep 17 00:00:00 2001 From: "Q.A.zh" <40236765+QAbot-zh@users.noreply.github.com> Date: Mon, 30 Dec 2024 02:54:15 +0000 Subject: [PATCH 7/7] =?UTF-8?q?=E7=A7=BB=E9=99=A4=E7=A9=BA=E6=A0=BC?= =?UTF-8?q?=EF=BC=8C=E5=A2=9E=E5=8A=A0custom=5Fmodels=20=E5=AE=B9=E9=94=99?= =?UTF-8?q?=E6=80=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/utils/model.ts | 1 + test/model.test.ts | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/app/utils/model.ts b/app/utils/model.ts index a8ce93f4de8..c20e59594d2 100644 --- a/app/utils/model.ts +++ b/app/utils/model.ts @@ -76,6 +76,7 @@ export function collectModelTable( // server custom models customModels .split(",") + .map((v) => v.trim()) .filter((v) => !!v && v.length > 0) .forEach((m) => { const available = !m.startsWith("-"); diff --git a/test/model.test.ts b/test/model.test.ts index e9bc986f32a..74f4639c4ad 100644 --- a/test/model.test.ts +++ b/test/model.test.ts @@ -127,7 +127,7 @@ describe('collectModelTable', () => { }); test('All models except OpenAI should be excluded, and additional models should be added when 
customized as "-all, +*openai,gpt-4o@azure"', () => { - const customModels = '-all, +*openai,gpt-4o@azure'; + const customModels = '-all,+*openai,gpt-4o@azure'; const result = collectModelTable(mockModels, customModels); expect(result['gpt-3.5-turbo@openai'].available).toBe(true);