diff --git a/README.md b/README.md index 9168480c5e2..f93e1530efb 100644 --- a/README.md +++ b/README.md @@ -343,11 +343,14 @@ If you want to disable parse settings from url, set this to 1. To control custom models, use `+` to add a custom model, use `-` to hide a model, use `name=displayName` to customize model name, separated by comma. -User `-all` to disable all default models, `+all` to enable all default models. +Use `-all` to disable all default models, `+all` to enable all default models. +Use `-*provider` to disable all models from the specified provider. +Current valid providers: `openai,azure,google,anthropic,baidu,bytedance,alibaba,tencent,moonshot,iflytek,xai,chatglm` and more to come. For Azure: use `modelName@Azure=deploymentName` to customize model name and deployment name. > Example: `+gpt-3.5-turbo@Azure=gpt35` will show option `gpt35(Azure)` in model list. > If you only can use Azure model, `-all,+gpt-3.5-turbo@Azure=gpt35` will `gpt35(Azure)` the only option in model list. +> If you don't want to use Azure model, using `-*azure` will prevent Azure models from appearing in the model list. For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name. > Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list. 
diff --git a/app/utils/model.ts b/app/utils/model.ts index a1b7df1b61e..c20e59594d2 100644 --- a/app/utils/model.ts +++ b/app/utils/model.ts @@ -76,6 +76,7 @@ export function collectModelTable( // server custom models customModels .split(",") + .map((v) => v.trim()) .filter((v) => !!v && v.length > 0) .forEach((m) => { const available = !m.startsWith("-"); @@ -88,6 +89,13 @@ export function collectModelTable( Object.values(modelTable).forEach( (model) => (model.available = available), ); + } else if (name.startsWith("*")) { + const modelId = name.substring(1).toLowerCase(); + Object.values(modelTable).forEach((model) => { + if (model?.provider?.id === modelId) { + model.available = available; + } + }); } else { // 1. find model by name, and set available value const [customModelName, customProviderName] = getModelProvider(name); diff --git a/test/model.test.ts b/test/model.test.ts new file mode 100644 index 00000000000..74f4639c4ad --- /dev/null +++ b/test/model.test.ts @@ -0,0 +1,142 @@ +import { collectModelTable } from "@/app/utils/model" +import { LLMModel,LLMModelProvider } from "@/app/client/api"; + +describe('collectModelTable', () => { + const mockModels: readonly LLMModel[] = [ + { + name: 'gpt-3.5-turbo', + available: true, + provider: { + id: 'openai', + providerName: 'OpenAI', + providerType: 'openai', + } as LLMModelProvider, + sorted: 1, + }, + { + name: 'gpt-4', + available: true, + provider: { + id: 'openai', + providerName: 'OpenAI', + providerType: 'openai', + } as LLMModelProvider, + sorted: 1, + }, + { + name: 'gpt-3.5-turbo', + available: true, + provider: { + id: 'azure', + providerName: 'Azure', + providerType: 'azure', + } as LLMModelProvider, + sorted: 2, + }, + { + name: 'gpt-4', + available: true, + provider: { + id: 'azure', + providerName: 'Azure', + providerType: 'azure', + } as LLMModelProvider, + sorted: 2, + }, + { + name: 'gemini-pro', + available: true, + provider: { + id: 'google', + providerName: 'Google', + providerType: 
'google', + } as LLMModelProvider, + sorted: 3, + }, + { + name: 'claude-3-haiku-20240307', + available: true, + provider: { + id: 'anthropic', + providerName: 'Anthropic', + providerType: 'anthropic', + } as LLMModelProvider, + sorted: 4, + }, + { + name: 'grok-beta', + available: true, + provider: { + id: 'xai', + providerName: 'XAI', + providerType: 'xai', + } as LLMModelProvider, + sorted: 11, + }, + ]; + + test('all models should be available', () => { + const customModels = ''; + const result = collectModelTable(mockModels, customModels); + + expect(result['gpt-3.5-turbo@openai'].available).toBe(true); + expect(result['gpt-4@openai'].available).toBe(true); + expect(result['gpt-3.5-turbo@azure'].available).toBe(true); + expect(result['gpt-4@azure'].available).toBe(true); + expect(result['gemini-pro@google'].available).toBe(true); + expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true); + expect(result['grok-beta@xai'].available).toBe(true); + }); + test('should exclude all models when custom is "-all"', () => { + const customModels = '-all'; + const result = collectModelTable(mockModels, customModels); + + expect(result['gpt-3.5-turbo@openai'].available).toBe(false); + expect(result['gpt-4@openai'].available).toBe(false); + expect(result['gpt-3.5-turbo@azure'].available).toBe(false); + expect(result['gpt-4@azure'].available).toBe(false); + expect(result['gemini-pro@google'].available).toBe(false); + expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false); + expect(result['grok-beta@xai'].available).toBe(false); + }); + + test('should exclude all Azure models when custom is "-*azure"', () => { + const customModels = '-*azure'; + const result = collectModelTable(mockModels, customModels); + + expect(result['gpt-3.5-turbo@openai'].available).toBe(true); + expect(result['gpt-4@openai'].available).toBe(true); + expect(result['gpt-3.5-turbo@azure'].available).toBe(false); + expect(result['gpt-4@azure'].available).toBe(false); + 
expect(result['gemini-pro@google'].available).toBe(true); + expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true); + expect(result['grok-beta@xai'].available).toBe(true); + }); + + test('should exclude Google and XAI models when custom is "-*google,-*xai"', () => { + const customModels = '-*google,-*xai'; + const result = collectModelTable(mockModels, customModels); + + expect(result['gpt-3.5-turbo@openai'].available).toBe(true); + expect(result['gpt-4@openai'].available).toBe(true); + expect(result['gpt-3.5-turbo@azure'].available).toBe(true); + expect(result['gpt-4@azure'].available).toBe(true); + expect(result['gemini-pro@google'].available).toBe(false); + expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true); + expect(result['grok-beta@xai'].available).toBe(false); + }); + + test('All models except OpenAI should be excluded, and additional models should be added when customized as "-all,+*openai,gpt-4o@azure"', () => { + const customModels = '-all,+*openai,gpt-4o@azure'; + const result = collectModelTable(mockModels, customModels); + + expect(result['gpt-3.5-turbo@openai'].available).toBe(true); + expect(result['gpt-4@openai'].available).toBe(true); + expect(result['gpt-3.5-turbo@azure'].available).toBe(false); + expect(result['gpt-4@azure'].available).toBe(false); + expect(result['gemini-pro@google'].available).toBe(false); + expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false); + expect(result['grok-beta@xai'].available).toBe(false); + expect(result['gpt-4o@azure'].available).toBe(true); + }); +}); \ No newline at end of file