From a0695e3d6721576b97f03b33a2cc7314f3da3d95 Mon Sep 17 00:00:00 2001
From: gentlementlegen
Date: Sun, 29 Dec 2024 15:36:00 +0900
Subject: [PATCH] chore: token limit is configurable through the configuration
 file

---
 src/configuration/content-evaluator-config.ts | 6 ++++++
 src/parser/content-evaluator-module.ts        | 8 +++-----
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/src/configuration/content-evaluator-config.ts b/src/configuration/content-evaluator-config.ts
index 050900c5..b0eb7d4d 100644
--- a/src/configuration/content-evaluator-config.ts
+++ b/src/configuration/content-evaluator-config.ts
@@ -11,6 +11,12 @@ const openAiType = Type.Object(
       description: "OpenAI model, e.g. gpt-4o",
       examples: ["gpt-4o"],
     }),
+    tokenCountLimit: Type.Integer({
+      default: 124000,
+      description:
+        "Token count limit for a given model. If the content goes beyond the token limit, content will get truncated during evaluation.",
+      examples: [124000],
+    }),
     /**
      * Specific endpoint to send the comments to.
      */
diff --git a/src/parser/content-evaluator-module.ts b/src/parser/content-evaluator-module.ts
index 8a8deb5d..cd394658 100644
--- a/src/parser/content-evaluator-module.ts
+++ b/src/parser/content-evaluator-module.ts
@@ -17,8 +17,6 @@ import { ContextPlugin } from "../types/plugin-input";
 import { GithubCommentScore, Result } from "../types/results";
 import { TfIdf } from "../helpers/tf-idf";
 
-const TOKEN_MODEL_LIMIT = 124000;
-
 /**
  * Evaluates and rates comments.
  */
@@ -178,13 +176,14 @@ export class ContentEvaluatorModule extends BaseModule {
   ): Promise {
     let commentRelevances: Relevances = {};
     let prCommentRelevances: Relevances = {};
+    const tokenLimit = this._configuration?.openAi.tokenCountLimit ?? 124000;
 
     if (comments.length) {
       const dummyResponse = JSON.stringify(this._generateDummyResponse(comments), null, 2);
       const maxTokens = this._calculateMaxTokens(dummyResponse);
       let promptForComments = this._generatePromptForComments(specification, comments, allComments);
 
-      if (this._calculateMaxTokens(promptForComments, Infinity) > TOKEN_MODEL_LIMIT) {
+      if (this._calculateMaxTokens(promptForComments, Infinity) > tokenLimit) {
         const tfidf = new TfIdf();
         const mostImportantComments = tfidf.getTopComments(specification, allComments);
         promptForComments = this._generatePromptForComments(
@@ -193,7 +192,6 @@ export class ContentEvaluatorModule extends BaseModule {
           mostImportantComments.map((o) => o.comment)
         );
       }
 
-      console.log(promptForComments);
       commentRelevances = await this._submitPrompt(promptForComments, maxTokens);
     }
 
@@ -202,7 +200,7 @@ export class ContentEvaluatorModule extends BaseModule {
       const maxTokens = this._calculateMaxTokens(dummyResponse);
       let promptForPrComments = this._generatePromptForPrComments(specification, prComments);
 
-      if (this._calculateMaxTokens(promptForPrComments, Infinity) > TOKEN_MODEL_LIMIT) {
+      if (this._calculateMaxTokens(promptForPrComments, Infinity) > tokenLimit) {
         const tfidf = new TfIdf();
         const mostImportantComments = tfidf.getTopComments(specification, allComments);
         promptForPrComments = this._generatePromptForComments(