chore: completions
Keyrxng committed Nov 4, 2024
1 parent d5b62e5 commit ccfc8be
Showing 1 changed file with 102 additions and 0 deletions.
102 changes: 102 additions & 0 deletions src/adapters/openai/openai.ts
@@ -0,0 +1,102 @@
import OpenAI from "openai";
import { PluginContext } from "../../types/plugin-context-single";

// Result of a completion call: the model's answer plus token usage accounting.
export interface ResponseFromLlm {
  answer: string;
  tokenUsage: {
    input: number;
    output: number;
    total: number;
  };
}

export class Completions {
  protected client: OpenAI;

  constructor(apiKey: string) {
    this.client = new OpenAI({ apiKey });
  }

  // Builds the system/user message pair sent with every completion request.
  createSystemMessage({
    additionalContext,
    constraints,
    directives,
    embeddingsSearch,
    outputStyle,
    query,
  }: {
    directives: string[];
    constraints: string[];
    query: string;
    embeddingsSearch: string[];
    additionalContext: string[];
    outputStyle: string;
  }): OpenAI.Chat.Completions.ChatCompletionMessageParam[] {
    return [
      {
        role: "system",
        content: `You are UbiquityOS, a Telegram-integrated GitHub-first assistant for UbiquityDAO.
# Directives
${directives.map((d) => `- ${d}`).join("\n")}
# Constraints
${constraints.map((c) => `- ${c}`).join("\n")}
${embeddingsSearch.length > 0 ? `## Embeddings Search Results\n${embeddingsSearch.map((e) => `- ${e}`).join("\n")}` : ""}
${additionalContext.length > 0 ? `### Additional Context\n${additionalContext.map((c) => `- ${c}`).join("\n")}` : ""}
# Output Style
${outputStyle}
`
          .replace(/ {16}/g, "") // defensively strip any template indentation
          .trim(),
      },
      {
        role: "user",
        content: query,
      },
    ];
  }
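  // Illustrative example (not part of this commit): given directives
  // ["Answer tersely"], constraints ["Cite sources"], no embeddings results or
  // additional context, and outputStyle "Markdown", the system content renders as:
  //
  //   You are UbiquityOS, a Telegram-integrated GitHub-first assistant for UbiquityDAO.
  //   # Directives
  //   - Answer tersely
  //   # Constraints
  //   - Cite sources
  //   # Output Style
  //   Markdown
  //
  // (with blank lines left where the two optional sections were omitted)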

  // Runs a chat completion against the given model and returns the answer with
  // token usage, or undefined when the model returns no content.
  async createCompletion({
    directives,
    constraints,
    additionalContext,
    embeddingsSearch,
    outputStyle,
    query,
    model,
  }: {
    directives: string[];
    constraints: string[];
    additionalContext: string[];
    embeddingsSearch: string[];
    outputStyle: string;
    query: string;
    model: string;
  }): Promise<ResponseFromLlm | undefined> {
    const config = PluginContext.getInstance().config;
    const res: OpenAI.Chat.Completions.ChatCompletion = await this.client.chat.completions.create({
      model,
      messages: this.createSystemMessage({ directives, constraints, query, embeddingsSearch, additionalContext, outputStyle }),
      temperature: 0.2,
      max_completion_tokens: config.maxCompletionTokens,
      top_p: 0.5,
      frequency_penalty: 0,
      presence_penalty: 0,
      response_format: {
        type: "text",
      },
    });
    const answer = res.choices[0]?.message;
    if (answer?.content && res.usage) {
      const { prompt_tokens, completion_tokens, total_tokens } = res.usage;
      return {
        answer: answer.content,
        tokenUsage: { input: prompt_tokens, output: completion_tokens, total: total_tokens },
      };
    }
  }
}
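
For context, a minimal usage sketch of the new adapter (not part of the commit). It assumes PluginContext has been initialized elsewhere, that the API key comes from an OPENAI_API_KEY environment variable, and that the model name is illustrative:

import { Completions } from "./src/adapters/openai/openai";

async function main() {
  // Hypothetical setup: the key source and model name are placeholders, not from the commit.
  const completions = new Completions(process.env.OPENAI_API_KEY ?? "");
  const response = await completions.createCompletion({
    directives: ["Answer as concisely as possible."],
    constraints: ["Only discuss UbiquityDAO repositories."],
    additionalContext: [],
    embeddingsSearch: [],
    outputStyle: "Markdown with short paragraphs.",
    query: "What does the Completions adapter do?",
    model: "gpt-4o",
  });
  if (response) {
    console.log(response.answer);
    console.log(`input/output/total tokens: ${response.tokenUsage.input}/${response.tokenUsage.output}/${response.tokenUsage.total}`);
  }
}

main().catch(console.error);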
