feat: Add maxTokens parameter to LLMCompletionRequest and update related methods

This commit is contained in:
Nawaz Dhandala 2025-12-16 15:33:33 +00:00
parent 64a584dd76
commit bdd894f57e
No known key found for this signature in database
GPG key ID: 96C5DCA24769DBCA
2 changed files with 5 additions and 0 deletions

1
.gitignore vendored
View file

@@ -128,3 +128,4 @@ MCP/.env
MCP/node_modules
Dashboard/public/sw.js
.claude/settings.local.json
Common/.claude/settings.local.json

View file

@@ -16,6 +16,7 @@ export interface LLMMessage {
/**
 * Request payload for an LLM chat-completion call.
 */
export interface LLMCompletionRequest {
// Ordered conversation messages sent to the model.
messages: Array<LLMMessage>;
// Sampling temperature; callers in this diff fall back to 0.7 when omitted.
temperature?: number;
// Upper bound on generated tokens; callers in this diff fall back to 4096 when omitted
// (mapped to `max_tokens` for OpenAI-style APIs and `num_predict` for Ollama).
maxTokens?: number;
// Provider connection settings (e.g. API key is read from it in this diff).
// NOTE(review): full shape of LLMProviderConfig is defined elsewhere — not visible here.
llmProviderConfig: LLMProviderConfig;
}
@@ -80,6 +81,7 @@ export default class LLMService {
};
}),
temperature: request.temperature ?? 0.7,
max_tokens: request.maxTokens || 4096,
},
headers: {
Authorization: `Bearer ${config.apiKey}`,
@@ -153,6 +155,7 @@ export default class LLMService {
model: modelName,
messages: userMessages,
temperature: request.temperature ?? 0.7,
max_tokens: request.maxTokens || 4096,
};
if (systemMessage) {
@@ -241,6 +244,7 @@ export default class LLMService {
stream: false,
options: {
temperature: request.temperature ?? 0.7,
num_predict: request.maxTokens || 4096,
},
},
headers: {