Skip to content

Commit f28d516

Browse files
authored
Merge pull request #1487 from rocket-admin/backend_aws_bedrock
refactor: enhance logging and code formatting in SharedJobsService
2 parents 1c455a0 + ae8300c commit f28d516

File tree

2 files changed

+370
-366
lines changed

2 files changed

+370
-366
lines changed

backend/src/entities/ai/amazon-bedrock/amazon-bedrock.ai.provider.ts

Lines changed: 34 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -4,38 +4,40 @@ import { IAIProvider } from './ai-provider.interface.js';
44

55
@Injectable()
66
export class AmazonBedrockAiProvider implements IAIProvider {
7-
private readonly bedrockRuntimeClient: BedrockRuntimeClient;
8-
private readonly modelId: string = 'global.anthropic.claude-sonnet-4-5-20250929-v1:0';
9-
private readonly temperature: number = 0.7;
10-
private readonly maxTokens: number = 1024;
11-
private readonly region: string = 'us-west-2';
12-
private readonly topP: number = 0.9;
7+
private readonly bedrockRuntimeClient: BedrockRuntimeClient;
8+
private readonly modelId: string = 'global.anthropic.claude-sonnet-4-5-20250929-v1:0';
9+
private readonly temperature: number = 0.7;
10+
private readonly maxTokens: number = 1024;
11+
private readonly region: string = 'us-west-2';
12+
private readonly topP: number = 0.9;
1313

14-
constructor() {
15-
this.bedrockRuntimeClient = new BedrockRuntimeClient({
16-
region: this.region,
17-
});
18-
}
19-
public async generateResponse(prompt: string): Promise<string> {
20-
const conversation = [
21-
{
22-
role: 'user' as const,
23-
content: [{ text: prompt }],
24-
},
25-
];
14+
constructor() {
15+
this.bedrockRuntimeClient = new BedrockRuntimeClient({
16+
region: this.region,
17+
});
18+
}
19+
public async generateResponse(prompt: string): Promise<string> {
20+
const conversation = [
21+
{
22+
role: 'user' as const,
23+
content: [{ text: prompt }],
24+
},
25+
];
2626

27-
const command = new ConverseCommand({
28-
modelId: this.modelId,
29-
messages: conversation,
30-
inferenceConfig: { maxTokens: this.maxTokens, temperature: this.temperature, topP: this.topP },
31-
});
32-
try {
33-
const response = await this.bedrockRuntimeClient.send(command);
34-
const responseText = response.output.message?.content[0].text;
35-
return responseText || 'No response generated.';
36-
} catch (error) {
37-
console.error('Error generating AI response:', error);
38-
throw new Error('Failed to generate AI response.');
39-
}
40-
}
27+
const command = new ConverseCommand({
28+
modelId: this.modelId,
29+
messages: conversation,
30+
inferenceConfig: { maxTokens: this.maxTokens, temperature: this.temperature, topP: this.topP },
31+
});
32+
try {
33+
const response = await this.bedrockRuntimeClient.send(command);
34+
console.info('AI response received from Amazon Bedrock.');
35+
const responseText = response.output.message?.content[0].text;
36+
console.info('AI response text. ', responseText);
37+
return responseText || 'No response generated.';
38+
} catch (error) {
39+
console.error('Error generating AI response:', error);
40+
throw new Error('Failed to generate AI response.');
41+
}
42+
}
4143
}

0 commit comments

Comments (0)