Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions backend/package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions backend/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
"author": "",
"license": "ISC",
"dependencies": {
"@google/generative-ai": "^0.24.0",
"axios": "^1.7.9",
"cors": "^2.8.5",
"csv-parse": "^5.6.0",
Expand Down
2 changes: 2 additions & 0 deletions backend/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import logtypeStatus from './routes/logTypeStatus';
import connectToDatabase from './db/connect';
import severityInfo from './routes/severityInfo';
import getLogAnalytics from './routes/getLogAnalytics';
import chatRouter from './routes/chat';

dotenv.config();

Expand All @@ -32,6 +33,7 @@ app.use('/api', getStats)
app.use('/api', logtypeStatus)
app.use('/api', severityInfo)
app.use('/api', getLogAnalytics)
app.use('/api', chatRouter)

app.listen(PORT, (): void => {
console.log(`Server is running on http://localhost:${PORT}`);
Expand Down
72 changes: 72 additions & 0 deletions backend/src/routes/chat.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
// Route module: POST /api/chat — analyzes a stored Linux log line with Google Gemini.
import express from 'express';
import { Request, Response } from 'express';
import { LinuxLogModel } from '../models/LinuxLogModel';
import { GoogleGenerativeAI } from '@google/generative-ai';
import dotenv from 'dotenv';

// Load .env here so GOOGLE_API_KEY is present even if this module is imported
// before the app entry point calls dotenv.config() (dotenv does not override
// already-set variables, so a second call is safe).
dotenv.config();

const router = express.Router();
// Parse JSON bodies for every route registered on this router.
router.use(express.json());

// Fail fast at module load time rather than erroring on the first request.
if (!process.env.GOOGLE_API_KEY) {
  throw new Error('Missing GOOGLE_API_KEY in environment variables');
}

const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY);
// NOTE(review): model name is hard-coded — confirm 'gemini-2.0-flash' is the intended tier.
const model = genAI.getGenerativeModel({ model: 'gemini-2.0-flash' });

/**
 * Build a Gemini prompt tailored to a raw log line's apparent severity.
 *
 * The opening sentence is chosen by case-insensitive keyword match, with
 * "error" taking precedence over "info"; anything else gets a generic
 * explanation request. An optional user follow-up question and a fixed
 * formatting/length instruction are appended.
 *
 * @param line     Raw log line to be analyzed.
 * @param userText Optional extra question from the user.
 * @returns The fully assembled prompt string.
 */
const getSeverityPrompt = (line: string, userText?: string) => {
  const normalized = line.toLowerCase();

  // Severity-specific opening ("error" wins when both keywords appear).
  const opening = normalized.includes("error")
    ? `This is an error log: "${line}". What caused this and how can we fix it? Provide a step-by-step fix if possible.`
    : normalized.includes("info")
      ? `This is an informational log: "${line}". Why did this happen, and how can we improve our system to avoid this in future?`
      : `Here is a log line: "${line}". Can you explain what it might mean and whether any action is required?`;

  const parts = [opening];
  if (userText) {
    parts.push(`\nUser's additional question: "${userText}"`);
  }
  parts.push(`\n\nPlease provide a detailed analysis of the log and give answer in step wise. and your answer should not exceed more than 500 words.`);

  return parts.join('');
};

/**
 * POST /chat — analyze one stored log line with Gemini.
 *
 * Body: { logId: string; userText?: string }
 * Responses:
 *   200 { response: string } — generated analysis text
 *   400 { error }            — logId missing from the body
 *   404 { error }            — no log document with that id
 *   500 { error }            — lookup, generation, or save failed
 */
router.post('/chat', async (req: Request, res: Response): Promise<void> => {
  const { logId, userText } = req.body;

  if (!logId) {
    res.status(400).json({ error: "logId is required." });
    // BUG FIX: the original fell through into the try block after sending the
    // 400, querying findById(undefined) and risking a second response.
    return;
  }

  try {
    const rowLog = await LinuxLogModel.findById(logId);

    if (!rowLog) {
      res.status(404).json({ error: "Log not found." });
      return;
    }

    // Build a severity-aware prompt from the raw line (plus any follow-up
    // question) and ask Gemini for the analysis.
    const prompt = getSeverityPrompt(rowLog.rawLine, userText);
    const response = await model.generateContent(prompt);
    const generatedText = response.response.text();

    // Persist that this log has been analyzed before replying.
    rowLog.analyzed = true;
    await rowLog.save();

    res.json({ response: generatedText });
  } catch (error) {
    console.error("Error while processing log:", error);
    res.status(500).json({ error: "Something went wrong while analyzing the log." });
  }
});

export default router;
Loading
Loading