From 697bbff626998abbdd1cff197959a4d5ea6d89bd Mon Sep 17 00:00:00 2001 From: jumand909 Date: Wed, 18 Mar 2026 15:51:10 +0530 Subject: [PATCH] feat: add chat timestamps (frontend) and normalise LLM output whitespace (backend) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Frontend – ChatBox.tsx: - Render the already-stored `timestamp` field below each message bubble. Previously the field was populated but never displayed, so users had no indication of when messages were sent. - Timestamp is locale-aware (toLocaleTimeString with hour/minute) and aligned to match the bubble side (right for user, left for assistant). Backend – llms_service.py: - After receiving the final LLM response in get_response_with_tools(), strip leading/trailing whitespace and collapse runs of 3+ consecutive newlines to a single blank line. - Fixes inconsistent spacing that appeared when tool results with extra blank lines were mirrored into the model's output. Co-Authored-By: Claude Sonnet 4.6 --- backend/app/services/llms_service.py | 29 +++++++++++++- frontend/components/ChatBox.tsx | 58 +++++++++++++++++++++------- 2 files changed, 72 insertions(+), 15 deletions(-) diff --git a/backend/app/services/llms_service.py b/backend/app/services/llms_service.py index 5d5e91d..743a261 100644 --- a/backend/app/services/llms_service.py +++ b/backend/app/services/llms_service.py @@ -15,6 +15,7 @@ from app.core.singleton import get_openai_client from app.core.config import settings import json +import re from app.core.logger import get_logger logger = get_logger("llm_service") @@ -107,7 +108,33 @@ async def get_response_with_tools(conversation_history: list[dict]): }, ) - final_response = completion.choices[0].message.content + raw_response = completion.choices[0].message.content + + # --------------------------------------------------------------------------- + # Output normalisation + # + # The LLM sometimes returns responses with inconsistent whitespace: 
leading/ + # trailing blank lines, or runs of three or more consecutive newlines between + # paragraphs. This is especially common when tool results are pasted verbatim + # into the context, causing the model to mirror that extra spacing. + # + # We apply two lightweight fixes here rather than in the frontend so that + # every consumer of this function (REST API, tests, future streaming) gets + # the same clean text: + # + # 1. Strip leading and trailing whitespace from the entire response. + # 2. Collapse any run of 3+ consecutive newlines down to exactly two + # newlines (one blank line), which is the standard Markdown paragraph + # separator. Two-newline sequences (intentional paragraph breaks) are + # left untouched. + # --------------------------------------------------------------------------- + final_response = raw_response.strip() if raw_response else raw_response + if final_response: + # Replace 3 or more consecutive newlines with exactly 2 newlines. + # The `\n{3,}` pattern matches any run of 3+ newlines (including + # Windows-style \r\n sequences that were already normalised to \n by + # the OpenAI SDK). + final_response = re.sub(r"\n{3,}", "\n\n", final_response) logger.info(f"LLM Response: {final_response}") diff --git a/frontend/components/ChatBox.tsx b/frontend/components/ChatBox.tsx index 15c89fd..02777e4 100644 --- a/frontend/components/ChatBox.tsx +++ b/frontend/components/ChatBox.tsx @@ -266,20 +266,50 @@ export const ChatBox: React.FC = ({ )} -
- {message.role === "user" ? (
-
- {message.content}
-
- ) : (
-
- )}
+ {/*
+ * Message bubble wrapper: combines the chat bubble with a
+ timestamp displayed beneath it. Previously the timestamp
+ field was stored in every Message object but never
+ rendered, so users had no sense of when each message was
+ sent. Wrapping in a flex column lets us add the timestamp
+ without touching the bubble's own layout.
+ */}
+
+
+ {message.role === "user" ? (
+
+ {message.content}
+
+ ) : (
+
+ )}
+
+
+ {/*
+ * Timestamp: shown in muted text below the bubble,
+ aligned to match the bubble's side (right for user,
+ left for assistant). Uses toLocaleTimeString so the
+ format respects the user's locale (e.g. 2:34 PM vs
+ 14:34). The "opacity-0 group-hover:opacity-100"
+ approach was considered but a persistent low-opacity
+ display is friendlier for accessibility. */}
+
+ {message.timestamp.toLocaleTimeString([], {
+ hour: "2-digit",
+ minute: "2-digit",
+ })}
+