From 33480f493221951266b6bb39c953154ca3627835 Mon Sep 17 00:00:00 2001 From: Mathis Rouget Date: Wed, 6 Aug 2025 22:56:29 +0200 Subject: [PATCH] =?UTF-8?q?=E2=9C=A8(ai)=20add=20backend=20for=20message?= =?UTF-8?q?=20generation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/env.md | 2 + env.d/development/backend.defaults | 1 + src/backend/core/ai/ai_prompts.json | 10 +- src/backend/core/ai/message_generator.py | 51 ++++ src/backend/core/ai/thread_summarizer.py | 14 +- src/backend/core/ai/utils.py | 26 +- src/backend/core/api/openapi.json | 155 ++++++++++++ src/backend/core/api/viewsets/config.py | 8 +- src/backend/core/api/viewsets/thread.py | 86 +++++++ src/backend/core/tests/api/test_config.py | 2 + src/backend/messages/settings.py | 5 + .../api/gen/models/config_retrieve200.ts | 1 + .../generate_new_message_request_request.ts | 20 ++ .../generate_reply_message_request_request.ts | 20 ++ .../src/features/api/gen/models/index.ts | 4 + .../threads_generate_new_message_create200.ts | 11 + ...hreads_generate_reply_message_create200.ts | 11 + .../src/features/api/gen/threads/threads.ts | 227 ++++++++++++++++++ .../src/features/providers/config.tsx | 1 + src/frontend/src/hooks/use-feature.ts | 3 + 20 files changed, 642 insertions(+), 16 deletions(-) create mode 100644 src/backend/core/ai/message_generator.py create mode 100644 src/frontend/src/features/api/gen/models/generate_new_message_request_request.ts create mode 100644 src/frontend/src/features/api/gen/models/generate_reply_message_request_request.ts create mode 100644 src/frontend/src/features/api/gen/models/threads_generate_new_message_create200.ts create mode 100644 src/frontend/src/features/api/gen/models/threads_generate_reply_message_create200.ts diff --git a/docs/env.md b/docs/env.md index 0843e423..fdbcb463 100644 --- a/docs/env.md +++ b/docs/env.md @@ -277,6 +277,8 @@ The application uses a new environment file structure with `.defaults` and `.loc | 
`AI_MODEL` | None | Default model used for AI features | Optional | | `AI_FEATURE_SUMMARY_ENABLED` | `False` | Default enabled mode for summary AI features | Required | | `AI_FEATURE_AUTOLABELS_ENABLED` | `False` | Default enabled mode for label AI features | Required | +| `AI_FEATURE_MESSAGES_GENERATION_ENABLED` | `False` | Default enabled mode for message generation AI features | Required | + ### External Services diff --git a/env.d/development/backend.defaults b/env.d/development/backend.defaults index b0bca65c..be2a1932 100644 --- a/env.d/development/backend.defaults +++ b/env.d/development/backend.defaults @@ -81,6 +81,7 @@ AI_MODEL= # AI features AI_FEATURE_SUMMARY_ENABLED=False AI_FEATURE_AUTOLABELS_ENABLED=False +AI_FEATURE_MESSAGES_GENERATION_ENABLED=False # Interoperability # Drive - https://github.com/suitenumerique/drive diff --git a/src/backend/core/ai/ai_prompts.json b/src/backend/core/ai/ai_prompts.json index bf035aaa..cc9afcdd 100644 --- a/src/backend/core/ai/ai_prompts.json +++ b/src/backend/core/ai/ai_prompts.json @@ -1,10 +1,14 @@ { "en-us": { "summary_query": "You are an intelligent assistant that summarizes email threads. You should summarize the content of the conversation in one or two lines maximum WITHOUT stating 'Summary:'. If important links appear in the emails, you must mention them clearly in Markdown format and integrate them into the summary. If there are no important links, the summary should not contain any information about links. \n Here is the conversation:\n\n{messages}\n\n. 
Summarize the above emails in Markdown in the language '{language}' :", - "autolabels_query": "Current date and time: {date_time}\nEmail or email thread: {messages}\nList of existing labels: {labels}\nFor this email or email thread, you must check for each label whether it is relevant to assign it.\nYou should assign a label only if it is truly relevant!\nRead the description of each label carefully to strictly follow the criteria provided by the user.\nIf no label is relevant, you must not assign any.\n\nYour response must be in the following format:\n[\"LABEL1\", \"LABEL2\", ...]\nNO additional text like 'Here is the list of relevant labels:'.\nIf no label is relevant, you must return an empty list: [] with no other comment !! Your reponse must always be only a list (than can be empty). No explanation or additional text should be included than the list itself." + "autolabels_query": "Current date and time: {date_time}\nEmail or email thread: {messages}\nList of existing labels: {labels}\nFor this email or email thread, you must check for each label whether it is relevant to assign it.\nYou should assign a label only if it is truly relevant!\nRead the description of each label carefully to strictly follow the criteria provided by the user.\nIf no label is relevant, you must not assign any.\n\nYour response must be in the following format:\n[\"LABEL1\", \"LABEL2\", ...]\nNO additional text like 'Here is the list of relevant labels:'.\nIf no label is relevant, you must return an empty list: [] with no other comment !! Your reponse must always be only a list (than can be empty). No explanation or additional text should be included than the list itself.", + "new_message_generation_query": "You are an assistant integrated into the email writing area of an email client. THE EXPECTED RESPONSE IS AN EMAIL BODY IN THE LANGUAGE {language}. Your response should ONLY be the body of the message (do not mention the subject or recipients) DO NOT put quotes around your response. 
If your response contains fields to be filled by the user, MAKE THEM ALL IN BOLD MARKDOWN AND IN BRACKETS so that the user notices them (e.g., **[Your Name]**, **[Date]**, **[Signature]**, **[Number of Users]**). Here is an example of generation: 'User prompt: 'Request an API key for Mistral'. Your response: 'Hello,\nCould you please provide me with an API key for Mistral?\nBest regards,\n[Signature]' Here is the current email draft:\n\n{draft}\n\n. Here is the user prompt : {user_prompt}\nIf the draft does not contain a signature and only in this case, then sign with {name}. NEVER CHANGE ANYTHING ELSE IN THE DRAFT OTHER THAN WHAT THE USER REQUESTS!!!", + "thread_response_generation_query": "You are an assistant integrated into the email writing area of an email client. THE EXPECTED RESPONSE IS AN EMAIL BODY IN THE LANGUAGE {language}. Your response should ONLY be the body of the message (do not mention the subject or recipients) DO NOT put quotes around your response. If your response contains fields to be filled by the user, MAKE THEM ALL IN BOLD MARKDOWN AND IN BRACKETS so that the user notices them (e.g., **[Your Name]**, **[Date]**, **[Signature]**, **[Number of Users]**). Here is an example of generation: 'User prompt: 'Request an API key for Mistral'. Previous messages: 'Hello Thomas,\nI recommend using Mistral for AI.\nBest regards,\nNicolas' User prompt: 'request the key'. Your response: Hello Nicolas,\nThank you for your response, could you please provide me with an API key for Mistral?\nBest regards,\nThomas' Your response: 'Hello,\nCould you please provide me with an API key for Mistral?\nBest regards,\n[Signature]' Here is the current email draft:\n\n{draft}\n\n. Here are the previous messages in the thread:\n\n{messages}\n\n. Here is the user prompt : {user_prompt}\nIf the draft does not contain a signature and only in this case, then sign with {name}. NEVER CHANGE ANYTHING ELSE IN THE DRAFT OTHER THAN WHAT THE USER REQUESTS!!!" 
}, "fr-fr": { "summary_query": "Tu es un assistant intelligent qui résume des boucles de mails. Tu dois résumer le contenu de la conversation en une ou deux lignes maximum SANS préciser 'Résumé:'. Si des liens importants apparaissent dans les emails, tu dois les mentionner dans le résumé de façon claire en Markdown et les intégrer au résumé. Si les liens ne sont pas importants ou qu'il n'y en a pas, le résumé ne doit pas contenir d'information à ce sujet ni même le mentionner. Voici la conversation:\n\n{messages}\n\n. Résumé des emails ci-dessus en Markdown dans la langue '{language}' :", - "autolabels_query": "Date et heure actuelle: {date_time}\nEmail ou conversation d'emails: {messages}\nListe des labels existants: {labels}\nA partir de ce mail ou de cette conversation d'emails, tu dois regarder pour chaque label s'il est pertinent de l'assigner à ce mail ou cette conversation.\nTu ne dois assigner un label que s'il est réellement pertinent!\nLis bien la description de chaque label pour bien respecter les critères renseignés par l'utilisateur.\nSi aucun label n'est pertinent, tu ne dois en assigner aucun.\n\nTa réponse doit être au format suivant:\n[\"LABEL1\", \"LABEL2\", ...]\nSANS aucun texte supplémentaire du style \"Voici la liste des labels pertinents:\".\nSi aucun label n'est pertinent, tu dois renvoyer une liste vide: [] sans aucun autre commentaire !! 
Ta réponse doit dans tous les cas être uniquement une liste (éventuellement vide)" + "autolabels_query": "Date et heure actuelle: {date_time}\nEmail ou conversation d'emails: {messages}\nListe des labels existants: {labels}\nA partir de ce mail ou de cette conversation d'emails, tu dois regarder pour chaque label s'il est pertinent de l'assigner à ce mail ou cette conversation.\nTu ne dois assigner un label que s'il est réellement pertinent!\nLis bien la description de chaque label pour bien respecter les critères renseignés par l'utilisateur.\nSi aucun label n'est pertinent, tu ne dois en assigner aucun.\n\nTa réponse doit être au format suivant:\n[\"LABEL1\", \"LABEL2\", ...]\nSANS aucun texte supplémentaire du style \"Voici la liste des labels pertinents:\".\nSi aucun label n'est pertinent, tu dois renvoyer une liste vide: [] sans aucun autre commentaire !! Ta réponse doit dans tous les cas être uniquement une liste (éventuellement vide)", + "new_message_generation_query": "Tu es un assistant intégré dans la zone d'écriture des mails dans une boîte mail. LA REPONSE ATTENDUE EST UN CORPS DE MAIL dans la langue {language}. Ta réponse doit être UNIQUEMENT le corps du message (ne mentionne pas l'objet ni les destinataires) NE mets PAS de guillemets autour de ta réponse. Si ta réponse contient des champs à remplir par l'utilisateur, METS LES TOUS EN GRAS MARKDOWN ET ENTRE CROCHETS pour que l'utilisateur le remarque. (ex : **[Votre nom]**, **[Date]**, **[Signature]**, **[Nombre d'utilisateurs]**). Voici un exemple de génération : 'Prompt de l'utilisateur : 'Demande une clé API pour Mistral'. Ta réponse : 'Bonjour,\nPourriez-vous me fournir une clé API pour Mistral ?\nBien à vous,\n[Signature]' Voici le brouillon de mail actuel :\n\n{draft}\n\n. Voici le prompt de l'utilisateur : {user_prompt}\nSi le brouillon ne contient pas de signature et uniquement dans ce cas, alors signe avec {name}. 
NE CHANGE JAMAIS RIEN D'AUTRE PAR RAPPORT AU BROUILLON QUE CE QUE L'UTILISATEUR TE DEMANDE !!!", + "thread_response_generation_query": "Tu es un assistant intégré dans la zone d'écriture des mails dans une boîte mail. LA REPONSE ATTENDUE EST UN CORPS DE MAIL dans la langue {language}. Ta réponse doit être UNIQUEMENT le corps du message (ne mentionne pas l'objet ni les destinataires) NE mets PAS de guillemets autour de ta réponse. Si ta réponse contient des champs à remplir par l'utilisateur, METS LES TOUS EN GRAS MARKDOWN ET ENTRE CROCHETS pour que l'utilisateur le remarque. (ex : **[Votre nom]**, **[Date]**, **[Signature]**, **[Nombre d'utilisateurs]**). Voici un exemple de génération : 'Prompt de l'utilisateur : 'Demande une clé API pour Mistral'. Messages précédents: 'Bonjour Thomas,\nJe vous conseille d'utiliser Mistral pour l'IA.\nBien à vous,\nNicolas' Prompt de l'utilisateur : 'demande la clé'. Ta réponse : Bonjour Nicolas,\nMerci pour votre réponse, pourriez-vous me fournir une clé API pour Mistral ?\nBien à vous,\nThomas' Ta réponse : 'Bonjour,\nPourriez-vous me fournir une clé API pour Mistral ?\nBien à vous,\n[Signature]' Voici le brouillon de mail actuel :\n\n{draft}\n\n. Voici les anciens messages du thread : \n\n{messages}\n\n. Voici le prompt de l'utilisateur : {user_prompt}\nSi le brouillon ne contient pas de signature et uniquement dans ce cas, alors signe avec {name}. NE CHANGE JAMAIS RIEN D'AUTRE PAR RAPPORT AU BROUILLON QUE CE QUE L'UTILISATEUR TE DEMANDE !!!" 
} -} \ No newline at end of file +} diff --git a/src/backend/core/ai/message_generator.py b/src/backend/core/ai/message_generator.py new file mode 100644 index 00000000..80e8fd10 --- /dev/null +++ b/src/backend/core/ai/message_generator.py @@ -0,0 +1,51 @@ +from django.conf import settings +from django.utils import translation + +from core.ai.utils import get_messages_from_thread, load_ai_prompts +from core.models import Thread +from core.services.ai_service import AIService + + +def generate_new_message(draft: str, user_prompt: str, name: str) -> str: + """Generates a new mail using the AI model based on user prompt.""" + + # Determine the active or fallback language + active_language = translation.get_language() or settings.LANGUAGE_CODE + + # Get the prompt for the active language + prompts = load_ai_prompts() + prompt_template = prompts.get(active_language) + prompt_query = prompt_template["new_message_generation_query"] + prompt = prompt_query.format( + draft=draft, language=active_language, user_prompt=user_prompt, name=name + ) + + answer = AIService().call_ai_api(prompt) + + return answer + + +def generate_reply_message(draft: str, thread: Thread, user_prompt: str, name: str = "") -> str: + """Generates a reply message using the AI model based on the thread context and user prompt.""" + + # Determine the active or fallback language + active_language = translation.get_language() or settings.LANGUAGE_CODE + + # Extract messages from the thread + messages = get_messages_from_thread(thread) + messages_as_text = "\n\n".join([message.get_as_text() for message in messages]) + + # Get the prompt for the active language + prompts = load_ai_prompts() + prompt_template = prompts.get(active_language) + prompt_query = prompt_template["thread_response_generation_query"] + prompt = prompt_query.format( + draft=draft, + messages=messages_as_text, + language=active_language, + user_prompt=user_prompt, name=name, + ) + + answer = AIService().call_ai_api(prompt) + + return answer diff --git 
a/src/backend/core/ai/thread_summarizer.py b/src/backend/core/ai/thread_summarizer.py index ccccc611..dd743ab7 100644 --- a/src/backend/core/ai/thread_summarizer.py +++ b/src/backend/core/ai/thread_summarizer.py @@ -1,10 +1,6 @@ -import json -from pathlib import Path - -from django.conf import settings from django.utils import translation -from core.ai.utils import get_messages_from_thread +from core.ai.utils import get_active_language, get_messages_from_thread, load_ai_prompts from core.models import Thread from core.services.ai_service import AIService @@ -13,18 +9,14 @@ def summarize_thread(thread: Thread) -> str: """Summarizes a thread using the OpenAI client based on the active Django language.""" # Determine the active or fallback language - active_language = translation.get_language() or settings.LANGUAGE_CODE + active_language = get_active_language() # Extract messages from the thread messages = get_messages_from_thread(thread) messages_as_text = "\n\n".join([message.get_as_text() for message in messages]) - # Load prompt templates from ai_prompts.json - prompts_path = Path(__file__).parent / "ai_prompts.json" - with open(prompts_path, encoding="utf-8") as f: - prompts = json.load(f) - # Get the prompt for the active language + prompts = load_ai_prompts() prompt_template = prompts.get(active_language) prompt_query = prompt_template["summary_query"] prompt = prompt_query.format(messages=messages_as_text, language=active_language) diff --git a/src/backend/core/ai/utils.py b/src/backend/core/ai/utils.py index ca0986d9..88828b6f 100644 --- a/src/backend/core/ai/utils.py +++ b/src/backend/core/ai/utils.py @@ -1,10 +1,25 @@ +import json +from pathlib import Path from typing import List from django.conf import settings +from django.utils import translation from core.models import Message, Thread +def get_active_language() -> str: + """Get the active language or fallback to the default language code.""" + return translation.get_language() or settings.LANGUAGE_CODE 
+ + +def load_ai_prompts() -> dict: + """Load AI prompts from the ai_prompts.json file.""" + prompts_path = Path(__file__).parent / "ai_prompts.json" + with open(prompts_path, encoding="utf-8") as f: + return json.load(f) + + def get_messages_from_thread(thread: Thread) -> List[Message]: """ Extract messages from a thread and return them as a list of text representations using Message.get_as_text(). @@ -32,7 +47,7 @@ def is_ai_enabled() -> bool: def is_ai_summary_enabled() -> bool: """ - Check if AI summary features are enabled. + Check if AI summary feature is enabled. This is determined by the presence of the AI settings and if AI_FEATURE_SUMMARY_ENABLED is set to 1. """ return all( @@ -48,3 +63,12 @@ def is_auto_labels_enabled() -> bool: return all( [is_ai_enabled(), getattr(settings, "AI_FEATURE_AUTOLABELS_ENABLED", False)] ) + +def is_ai_messages_generation_enabled() -> bool: + """ + Check if AI messages generation feature is enabled. + This is determined by the presence of the AI settings and if AI_FEATURE_MESSAGES_GENERATION_ENABLED is set to 1. 
+ """ + return all( + [is_ai_enabled(), getattr(settings, "AI_FEATURE_MESSAGES_GENERATION_ENABLED", False)] + ) diff --git a/src/backend/core/api/openapi.json b/src/backend/core/api/openapi.json index 8a221f36..ea4d59c5 100644 --- a/src/backend/core/api/openapi.json +++ b/src/backend/core/api/openapi.json @@ -186,6 +186,10 @@ "type": "boolean", "readOnly": true }, + "AI_FEATURE_MESSAGES_GENERATION_ENABLED": { + "type": "boolean", + "readOnly": true + }, "DRIVE": { "type": "object", "description": "The URLs of the Drive external service.", @@ -224,6 +228,7 @@ "AI_ENABLED", "AI_FEATURE_SUMMARY_ENABLED", "AI_FEATURE_AUTOLABELS_ENABLED", + "AI_FEATURE_MESSAGES_GENERATION_ENABLED", "SCHEMA_CUSTOM_ATTRIBUTES_USER", "SCHEMA_CUSTOM_ATTRIBUTES_MAILDOMAIN" ] @@ -3235,6 +3240,71 @@ } } }, + "/api/v1.0/threads/{id}/generate-reply-message/": { + "post": { + "operationId": "threads_generate_reply_message_create", + "description": "Generate a reply message using the AI model based on the thread context and user's prompt.", + "parameters": [ + { + "in": "path", + "name": "id", + "schema": { + "type": "string" + }, + "required": true + } + ], + "tags": [ + "threads" + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GenerateReplyMessageRequestRequest" + } + }, + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/GenerateReplyMessageRequestRequest" + } + } + } + }, + "security": [ + { + "cookieAuth": [] + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + } + } + } + }, + "description": "Reply message successfully generated." + }, + "403": { + "content": { + "application/json": { + "schema": { + "detail": "Permission denied" + } + } + }, + "description": "User does not have permission to generate a reply message." 
+ } + } + } + }, "/api/v1.0/threads/{id}/refresh-summary/": { "post": { "operationId": "threads_refresh_summary_create", @@ -3637,6 +3707,61 @@ } } }, + "/api/v1.0/threads/generate-new-message/": { + "post": { + "operationId": "threads_generate_new_message_create", + "description": "Generate a new message using the AI model based on the user's prompt and draft.", + "tags": [ + "threads" + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GenerateNewMessageRequestRequest" + } + }, + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/GenerateNewMessageRequestRequest" + } + } + } + }, + "security": [ + { + "cookieAuth": [] + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "string" + } + } + } + } + }, + "description": "Message successfully generated." + }, + "403": { + "content": { + "application/json": { + "schema": { + "detail": "Permission denied" + } + } + }, + "description": "User does not have permission to generate a new message." + } + } + } + }, "/api/v1.0/threads/stats/": { "get": { "operationId": "threads_stats_retrieve", @@ -4046,6 +4171,36 @@ "type": "string", "description": "* `unread` - unread\n* `starred` - starred\n* `trashed` - trashed" }, + "GenerateNewMessageRequestRequest": { + "type": "object", + "properties": { + "draft": { + "type": "string", + "minLength": 1, + "description": "Current message draft." + }, + "user_prompt": { + "type": "string", + "minLength": 1, + "description": "User prompt for the AI." + } + } + }, + "GenerateReplyMessageRequestRequest": { + "type": "object", + "properties": { + "draft": { + "type": "string", + "minLength": 1, + "description": "Current message draft." + }, + "user_prompt": { + "type": "string", + "minLength": 1, + "description": "User prompt for the AI." 
+ } + } + }, "ImportFileRequest": { "type": "object", "description": "Serializer for importing email files.", diff --git a/src/backend/core/api/viewsets/config.py b/src/backend/core/api/viewsets/config.py index 44331a19..113ef141 100644 --- a/src/backend/core/api/viewsets/config.py +++ b/src/backend/core/api/viewsets/config.py @@ -6,7 +6,7 @@ from drf_spectacular.utils import OpenApiResponse, extend_schema from rest_framework.permissions import AllowAny -from core.ai.utils import is_ai_enabled, is_ai_summary_enabled, is_auto_labels_enabled +from core.ai.utils import is_ai_enabled, is_ai_summary_enabled, is_auto_labels_enabled, is_ai_messages_generation_enabled class ConfigView(drf.views.APIView): @@ -53,6 +53,10 @@ class ConfigView(drf.views.APIView): "type": "boolean", "readOnly": True, }, + "AI_FEATURE_MESSAGES_GENERATION_ENABLED": { + "type": "boolean", + "readOnly": True, + }, "DRIVE": { "type": "object", "description": "The URLs of the Drive external service.", @@ -88,6 +92,7 @@ class ConfigView(drf.views.APIView): "AI_ENABLED", "AI_FEATURE_SUMMARY_ENABLED", "AI_FEATURE_AUTOLABELS_ENABLED", + "AI_FEATURE_MESSAGES_GENERATION_ENABLED", "SCHEMA_CUSTOM_ATTRIBUTES_USER", "SCHEMA_CUSTOM_ATTRIBUTES_MAILDOMAIN", ], @@ -120,6 +125,7 @@ def get(self, request): dict_settings["AI_ENABLED"] = is_ai_enabled() dict_settings["AI_FEATURE_SUMMARY_ENABLED"] = is_ai_summary_enabled() dict_settings["AI_FEATURE_AUTOLABELS_ENABLED"] = is_auto_labels_enabled() + dict_settings["AI_FEATURE_MESSAGES_GENERATION_ENABLED"] = is_ai_messages_generation_enabled() # Drive service if base_url := settings.DRIVE_CONFIG.get("base_url"): diff --git a/src/backend/core/api/viewsets/thread.py b/src/backend/core/api/viewsets/thread.py index 3b13f3f5..83318dea 100644 --- a/src/backend/core/api/viewsets/thread.py +++ b/src/backend/core/api/viewsets/thread.py @@ -9,10 +9,13 @@ OpenApiParameter, OpenApiResponse, extend_schema, + inline_serializer, ) from rest_framework import mixins, status, viewsets +from 
rest_framework import serializers as drf_serializers from core import enums, models +from core.ai.message_generator import generate_new_message, generate_reply_message from core.ai.thread_summarizer import summarize_thread from core.services.search import search_threads @@ -467,6 +470,89 @@ def refresh_summary(self, request, pk): # pylint: disable=unused-argument {"summary": thread.summary}, status=status.HTTP_200_OK ) + @extend_schema( + request=inline_serializer( + name="GenerateNewMessageRequest", + fields={ + "draft": drf_serializers.CharField( + required=False, help_text="Current message draft." + ), + "user_prompt": drf_serializers.CharField( + required=False, help_text="User prompt for the AI." + ), + }, + ), + responses={ + 200: OpenApiResponse( + response={ + "type": "object", + "properties": {"message": {"type": "string"}}, + }, + description="Message successfully generated.", + ), + 403: OpenApiResponse( + response={"detail": "Permission denied"}, + description="User does not have permission to generate a new message.", + ), + }, + tags=["threads"], + ) + @drf.decorators.action( + detail=False, + methods=["post"], + url_path="generate-new-message", + url_name="generate-new-message", + ) + def generate_new_message(self, request): + """Generate a new message using the AI model based on the user's prompt and draft.""" + draft = request.data.get("draft", "") + user_prompt = request.data.get("user_prompt", "") + user = self.request.user + name = getattr(user, "full_name", getattr(user, "username", "")) + message = generate_new_message(draft, user_prompt, name) + return drf.response.Response({"message": message}, status=status.HTTP_200_OK) + + @extend_schema( + request=inline_serializer( + name="GenerateReplyMessageRequest", + fields={ + "draft": drf_serializers.CharField( + required=False, help_text="Current message draft." + ), + "user_prompt": drf_serializers.CharField( + required=False, help_text="User prompt for the AI." 
+ ), + }, + ), + responses={ + 200: OpenApiResponse( + response={ + "type": "object", + "properties": {"message": {"type": "string"}}, + }, + description="Reply message successfully generated.", + ), + 403: OpenApiResponse( + response={"detail": "Permission denied"}, + description="User does not have permission to generate a reply message.", + ), + }, + tags=["threads"], + ) + @drf.decorators.action( + detail=True, + methods=["post"], + url_path="generate-reply-message", + url_name="generate-reply-message", + ) + def generate_reply_message(self, request, pk): # pylint: disable=unused-argument + """Generate a reply message using the AI model based on the thread context and user's prompt.""" + thread = self.get_object() + draft = request.data.get("draft", "") + user_prompt = request.data.get("user_prompt", "") + message = generate_reply_message(draft, thread, user_prompt) + return drf.response.Response({"message": message}, status=status.HTTP_200_OK) + # @extend_schema( # tags=["threads"], # request=inline_serializer( diff --git a/src/backend/core/tests/api/test_config.py b/src/backend/core/tests/api/test_config.py index 87174127..36feb9f5 100644 --- a/src/backend/core/tests/api/test_config.py +++ b/src/backend/core/tests/api/test_config.py @@ -26,6 +26,7 @@ AI_MODEL=None, AI_FEATURE_SUMMARY_ENABLED=False, AI_FEATURE_AUTOLABELS_ENABLED=False, + AI_FEATURE_MESSAGES_GENERATION_ENABLED=False, DRIVE_CONFIG={"base_url": None}, ) @pytest.mark.parametrize("is_authenticated", [False, True]) @@ -49,6 +50,7 @@ def test_api_config(is_authenticated): "AI_ENABLED": False, "AI_FEATURE_SUMMARY_ENABLED": False, "AI_FEATURE_AUTOLABELS_ENABLED": False, + "AI_FEATURE_MESSAGES_GENERATION_ENABLED": False, "SCHEMA_CUSTOM_ATTRIBUTES_USER": {}, "SCHEMA_CUSTOM_ATTRIBUTES_MAILDOMAIN": {}, } diff --git a/src/backend/messages/settings.py b/src/backend/messages/settings.py index 7717dcfd..0265b35c 100755 --- a/src/backend/messages/settings.py +++ b/src/backend/messages/settings.py @@ -557,6 
+557,11 @@ class Base(Configuration): AI_FEATURE_SUMMARY_ENABLED = values.BooleanValue( default=False, environ_name="AI_FEATURE_SUMMARY_ENABLED", environ_prefix=None ) + AI_FEATURE_MESSAGES_GENERATION_ENABLED = values.BooleanValue( + default=False, + environ_name="AI_FEATURE_MESSAGES_GENERATION_ENABLED", + environ_prefix=None, + ) AI_FEATURE_AUTOLABELS_ENABLED = values.BooleanValue( default=False, environ_name="AI_FEATURE_AUTOLABELS_ENABLED", environ_prefix=None diff --git a/src/frontend/src/features/api/gen/models/config_retrieve200.ts b/src/frontend/src/features/api/gen/models/config_retrieve200.ts index 43d242c5..e021a98f 100644 --- a/src/frontend/src/features/api/gen/models/config_retrieve200.ts +++ b/src/frontend/src/features/api/gen/models/config_retrieve200.ts @@ -22,6 +22,7 @@ export type ConfigRetrieve200 = { readonly AI_ENABLED: boolean; readonly AI_FEATURE_SUMMARY_ENABLED: boolean; readonly AI_FEATURE_AUTOLABELS_ENABLED: boolean; + readonly AI_FEATURE_MESSAGES_GENERATION_ENABLED: boolean; /** The URLs of the Drive external service. */ readonly DRIVE?: ConfigRetrieve200DRIVE; readonly SCHEMA_CUSTOM_ATTRIBUTES_USER: ConfigRetrieve200SCHEMACUSTOMATTRIBUTESUSER; diff --git a/src/frontend/src/features/api/gen/models/generate_new_message_request_request.ts b/src/frontend/src/features/api/gen/models/generate_new_message_request_request.ts new file mode 100644 index 00000000..162500d6 --- /dev/null +++ b/src/frontend/src/features/api/gen/models/generate_new_message_request_request.ts @@ -0,0 +1,20 @@ +/** + * Generated by orval v7.10.0 🍺 + * Do not edit manually. + * messages API + * This is the messages API schema. + * OpenAPI spec version: 1.0.0 (v1.0) + */ + +export interface GenerateNewMessageRequestRequest { + /** + * Current message draft. + * @minLength 1 + */ + draft?: string; + /** + * User prompt for the AI. 
+ * @minLength 1 + */ + user_prompt?: string; +} diff --git a/src/frontend/src/features/api/gen/models/generate_reply_message_request_request.ts b/src/frontend/src/features/api/gen/models/generate_reply_message_request_request.ts new file mode 100644 index 00000000..a26e2fa5 --- /dev/null +++ b/src/frontend/src/features/api/gen/models/generate_reply_message_request_request.ts @@ -0,0 +1,20 @@ +/** + * Generated by orval v7.10.0 🍺 + * Do not edit manually. + * messages API + * This is the messages API schema. + * OpenAPI spec version: 1.0.0 (v1.0) + */ + +export interface GenerateReplyMessageRequestRequest { + /** + * Current message draft. + * @minLength 1 + */ + draft?: string; + /** + * User prompt for the AI. + * @minLength 1 + */ + user_prompt?: string; +} diff --git a/src/frontend/src/features/api/gen/models/index.ts b/src/frontend/src/features/api/gen/models/index.ts index 14a88f06..6a010986 100644 --- a/src/frontend/src/features/api/gen/models/index.ts +++ b/src/frontend/src/features/api/gen/models/index.ts @@ -37,6 +37,8 @@ export * from "./flag_create200"; export * from "./flag_create400"; export * from "./flag_create403"; export * from "./flag_enum"; +export * from "./generate_new_message_request_request"; +export * from "./generate_reply_message_request_request"; export * from "./import_file_create202"; export * from "./import_file_request"; export * from "./import_imap_create202"; @@ -97,6 +99,8 @@ export * from "./threads_accesses_destroy_params"; export * from "./threads_accesses_list_params"; export * from "./threads_accesses_partial_update_params"; export * from "./threads_accesses_update_params"; +export * from "./threads_generate_new_message_create200"; +export * from "./threads_generate_reply_message_create200"; export * from "./threads_list_params"; export * from "./threads_refresh_summary_create200"; export * from "./threads_stats_retrieve200"; diff --git a/src/frontend/src/features/api/gen/models/threads_generate_new_message_create200.ts 
b/src/frontend/src/features/api/gen/models/threads_generate_new_message_create200.ts new file mode 100644 index 00000000..8ddb8891 --- /dev/null +++ b/src/frontend/src/features/api/gen/models/threads_generate_new_message_create200.ts @@ -0,0 +1,11 @@ +/** + * Generated by orval v7.10.0 🍺 + * Do not edit manually. + * messages API + * This is the messages API schema. + * OpenAPI spec version: 1.0.0 (v1.0) + */ + +export type ThreadsGenerateNewMessageCreate200 = { + message?: string; +}; diff --git a/src/frontend/src/features/api/gen/models/threads_generate_reply_message_create200.ts b/src/frontend/src/features/api/gen/models/threads_generate_reply_message_create200.ts new file mode 100644 index 00000000..f08fcf8d --- /dev/null +++ b/src/frontend/src/features/api/gen/models/threads_generate_reply_message_create200.ts @@ -0,0 +1,11 @@ +/** + * Generated by orval v7.10.0 🍺 + * Do not edit manually. + * messages API + * This is the messages API schema. + * OpenAPI spec version: 1.0.0 (v1.0) + */ + +export type ThreadsGenerateReplyMessageCreate200 = { + message?: string; +}; diff --git a/src/frontend/src/features/api/gen/threads/threads.ts b/src/frontend/src/features/api/gen/threads/threads.ts index 82bfaa08..368090fd 100644 --- a/src/frontend/src/features/api/gen/threads/threads.ts +++ b/src/frontend/src/features/api/gen/threads/threads.ts @@ -26,8 +26,12 @@ import type { } from "@tanstack/react-query"; import type { + GenerateNewMessageRequestRequest, + GenerateReplyMessageRequestRequest, PaginatedThreadList, Thread, + ThreadsGenerateNewMessageCreate200, + ThreadsGenerateReplyMessageCreate200, ThreadsListParams, ThreadsRefreshSummaryCreate200, ThreadsStatsRetrieve200, @@ -654,6 +658,118 @@ export const useThreadsDestroy = ( return useMutation(mutationOptions, queryClient); }; +/** + * Generate a reply message using the AI model based on the thread context and user's prompt. 
+ */ +export type threadsGenerateReplyMessageCreateResponse200 = { + data: ThreadsGenerateReplyMessageCreate200; + status: 200; +}; + +export type threadsGenerateReplyMessageCreateResponse403 = { + data: unknown; + status: 403; +}; + +export type threadsGenerateReplyMessageCreateResponseComposite = + | threadsGenerateReplyMessageCreateResponse200 + | threadsGenerateReplyMessageCreateResponse403; + +export type threadsGenerateReplyMessageCreateResponse = + threadsGenerateReplyMessageCreateResponseComposite & { + headers: Headers; + }; + +export const getThreadsGenerateReplyMessageCreateUrl = (id: string) => { + return `/api/v1.0/threads/${id}/generate-reply-message/`; +}; + +export const threadsGenerateReplyMessageCreate = async ( + id: string, + generateReplyMessageRequestRequest: GenerateReplyMessageRequestRequest, + options?: RequestInit, +): Promise => { + return fetchAPI( + getThreadsGenerateReplyMessageCreateUrl(id), + { + ...options, + method: "POST", + headers: { "Content-Type": "application/json", ...options?.headers }, + body: JSON.stringify(generateReplyMessageRequestRequest), + }, + ); +}; + +export const getThreadsGenerateReplyMessageCreateMutationOptions = < + TError = unknown, + TContext = unknown, +>(options?: { + mutation?: UseMutationOptions< + Awaited>, + TError, + { id: string; data: GenerateReplyMessageRequestRequest }, + TContext + >; + request?: SecondParameter; +}): UseMutationOptions< + Awaited>, + TError, + { id: string; data: GenerateReplyMessageRequestRequest }, + TContext +> => { + const mutationKey = ["threadsGenerateReplyMessageCreate"]; + const { mutation: mutationOptions, request: requestOptions } = options + ? options.mutation && + "mutationKey" in options.mutation && + options.mutation.mutationKey + ? 
options + : { ...options, mutation: { ...options.mutation, mutationKey } } + : { mutation: { mutationKey }, request: undefined }; + + const mutationFn: MutationFunction< + Awaited>, + { id: string; data: GenerateReplyMessageRequestRequest } + > = (props) => { + const { id, data } = props ?? {}; + + return threadsGenerateReplyMessageCreate(id, data, requestOptions); + }; + + return { mutationFn, ...mutationOptions }; +}; + +export type ThreadsGenerateReplyMessageCreateMutationResult = NonNullable< + Awaited> +>; +export type ThreadsGenerateReplyMessageCreateMutationBody = + GenerateReplyMessageRequestRequest; +export type ThreadsGenerateReplyMessageCreateMutationError = unknown; + +export const useThreadsGenerateReplyMessageCreate = < + TError = unknown, + TContext = unknown, +>( + options?: { + mutation?: UseMutationOptions< + Awaited>, + TError, + { id: string; data: GenerateReplyMessageRequestRequest }, + TContext + >; + request?: SecondParameter; + }, + queryClient?: QueryClient, +): UseMutationResult< + Awaited>, + TError, + { id: string; data: GenerateReplyMessageRequestRequest }, + TContext +> => { + const mutationOptions = + getThreadsGenerateReplyMessageCreateMutationOptions(options); + + return useMutation(mutationOptions, queryClient); +}; /** * Refresh the summary of a thread. */ @@ -952,6 +1068,117 @@ export function useThreadsSummaryRetrieve< return query; } +/** + * Generate a new message using the AI model based on the user's prompt and draft. 
+ */ +export type threadsGenerateNewMessageCreateResponse200 = { + data: ThreadsGenerateNewMessageCreate200; + status: 200; +}; + +export type threadsGenerateNewMessageCreateResponse403 = { + data: unknown; + status: 403; +}; + +export type threadsGenerateNewMessageCreateResponseComposite = + | threadsGenerateNewMessageCreateResponse200 + | threadsGenerateNewMessageCreateResponse403; + +export type threadsGenerateNewMessageCreateResponse = + threadsGenerateNewMessageCreateResponseComposite & { + headers: Headers; + }; + +export const getThreadsGenerateNewMessageCreateUrl = () => { + return `/api/v1.0/threads/generate-new-message/`; +}; + +export const threadsGenerateNewMessageCreate = async ( + generateNewMessageRequestRequest: GenerateNewMessageRequestRequest, + options?: RequestInit, +): Promise => { + return fetchAPI( + getThreadsGenerateNewMessageCreateUrl(), + { + ...options, + method: "POST", + headers: { "Content-Type": "application/json", ...options?.headers }, + body: JSON.stringify(generateNewMessageRequestRequest), + }, + ); +}; + +export const getThreadsGenerateNewMessageCreateMutationOptions = < + TError = unknown, + TContext = unknown, +>(options?: { + mutation?: UseMutationOptions< + Awaited>, + TError, + { data: GenerateNewMessageRequestRequest }, + TContext + >; + request?: SecondParameter; +}): UseMutationOptions< + Awaited>, + TError, + { data: GenerateNewMessageRequestRequest }, + TContext +> => { + const mutationKey = ["threadsGenerateNewMessageCreate"]; + const { mutation: mutationOptions, request: requestOptions } = options + ? options.mutation && + "mutationKey" in options.mutation && + options.mutation.mutationKey + ? options + : { ...options, mutation: { ...options.mutation, mutationKey } } + : { mutation: { mutationKey }, request: undefined }; + + const mutationFn: MutationFunction< + Awaited>, + { data: GenerateNewMessageRequestRequest } + > = (props) => { + const { data } = props ?? 
{}; + + return threadsGenerateNewMessageCreate(data, requestOptions); + }; + + return { mutationFn, ...mutationOptions }; +}; + +export type ThreadsGenerateNewMessageCreateMutationResult = NonNullable< + Awaited> +>; +export type ThreadsGenerateNewMessageCreateMutationBody = + GenerateNewMessageRequestRequest; +export type ThreadsGenerateNewMessageCreateMutationError = unknown; + +export const useThreadsGenerateNewMessageCreate = < + TError = unknown, + TContext = unknown, +>( + options?: { + mutation?: UseMutationOptions< + Awaited>, + TError, + { data: GenerateNewMessageRequestRequest }, + TContext + >; + request?: SecondParameter; + }, + queryClient?: QueryClient, +): UseMutationResult< + Awaited>, + TError, + { data: GenerateNewMessageRequestRequest }, + TContext +> => { + const mutationOptions = + getThreadsGenerateNewMessageCreateMutationOptions(options); + + return useMutation(mutationOptions, queryClient); +}; /** * Get aggregated statistics for threads based on filters. */ diff --git a/src/frontend/src/features/providers/config.tsx b/src/frontend/src/features/providers/config.tsx index fd4b81ac..92c0b2fe 100644 --- a/src/frontend/src/features/providers/config.tsx +++ b/src/frontend/src/features/providers/config.tsx @@ -12,6 +12,7 @@ const DEFAULT_CONFIG: ConfigRetrieve200 = { AI_ENABLED: false, AI_FEATURE_SUMMARY_ENABLED: false, AI_FEATURE_AUTOLABELS_ENABLED: false, + AI_FEATURE_MESSAGES_GENERATION_ENABLED: false, SCHEMA_CUSTOM_ATTRIBUTES_USER: {}, SCHEMA_CUSTOM_ATTRIBUTES_MAILDOMAIN: {}, } diff --git a/src/frontend/src/hooks/use-feature.ts b/src/frontend/src/hooks/use-feature.ts index 6d5ae215..3923a333 100644 --- a/src/frontend/src/hooks/use-feature.ts +++ b/src/frontend/src/hooks/use-feature.ts @@ -4,6 +4,7 @@ export enum FEATURE_KEYS { DRIVE = 'drive', AI_SUMMARY = 'ai_summary', AI_AUTOLABELS = 'ai_autolabels', + AI_MESSAGES_GENERATION = 'ai_messages_generation', } /** @@ -23,6 +24,8 @@ export const useFeatureFlag = (featureKey: FEATURE_KEYS) => { 
return config.AI_ENABLED === true && config.AI_FEATURE_SUMMARY_ENABLED === true; case FEATURE_KEYS.AI_AUTOLABELS: return config.AI_ENABLED === true && config.AI_FEATURE_AUTOLABELS_ENABLED === true; + case FEATURE_KEYS.AI_MESSAGES_GENERATION: + return config.AI_ENABLED === true && config.AI_FEATURE_MESSAGES_GENERATION_ENABLED === true; default: throw new Error(`Unknown feature key: ${featureKey}`); }