diff --git a/chains/employee_match/chain.py b/chains/employee_match/chain.py index f5a8098..5a919c8 100644 --- a/chains/employee_match/chain.py +++ b/chains/employee_match/chain.py @@ -10,28 +10,25 @@ from database.database import database_langchain_get_schema, execute_langchain_query analyse_requirement_chain = ( - RunnablePassthrough.assign() - | REQUEST_ANALYSER_PROMPT - | llm - | StrOutputParser() + RunnablePassthrough.assign() | REQUEST_ANALYSER_PROMPT | llm | StrOutputParser() ) sql_builder_chain = ( - RunnablePassthrough - .assign(employee_data=analyse_requirement_chain) - .assign(schema=database_langchain_get_schema) - | QUERY_BUILDER_PROMPT - | llm - | StrOutputParser() + RunnablePassthrough.assign(employee_data=analyse_requirement_chain).assign( + schema=database_langchain_get_schema + ) + | QUERY_BUILDER_PROMPT + | llm + | StrOutputParser() ) html_builder_chain = ( - RunnablePassthrough - .assign(sql_query=sql_builder_chain) - .assign( - sql_query_result=lambda chain_variables: execute_langchain_query(chain_variables['sql_query'], chain_variables), - ) - | HTML_RESPONSE_BUILDER_PROMPT - | llm - | StrOutputParser() + RunnablePassthrough.assign(sql_query=sql_builder_chain).assign( + sql_query_result=lambda chain_variables: execute_langchain_query( + chain_variables["sql_query"], chain_variables + ), + ) + | HTML_RESPONSE_BUILDER_PROMPT + | llm + | StrOutputParser() ) diff --git a/chains/employee_match/prompts.py b/chains/employee_match/prompts.py index 81f0f51..8ec8445 100644 --- a/chains/employee_match/prompts.py +++ b/chains/employee_match/prompts.py @@ -5,7 +5,7 @@ 2. **Commercial Level**: For example, Junior, Middle, Senior, Lead. 3. **Sales campaign**: Programming languages or software platforms required to perform the employee's duties (e.g., Python, JavaScript, SQL, Hybris, Liferay, Alfresco, etc.). 4. **Other Skills**: Skills that may be beneficial but are not essential (e.g., Docker, Kubernetes, Agile, React, Vue, etc.). -5. 
**English Level**: Determine the level of English proficiency (e.g., Beginner, Intermediate, Upper IntermediateAdvanced, Fluent). +5. **English Level**: Determine the level of English proficiency (e.g., Beginner, Intermediate, Upper-Intermediate, Advanced, Fluent). If any of the fields are not specified in the text, do not include them in the output. diff --git a/chains/employee_match/prompts_configuration.py b/chains/employee_match/prompts_configuration.py index 3016698..6f5c443 100644 --- a/chains/employee_match/prompts_configuration.py +++ b/chains/employee_match/prompts_configuration.py @@ -5,21 +5,39 @@ REQUEST_ANALYSER_PROMPT = ChatPromptTemplate.from_messages( [ - (LangChainConstants.SYSTEM_PROMPT, skills_extractor_prompt.REQUEST_ANALYSER_SYSTEM_PROMPT), - (LangChainConstants.HUMAN_PROMPT, skills_extractor_prompt.REQUEST_ANALYSER_HUMAN_PROMPT), + ( + LangChainConstants.SYSTEM_PROMPT, + skills_extractor_prompt.REQUEST_ANALYSER_SYSTEM_PROMPT, + ), + ( + LangChainConstants.HUMAN_PROMPT, + skills_extractor_prompt.REQUEST_ANALYSER_HUMAN_PROMPT, + ), ] ) QUERY_BUILDER_PROMPT = ChatPromptTemplate.from_messages( [ - (LangChainConstants.SYSTEM_PROMPT, skills_extractor_prompt.QUERY_BUILDER_SYSTEM_PROMPT), - (LangChainConstants.HUMAN_PROMPT, skills_extractor_prompt.QUERY_BUILDER_HUMAN_PROMPT), + ( + LangChainConstants.SYSTEM_PROMPT, + skills_extractor_prompt.QUERY_BUILDER_SYSTEM_PROMPT, + ), + ( + LangChainConstants.HUMAN_PROMPT, + skills_extractor_prompt.QUERY_BUILDER_HUMAN_PROMPT, + ), ] ) HTML_RESPONSE_BUILDER_PROMPT = ChatPromptTemplate.from_messages( [ - (LangChainConstants.SYSTEM_PROMPT, skills_extractor_prompt.HTML_RESPONSE_BUILDER_SYSTEM_PROMPT), - (LangChainConstants.HUMAN_PROMPT, skills_extractor_prompt.HTML_RESPONSE_BUILDER_HUMAN_PROMPT), + ( + LangChainConstants.SYSTEM_PROMPT, + skills_extractor_prompt.HTML_RESPONSE_BUILDER_SYSTEM_PROMPT, + ), + ( + LangChainConstants.HUMAN_PROMPT, + skills_extractor_prompt.HTML_RESPONSE_BUILDER_HUMAN_PROMPT, + ), ] ) diff 
--git a/chains/employee_match/v2/__init__.py b/chains/employee_match/v2/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/chains/employee_match/v2/chains.py b/chains/employee_match/v2/chains.py new file mode 100644 index 0000000..fdb56b8 --- /dev/null +++ b/chains/employee_match/v2/chains.py @@ -0,0 +1,27 @@ +from database.database import ( + database_langchain_get_schema, + execute_langchain_query, +) +from langchain_core.runnables import RunnablePassthrough +from langchain_core.output_parsers import StrOutputParser +from configuration.llm.llama import llm +from chains.employee_match.v2.prompts_configuration import prompt, prompt_response + +sql_chain = ( + RunnablePassthrough.assign(schema=database_langchain_get_schema) + | prompt + | llm.bind(stop=["SQL Result:"]) + | StrOutputParser() +) + +full_chain = ( + RunnablePassthrough.assign(query=sql_chain).assign( + schema=database_langchain_get_schema, + response=lambda variables: execute_langchain_query( + variables["query"], variables + ), + ) + | prompt_response + | llm + | StrOutputParser() +) diff --git a/chains/employee_match/v2/prompts.py b/chains/employee_match/v2/prompts.py new file mode 100644 index 0000000..3580e9b --- /dev/null +++ b/chains/employee_match/v2/prompts.py @@ -0,0 +1,75 @@ +from common.enums import EmployeeEnglishLevel, EmployeeLevel + + +SYSTEM_QUESTION = """ +Given an input question, convert it to a SQL query compatible with SQLite. + +Decompose the question into SQL query requirements and combine them into a single SQL query by specific columns. + +### Query Requirements: +1. **Spelling and Splitting**: + - Correct spelling mistakes in the question. + - Split all words by spaces, commas, underscores, and other common symbols. + +2. **Matching and Case Sensitivity**: + - Use the `LIKE` operator to find the best match with the table schema (e.g., "upper intermediate" -> "%upper%intermediate%"). 
+ - Ignore case and special characters using the `LOWER()` function (e.g., "Upper Intermediate" -> "upper intermediate"). + - Convert all `VARCHAR` columns to lower case using the `LOWER()` function (e.g., `c.level` -> `LOWER(c.level)`). + +3. **English Level Formatting**: + - Add underscores between words for English levels, but not at the start or end of the search string (e.g., "upper intermediate" -> "upper_intermediate"). + +4. **Grouping Criteria**: + - Group criteria with the same column name in brackets using the `OR` operator inside brackets. + - Use the `AND` operator between groups, starting from the `WHERE` clause. + - Example: `WHERE (LOWER(e.english_level) LIKE '%intermediate%' OR LOWER(e.english_level) LIKE '%upper_intermediate%') AND (LOWER(e.level) LIKE '%senior%' OR LOWER(e.level) LIKE '%middle%') AND LOWER(e.sales_campaign) LIKE '%hybris%' AND LOWER(e.sales_campaign) LIKE '%java%'`. + +5. **Column Filters**: + - Group every column filter with brackets. + +6. **Specific Column Values**: + - English level (`english_level`) can only be one of the values: {english_level}. + - Employee position (`position`) can be converted as follows: "full stack" -> "FS", "back end" -> "BE", "front end" -> "FE". + - Seniority level (`level`) can only be one of the values: {levels}. + +7. **Exclusions**: + - Do not use the `name` column in the query. + - Do not use tables other than `employees` in the query. + +8. **Optional Criteria**: + - Exclude criteria from the query if they are not presented or cannot be identified: + - Level + - English level + - Position + - Sales campaign + - Other skills + - Employee position + +9. **Position Relationships**: + - If the employee position is "BE", also add "FS" and vice versa (e.g., if the employee is "full stack" (FS), also add "back end" (BE) and vice versa). +Return only the SQL query without any explanations. No pre-amble. 
+""".format( english_level=[level.value for level in EmployeeEnglishLevel], levels=[level.value for level in EmployeeLevel], ) + + +SYSTEM_RESPONSE = """ +Given an input question and SQL response, convert list of employees to human-readable format. + +Requirements: +- Important: Do not use jinja2 or any other template engine. Use only string formatting. +- If no results are found, return "No results found for requested criteria". +- Use a header for each employee with the name of the employee and the name of the team. +- Show employee information in human-readable format where each component is from the new line with format : . +- Each column should be on a new line. +- Wrap all response in HTML tags. +- Wrap each employee in a div tag with class "employee". +- Wrap the columns for each employee in a table inside the employee div +- Do not add columns: last_interview, attendance_link, team_id, user_id, id +- Do not use spaces in the response, only if they are in source data and empty line between employee. +- Sort records by the best match with the question (best match on the top). +- Important: Add empty row between employees! + +No preamble.
+""" diff --git a/chains/employee_match/v2/prompts_configuration.py b/chains/employee_match/v2/prompts_configuration.py new file mode 100644 index 0000000..700a260 --- /dev/null +++ b/chains/employee_match/v2/prompts_configuration.py @@ -0,0 +1,18 @@ +from langchain_core.prompts import ChatPromptTemplate +from chains.employee_match.v2.prompts import SYSTEM_QUESTION, SYSTEM_RESPONSE +from chains.employee_match.v2.templates import question_template, response_template + +prompt = ChatPromptTemplate.from_messages( + [ + ("system", SYSTEM_QUESTION), + ("human", question_template), + ] +) + + +prompt_response = ChatPromptTemplate.from_messages( + [ + ("system", SYSTEM_RESPONSE), + ("human", response_template), + ] +) diff --git a/chains/employee_match/v2/templates.py b/chains/employee_match/v2/templates.py new file mode 100644 index 0000000..166c6ea --- /dev/null +++ b/chains/employee_match/v2/templates.py @@ -0,0 +1,15 @@ +question_template = """ +Based on the table schema below, write a SQL query that would answer the user's question: +{schema} + +Question: {question} +SQL Query:""" + + +response_template = """ +Based on the table schema below, question, sql query, and sql response, write a natural language response: {schema}. +Convert whole list of records + +Question: {question} +SQL Query: {query} +SQL Response: {response}""" diff --git a/common/constants/llm.py b/common/constants/llm.py index 953a566..5308a97 100644 --- a/common/constants/llm.py +++ b/common/constants/llm.py @@ -11,7 +11,8 @@ class LLMConstants: MAX_TOKENS (ClassVar[int]): The maximum number of tokens allowed for a single request. A value of -1 indicates no limit. TEMPERATURE (ClassVar[int]): The temperature setting for the models, controlling randomness in output generation. 
""" + MODEL_NAME_LLAMA: ClassVar[str] = "llama3" MODEL_NAME_GPT: ClassVar[str] = "gpt-3.5-turbo-instruct" - MAX_TOKENS: ClassVar[int] = -1 + MAX_TOKENS: ClassVar[int] = 150 TEMPERATURE: ClassVar[int] = 0.1 diff --git a/enums.py b/common/enums.py similarity index 100% rename from enums.py rename to common/enums.py diff --git a/configuration/llm/llama.py b/configuration/llm/llama.py index 96de97a..5a5b59a 100644 --- a/configuration/llm/llama.py +++ b/configuration/llm/llama.py @@ -1,9 +1,12 @@ +import os from langchain_ollama import ChatOllama from common.constants.llm import LLMConstants +OLLAMA_HOST = os.getenv("OLLAMA_HOST", "") + llm = ChatOllama( model=LLMConstants.MODEL_NAME_LLAMA, temperature=LLMConstants.TEMPERATURE, - max_tokens=LLMConstants.MAX_TOKENS, -) \ No newline at end of file + base_url=OLLAMA_HOST, +) diff --git a/database/database.py b/database/database.py index fa5f938..44c2d60 100644 --- a/database/database.py +++ b/database/database.py @@ -17,11 +17,10 @@ DATABASE_URL, sample_rows_in_table_info=DatabaseConstants.SAMPLE_ROWS_IN_TABLE_INFO, ) -database_langchain_table_info = database_langchain.get_table_info() def database_langchain_get_schema(_): - return database_langchain_table_info + return database_langchain.get_table_info() def execute_langchain_query(query: str, chain_variables: dict): @@ -30,7 +29,7 @@ def execute_langchain_query(query: str, chain_variables: dict): query_result = database_langchain.run(query, include_columns=True) print(f"Query result: {query_result}") - if len(query_result) == 0: + if not query_result: return [] return query_result diff --git a/main.py b/main.py index 82b23fe..0ad5014 100644 --- a/main.py +++ b/main.py @@ -5,27 +5,19 @@ load_dotenv() from fastapi import FastAPI -from fastapi.responses import HTMLResponse, JSONResponse + from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates -from langchain_community.utilities.sql_database import SQLDatabase -from 
langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import RunnablePassthrough -from langchain_ollama import ChatOllama -from langchain_openai import OpenAI -from pydantic import BaseModel -from chains.employee_match import chain as skills_extractor_chain -from common.constants.database import DatabaseConstants -from common.constants.llm import LLMConstants -from database.database import DATABASE_URL + from routes.employees import employee_router from routes.faker import fake_router from routes.recourses import resource_router from routes.teams import team_router from routes.upload import upload_router from routes.users import user_router +from routes.chat import chat_router +from routes.chat_v2 import chat_router_v2 app = FastAPI() app.include_router(user_router) @@ -34,161 +26,12 @@ app.include_router(team_router) app.include_router(fake_router) app.include_router(upload_router) +app.include_router(chat_router) +app.include_router(chat_router_v2) templates = Jinja2Templates(directory="templates") app.mount("/static", StaticFiles(directory="static"), name="static") -question_template = """ -Based on the table schema below, write a SQL query that would answer the user's question: -{schema} - -Question: {question} -SQL Query:""" - -SYSTEM_QUESTION = """ -Given an input question, convert it to a SQL query compatible with SQLite. - -Decompose the question into SQL query requirements and combine them into a single SQL query by specific columns. - -Programming languages can be only in sales campaign. Other languager can't be in sales campaign. -Libraries and other skills without programming languages can be only in other_skills column. -If no english level specified in the question, it means that the user is looking for any english level. 
- -Information about employee position can be converted by the rules "full stack" = "FS", "back end" = "BE", "front-end" = "FE" - -Query requirements: -- Correct spelling mistakes in question. -- Split all words by space, coma or underscore and other classic symbols in the question and try to find the best match with the table schema. For example, "upper intermediate" needs to be split into "upper" and "intermediate". -- Use like operator to find the best match with the table schema. For example, "upper intermediate" needs to be converted to "%upper%intermediate%". -- Use ignore case and ignore special characters to find the best match with function lower(). For example "Upper Intermediate" needs to be converted to "upper intermediate". -- Use lower() function to convert all varchar columns to lower case. For example c.level needs to be converted to lower(c.level). -- Add underscore only between words for english level not in the start of the search string and not in the end of the search string. For example "upper intermediate" needs to be converted to "upper_intermediate". -- Group criteria with same column name in brackets with "OR" operator inside brackets. Use "AND" operator between groups. Required to start grouping from Where clause. For example WHERE (lower(e.english_level) LIKE '%intermediate%' OR lower(e.english_level) LIKE '%upper_intermediate%') AND (lower(e.level) LIKE '%senior%' OR lower(e.level) LIKE '%middle%') AND lower(e.sales_campaign) LIKE '%hybris%' AND lower(e.sales_campaign) LIKE '%java%'; -- IMPORTANT: Group every column filter with brackets! -- English level (column "english_level") can be only one and from the values: "beginner", "intermediate", "upper intermediate", "advanced", "fluent". -- Information about employee position (column "position") can be converted by the rules "full stack" = "FS", "back end" = "BE", "front-end" = "FE". 
-- Seniority level (column "level") can be only one and from the values: "Intern", "Junior", "Middle", "Senior", "Lead", "Principal", "Architect", "Manager", "Director", "Vice President", "Chief Technology Officer", "Consultant". -- Do not use column "name" in the query. -- Do not use tables except "employees" in the query. - -If level is not presented or you can't identify it - exclude it from the query. -If english level is not presented or you can't identify it - exclude it from the query. -If position is not presented or you can't identify it - exclude it from the query. -If sales campaign is not presented or you can't identify it - exclude it from the query. -If other skills is not presented or you can't identify it - exclude it from the query. -If employee position is not presented or you can't identify it - exclude it from the query. -If employee position is "BE" also add "FS" and wise versa. For example if employee is "full stack" (FS) also add "back end" (BE) and wise versa. -Do not use full names for employee position, only short names. - - -Return only the SQL query without any explanations. No pre-amble. -""" -prompt = ChatPromptTemplate.from_messages( - [ - ("system", SYSTEM_QUESTION), - ("human", question_template), - ] -) - -response_template = """ -Based on the table schema below, question, sql query, and sql response, write a natural language response: {schema}. -Convert whole list of records - -Question: {question} -SQL Query: {query} -SQL Response: {response}""" - -SYSTEM_RESPONSE = """ -Given an input question and SQL response, convert list of employees to human-readable format. - -Requirements: -- Important: Do not use jinja2 or any other templating engine. Use only string formatting. -- If no results are found, return "No results found for requested criteria". -- User header for each employee with the name of the employee and the name of the team. 
-- Show employee information in human-readable format where each component is from the new line with format : . -- Each column should be on a new line. -- Wrap all response in HTML tags. -- Wrap each employees in a div tag with class "employee". -- Wrap each columns for each employee in table inside employee div -- Do not add columns: last_interview, attendance_link, team_id, user_id, id -- Do not use spaces in the response, only if they are in source data and empty line between employee. -- Sort records by the best match with the question (best match on the top). -- Important: Add empty row between employees! - -No preamble. -""" - -prompt_response = ChatPromptTemplate.from_messages( - [ - ("system", SYSTEM_RESPONSE), - ("human", response_template), - ] -) - -db_lc = SQLDatabase.from_uri(DATABASE_URL, sample_rows_in_table_info=DatabaseConstants.SAMPLE_ROWS_IN_TABLE_INFO) -table_info = db_lc.get_table_info() - - -def get_schema(_): - return table_info - - -def run_query(variables: dict): - query = variables["query"] - print(f"Executing query: {query}") - query_result = db_lc.run(query, include_columns=True) - - print(f"Query result: {query_result}") - - return query_result - - -llm = ChatOllama( - model=LLMConstants.MODEL_NAME_LLAMA, - temperature=LLMConstants.TEMPERATURE, - max_tokens=LLMConstants.MAX_TOKENS, -) -llm_openai = OpenAI( - model_name=LLMConstants.MODEL_NAME_GPT, - max_tokens=LLMConstants.MAX_TOKENS, - temperature=LLMConstants.TEMPERATURE, -) - -sql_chain = ( - RunnablePassthrough.assign(schema=get_schema) - | prompt - | llm.bind(stop=["SQL Result:"]) - | StrOutputParser() -) - -full_chain = ( - RunnablePassthrough.assign(query=sql_chain).assign( - schema=get_schema, - response=lambda variables: run_query(variables), - ) - | prompt_response - | llm - | StrOutputParser() -) - - -class ChatRequest(BaseModel): - message: str - - -@app.get("/", response_class=HTMLResponse) -async def get_chat_page(): - with open("templates/chat.html") as f: - return 
HTMLResponse(content=f.read(), status_code=200) - - -@app.post("/chat", response_class=JSONResponse) -async def chat(request: ChatRequest): - results: str = skills_extractor_chain.html_builder_chain.invoke({"question": request.message}) - # results = full_chain.invoke({"question": request.message}) - - return JSONResponse(content={"reply": results}) - if __name__ == "__main__": uvicorn.run(app) diff --git a/models/models.py b/models/models.py index 05f06cd..8030f00 100644 --- a/models/models.py +++ b/models/models.py @@ -4,7 +4,7 @@ from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import Mapped, mapped_column -from enums import EmployeeLevel, EmployeeEnglishLevel, Position +from common.enums import EmployeeLevel, EmployeeEnglishLevel, Position Base = declarative_base() diff --git a/routes/chat.py b/routes/chat.py new file mode 100644 index 0000000..80a9ff4 --- /dev/null +++ b/routes/chat.py @@ -0,0 +1,25 @@ +from fastapi import APIRouter +from fastapi.responses import HTMLResponse, JSONResponse +from pydantic import BaseModel +from chains.employee_match import chain as skills_extractor_chain + +chat_router = APIRouter() + + +class ChatRequest(BaseModel): + message: str + + +@chat_router.get("/", response_class=HTMLResponse) +async def get_chat_page(): + with open("templates/chat.html") as f: + return HTMLResponse(content=f.read(), status_code=200) + + +@chat_router.post("/chat", response_class=JSONResponse) +async def chat(request: ChatRequest): + results: str = skills_extractor_chain.html_builder_chain.invoke( + {"question": request.message} + ) + + return JSONResponse(content={"reply": results}) diff --git a/routes/chat_v2.py b/routes/chat_v2.py new file mode 100644 index 0000000..60a3984 --- /dev/null +++ b/routes/chat_v2.py @@ -0,0 +1,24 @@ +from fastapi import APIRouter +from fastapi.responses import HTMLResponse, JSONResponse +from pydantic import BaseModel +from chains.employee_match.v2.chains import full_chain + + +chat_router_v2 
= APIRouter() + + +@chat_router_v2.get("/v2", response_class=HTMLResponse) +async def get_chat_page(): + with open("templates/chat_v2.html") as f: + return HTMLResponse(content=f.read(), status_code=200) + + +class ChatRequest(BaseModel): + message: str + + +@chat_router_v2.post("/chat-v2", response_class=JSONResponse) +def chat(request: ChatRequest): + results = full_chain.invoke({"question": request.message}) + + return JSONResponse(content={"reply": results}) diff --git a/routes/faker.py b/routes/faker.py index 0d3ebd9..7350ffc 100644 --- a/routes/faker.py +++ b/routes/faker.py @@ -4,7 +4,7 @@ from sqlalchemy.orm import Session -from enums import EmployeeEnglishLevel, EmployeeLevel, Position +from common.enums import EmployeeEnglishLevel, EmployeeLevel, Position from database.database import get_db from models.models import Employee, Skill, Team, User from faker import Faker diff --git a/routes/recourses.py b/routes/recourses.py index 13d4c21..ac15259 100644 --- a/routes/recourses.py +++ b/routes/recourses.py @@ -5,7 +5,7 @@ import schemas from database.database import get_db -from enums import EmployeeEnglishLevel, Position +from common.enums import EmployeeEnglishLevel, Position from models.models import Resource, Employee, Team diff --git a/routes/upload.py b/routes/upload.py index 3b35ad8..abe382b 100644 --- a/routes/upload.py +++ b/routes/upload.py @@ -3,7 +3,7 @@ from sqlalchemy.orm import Session from database.database import get_db -from enums import EmployeeEnglishLevel, EmployeeLevel, Position +from common.enums import EmployeeEnglishLevel, EmployeeLevel, Position from models.models import Employee import pdfplumber import pandas as pd diff --git a/static/chat_v2.js b/static/chat_v2.js new file mode 100644 index 0000000..91a2a1e --- /dev/null +++ b/static/chat_v2.js @@ -0,0 +1,20 @@ +document.getElementById('chat-form').addEventListener('submit', async function (e) { + e.preventDefault(); + const userInput = document.getElementById('user-input').value; 
+ document.getElementById('user-input').value = ''; + + const chatBox = document.getElementById('chat-box'); + chatBox.innerHTML += `
You: ${userInput}
`; + + const response = await fetch('/chat-v2', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ message: userInput }) + }); + + const data = await response.json(); + chatBox.innerHTML += `
robot: ${data.reply}
`; + chatBox.scrollTop = chatBox.scrollHeight; +}); diff --git a/templates/chat_v2.html b/templates/chat_v2.html new file mode 100644 index 0000000..35f46a9 --- /dev/null +++ b/templates/chat_v2.html @@ -0,0 +1,21 @@ + + + + + AI assistant + + + + +
+

Chat with AI

+
+
+ + +
+
+ + + + \ No newline at end of file