|
| 1 | +"""Example of an application that uses Python Transformers library with Talk Bot APIs.""" |
| 2 | + |
| 3 | +# This line should be on top before any import of the "Transformers" library. |
| 4 | +from nc_py_api.ex_app import persist_transformers_cache # noqa # isort:skip |
import re
from threading import Thread
from typing import Annotated

import requests
from fastapi import BackgroundTasks, Depends, FastAPI, Response
from transformers import pipeline

from nc_py_api import NextcloudApp, talk_bot
from nc_py_api.ex_app import run_app, set_handlers, talk_bot_app
| 15 | + |
| 16 | +APP = FastAPI() |
| 17 | +AI_BOT = talk_bot.TalkBot("/ai_talk_bot", "AI talk bot", "Usage: `@ai What sounds do cats make?`") |
| 18 | +MODEL_NAME = "MBZUAI/LaMini-Flan-T5-77M" |
| 19 | +MODEL_INIT_THREAD = None |
| 20 | + |
| 21 | + |
| 22 | +def ai_talk_bot_process_request(message: talk_bot.TalkBotMessage): |
| 23 | + r = re.search(r"@ai\s(.*)", message.object_content["message"], re.IGNORECASE) |
| 24 | + if r is None: |
| 25 | + return |
| 26 | + model = pipeline("text2text-generation", model="MBZUAI/LaMini-Flan-T5-77M") |
| 27 | + response_text = model(r.group(1), max_length=64, do_sample=True)[0]["generated_text"] |
| 28 | + AI_BOT.send_message(response_text, message) |
| 29 | + |
| 30 | + |
| 31 | +@APP.post("/ai_talk_bot") |
| 32 | +async def ai_talk_bot( |
| 33 | + message: Annotated[talk_bot.TalkBotMessage, Depends(talk_bot_app)], |
| 34 | + background_tasks: BackgroundTasks, |
| 35 | +): |
| 36 | + if message.object_name == "message": |
| 37 | + background_tasks.add_task(ai_talk_bot_process_request, message) |
| 38 | + return requests.Response() |
| 39 | + |
| 40 | + |
| 41 | +def enabled_handler(enabled: bool, nc: NextcloudApp) -> str: |
| 42 | + print(f"enabled={enabled}") |
| 43 | + try: |
| 44 | + AI_BOT.enabled_handler(enabled, nc) |
| 45 | + except Exception as e: |
| 46 | + return str(e) |
| 47 | + return "" |
| 48 | + |
| 49 | + |
| 50 | +def download_models(): |
| 51 | + pipeline("text2text-generation", model=MODEL_NAME) |
| 52 | + |
| 53 | + |
| 54 | +def heartbeat_handler() -> str: |
| 55 | + global MODEL_INIT_THREAD |
| 56 | + print("heartbeat_handler: called") |
| 57 | + if MODEL_INIT_THREAD is None: |
| 58 | + MODEL_INIT_THREAD = Thread(target=download_models) |
| 59 | + MODEL_INIT_THREAD.start() |
| 60 | + print("heartbeat_handler: started initialization thread") |
| 61 | + r = "init" if MODEL_INIT_THREAD.is_alive() else "ok" |
| 62 | + print(f"heartbeat_handler: result={r}") |
| 63 | + return r |
| 64 | + |
| 65 | + |
| 66 | +@APP.on_event("startup") |
| 67 | +def initialization(): |
| 68 | + set_handlers(APP, enabled_handler, heartbeat_handler) |
| 69 | + |
| 70 | + |
| 71 | +if __name__ == "__main__": |
| 72 | + run_app("main:APP", log_level="trace") |
0 commit comments