-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathaiEmail.py
More file actions
36 lines (31 loc) · 1.11 KB
/
aiEmail.py
File metadata and controls
36 lines (31 loc) · 1.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
# Using a local LLM served by Ollama (model set in OLLAMA_MODEL below)
import requests
import json
OLLAMA_URL = "http://localhost:11434/api/generate"
OLLAMA_MODEL = "llama3.2:1b"
def call_ollama(prompt):
    """
    Send a prompt to the local Ollama server and return the generated text.

    Parameters
    ----------
    prompt : str
        The instruction/prompt to send to the model.

    Returns
    -------
    str
        The model's response text with surrounding whitespace stripped,
        or an empty string if the request fails or the reply is not
        valid JSON.
    """
    try:
        response = requests.post(
            OLLAMA_URL,
            json={
                "model": OLLAMA_MODEL,
                "prompt": prompt,
                # Request one complete JSON object rather than a stream
                # of chunks, so a single .json() call suffices.
                "stream": False,
            },
            timeout=120,
        )
        response.raise_for_status()
        # The non-streaming API returns the full text under "response".
        response_data = response.json()
        return response_data.get("response", "").strip()
    except (requests.exceptions.RequestException, ValueError) as e:
        # ValueError covers a malformed JSON body from .json(); keep the
        # best-effort contract of returning "" instead of raising.
        print(f"Error calling Ollama: {e}")
        return ""
def generate_email(subject, recipient, tone="professional", length="medium"):
    """
    Compose an email draft with the local LLM.

    Builds a single instruction prompt from the arguments and delegates
    to call_ollama, returning whatever text the model produces (an empty
    string when the request fails).
    """
    request_text = (
        f"Write a {tone} email to {recipient} about '{subject}'. "
        f"The email should be {length} in length."
    )
    return call_ollama(request_text)