chef.py
import os
import streamlit as st
import logging
from google.cloud import logging as cloud_logging
import vertexai
from vertexai.preview.generative_models import (
    GenerationConfig,
    GenerativeModel,
    HarmBlockThreshold,
    HarmCategory,
    Part,
)
from datetime import (
    date,
    timedelta,
)
# configure logging
logging.basicConfig(level=logging.INFO)
# attach a Cloud Logging handler to the root logger
log_client = cloud_logging.Client()
log_client.setup_logging()
# NOTE: os.environ.get() takes the *name* of an environment variable, not its
# value; the original code looked up variables literally named after the lab's
# project ID and region, which always returns None. Assuming the variables are
# named PROJECT_ID and LOCATION, with the lab's values as fallbacks.
PROJECT_ID = os.environ.get("PROJECT_ID", "qwiklabs-gcp-00-984c16900e33")  # Your Google Cloud Project ID
LOCATION = os.environ.get("LOCATION", "us-central1")  # Your Google Cloud Project Region
vertexai.init(project=PROJECT_ID, location=LOCATION)
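# st.cache_resource keeps the loaded model in memory across Streamlit reruns,
# so it is created once per server process instead of on every interaction.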
@st.cache_resource
def load_models():
    text_model_pro = GenerativeModel("gemini-pro")
    return text_model_pro
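# Streams a response from Gemini and concatenates the chunk texts. All safety
# filters are set to BLOCK_NONE below (suitable for this lab; production code
# would usually keep stricter thresholds), and chunks whose text cannot be
# read are treated as empty.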
def get_gemini_pro_text_response(
    model: GenerativeModel,
    contents: str,
    generation_config: GenerationConfig,
    stream: bool = True,
):
    safety_settings = {
        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
    }
    responses = model.generate_content(
        contents,  # pass the function argument, not the module-level `prompt`
        generation_config=generation_config,
        safety_settings=safety_settings,
        stream=stream,
    )
    final_response = []
    for response in responses:
        try:
            # st.write(response.text)
            final_response.append(response.text)
        except IndexError:
            # st.write(response)
            final_response.append("")
            continue
    return " ".join(final_response)
st.header("AI Chef for your Recipes", divider="blue") #Vertex AI Gemini API
text_model_pro = load_models()
#st.write("Using Gemini Pro - Text only model")
st.subheader("Design your own Recipe") #AI Chef
cuisine = st.selectbox(
    "What cuisine do you desire?",
    ("American", "Chinese", "French", "Indian", "Italian", "Japanese", "Mexican", "Turkish"),
    index=None,
    placeholder="Select your desired cuisine.",
)
dietary_preference = st.selectbox(
    "Do you have any dietary preferences?",
    ("Diabetes", "Gluten free", "Halal", "Keto", "Kosher", "Lactose Intolerance", "Paleo", "Vegan", "Vegetarian", "None"),
    index=None,
    placeholder="Select your desired dietary preference.",
)
allergy = st.text_input(
    "Enter your food allergy: \n\n", key="allergy", value="peanuts"
)
ingredient_1 = st.text_input(
    "Enter your first ingredient: \n\n", key="ingredient_1", value="ahi tuna"
)
ingredient_2 = st.text_input(
    "Enter your second ingredient: \n\n", key="ingredient_2", value="chicken breast"
)
ingredient_3 = st.text_input(
    "Enter your third ingredient: \n\n", key="ingredient_3", value="tofu"
)
# Define the wine options
wine_options = ["Red", "White", "None"]
# Create a radio button for the wine variable
selected_wine = st.radio("Select Wine Type:", wine_options)
# Display the selected wine (optional)
if selected_wine:
    st.write("You selected:", selected_wine)
max_output_tokens = 2048
# Task 2.6
# Modify this prompt with the custom chef prompt.
prompt = f"""I am a Chef. I need to create {cuisine} \n
recipes for customers who want {dietary_preference} meals. \n
However, don't include recipes that use ingredients with the customer's {allergy} allergy. \n
I have {ingredient_1}, \n
{ingredient_2}, \n
and {ingredient_3} \n
in my kitchen and other ingredients. \n
The customer's wine preference is {selected_wine} \n
Please provide some for meal recommendations.
For each recommendation include preparation instructions,
time to prepare
and the recipe title at the begining of the response.
Then include the wine paring for each recommendation.
At the end of the recommendation provide the calories associated with the meal
and the nutritional facts.
"""
config = {
    "temperature": 0.8,
    "max_output_tokens": max_output_tokens,  # use the variable defined above
}
generate_t2t = st.button("Generate my recipes.", key="generate_t2t")
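# Note: `prompt` is a non-empty f-string even before the user picks anything
# (unselected selectboxes render as None inside it), so the effective gate on
# this branch is the button click.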
if generate_t2t and prompt:
    # st.write(prompt)
    with st.spinner("Generating your recipes using Gemini..."):
        first_tab1, first_tab2 = st.tabs(["Recipes", "Prompt"])
        with first_tab1:
            response = get_gemini_pro_text_response(
                text_model_pro,
                prompt,
                generation_config=config,
            )
            if response:
                st.write("Your recipes:")
                st.write(response)
                logging.info(response)
        with first_tab2:
            st.text(prompt)