
""" This example shows how to build a unique 'Dueling Q&A ChatBot' in which a question-generating model 'chats'
with a question-answering model, using a context passage selected by the user.

    The user selects a context passage, types "Go" in the prompt bar, and the 'dueling' bots take it
    from there ...

    Please note that two models will be downloaded and cached locally on first use - so please expect 1-2 minutes
    for the first run, and then much faster loads on subsequent tries.

    This example uses Streamlit for the UI.  If you are new to using Streamlit, to run this example:

    1.  `pip3 install streamlit`

    2.  to run, go to the command line:  streamlit run "path/to/dueling_chatbot.py"

"""

import streamlit as st
from llmware.models import ModelCatalog
from llmware.gguf_configs import GGUFConfigs

GGUFConfigs().set_config("max_output_tokens", 500)
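# note: the GGUF config above caps generated output at 500 tokens, so that each
# question-and-answer turn stays reasonably short in the chat window - adjust to taste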

if "question_history" not in st.session_state:
    st.session_state["question_history"] = []
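
# the question_history list set up above is used by get_new_question below to avoid
# asking the same question twice in a session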


# test passage pulled from CNBC news story on Tuesday, May 28, 2024
test_passage = ("OpenAI said Tuesday it has established a new committee to make recommendations to the "
                "company’s board about safety and security, weeks after dissolving a team focused on AI safety. "
                "In a blog post, OpenAI said the new committee would be led by CEO Sam Altman as well as "
                "Bret Taylor, the company’s board chair, and board member Nicole Seligman. The announcement "
                "follows the high-profile exit this month of an OpenAI executive focused on safety, "
                "Jan Leike. Leike resigned from OpenAI leveling criticisms that the company had "
                "under-invested in AI safety work and that tensions with OpenAI’s leadership had "
                "reached a breaking point.")


@st.cache_resource
def load_question_model(temperature=0.5):
    """ Loads the Question Generating Model. """
    question_model = ModelCatalog().load_model("slim-q-gen-tiny-tool",
                                               temperature=temperature,
                                               sample=True)
    return question_model


@st.cache_resource
def load_answer_model():
    """ Loads the Answering Model. """
    answer_model = ModelCatalog().load_model("bling-phi-3-gguf", temperature=0.0, sample=False)
    return answer_model
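
# note: the @st.cache_resource decorator on the two loaders above means each model is loaded
# once per Streamlit session and reused across reruns, rather than reloaded on every interaction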


def get_new_question(q_model, question_type, test_passage):

    """ Generates a new question from the passage, skipping any question already asked in this session. """

    new_q = ""
    max_tries = 10
    tries = 0

    while not new_q or new_q in st.session_state["question_history"]:

        response = q_model.function_call(test_passage, params=[question_type], get_logits=False)

        if response:
            if "llm_response" in response:
                if "question" in response["llm_response"]:
                    new_q = response["llm_response"]["question"]
                    if isinstance(new_q, list) and len(new_q) > 0:
                        new_q = new_q[0]
                    if new_q not in st.session_state["question_history"]:
                        st.session_state["question_history"].append(new_q)
                        break

        tries += 1
        if tries >= max_tries:
            break

    return new_q
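
# note (illustrative): given the parsing in get_new_question above, a successful q-gen
# function_call is expected to return a dictionary shaped roughly like:
#   {"llm_response": {"question": ["What did OpenAI announce on Tuesday?"]}}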


def get_new_answer(question, ans_model, test_passage):

    """ Answers the question using the passage as context. """

    response = ans_model.inference(question, add_context=test_passage)
    answer = response["llm_response"]

    return answer
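
# note: add_context passes the selected passage to the answering model as grounding context,
# so the answer is intended to be drawn from the passage rather than general model knowledge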


def get_input_passage(sample_passage_name, custom_passage_text):

    """ Returns the selected sample passage, or the custom passage text if 'None' is selected. """

    if sample_passage_name != "None":

        if sample_passage_name == "OpenAI":

            return ("OpenAI said Tuesday it has established a new committee to make recommendations to the "
                    "company’s board about safety and security, weeks after dissolving a team focused on AI safety. "
                    "In a blog post, OpenAI said the new committee would be led by CEO Sam Altman as well as "
                    "Bret Taylor, the company’s board chair, and board member Nicole Seligman. The announcement "
                    "follows the high-profile exit this month of an OpenAI executive focused on safety, "
                    "Jan Leike. Leike resigned from OpenAI leveling criticisms that the company had "
                    "under-invested in AI safety work and that tensions with OpenAI’s leadership had "
                    "reached a breaking point. The name of the new committee is the AI Safety Committee.")

        elif sample_passage_name == "Apple":

            return ("Apple shares popped 5% to a new record high of around $203 per share on Tuesday, a day "
                    "after the company announced its long-awaited push into artificial intelligence at its annual "
                    "developer conference on Monday. Apple introduced a range of new AI features during the event, "
                    "including an overhaul of its voice assistant Siri, integration with OpenAI’s ChatGPT, "
                    "a range of writing assistance tools and new customizable emojis. The company pitched the "
                    "features as AI for the average person, though users will likely need to upgrade their "
                    "iPhones to access the tools. With Tuesday’s share move, Apple bested its previous record "
                    "from Dec. 14. The company’s developer conference came as a welcome sign for investors who "
                    "have been watching to see how Apple will capitalize on the ongoing AI boom. Analysts from "
                    "Morgan Stanley said Apple’s AI features strongly position the company with “the most "
                    "differentiated consumer digital agent.” Additionally, the analysts believe that the "
                    "features will drive consumers to upgrade their iPhones, which should “accelerate "
                    "device replacement cycles.” They said Apple will still have to deliver when the AI "
                    "features are first available in the fall, but they think the “building blocks are in "
                    "place for a return to growth and more sustained outperformance.”")

        elif sample_passage_name == "Los Angeles Lakers":

            return ("The Lakers have finished better than seventh in the Western Conference standings just once "
                    "in the past 12 seasons (when they won the title in 2019-20). Their franchise player, "
                    "LeBron James, turns 40 in December. They have no salary-cap space unless James were "
                    "to leave in free agency (he has until June 29 to make a decision on his $51.4 million "
                    "player option). They have limited trade assets. They play in a super-competitive conference "
                    "where the teams behind them are upwardly mobile and aggressive and most of the teams in "
                    "front of them are going to continue to be good -- or get even better -- in the "
                    "immediate future. And they have a massive and highly demanding fanbase and are "
                    "under a constant microscope by the national media because they drive massive "
                    "audience engagement across the world. Vogel won a title in 2020. Darvin Ham reached the "
                    "Western Conference finals in 2023. Neither lasted longer than three seasons. No "
                    "Lakers coach since Phil Jackson has lasted more than three seasons. These are some of "
                    "the factors Hurley undoubtedly was weighing before making his choice over the weekend. "
                    "It's hard to even quantify what would be considered a successful season for the Lakers "
                    "in 2024-25 without knowing what changes are made to the roster. Avoiding the play-in tournament, "
                    "frankly, would be a reasonable if challenging goal.")

        elif sample_passage_name == "Buy Home":

            return ("The price for owning a home is rising rapidly and not just the mortgage payments. "
                    "US homeowners are now paying an average of $18,118 a year on property taxes, homeowners’ "
                    "insurance, maintenance, energy and various other expenses linked to owning a home, "
                    "according to a new Bankrate study. That’s nearly the cost to buy a used car and represents "
                    "a 26% increase from four years ago when it cost $14,428 annually to own and maintain a home. "
                    "All of these variable expenses are on top of the fixed cost of a mortgage, including "
                    "property taxes, homeowners insurance, energy costs, internet, cable bills and "
                    "home maintenance. The findings are another reminder of how much more expensive life "
                    "has become since Covid-19. Many Americans would like to buy a home but have been unable to "
                    "because home prices have spiked to record highs and mortgage rates remain elevated. "
                    "The housing market is historically unaffordable. But even the ones fortunate enough to "
                    "have bought a home over the past few years are grappling with sticker shock over the cost "
                    "of maintaining it. The per-month cost of owning and maintaining a home has gone from "
                    "$1,202 a month in 2020 to $1,510 now, Bankrate found.")

        elif sample_passage_name == "Vacation":

            return ("Temperatures are rising. Hotel prices are exploding. And travelers are already behaving badly. "
                    "Welcome to another summer in Europe. From the headlines, things already look chaotic. "
                    "Famous sites are raising their entry fees. Hotel rooms are like gold dust. And the "
                    "dollar has slipped against both the pound and the euro. Oh, and there’s the small matter "
                    "of crowds. “There’s been a substantial increase on last year’s demand,” says Tom "
                    "Jenkins, CEO of the European Tourism Organisation, speaking about US travelers to "
                    "Europe. “2023 saw higher numbers than 2019, and this year we’re comfortably seeing more – "
                    "record volumes of Americans coming to Europe.” Kayla Zeigler agrees. As the owner of "
                    "Destination Europe, she is sending “record numbers” of clients to the continent this year. "
                    "Graham Carter, director of Unforgettable Travel, a tour operator with a 90% US "
                    "client base, says that many guests are finding the idea of Europe prohibitively "
                    "expensive this year. “People are wondering, is Europe worth it?” he says. “It’s "
                    "booking up in advance and prices are quite high. There’s been such a huge demand "
                    "for travel in the past three years, and lots of places are pushing up prices.” "
                    "Is summer in Europe already a washout? According to the experts, that all depends "
                    "on what kind of sacrifices you’re prepared to make. A weak dollar First things first: "
                    "travelers from the US are already at a disadvantage due to a weak dollar. Against "
                    "the euro, $1 was worth around 91 or 92 euro cents as of June 5, at mid-market rates. "
                    "Sure, that’s better than the December 2020-January 2021 five-year low when the "
                    "dollar was hovering around 82 cents. But it’s also down from a year earlier, "
                    "when a dollar was worth about 95 euro cents – and it’s way down from last "
                    "September’s five-year high when it peaked at 1.04 euros, according to currency "
                    "conversion specialists Wise. For those traveling to the UK it’s a similar state of "
                    "affairs. This time last year, $1 netted travelers 80 pence. As of Wednesday, it was "
                    "78p – a fall from the September peak of nearly 83p. The dollar is also down, year on year, "
                    "against 11 more European currencies. From Bosnia to Bulgaria, Denmark to Iceland, "
                    "Poland to Romania and Sweden to Switzerland, travelers changing dollars will be worse off. "
                    "While a few cents to the dollar doesn’t sound much on a single transaction, the small "
                    "drops can make a difference on credit card bills on the return home. A 500 euro hotel "
                    "room equates to $543 at Friday’s mid-market exchange rate, where it would have been "
                    "$480 in September.")

        elif sample_passage_name == "Taylor Swift":

            return ("Taylor Swift stopped her concert in Edinburgh, Scotland, on Friday to help a fan. Swift was "
                    "in the middle of singing her “Midnights” song “Would’ve Could’ve Should’ve” when she noticed "
                    "a fan who was in distress. In a video that went viral on social media, the singer-songwriter "
                    "is seen requesting assistance for the fan. “We need help right in front of me, please, "
                    "right in front of me,” Swift sang while playing her guitar and keeping her eyes locked on "
                    "the fan. “Just gonna keep playing until we notice where it is.” Swift continued strumming "
                    "her guitar while motioning over to the person in need of help.")

        else:
            return custom_passage_text

    else:
        return custom_passage_text


def ask_and_answer_game(source_passage, q_model="slim-q-gen-tiny-tool", number_of_tries=10, question_type="question",
                        temperature=0.5):

    """ Shows a simple two-model game in which a q-gen model generates questions, and a second model
    then answers each generated question. """

    # this is the model that will generate the 'question'
    q_model = ModelCatalog().load_model(q_model, sample=True, temperature=temperature)

    # this will be the model used to 'answer' the question
    answer_model = ModelCatalog().load_model("bling-phi-3-gguf")

    questions = []

    print("\nGenerating a set of questions automatically from the source passage.\n")

    for x in range(0, number_of_tries):

        response = q_model.function_call(source_passage, params=[question_type], get_logits=False)

        if response:
            if "llm_response" in response:
                if "question" in response["llm_response"]:
                    new_q = response["llm_response"]["question"]

                    # only keep new questions
                    if new_q and new_q not in questions:
                        questions.append(new_q)

        print(f"inference - {x} - response: {response}")

    print("\nAnswering the generated questions\n")

    for i, question in enumerate(questions):

        print(f"\nquestion: {i} - {question}")

        # the q-gen model may return the question wrapped in a single-element list
        if isinstance(question, list) and len(question) > 0:
            question = question[0]

        response = answer_model.inference(question, add_context=source_passage)
        print("response: ", response["llm_response"])

    return True
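
# note: ask_and_answer_game is a stand-alone, command-line variant of the game and is not
# invoked by the Streamlit app below - as an illustration, it could be run directly, e.g.:
#
#   ask_and_answer_game(test_passage, q_model="slim-q-gen-tiny-tool", number_of_tries=5)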


def ask_and_answer_dueling_bots_ui_app(input_passage):

    question_model_name = "slim-q-gen-tiny-tool"
    answer_model_name = "bling-phi-3-gguf"

    st.title("Ask and Answer Dueling Bots")
    st.write(f"Asking the questions: {question_model_name}")
    st.write(f"Answering the questions: {answer_model_name}")

    question_model = load_question_model()
    answer_model = load_answer_model()

    with st.sidebar:

        st.write("Today's Subject")
        sample_passage = st.selectbox("sample_passage", ["OpenAI", "Apple", "Los Angeles Lakers", "Buy Home",
                                                         "Taylor Swift", "Vacation", "None"], index=0)

        # custom_passage = st.text_area(label="Subject", value=input_passage, height=100)
        custom_passage = test_passage
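
        # note: to let the user type their own passage, you could uncomment the st.text_area line above,
        # assign its value to custom_passage, and choose "None" in the sample_passage selector -
        # get_input_passage will then fall through to the custom text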

        input_passage = get_input_passage(sample_passage, custom_passage)

        mode = st.selectbox("mode", ["question", "boolean", "multiple choice"], index=0)

        number_of_tries = st.selectbox("tries", [5, 10, 20], index=0)

        st.write(input_passage)

    # initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # accept user input
    prompt = st.chat_input("Say 'Go' and the Bots will Start")

    if prompt:

        for x in range(0, number_of_tries):

            with st.chat_message("user"):

                new_question = get_new_question(question_model, mode, input_passage)
                st.markdown(new_question)

            with st.chat_message("assistant"):

                new_answer = get_new_answer(new_question, answer_model, input_passage)
                st.markdown(new_answer)

            st.session_state.messages.append({"role": "user", "content": new_question})
            st.session_state.messages.append({"role": "assistant", "content": new_answer})

    return 0


if __name__ == "__main__":

    ask_and_answer_dueling_bots_ui_app(test_passage)