LLMInference.py
import openai
import os
import re

# Read the API key from the environment instead of hard-coding a secret in the source.
openai.api_key = os.environ.get("OPENAI_API_KEY")


def execute_caption_base(best_caption, all_places):
    """Ask the LLM which candidate place best matches the given caption.

    Returns the chosen option number as a string of digits.
    """
    print("Using Caption as Base")
    formatted_descriptions = ", ".join(f"{i + 1}. {desc}" for i, desc in enumerate(all_places))
    prompt_base_caption = (
        "Only one answer. Print the order number of the correct option: "
        "which of the following places could most plausibly contain a scenario such as "
        "{}: - {}. If none, return 1.".format(best_caption, formatted_descriptions)
    )
    print(prompt_base_caption)
    response = openai.Completion.create(
        engine="text-davinci-002",  # The completion model can be changed here
        prompt=prompt_base_caption,
        max_tokens=50,
    )
    generated_text = response.choices[0].text.strip()
    # Keep only the digits of the model's answer (the selected option number).
    number = re.sub('[^0-9]', '', generated_text)
    return number


def execute_scene_base(best_place, all_captions):
    """Ask the LLM which candidate caption best fits the given place.

    Returns the chosen option number as a string of digits.
    """
    print("Using Place as Base")
    formatted_descriptions = ", ".join(f"{i + 1}. {desc}" for i, desc in enumerate(all_captions))
    prompt_base_scene = (
        "Only one answer. Print the order number of the correct option: "
        "which of the following descriptions is most suitable for the place "
        "{}: - {}. If none, return 1.".format(best_place, formatted_descriptions)
    )
    print(prompt_base_scene)
    response = openai.Completion.create(
        engine="text-davinci-002",  # The completion model can be changed here
        prompt=prompt_base_scene,
        max_tokens=50,
    )
    generated_text = response.choices[0].text.strip()
    print(generated_text)
    # Keep only the digits of the model's answer (the selected option number).
    number = re.sub('[^0-9]', '', generated_text)
    return number
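

# A minimal usage sketch, assuming OPENAI_API_KEY is set in the environment.
# The caption, place, and list values below are hypothetical examples, not data
# from this project; they only illustrate how the two helpers are called.
if __name__ == "__main__":
    example_caption = "a group of people eating at a long wooden table"
    example_places = ["restaurant", "library", "parking lot"]
    print("Selected place option:", execute_caption_base(example_caption, example_places))

    example_place = "restaurant"
    example_captions = ["people dining together", "a car parked outside", "students reading books"]
    print("Selected caption option:", execute_scene_base(example_place, example_captions))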