forked from IBM/ibm-generative-ai
-
Notifications
You must be signed in to change notification settings - Fork 0
/
self-reflection.py
60 lines (48 loc) · 1.57 KB
/
self-reflection.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
"""Self-reflection example: ask the model a question, have it critique its
own answer with a templated follow-up prompt, and suppress the answer if
the critique says it should not have commented."""
import os
import pathlib
from dotenv import load_dotenv
from genai.model import Credentials, Model
from genai.prompt_pattern import PromptPattern
from genai.schemas import GenerateParams, ModelType

# make sure you have a .env file under genai root with
# GENAI_KEY=<your-genai-key>
load_dotenv()
api_key = os.getenv("GENAI_KEY", None)
# Fail fast with a clear message instead of letting Credentials(None)
# blow up with an obscure error later.
if api_key is None:
    raise SystemExit("GENAI_KEY is not set; add it to your .env file.")
PATH = pathlib.Path(__file__).parent.resolve()

print("\n------------- Example (PromptPatterns) -------------\n")

# Greedy decoding: temperature/top_k/top_p have no effect under greedy,
# but are kept to mirror the configuration of sibling examples.
params = GenerateParams(
    decoding_method="greedy",
    max_new_tokens=20,
    min_new_tokens=1,
    stream=False,
    temperature=0.7,
    top_k=50,
    top_p=1,
)
creds = Credentials(api_key)
model = Model(ModelType.FLAN_UL2, params=params, credentials=creds)

# (1) Prompt: ask the model an opinion question.
prompt = "Is McDonald's or Burger King better?"
print("INPUT>> " + prompt + "\n")
responses = model.generate([prompt])
gen_response = responses[0].generated_text.strip()

# (2) Internal monologue: show the raw answer before self-critique.
print("[Internal monologue]")
print(f"[My answer]: {gen_response}")

# (2.1) Build the self-critique prompt from the YAML template,
# substituting in the original question and the model's answer.
template_path = PATH / "templates" / "self-reflection.yaml"
pt = PromptPattern.from_file(str(template_path))
pt.sub("prompt", prompt)
pt.sub("response", gen_response)
print(f"[Self reflection]: {pt}")

# (2.2) Ask the model to critique its own answer; a one-token reply is
# enough, so clamp the generation length to exactly 1 token.
params.min_new_tokens = 1
params.max_new_tokens = 1
responses = model.generate([pt])
gen_control_ans = responses[0].generated_text
print(f"[Self reflection answer]: {gen_control_ans}" + "\n")

# (3) Self-control flow.
# NOTE(review): assumes the template is phrased so that "yes" means the
# model should NOT have commented — confirm against self-reflection.yaml.
if "yes" in gen_control_ans.lower():
    print("<< Sorry I should not comment.")
else:
    print("<<OUTPUT " + gen_response)