from llmware.models import PromptCatalog
from llmware.library import Library
from llmware.retrieval import Query
+ from llmware.configs import LLMWareConfig
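+ # LLMWareConfig is used below (in __main__) to select sqlite as the active datastore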
def prompt_with_sources(model_name, library_name):
@@ -27,6 +28,11 @@ def prompt_with_sources(model_name, library_name):
    prompter = Prompt().load_model(model_name)
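    # the prompter keeps the loaded model and any attached sources as state across the three uses below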
+    # Use #1 - add_source_document - parses the document in memory, filters the text chunks by query, and then
+    # creates a 'source' context to be passed to the model
+
+    print(f"\n #1 - add a source document file directly into a prompt")
+
    sources2 = prompter.add_source_document(ingestion_folder_path, local_file, query="base salary")
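    # only the text chunks matching the "base salary" query are packaged into the source context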
prompt = "What is the base salary amount?"
@@ -35,14 +41,24 @@ def prompt_with_sources(model_name, library_name):
print (f"- Context: { local_file } \n - Prompt: { prompt } \n - LLM Response:\n { response } " )
    prompter.clear_source_materials()
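    # detach the source context so the next use starts with a clean prompt state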
+    # Use #2 - add_source_wikipedia - gets a source document from Wikipedia on Barack Obama,
+    # and creates a source context
+
+    print(f"\n #2 - add a wikipedia article by topic (via api call) into a prompt")
+
prompt = "Was Barack Obama the Prime Minister of Canada?"
wiki_topic = "Barack Obama"
    prompt_instruction = "yes_no"
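    # "yes_no" names a prompt instruction (likely drawn from the PromptCatalog imported above) that steers the model to a yes/no answer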
    sources3 = prompter.add_source_wikipedia(wiki_topic, article_count=1)
    response = prompter.prompt_with_source(prompt=prompt, prompt_name=prompt_instruction)[0]["llm_response"]
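    # prompt_with_source returns a list of response dicts - [0]["llm_response"] reads the text of the first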
- print (f"- Context: { local_file } \n - Prompt: { prompt } \n - LLM Response:\n { response } " )
54
+ print (f"- Context: { wiki_topic } \n - Prompt: { prompt } \n - LLM Response:\n { response } " )
    prompter.clear_source_materials()
+    # Use #3 - add_source_query_results - produces the same results as the first case, but runs a query on the library
+    # and then adds the query results to the prompt, where they are concatenated into a source context
+
+    print(f"\n #3 - run a query on a library and then pass the query results into a prompt")
+
    query_results = Query(library).text_query("base salary")
prompt = "What is the annual rate of the base salary?"
    sources4 = prompter.add_source_query_results(query_results)
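    # the retrieved query results are concatenated into a single source context for the prompt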
@@ -55,9 +71,7 @@ def prompt_with_sources(model_name, library_name):
if __name__ == "__main__":
-    # to use API-based model for this example, set API keys in os.environ variable
-    # e.g., see example: set_model_api_keys.py
-    # e.g., os.environ["USER_MANAGED_OPENAI_API_KEY"] = "<insert-your-api-key>"
+    LLMWareConfig().set_active_db("sqlite")
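+    # sqlite runs in-process, so the example needs no separate database server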
    # this model is a placeholder which will run on a local laptop - swap out for larger, higher-accuracy models
    model_name = "llmware/bling-1b-0.1"