-
-
Notifications
You must be signed in to change notification settings - Fork 1.6k
/
Copy pathcustom_graph_ollama.py
103 lines (89 loc) · 2.41 KB
/
custom_graph_ollama.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
"""
Example of custom graph using existing nodes
"""
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from scrapegraphai.graphs import BaseGraph
from scrapegraphai.nodes import (
FetchNode,
GenerateAnswerNode,
ParseNode,
RobotsNode,
)
# ************************************************
# Define the configuration for the graph
# ************************************************

# Settings for a local Ollama server exposing the "mistral" model.
_llm_settings = {
    "model": "ollama/mistral",
    "temperature": 0,
    "format": "json",  # Ollama needs the format to be specified explicitly
    # "model_tokens": 2000, # set context length arbitrarily
    "base_url": "http://localhost:11434",
}

graph_config = {
    "llm": _llm_settings,
    "verbose": True,
}
# ************************************************
# Define the graph nodes
# ************************************************

# Bug fix: ChatOpenAI is a pydantic model and does not accept a positional
# dict — the settings mapping must be unpacked into keyword arguments.
llm_model = ChatOpenAI(**graph_config["llm"])
# NOTE(review): OpenAIEmbeddings needs a real OpenAI API key; with a local
# Ollama backend `llm_model.openai_api_key` may be unset — confirm
# OPENAI_API_KEY is available, or swap in an Ollama-backed embedder.
embedder = OpenAIEmbeddings(api_key=llm_model.openai_api_key)
# define the nodes for the graph

# Checks robots.txt for the target URL; force_scraping proceeds regardless.
robot_node = RobotsNode(
    input="url",
    output=["is_scrapable"],
    node_config=dict(llm_model=llm_model, force_scraping=True, verbose=True),
)
# Downloads the page (or reads a local directory) into a document.
fetch_node = FetchNode(
    input="url | local_dir",
    output=["doc"],
    node_config=dict(verbose=True, headless=True),
)
# Splits the fetched document into chunks of at most 4096 tokens.
_parse_settings = {"chunk_size": 4096, "verbose": True}
parse_node = ParseNode(
    input="doc",
    output=["parsed_doc"],
    node_config=_parse_settings,
)
# Produces the final answer from the prompt plus whichever document
# representation is available (relevant chunks, parsed doc, or raw doc).
_answer_settings = {"llm_model": llm_model, "verbose": True}
generate_answer_node = GenerateAnswerNode(
    input="user_prompt & (relevant_chunks | parsed_doc | doc)",
    output=["answer"],
    node_config=_answer_settings,
)
# ************************************************
# Create the graph by defining the connections
# ************************************************

# The nodes form a linear pipeline, so the edges are simply each node
# paired with its successor, and the entry point is the first node.
_pipeline = [robot_node, fetch_node, parse_node, generate_answer_node]
graph = BaseGraph(
    nodes=_pipeline,
    edges=list(zip(_pipeline, _pipeline[1:])),
    entry_point=_pipeline[0],
)
# ************************************************
# Execute the graph
# ************************************************
_initial_state = {"user_prompt": "Describe the content", "url": "https://example.com/"}
result, execution_info = graph.execute(_initial_state)

# Pull the answer out of the final state, with a fallback message.
result = result.get("answer", "No answer found.")
print(result)