
Commit 37ca9c2

Merge pull request #838 from ScrapeGraphAI/pre/beta
Pre/beta
2 parents fde878f + 7f0e1d2 commit 37ca9c2

18 files changed: +588 -333 lines

CHANGELOG.md (+1, -2)

@@ -1,9 +1,8 @@
+## [1.32.0-beta.5](https://github.com/ScrapeGraphAI/Scrapegraph-ai/compare/v1.32.0-beta.4...v1.32.0-beta.5) (2024-12-02)
 ## [1.32.0](https://github.com/ScrapeGraphAI/Scrapegraph-ai/compare/v1.31.1...v1.32.0) (2024-12-02)
 
 
-### Features
 
-* add API integration ([46373af](https://github.com/ScrapeGraphAI/Scrapegraph-ai/commit/46373afe6d8c05ad26039e68190f13d82b20a349))
 
 ## [1.32.0-beta.4](https://github.com/ScrapeGraphAI/Scrapegraph-ai/compare/v1.32.0-beta.3...v1.32.0-beta.4) (2024-12-02)
 
@@ -0,0 +1,93 @@
"""
Example leveraging a state file containing session cookies, which can be
used to authenticate to a website and scrape protected content.
"""

import asyncio
import os
import random

from dotenv import load_dotenv

# import playwright so we can use it to create the state file
from playwright.async_api import async_playwright

from scrapegraphai.graphs import OmniScraperGraph
from scrapegraphai.utils import prettify_exec_info

load_dotenv()

# ************************************************
# Use Playwright outside the graph invocation to log in
# and create the state file
# ************************************************


# note this is just an example and probably won't actually work on
# LinkedIn; the implementation of the login is highly dependent on the website
async def do_login():
    async with async_playwright() as playwright:
        browser = await playwright.chromium.launch(
            timeout=30000,
            headless=False,
            slow_mo=random.uniform(500, 1500),
        )
        page = await browser.new_page()

        # very basic implementation of a login; in reality it may be trickier
        await page.goto("https://www.linkedin.com/login")
        await page.get_by_label("Email or phone").fill("some_bloke@some_domain.com")
        await page.get_by_label("Password").fill("test1234")
        await page.get_by_role("button", name="Sign in").click()
        await page.wait_for_timeout(3000)

        # assuming a successful login, we save the cookies to a file
        await page.context.storage_state(path="./state.json")


async def main():
    await do_login()

    # ************************************************
    # Define the configuration for the graph
    # ************************************************

    openai_api_key = os.getenv("OPENAI_APIKEY")

    graph_config = {
        "llm": {
            "api_key": openai_api_key,
            "model": "openai/gpt-4o",
        },
        "max_images": 10,
        "headless": False,
        # provide the path to the state file
        "storage_state": "./state.json",
    }

    # ************************************************
    # Create the OmniScraperGraph instance and run it
    # ************************************************

    omni_scraper_graph = OmniScraperGraph(
        prompt="List me all the projects with their description.",
        source="https://www.linkedin.com/feed/",
        config=graph_config,
    )

    # the storage_state is used to load the cookies from the state file
    # so we are authenticated and able to scrape protected content
    result = omni_scraper_graph.run()
    print(result)

    # ************************************************
    # Get graph execution info
    # ************************************************

    graph_exec_info = omni_scraper_graph.get_execution_info()
    print(prettify_exec_info(graph_exec_info))


if __name__ == "__main__":
    asyncio.run(main())
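
As a quick sanity check, the state file written by do_login() can be inspected before the graph runs. The snippet below is not part of the commit; it is a minimal sketch that only assumes Playwright's documented storage-state layout (top-level "cookies" and "origins" keys) and the ./state.json path used in the example above.

import json

# Minimal sketch (not from this commit): confirm that ./state.json,
# written by page.context.storage_state(path="./state.json") above,
# actually contains session cookies before running the graph.
with open("./state.json", encoding="utf-8") as f:
    state = json.load(f)

cookies = state.get("cookies", [])
print(f"{len(cookies)} cookies saved")
for cookie in cookies:
    # name and domain are standard fields in Playwright's storage-state format
    print(f"- {cookie['name']} ({cookie['domain']})")

if not cookies:
    print("No cookies found; the login probably failed.")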
File renamed without changes.

pyproject.toml (+2, -1)

@@ -3,7 +3,8 @@ name = "scrapegraphai"
-version = "1.32.0"
+version = "1.32.0b5"
+
scrapegraphai/docloaders/chromium.py (+27, -10)

@@ -8,6 +8,7 @@
 
 logger = get_logger("web-loader")
 
+
 class ChromiumLoader(BaseLoader):
     """Scrapes HTML pages from URLs using a (headless) instance of the
     Chromium web driver with proxy protection.
@@ -33,6 +34,7 @@ def __init__(
         proxy: Optional[Proxy] = None,
         load_state: str = "domcontentloaded",
         requires_js_support: bool = False,
+        storage_state: Optional[str] = None,
         **kwargs: Any,
     ):
         """Initialize the loader with a list of URL paths.
@@ -62,6 +64,7 @@ def __init__(
         self.urls = urls
         self.load_state = load_state
         self.requires_js_support = requires_js_support
+        self.storage_state = storage_state
 
     async def ascrape_undetected_chromedriver(self, url: str) -> str:
         """
@@ -91,7 +94,9 @@ async def ascrape_undetected_chromedriver(self, url: str) -> str:
                 attempt += 1
                 logger.error(f"Attempt {attempt} failed: {e}")
                 if attempt == self.RETRY_LIMIT:
-                    results = f"Error: Network error after {self.RETRY_LIMIT} attempts - {e}"
+                    results = (
+                        f"Error: Network error after {self.RETRY_LIMIT} attempts - {e}"
+                    )
             finally:
                 driver.quit()
 
@@ -113,7 +118,9 @@ async def ascrape_playwright(self, url: str) -> str:
                     browser = await p.chromium.launch(
                         headless=self.headless, proxy=self.proxy, **self.browser_config
                     )
-                    context = await browser.new_context()
+                    context = await browser.new_context(
+                        storage_state=self.storage_state
+                    )
                     await Malenia.apply_stealth(context)
                     page = await context.new_page()
                     await page.goto(url, wait_until="domcontentloaded")
@@ -125,10 +132,12 @@ async def ascrape_playwright(self, url: str) -> str:
                 attempt += 1
                 logger.error(f"Attempt {attempt} failed: {e}")
                 if attempt == self.RETRY_LIMIT:
-                    raise RuntimeError(f"Failed to fetch {url} after {self.RETRY_LIMIT} attempts: {e}")
+                    raise RuntimeError(
+                        f"Failed to fetch {url} after {self.RETRY_LIMIT} attempts: {e}"
+                    )
             finally:
-                if 'browser' in locals():
-                    await browser.close()
+                if "browser" in locals():
+
 
     async def ascrape_with_js_support(self, url: str) -> str:
         """
@@ -138,7 +147,7 @@ async def ascrape_with_js_support(self, url: str) -> str:
             url (str): The URL to scrape.
 
         Returns:
-            str: The fully rendered HTML content after JavaScript execution, 
+            str: The fully rendered HTML content after JavaScript execution,
             or an error message if an exception occurs.
         """
         from playwright.async_api import async_playwright
@@ -153,7 +162,9 @@ async def ascrape_with_js_support(self, url: str) -> str:
                     browser = await p.chromium.launch(
                         headless=self.headless, proxy=self.proxy, **self.browser_config
                     )
-                    context = await browser.new_context()
+                    context = await browser.new_context(
+                        storage_state=self.storage_state
+                    )
                     page = await context.new_page()
                     await page.goto(url, wait_until="networkidle")
                     results = await page.content()
@@ -163,7 +174,9 @@ async def ascrape_with_js_support(self, url: str) -> str:
                 attempt += 1
                 logger.error(f"Attempt {attempt} failed: {e}")
                 if attempt == self.RETRY_LIMIT:
-                    results = f"Error: Network error after {self.RETRY_LIMIT} attempts - {e}"
+                    results = (
+                        f"Error: Network error after {self.RETRY_LIMIT} attempts - {e}"
+                    )
             finally:
                 await browser.close()
 
@@ -180,7 +193,9 @@ def lazy_load(self) -> Iterator[Document]:
             Document: The scraped content encapsulated within a Document object.
         """
         scraping_fn = (
-            self.ascrape_with_js_support if self.requires_js_support else getattr(self, f"ascrape_{self.backend}")
+            self.ascrape_with_js_support
+            if self.requires_js_support
+            else getattr(self, f"ascrape_{self.backend}")
         )
 
         for url in self.urls:
@@ -202,7 +217,9 @@ async def alazy_load(self) -> AsyncIterator[Document]:
             source URL as metadata.
         """
         scraping_fn = (
-            self.ascrape_with_js_support if self.requires_js_support else getattr(self, f"ascrape_{self.backend}")
+            self.ascrape_with_js_support
+            if self.requires_js_support
+            else getattr(self, f"ascrape_{self.backend}")
         )
 
         tasks = [scraping_fn(url) for url in self.urls]
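
To make the new parameter concrete: the hunks above thread storage_state from the ChromiumLoader constructor into browser.new_context(), so cookies captured earlier with Playwright can be reused for authenticated scraping. The snippet below is a minimal usage sketch rather than code from this commit; it assumes the constructor takes the URL list as its first argument and accepts headless as a keyword argument (as the surrounding code suggests), and the URL and state-file path are placeholders.

import asyncio

from scrapegraphai.docloaders.chromium import ChromiumLoader


async def main():
    loader = ChromiumLoader(
        ["https://example.com/protected"],  # placeholder URL behind a login
        headless=True,
        storage_state="./state.json",  # state file produced by Playwright's storage_state()
    )

    # alazy_load() yields Document objects; per the docstring above, the
    # source URL is carried in the document metadata
    async for doc in loader.alazy_load():
        print(doc.metadata["source"], len(doc.page_content))


asyncio.run(main())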
