
refactor!: Introduce new storage client system #1194


Open · wants to merge 13 commits into master
6 changes: 3 additions & 3 deletions Makefile
@@ -30,13 +30,13 @@ type-check:
uv run mypy

unit-tests:
uv run pytest --numprocesses=auto --verbose --cov=src/crawlee tests/unit
uv run pytest --numprocesses=auto -vv --cov=src/crawlee tests/unit

unit-tests-cov:
uv run pytest --numprocesses=auto --verbose --cov=src/crawlee --cov-report=html tests/unit
uv run pytest --numprocesses=auto -vv --cov=src/crawlee --cov-report=html tests/unit

e2e-templates-tests $(args):
uv run pytest --numprocesses=$(E2E_TESTS_CONCURRENCY) --verbose tests/e2e/project_template "$(args)"
uv run pytest --numprocesses=$(E2E_TESTS_CONCURRENCY) -vv tests/e2e/project_template "$(args)"

format:
uv run ruff check --fix
15 changes: 7 additions & 8 deletions docs/deployment/code_examples/google/cloud_run_example.py
@@ -5,24 +5,23 @@
import uvicorn
from litestar import Litestar, get

from crawlee import service_locator
from crawlee.crawlers import PlaywrightCrawler, PlaywrightCrawlingContext

# highlight-start
# Disable writing storage data to the file system
configuration = service_locator.get_configuration()
configuration.persist_storage = False
configuration.write_metadata = False
# highlight-end
from crawlee.storage_clients import MemoryStorageClient


@get('/')
async def main() -> str:
"""The crawler entry point that will be called when the HTTP endpoint is accessed."""
# highlight-start
# Disable writing storage data to the file system
storage_client = MemoryStorageClient()
# highlight-end

crawler = PlaywrightCrawler(
headless=True,
max_requests_per_crawl=10,
browser_type='firefox',
storage_client=storage_client,
)

@crawler.router.default_handler
15 changes: 7 additions & 8 deletions docs/deployment/code_examples/google/google_example.py
@@ -6,22 +6,21 @@
import functions_framework
from flask import Request, Response

from crawlee import service_locator
from crawlee.crawlers import (
BeautifulSoupCrawler,
BeautifulSoupCrawlingContext,
)

# highlight-start
# Disable writing storage data to the file system
configuration = service_locator.get_configuration()
configuration.persist_storage = False
configuration.write_metadata = False
# highlight-end
from crawlee.storage_clients import MemoryStorageClient


async def main() -> str:
# highlight-start
# Disable writing storage data to the file system
storage_client = MemoryStorageClient()
# highlight-end

crawler = BeautifulSoupCrawler(
storage_client=storage_client,
max_request_retries=1,
request_handler_timeout=timedelta(seconds=30),
max_requests_per_crawl=10,
@@ -30,7 +30,7 @@ async def request_handler(context: BeautifulSoupCrawlingContext) -> None:
await crawler.run(['https://crawlee.dev'])

# Export the entire dataset to a CSV file.
await crawler.export_data_csv(path='results.csv')
await crawler.export_data(path='results.csv')


if __name__ == '__main__':
@@ -30,7 +30,7 @@ async def request_handler(context: BeautifulSoupCrawlingContext) -> None:
await crawler.run(['https://crawlee.dev'])

# Export the entire dataset to a JSON file.
await crawler.export_data_json(path='results.json')
await crawler.export_data(path='results.json')


if __name__ == '__main__':
2 changes: 1 addition & 1 deletion docs/examples/code_examples/parsel_crawler.py
@@ -40,7 +40,7 @@ async def some_hook(context: BasicCrawlingContext) -> None:
await crawler.run(['https://github.com'])

# Export the entire dataset to a JSON file.
await crawler.export_data_json(path='results.json')
await crawler.export_data(path='results.json')


if __name__ == '__main__':

This file was deleted.

2 changes: 1 addition & 1 deletion docs/guides/code_examples/storages/rq_basic_example.py
@@ -12,7 +12,7 @@ async def main() -> None:
await request_queue.add_request('https://apify.com/')

# Add multiple requests as a batch.
await request_queue.add_requests_batched(
await request_queue.add_requests(
['https://crawlee.dev/', 'https://crawlee.dev/python/']
)

@@ -10,9 +10,7 @@ async def main() -> None:
request_queue = await RequestQueue.open(name='my-request-queue')

# Interact with the request queue directly, e.g. add a batch of requests.
await request_queue.add_requests_batched(
['https://apify.com/', 'https://crawlee.dev/']
)
await request_queue.add_requests(['https://apify.com/', 'https://crawlee.dev/'])

# Create a new crawler (it can be any subclass of BasicCrawler) and pass the request
# list as request manager to it. It will be managed by the crawler.
4 changes: 2 additions & 2 deletions docs/guides/request_loaders.mdx
@@ -52,12 +52,12 @@ class BaseStorage {

class RequestLoader {
<<abstract>>
+ handled_count
+ total_count
+ fetch_next_request()
+ mark_request_as_handled()
+ is_empty()
+ is_finished()
+ get_handled_count()
+ get_total_count()
+ to_tandem()
}

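The class diagram above renames the `handled_count`/`total_count` properties to `get_handled_count()`/`get_total_count()` methods. Below is a minimal, hedged sketch of a concrete loader under that interface; it assumes `RequestList` as the loader, that the renamed accessors are awaitable, and that the crawler accepts the tandem through a `request_manager` parameter (a name not confirmed by this diff).

```python
import asyncio

from crawlee.crawlers import ParselCrawler
from crawlee.request_loaders import RequestList


async def main() -> None:
    # A static, read-only request loader.
    request_list = RequestList(['https://crawlee.dev/', 'https://apify.com/'])

    # The renamed accessors from the diagram (assumed to be awaitable).
    print(await request_list.get_total_count())

    # Combine the loader with a writable request manager and hand it to a crawler.
    crawler = ParselCrawler(request_manager=await request_list.to_tandem())
    await crawler.run()


if __name__ == '__main__':
    asyncio.run(main())
```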
7 changes: 0 additions & 7 deletions docs/guides/storages.mdx
@@ -24,7 +24,6 @@ import KvsWithCrawlerExample from '!!raw-loader!roa-loader!./code_examples/storages/kvs_with_crawler_example.py';
import KvsWithCrawlerExplicitExample from '!!raw-loader!roa-loader!./code_examples/storages/kvs_with_crawler_explicit_example.py';

import CleaningDoNotPurgeExample from '!!raw-loader!roa-loader!./code_examples/storages/cleaning_do_not_purge_example.py';
import CleaningPurgeExplicitlyExample from '!!raw-loader!roa-loader!./code_examples/storages/cleaning_purge_explicitly_example.py';

Crawlee offers multiple storage types for managing and persisting your crawling data. Request-oriented storages, such as the <ApiLink to="class/RequestQueue">`RequestQueue`</ApiLink>, help you store and deduplicate URLs, while result-oriented storages, like <ApiLink to="class/Dataset">`Dataset`</ApiLink> and <ApiLink to="class/KeyValueStore">`KeyValueStore`</ApiLink>, focus on storing and retrieving scraping results. This guide helps you choose the storage type that suits your needs.

@@ -210,12 +209,6 @@ Default storages are purged before the crawler starts, unless explicitly configured otherwise.

If you do not explicitly interact with storages in your code, the purging will occur automatically when the <ApiLink to="class/BasicCrawler#run">`BasicCrawler.run`</ApiLink> method is invoked.

If you need to purge storages earlier, you can call <ApiLink to="class/MemoryStorageClient#purge_on_start">`MemoryStorageClient.purge_on_start`</ApiLink> directly if you are using the default storage client. This method triggers the purging process for the underlying storage implementation you are currently using.

<RunnableCodeBlock className="language-python" language="python">
{CleaningPurgeExplicitlyExample}
</RunnableCodeBlock>

## Conclusion

This guide introduced you to the different storage types available in Crawlee and how to interact with them. You learned how to manage requests and store and retrieve scraping results using the `RequestQueue`, `Dataset`, and `KeyValueStore`. You also discovered how to use helper functions to simplify interactions with these storages. Finally, you learned how to clean up storages before starting a crawler run and how to purge them explicitly. If you have questions or need assistance, feel free to reach out on our [GitHub](https://github.com/apify/crawlee-python) or join our [Discord community](https://discord.com/invite/jyEM2PRvMU). Happy scraping!
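For readers skimming the diff, here is a minimal sketch of the storages and purge behaviour this guide section describes. The `RequestQueue.add_requests`, `Dataset.push_data`, and `KeyValueStore.set_value` calls mirror examples elsewhere in this PR; the `purge_on_start` configuration field is an assumption and may be named differently.

```python
import asyncio

from crawlee import service_locator
from crawlee.configuration import Configuration
from crawlee.storages import Dataset, KeyValueStore, RequestQueue


async def main() -> None:
    # Assumption: keep data from previous runs instead of purging on start.
    service_locator.set_configuration(Configuration(purge_on_start=False))

    # Request-oriented storage: stores and deduplicates URLs to crawl.
    request_queue = await RequestQueue.open()
    await request_queue.add_requests(['https://crawlee.dev/', 'https://apify.com/'])

    # Result-oriented storages: scraped records and arbitrary key-value data.
    dataset = await Dataset.open()
    await dataset.push_data({'url': 'https://crawlee.dev/', 'title': 'Crawlee'})

    kvs = await KeyValueStore.open()
    await kvs.set_value('crawl-state', {'finished': False})


if __name__ == '__main__':
    asyncio.run(main())
```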
8 changes: 3 additions & 5 deletions pyproject.toml
@@ -94,7 +94,7 @@ crawlee = "crawlee._cli:cli"

[dependency-groups]
dev = [
"apify_client", # For e2e tests.
"apify-client", # For e2e tests.
"build~=1.2.2", # For e2e tests.
"mypy~=1.16.0",
"pre-commit~=4.2.0",
@@ -143,9 +143,9 @@ ignore = [
"ISC001", # This rule may cause conflicts when used with the formatter
"FIX", # flake8-fixme
"PLR0911", # Too many return statements
"PLR0912", # Too many branches
"PLR0913", # Too many arguments in function definition
"PLR0915", # Too many statements
"PTH", # flake8-use-pathlib
"PYI034", # `__aenter__` methods in classes like `{name}` usually return `self` at runtime
"PYI036", # The second argument in `__aexit__` should be annotated with `object` or `BaseException | None`
"S102", # Use of `exec` detected
@@ -167,6 +167,7 @@ indent-style = "space"
"F401", # Unused imports
]
"**/{tests}/*" = [
"ASYNC230", # Async functions should not open files with blocking methods like `open`
"D", # Everything from the pydocstyle
"INP001", # File {filename} is part of an implicit namespace package, add an __init__.py
"PLR2004", # Magic value used in comparison, consider replacing {value} with a constant variable
@@ -204,9 +205,6 @@ builtins-ignorelist = ["id"]
[tool.ruff.lint.isort]
known-first-party = ["crawlee"]

[tool.ruff.lint.pylint]
max-branches = 18

[tool.pytest.ini_options]
addopts = "-ra"
asyncio_default_fixture_loop_scope = "function"
2 changes: 1 addition & 1 deletion src/crawlee/_cli.py
@@ -22,7 +22,7 @@
cli = typer.Typer(no_args_is_help=True)

template_directory = importlib.resources.files('crawlee') / 'project_template'
with open(str(template_directory / 'cookiecutter.json')) as f:
with (template_directory / 'cookiecutter.json').open() as f:
cookiecutter_json = json.load(f)

crawler_choices = cookiecutter_json['crawler_type']
1 change: 1 addition & 0 deletions src/crawlee/_consts.py
@@ -1,3 +1,4 @@
from __future__ import annotations

METADATA_FILENAME = '__metadata__.json'
"""The name of the metadata file for storage clients."""
39 changes: 22 additions & 17 deletions src/crawlee/_request.py
@@ -158,7 +158,23 @@ class Request(BaseModel):
```
"""

model_config = ConfigDict(populate_by_name=True)
model_config = ConfigDict(populate_by_name=True, extra='allow')

id: str
"""A unique identifier for the request. Note that this is not used for deduplication, and should not be confused
with `unique_key`."""

unique_key: Annotated[str, Field(alias='uniqueKey')]
"""A unique key identifying the request. Two requests with the same `unique_key` are considered as pointing
to the same URL.

If `unique_key` is not provided, then it is automatically generated by normalizing the URL.
For example, the URL of `HTTP://www.EXAMPLE.com/something/` will produce the `unique_key`
of `http://www.example.com/something`.

Pass an arbitrary non-empty text value to the `unique_key` property to override the default behavior
and specify which URLs shall be considered equal.
"""

url: Annotated[str, BeforeValidator(validate_http_url), Field()]
"""The URL of the web page to crawl. Must be a valid HTTP or HTTPS URL, and may include query parameters
@@ -207,22 +223,6 @@ class Request(BaseModel):
handled_at: Annotated[datetime | None, Field(alias='handledAt')] = None
"""Timestamp when the request was handled."""

unique_key: Annotated[str, Field(alias='uniqueKey')]
"""A unique key identifying the request. Two requests with the same `unique_key` are considered as pointing
to the same URL.

If `unique_key` is not provided, then it is automatically generated by normalizing the URL.
For example, the URL of `HTTP://www.EXAMPLE.com/something/` will produce the `unique_key`
of `http://www.example.com/something`.

Pass an arbitrary non-empty text value to the `unique_key` property
to override the default behavior and specify which URLs shall be considered equal.
"""

id: str
"""A unique identifier for the request. Note that this is not used for deduplication, and should not be confused
with `unique_key`."""

@classmethod
def from_url(
cls,
@@ -398,6 +398,11 @@ def forefront(self) -> bool:
def forefront(self, new_value: bool) -> None:
self.crawlee_data.forefront = new_value

@property
def was_already_handled(self) -> bool:
"""Indicates whether the request was handled."""
return self.handled_at is not None


class RequestWithLock(Request):
"""A crawling request with information about locks."""
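To tie the reordered `Request` fields together, a short hedged sketch of the documented behaviour follows; the normalization result is taken from the `unique_key` docstring above, and the printed values are expectations rather than verified output.

```python
from crawlee import Request

# unique_key is derived by normalizing the URL unless overridden explicitly.
request = Request.from_url('HTTP://www.EXAMPLE.com/something/')
print(request.unique_key)  # expected: 'http://www.example.com/something'

# Override the default to make otherwise distinct URLs count as one request.
custom = Request.from_url('https://example.com/search?page=2', unique_key='search')

# The new convenience property simply checks whether handled_at is set.
print(custom.was_already_handled)  # expected: False until a crawler handles it
```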
14 changes: 3 additions & 11 deletions src/crawlee/_service_locator.py
@@ -3,8 +3,8 @@
from crawlee._utils.docs import docs_group
from crawlee.configuration import Configuration
from crawlee.errors import ServiceConflictError
from crawlee.events import EventManager
from crawlee.storage_clients import StorageClient
from crawlee.events import EventManager, LocalEventManager
from crawlee.storage_clients import FileSystemStorageClient, StorageClient


@docs_group('Classes')
@@ -49,8 +49,6 @@ def set_configuration(self, configuration: Configuration) -> None:
def get_event_manager(self) -> EventManager:
"""Get the event manager."""
if self._event_manager is None:
from crawlee.events import LocalEventManager

self._event_manager = (
LocalEventManager().from_config(config=self._configuration)
if self._configuration
@@ -77,13 +75,7 @@ def set_event_manager(self, event_manager: EventManager) -> None:
def get_storage_client(self) -> StorageClient:
"""Get the storage client."""
if self._storage_client is None:
from crawlee.storage_clients import MemoryStorageClient

self._storage_client = (
MemoryStorageClient.from_config(config=self._configuration)
if self._configuration
else MemoryStorageClient.from_config()
)
self._storage_client = FileSystemStorageClient()

self._storage_client_was_retrieved = True
return self._storage_client
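Because the lazily created default changes from `MemoryStorageClient` to `FileSystemStorageClient` here, code that relied on purely in-memory storage would need to opt in explicitly. A hedged sketch, assuming `service_locator` exposes a `set_storage_client` setter symmetric to the `set_configuration` and `set_event_manager` setters visible in this diff:

```python
from crawlee import service_locator
from crawlee.storage_clients import MemoryStorageClient

# Assumption: a set_storage_client setter exists alongside set_configuration
# and set_event_manager. Call it before any storage or crawler is created,
# otherwise get_storage_client() falls back to FileSystemStorageClient().
service_locator.set_storage_client(MemoryStorageClient())
```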