diff --git a/README.rst b/README.rst index cbb59205..1d52caf3 100644 --- a/README.rst +++ b/README.rst @@ -10,6 +10,8 @@ odoo-module-migrator ==================== +TODO documentar uso con controlador + ``odoo-module-migrator`` is a python3 library that allows you to automatically migrate module code to make it compatible with newer Odoo versions. For example: diff --git a/odoo_module_migrate/ai_migration_helper.py b/odoo_module_migrate/ai_migration_helper.py new file mode 100644 index 00000000..ea6438c7 --- /dev/null +++ b/odoo_module_migrate/ai_migration_helper.py @@ -0,0 +1,113 @@ +import requests +import re +import os +from .log import logger +from typing import List, Optional + + +class AIMigrationHelper: + def __init__(self): + self._session = requests.Session() + self._timeout = int(os.getenv("AI_MIGRATION_TIMEOUT", 30)) + self._webhook_url = os.getenv("AI_SUGGESTION_WEBHOOK") + self.suggestions = {} + + def transform_code_block(self, code_block: str, prompt: str) -> Optional[str]: + if not self._webhook_url: + return None + + payload = {"content": code_block, "prompt": prompt} + + try: + response = self._session.post( + self._webhook_url, + json=payload, + timeout=self._timeout, + headers={"Content-Type": "application/json"}, + ) + + if response.status_code != 200: + logger.error(f"AI agent error {response.status_code}: {response.text}") + return None + + result = response.json() + content = result.get("output", "") + show_suggestion = result.get("show_change", False) + + if not content: + return None + + if show_suggestion: + return content + + except requests.exceptions.Timeout: + logger.error("AI agent timeout") + except requests.exceptions.RequestException as e: + logger.error(f"AI agent connection error: {e}") + except Exception as e: + logger.error(f"AI agent unexpected error: {e}") + return "" + + def apply_ai_transforms( + self, filename: str, extension: str, content: str, ai_transforms: List[tuple] + ): + if not ai_transforms: + return + + for 
extensions, patterns, prompt in ai_transforms: + if not prompt or extension not in extensions: + continue + for pattern in patterns: + matches = list(re.finditer(pattern, content, re.MULTILINE | re.DOTALL)) + if not matches: + continue + for match in reversed(matches): + ( + code_with_context, + line_start, + line_end, + ) = self._get_code_with_context(content, match) + suggestion = self.transform_code_block(code_with_context, prompt) + if ( + suggestion + and (filename, line_start, line_end) not in self.suggestions + ): + self.suggestions[(filename, line_start, line_end)] = suggestion + + def _get_code_with_context(self, content: str, match) -> tuple: + lines = content.split("\n") + match_line = content[: match.start()].count("\n") + + function_start = None + for i in range(match_line, -1, -1): + line = lines[i].strip() + if line.startswith(("def ", "class ", "async def ")): + function_start = i + break + + if function_start is None: + offset = 20 + context_start = max(0, match_line - offset) + context_end = min(len(lines) - 1, match_line + offset) + return ( + "\n".join(lines[context_start : context_end + 1]), + context_start + 1, + context_end + 1, + ) + + base_indent = len(lines[function_start]) - len(lines[function_start].lstrip()) + function_end = len(lines) - 1 + + for i in range(function_start + 1, len(lines)): + line = lines[i] + if line.strip(): + current_indent = len(line) - len(line.lstrip()) + if current_indent <= base_indent: + function_end = i - 1 + break + + return ( + "\n".join(lines[function_start : function_end + 1]), + function_start + 1, + function_end + 1, + ) diff --git a/odoo_module_migrate/base_migration_script.py b/odoo_module_migrate/base_migration_script.py index 2d225c4d..96eee33e 100644 --- a/odoo_module_migrate/base_migration_script.py +++ b/odoo_module_migrate/base_migration_script.py @@ -10,6 +10,9 @@ import glob import yaml import importlib +import requests +from tqdm import tqdm +from .ai_migration_helper import AIMigrationHelper 
class BaseMigrationScript(object): @@ -23,8 +26,48 @@ class BaseMigrationScript(object): _RENAMED_MODELS = [] _REMOVED_MODELS = [] _GLOBAL_FUNCTIONS = [] # [function_object] + _AI_TRANSFORMS = [] _module_path = "" + def __init__(self): + self._warnings_by_message = {} + self._errors_by_message = {} + self._repo_root = None + self._ai_helper = AIMigrationHelper() + + def _get_controller_data(self, version_from_to): + # data = request.get(url, version_from, version_to) + # version_from_to: + # - migrate_100_allways.py + # - migrate_160_170.py + # - migrate_allways.py + # [0] - migrate + # [1] - version_from + # [2] - version_to + list_version_from_to = version_from_to.split("_") + if len(list_version_from_to) != 3 or "allways" in list_version_from_to: + return False + version_from = list_version_from_to[1] + version_to = list_version_from_to[2] + return self._get_changes_from_adhoc(version_from, version_to) + + def _get_changes_from_adhoc(self, init_version_name, target_version_name): + base_url = os.getenv("ADHOC_URL", False) + if not base_url: + logger.warning("No ADHOC_URL env variable found. Version Changes skipped") + return False + endpoint = "/version_changes/{from_version}/{to_version}".format( + from_version=init_version_name, to_version=target_version_name + ) + uri = base_url + endpoint + self._requests = requests.Session() + response = self._requests.get(uri) + + if response and response.ok: + data_version_changes = response.json() + return data_version_changes + return False + def parse_rules(self): script_parts = inspect.getfile(self.__class__).split("/") migrate_from_to = script_parts[-1].split(".")[0] @@ -79,6 +122,11 @@ def parse_rules(self): "type": TYPE_ARRAY, "doc": [], }, + # [([regex_patterns], prompt), ...] 
+ "_AI_TRANSFORMS": { + "type": TYPE_ARRAY, + "doc": [], + }, } # read for rule in rules.keys(): @@ -99,7 +147,137 @@ def parse_rules(self): elif rules[rule]["type"] == TYPE_DICT: rules[rule]["doc"].update(new_rules) elif rules[rule]["type"] == TYPE_ARRAY: - rules[rule]["doc"].extend(new_rules) + if rule == "_AI_TRANSFORMS": + # Convert YAML format to expected tuple format + for ai_transform_item in new_rules: + extensions = ai_transform_item.get("extensions", []) + patterns = ai_transform_item.get("patterns", []) + prompt = ai_transform_item.get("prompt", "") + rules[rule]["doc"].append( + (extensions, patterns, prompt) + ) + else: + rules[rule]["doc"].extend(new_rules) + + # Read from controller + data_version_changes = self._get_controller_data(migrate_from_to) + if data_version_changes: + for change in data_version_changes.values(): + # {'2': { + # 'change_type': 'rename', + # 'major_version_id': '17.0', + # 'model': False, + # 'field': False, + # 'model_type': 'model', + # 'old_name': 'mail.channel', + # 'new_name': 'discuss.channel', + # 'notes': '
Más información sobre este cambio en PR 118354
' + # } + # } + + if ( + change["change_type"] == "rename" + and change["model_type"] == "model" + ): + # [(old.model.name, new.model.name, more_info)] + new_rules = [ + [change["old_name"], change["new_name"], change["notes"]] + ] + rules["_RENAMED_MODELS"]["doc"].extend(new_rules) + + if ( + change["change_type"] == "rename" + and change["model_type"] == "field" + ): + # [(model_name, old_field_name, new_field_name, more_info), ...)] + new_rules = [ + [ + change["model"], + change["old_name"], + change["new_name"], + change["notes"], + ] + ] + rules["_RENAMED_FIELDS"]["doc"].extend(new_rules) + + if ( + change["change_type"] == "remove" + and change["model_type"] == "model" + ): + # [(old.model.name, more_info)] + new_rules = [[change["old_name"], change["notes"]]] + rules["_REMOVED_MODELS"]["doc"].extend(new_rules) + + if ( + change["change_type"] == "remove" + and change["model_type"] == "field" + ): + # [(model_name, field_name, more_info), ...)] + new_rules = [[change["model"], change["old_name"], change["notes"]]] + rules["_REMOVED_FIELDS"]["doc"].extend(new_rules) + + if ( + change["change_type"] == "rename" + and change["model_type"] == "xmlid" + ): + # [(model_name, old_field_name, new_field_name, more_info), ...)] + new_rules = [ + [ + change["model"], + change["old_name"], + change["new_name"], + change["notes"], + ] + ] + warnings = rules["_TEXT_REPLACES"]["doc"].get("*", {}) + warnings[change["old_name"]] = change["new_name"] + rules["_TEXT_REPLACES"]["doc"]["*"] = warnings + + if ( + change["change_type"] == "remove" + and change["model_type"] == "xmlid" + ): + # [(model_name, field_name, more_info), ...)] + warnings = rules["_TEXT_WARNINGS"]["doc"].get("*", {}) + warnings[change["old_name"]] = change["notes"] + rules["_TEXT_WARNINGS"]["doc"]["*"] = warnings + + if ( + change["change_type"] == "change_type" + and change["model_type"] == "field" + ): + warnings = rules["_TEXT_WARNINGS"]["doc"].get(".py", {}) + model_info = ( + f"On the model 
{change['model']} " + if change.get("model") + else "" + ) + field_info = ( + f"for field {change['field']} " if change.get("field") else "" + ) + warnings[change["field"]] = ( + model_info + field_info + change["notes"] + ) + rules["_TEXT_WARNINGS"]["doc"][".py"] = warnings + + if ( + change["change_type"] == "remove" + and change["model_type"] == "selection_value" + ): + warnings = rules["_TEXT_WARNINGS"]["doc"].get("*", {}) + model_info = ( + f"On the model {change['model']} " + if change.get("model") + else "" + ) + field_info = ( + f"for field {change['field']} " if change.get("field") else "" + ) + warnings[change["old_name"]] = ( + model_info + field_info + change["notes"] + ) + rules["_TEXT_WARNINGS"]["doc"]["*"] = warnings + # extend for rule, data in rules.items(): rtype = data["type"] @@ -152,19 +330,30 @@ def run( manifest_path = self._get_correct_manifest_path( manifest_path, self._FILE_RENAMES ) + self._warnings_by_message = {} + self._repo_root = str(module_path.resolve()) + + all_files = [] for root, directories, filenames in os.walk(module_path.resolve()): + if 'migrations' in root.split(os.sep): + continue for filename in filenames: extension = os.path.splitext(filename)[1] - if extension not in _ALLOWED_EXTENSIONS: - continue - self.process_file( - root, - filename, - extension, - self._FILE_RENAMES, - directory_path, - commit_enabled, - ) + if extension in _ALLOWED_EXTENSIONS: + all_files.append((root, filename, extension)) + + if not (os.getenv("PROGRESS_DISABLE", "0") == "1"): + all_files = tqdm(all_files, desc="Processing files") + + for root, filename, extension in all_files: + self.process_file( + root, + filename, + extension, + self._FILE_RENAMES, + directory_path, + commit_enabled, + ) self.handle_deprecated_modules(manifest_path, self._DEPRECATED_MODULES) @@ -179,6 +368,26 @@ def run( tools=tools, ) + for error_message, files in self._errors_by_message.items(): + rel_files = [os.path.relpath(f, self._repo_root) for f in sorted(files)] + 
logger.error("%s\n %s" % (error_message, "\n ".join(rel_files))) + + for warning_message, files in self._warnings_by_message.items(): + rel_files = [os.path.relpath(f, self._repo_root) for f in sorted(files)] + logger.warning("%s\n %s" % (warning_message, "\n ".join(rel_files))) + + for ( + filename, + line_start, + line_end, + ), suggestion in self._ai_helper.suggestions.items(): + logger.info( + "AI Suggestion for %s (lines %d-%d):\n\n%s" + % (filename, line_start, line_end, suggestion) + ) + + self._ai_helper.suggestions.clear() + def process_file( self, root, filename, extension, file_renames, directory_path, commit_enabled ): @@ -221,7 +430,8 @@ def process_file( errors.update(removed_models.get("errors")) for pattern, error_message in errors.items(): if re.findall(pattern, new_text): - logger.error(error_message + "\nFile " + os.path.join(root, filename)) + file_path = os.path.join(root, filename) + self._errors_by_message.setdefault(error_message, set()).add(file_path) warnings = self._TEXT_WARNINGS.get("*", {}) warnings.update(self._TEXT_WARNINGS.get(extension, {})) @@ -231,7 +441,22 @@ def process_file( warnings.update(removed_models.get("warnings")) for pattern, warning_message in warnings.items(): if re.findall(pattern, new_text): - logger.warning(warning_message + ". File " + root + os.sep + filename) + file_path = os.path.join(root, filename) + self._warnings_by_message.setdefault(warning_message, set()).add( + file_path + ) + + if extension == ".py": + tools.analyze_field_changes( + absolute_file_path, + self._RENAMED_FIELDS, + self._REMOVED_FIELDS, + self._warnings_by_message, + ) + + self._ai_helper.apply_ai_transforms( + filename, extension, content=new_text, ai_transforms=self._AI_TRANSFORMS + ) def handle_removed_fields(self, removed_fields): """Give warnings if field_name is found on the code. 
To minimize two @@ -249,7 +474,11 @@ def handle_removed_fields(self, removed_fields): field_name, " %s" % more_info if more_info else "", ) - res[r"""(['"]{0}['"]|\.{0}[\s,=])""".format(field_name)] = msg + res[ + r"""(?)` antes de iniciar las iteraciones, indicando el total de registros a procesar **(sin try/except)**. + - Dentro del bucle, llamar a `_commit_progress(processed=1)` después de cada registro procesado **(cada llamada debe ir dentro de un bloque `try/except`)**. + - En caso de excepción dentro del bucle, ejecutar `self.env.cr.rollback()`. + - Si antes se usaban batches (`[:batch_size]`) y `_trigger`, eliminar tanto el slicing de batches como el uso de `_trigger`. + + ### Reglas + - El refactor **solo aplica si el código contiene `_notify_progress` o `_trigger` usado dentro de un cron iterativo por lotes**. + - **No refactorizar** métodos que solo contienen un `_trigger` suelto sin bucle ni batches. + - Si corresponde aplicar el refactor, devolver un objeto JSON con `show_change: true` y en `content` incluir únicamente el **código refactorizado**. + - Si no corresponde, devolver `show_change: false` y `content: ""`. + + ### Ejemplo genérico válido + + #### Antes + + ```python + batch_size = 100 + data =
+ total_len = len(data)
+ batch_size = min(total_len, batch_size)
+ for i, rec in enumerate(data[:batch_size]):
+
+ self.env["ir.cron"]._notify_progress(done=i + 1, remaining=batch_size - (i + 1))
+
+ if total_len > batch_size:
+ self.env.ref("saas_provider_upgrade.ir_cron_update_client_data_records")._trigger()
+ ```
+
+ #### Después
+
+ ```python
+ data =
+ total_len = len(data)
+ self.env["ir.cron"]._commit_progress(remaining=total_len)
+ for rec in data:
+ try:
+
+ self.env["ir.cron"]._commit_progress(processed=1)
+ except Exception:
+ self.env.cr.rollback()
+ ```
diff --git a/odoo_module_migrate/migration_scripts/ai_transforms/migrate_180_190/domains.yaml b/odoo_module_migrate/migration_scripts/ai_transforms/migrate_180_190/domains.yaml
new file mode 100644
index 00000000..5a35536f
--- /dev/null
+++ b/odoo_module_migrate/migration_scripts/ai_transforms/migrate_180_190/domains.yaml
@@ -0,0 +1,41 @@
+- extensions:
+ - ".py"
+ patterns:
+ - "expression\\.AND\\s*\\("
+ - "expression\\.OR\\s*\\("
+ - "Domain\\.(AND|OR)\\s*\\(\\s*\\["
+ prompt: |
+ # Prompt: Refactorización de Domains
+
+ Analiza el siguiente código de Odoo y determina si corresponde aplicar una refactorización de **domains**:
+
+ - Sustituir `Domain.AND([...])` por el uso de `&` cuando aplique.
+ - Sustituir `Domain.OR([...])` por el uso de `|` cuando aplique.
+ - Simplificar dominios anidados usando `&` y `|`.
+ - Sustituir [("field", "operator", value), ...] por Domain("field", "operator", value) cuando aplique.
+
+ ### Reglas
+ - Si se detecta este patrón, devolver un objeto JSON con `show_change: true` y en `content` incluir únicamente el **código refactorizado**.
+ - Si no corresponde, devolver `show_change: false` y `content: ""`.
+
+ ### Ejemplo válido
+
+ #### Antes
+
+ ```python
+ def _get_read_domain(self):
+ res = super()._get_read_domain()
+ if self.env.user.share:
+ res = Domain.OR([res, [("id", "child_of", self.env.user.documentation_ids.ids)]])
+ return res
+ ```
+
+ #### Después
+
+ ```python
+ def _get_read_domain(self):
+ res = super()._get_read_domain()
+ if self.env.user.share:
+ res = res | Domain("id", "child_of", self.env.user.documentation_ids.ids)
+ return res
+ ```
diff --git a/odoo_module_migrate/migration_scripts/migrate_180_190.py b/odoo_module_migrate/migration_scripts/migrate_180_190.py
index 59e1467f..6e7ac140 100644
--- a/odoo_module_migrate/migration_scripts/migrate_180_190.py
+++ b/odoo_module_migrate/migration_scripts/migrate_180_190.py
@@ -1,4 +1,6 @@
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl).
+# This script is based on the original code from:
+# https://github.com/odoo/odoo/blob/master/odoo/upgrade_code/17.5-00-tree-to-list.py
from odoo_module_migrate.base_migration_script import BaseMigrationScript
import re
diff --git a/odoo_module_migrate/tools.py b/odoo_module_migrate/tools.py
index 7618bcc4..761f5fe1 100644
--- a/odoo_module_migrate/tools.py
+++ b/odoo_module_migrate/tools.py
@@ -5,6 +5,7 @@
import subprocess
import re
import pathlib
+import ast
from .config import _AVAILABLE_MIGRATION_STEPS
from .log import logger
@@ -49,6 +50,12 @@ def _write_content(file_path, content):
f.close()
+def read_lines(file_path):
+ content = _read_content(file_path)
+ lines = content.splitlines()
+ return enumerate(lines)
+
+
def _replace_in_file(file_path, replaces, log_message=False):
current_text = _read_content(file_path)
new_text = current_text
@@ -79,3 +86,194 @@ def get_files(module_path, extensions):
file_paths.extend(module_dir.rglob(f"*{ext}"))
return file_paths
+
+
+class OdooClassAnalyzer:
+ def __init__(self, file_path):
+ with open(file_path, "r") as f:
+ self.content = f.read()
+
+ tree = ast.parse(self.content)
+ self.classes = {}
+
+ for node in ast.walk(tree):
+ if isinstance(node, ast.ClassDef):
+ class_name = node.name
+ _name = None
+ _inherit = []
+
+ for item in node.body:
+ if isinstance(item, ast.Assign):
+ for target in item.targets:
+ if isinstance(target, ast.Name):
+ if target.id == "_name" and isinstance(
+ item.value, ast.Constant
+ ):
+ _name = item.value.value
+ elif target.id == "_inherit":
+ if isinstance(item.value, ast.Constant):
+ _inherit = [item.value.value]
+ elif isinstance(item.value, ast.List):
+ _inherit = [
+ elt.value
+ for elt in item.value.elts
+ if isinstance(elt, ast.Constant)
+ ]
+
+ if _name:
+ loops = self._find_loops_in_class(node)
+ self.classes[_name] = {
+ "class_name": class_name,
+ "_name": _name,
+ "_inherit": _inherit,
+ "start_line": node.lineno,
+ "end_line": node.end_lineno,
+ "loops": loops,
+ }
+
+ def _find_loops_in_class(self, class_node):
+ """
+ Find 'for variable in self:' loops in the class definition.
+        Returns a dict keyed by loop variable name (a later loop reusing the same name overwrites the earlier entry) mapping to class-relative start/end line indices.
+ """
+ loops = {}
+ start_line = class_node.lineno
+ end_line = class_node.end_lineno
+ class_lines = self.content.split("\n")[start_line - 1 : end_line]
+
+ for i, line in enumerate(class_lines):
+ match = re.search(r"^(\s*)for\s+([a-zA-Z_]\w*)\s+in\s+self\s*:", line)
+ if match:
+ indentation = len(match.group(1))
+ variable_name = match.group(2)
+ loop_start_relative = i
+
+ loop_end_relative = len(class_lines) - 1
+ for j in range(i + 1, len(class_lines)):
+ current_line = class_lines[j]
+ if current_line.strip() == "":
+ continue
+
+ current_indentation = len(current_line) - len(current_line.lstrip())
+ if current_indentation <= indentation:
+ loop_end_relative = j - 1
+ break
+
+ loops[variable_name] = {
+ "start_line_relative": loop_start_relative,
+ "end_line_relative": loop_end_relative,
+ }
+
+ return loops
+
+ def has_model(self, model_name):
+ for value in self.classes.values():
+ if model_name == value["_name"] or model_name in value["_inherit"]:
+ return True
+ return False
+
+ def get_model_info(self, model_name):
+ for value in self.classes.values():
+ if model_name == value["_name"] or model_name in value["_inherit"]:
+ return value
+ return False
+
+
+def analyze_field_changes(
+ file_path, field_replacements, field_removals, warnings_by_message
+):
+ """
+ Analyzes a Python file to find and replace field names in Odoo model definitions.
+ Handles 'self.' and 'for variable in self:' loops.
+ """
+ analyzer = OdooClassAnalyzer(file_path)
+ content = _read_content(file_path)
+
+ base_patterns = [
+ (r"\b{variable}\.{old_field}\b", r"{variable}.{new_field}"),
+ (
+ r'({variable}\.(write|create)\s*\([^}}]*?)(["\']){old_field}\3',
+ r'\1"{new_field}"',
+ ),
+ ]
+ total_replacements = 0
+ for model_name, old_field, new_field, _ in field_replacements:
+ if analyzer.has_model(model_name):
+ model_info = analyzer.get_model_info(model_name)
+ start_line = model_info["start_line"]
+ end_line = model_info["end_line"]
+ loops = model_info["loops"]
+
+ lines = content.split("\n")
+ model_lines = lines[start_line - 1 : end_line]
+ model_content = "\n".join(model_lines)
+ original_model_content = model_content
+
+ count = 0
+ for pattern, replace in base_patterns:
+ pattern_formatted = pattern.format(
+ variable="self", old_field=re.escape(old_field)
+ )
+ replace_formatted = replace.format(variable="self", new_field=new_field)
+ matches = re.findall(
+ pattern_formatted, original_model_content, flags=re.DOTALL
+ )
+ count += len(matches)
+ model_content = re.sub(
+ pattern_formatted, replace_formatted, model_content, flags=re.DOTALL
+ )
+
+ model_lines_modified = model_content.split("\n")
+ for variable_name, loop_info in loops.items():
+ loop_start = loop_info["start_line_relative"] + 1
+ loop_end = loop_info["end_line_relative"] + 1
+
+ loop_lines = model_lines_modified[loop_start:loop_end]
+ loop_content = "\n".join(loop_lines)
+ original_loop_content = loop_content
+
+ for pattern, replace in base_patterns:
+ pattern = pattern.format(
+ variable=re.escape(variable_name),
+ old_field=re.escape(old_field),
+ )
+ replace = replace.format(
+ variable=variable_name, new_field=new_field
+ )
+ matches = re.findall(
+ pattern, original_loop_content, flags=re.DOTALL
+ )
+ count += len(matches)
+ loop_content = re.sub(
+ pattern, replace, loop_content, flags=re.DOTALL
+ )
+
+ model_lines_modified[loop_start:loop_end] = loop_content.split("\n")
+
+ model_content_final = "\n".join(model_lines_modified)
+ if count > 0:
+ lines[start_line - 1 : end_line] = model_content_final.split("\n")
+ content = "\n".join(lines)
+ total_replacements += count
+ logger.info(
+ f"{model_name}: {old_field} -> {new_field} ({count} changes)"
+ )
+
+ _write_content(file_path, content)
+
+ for model_name, field_name, more_info in field_removals:
+ if analyzer.has_model(model_name):
+ model_info = analyzer.get_model_info(model_name)
+ lines = content.split("\n")
+ model_content = "\n".join(
+ lines[model_info["start_line"] - 1 : model_info["end_line"]]
+ )
+
+ for pattern, _ in base_patterns:
+ pattern_formatted = pattern.format(
+ variable="self",
+ old_field=re.escape(field_name)
+ )
+ if re.search(pattern_formatted, model_content, flags=re.DOTALL):
+ msg = f"Field '{field_name}' of model '{model_name}' was removed. {more_info or ''}"
+ warnings_by_message.setdefault(msg, set()).add(str(file_path))
diff --git a/requirements.txt b/requirements.txt
index cb916edd..26a13aa7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,3 +3,4 @@ colorama
lxml
pyyaml
requests
+tqdm