diff --git a/oioioi/problems/management/commands/mass_create_tool.py b/oioioi/problems/management/commands/mass_create_tool.py
index 305e731fa..575795aa8 100644
--- a/oioioi/problems/management/commands/mass_create_tool.py
+++ b/oioioi/problems/management/commands/mass_create_tool.py
@@ -1,11 +1,25 @@
+import os
 import random
 import string
 import sys
+from io import StringIO
+from types import SimpleNamespace

 from django.conf import settings
 from django.contrib.auth import get_user_model
+from django.core.files.base import ContentFile
+from django.core.management import call_command
 from django.core.management.base import BaseCommand, CommandError
-
+from django.db import transaction
+from django.utils import timezone
+
+from oioioi.contests.models import (
+    Contest,
+    ProblemInstance,
+    RegistrationAvailabilityConfig,
+    Submission,
+)
+from oioioi.participants.models import Participant
 from oioioi.problems.models import (
     AlgorithmTag,
     AlgorithmTagProposal,
@@ -17,6 +31,7 @@
     ProblemName,
     ProblemSite,
 )
+from oioioi.problems.utils import get_new_problem_instance

 User = get_user_model()

@@ -46,12 +61,32 @@ class Command(BaseCommand):
         "Allows the creation of mock data for testing purposes. "
         "Creates Problems, Users, Algorithm Tags, Difficulty Tags, Algorithm Tag Proposals, and "
         "Algorithm Tag Through and Difficulty Tag Through records to assign tags to problems. Use with caution in production environments. "
+        "Additionally, it can create a Contest and add the created Problems and Users to it, along with Submissions. "
     )

     def add_arguments(self, parser):
         parser.add_argument("--wipe", "-w", action="store_true", help="Remove all previously generated mock data before creating new data")
+        parser.add_argument("--createcontest", "-cc", action="store_true", help="Add created problems and users to a new contest")
+        parser.add_argument("--contestname", "-cn", type=str, help="Name of the contest to create (default: random ID)")
         parser.add_argument("--problems", "-p", type=unsigned_int, default=0, metavar="N", help="Number of problems to create (default: 0)")
-        parser.add_argument("--users", "-u", type=unsigned_int, default=0, metavar="N", help="Number of users to create (default: 0)")
+        parser.add_argument(
+            "--problempackages", "-pp",
+            nargs="+",
+            type=str,
+            help="List of problem package files to create problems from (optionally added to a new contest when --createcontest is used)"
+        )
+        parser.add_argument(
+            "--users", "-u", type=unsigned_int, default=0, metavar="N",
+            help="Number of users to create (default: 0)"
+        )
+        parser.add_argument(
+            "--submission_files", "-sf", nargs="+", type=str,
+            help="List of source code files to use for submissions"
+        )
+        parser.add_argument(
+            "--submissions_per_user", "-spu", type=unsigned_int, default=0, metavar="N",
+            help="Number of submissions per user to create (default: 0)"
+        )
         parser.add_argument("--algotags", "-at", type=unsigned_int, default=0, metavar="N", help="Number of algorithm tags to create (default: 0)")
         parser.add_argument("--difftags", "-dt", type=unsigned_int, default=0, metavar="N", help="Number of difficulty tags to create (default: 0)")
         parser.add_argument(
@@ -132,7 +167,10 @@ def create_problems(self, count, verbosity):
             candidate_prefix="prob_",
             random_length=10,
             uniqueness_fn=lambda s: not Problem.objects.filter(short_name=s).exists(),
-            create_instance_fn=lambda candidate: Problem.create(short_name=candidate),
+            create_instance_fn=lambda candidate: Problem.create(
+                short_name=candidate,
+                controller_name='oioioi.problems.controllers.ProblemController'
+            ),
             verbose_name="Problem",
             verbosity=verbosity,
         )
@@ -149,6 +187,206 @@ def create_problems(self, count, verbosity):
         )
         return created_problems

+    def create_problems_from_package(self, package_file_names, verbosity):
+        """
+        Creates problems from package files.
+        Ensures that the controller_name is properly set after unpacking.
+        Returns a list of created Problem objects.
+        - package_file_names: List of package file names to unpack.
+        - verbosity: The current verbosity level.
+        """
+        if not package_file_names:
+            return []
+
+        TEST_FILES_DIR = os.path.join(settings.BASE_DIR, 'oioioi', 'sinolpack', 'files')
+        created_problems = []
+
+        for package_file_name in package_file_names:
+            package_path = os.path.join(TEST_FILES_DIR, package_file_name)
+
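+            # `addproblem` is expected to print the new problem's ID to stdout,
+            # so capture the command output and parse the ID from it below.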
+            out = StringIO()
+            try:
+                call_command('addproblem', package_path, stdout=out)
+                problem_id = int(out.getvalue().strip())
+                if verbosity >= 2:
+                    self.stdout.write(f"Unpacked package {package_file_name} to create problem with ID {problem_id}")
+                problem = Problem.objects.get(id=problem_id)
+
+                if not problem.controller_name:
+                    problem.controller_name = 'oioioi.problems.controllers.ProblemController'
+                    problem.save()
+                    if verbosity >= 2:
+                        self.stdout.write(self.style.WARNING(f"Warning: Set default controller_name for problem {problem.short_name}"))
+
+                created_problems.append(problem)
+
+                if verbosity >= 2:
+                    self.stdout.write(f"Created problem: {problem.short_name}, {problem.id}, {problem.controller_name} from package {package_file_name}")
+            except Exception as e:
+                self.stderr.write(self.style.ERROR(f"Failed to unpack package {package_file_name} located at {package_path}. Error: {e}"))
+
+        return created_problems
+
+    def add_problems_to_new_round(self, problems, contest, verbosity):
+        """
+        Adds all provided problems to a new round in the given contest.
+        Returns the created Round object.
+        - problems: List of Problem objects to add.
+        - contest: Contest object to which the round will be added.
+        - verbosity: The current verbosity level.
+        """
+        with transaction.atomic():
+            round_number = contest.round_set.count() + 1
+            new_round = contest.round_set.create(name=f"Round {round_number}")
+
+            for problem in problems:
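+                # get_new_problem_instance() builds a fresh ProblemInstance of
+                # the problem for this contest; short_name and round are set manually.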
+                pi = get_new_problem_instance(problem, contest)
+                pi.short_name = problem.short_name
+                pi.round = new_round
+                pi.save()
+
+        if verbosity >= 2:
+            self.stdout.write(f"Added {len(problems)} problems to new round '{new_round.name}' in contest '{contest.name}' (ID: {contest.id})")
+
+        return new_round
+
+    def fetch_submission_files(self, submission_file_names, verbosity):
+        """
+        Fetches submission files from the specified file names.
+        Returns a list of tuples (file_name, source_code).
+        - submission_file_names: List of submission file names to read.
+        - verbosity: The current verbosity level.
+        """
+        if not submission_file_names:
+            return []
+        TEST_FILES_DIR = os.path.join(settings.BASE_DIR, 'oioioi', 'sinolpack', 'files')
+        submission_files = []
+        for file_name in submission_file_names:
+            file_path = os.path.join(TEST_FILES_DIR, file_name)
+            try:
+                with open(file_path) as f:
+                    source_code = f.read()
+                submission_files.append((file_name, source_code))
+                if verbosity >= 2:
+                    self.stdout.write(f"Loaded submission file: {file_name}")
+            except Exception as e:
+                self.stderr.write(self.style.ERROR(f"Failed to read submission file {file_name} located at {file_path}. Error: {e}"))
+        if submission_file_names and not submission_files:
+            raise CommandError(
+                "No submission files could be loaded. Please verify the file names and paths."
+            )
+        return submission_files
+
+    def submit_by_user(self, user, problem_instance, source_code, source_code_name, verbosity):
+        """
+        Submits source code to a problem instance on behalf of a user.
+        - user: User object submitting the code.
+        - problem_instance: ProblemInstance object to which the code is submitted.
+        - source_code: The source code string to submit.
+        - source_code_name: The filename to use for the submitted source code.
+        - verbosity: The current verbosity level.
+        """
+        # Mock request object with necessary attributes
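+        # (a SimpleNamespace stands in for a real HttpRequest here; the
+        # submission controller is assumed to read only the attributes set below)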
+ """ + if contest_name is None: + candidate_prefix, random_length = "contest_", 10 + else: + candidate_prefix, random_length = contest_name, 0 + + created_contests = self.create_unique_objects( + count=1, + candidate_prefix=candidate_prefix, + random_length=random_length, + uniqueness_fn=lambda s: not Contest.objects.filter(id=s).exists(), + create_instance_fn=lambda candidate: Contest( + id=candidate, + name=contest_name or candidate, + controller_name='oioioi.programs.controllers.ProgrammingContestController' + ), + verbose_name=verbose_name, + verbosity=verbosity, + ) + + contest = created_contests[0] + + RegistrationAvailabilityConfig.objects.create( + contest=contest, + enabled='YES' + ) + + self.add_problems_to_new_round(problems, contest, verbosity) + submission_files = self.fetch_submission_files(submission_file_names, verbosity=verbosity) + + for user in users: + Participant.objects.create(contest=contest, user=user) + + for _submission_no in range(num_submissions_per_user): + file_name, source_code = random.choice(submission_files) + for problem in problems: + try: + pi = ProblemInstance.objects.get(problem=problem, contest=contest) + self.submit_by_user(user, pi, source_code, file_name, verbosity=verbosity) + except ProblemInstance.DoesNotExist: + err_msg = ( + f"ProblemInstance does not exist for problem {problem.short_name} " + f"in contest {contest.id}. Skipping submission." + ) + self.stderr.write(self.style.ERROR(err_msg)) + + if verbosity >= 2: + self.stdout.write(f"Created {verbose_name}: {contest.name} (ID: {contest.id}) with {len(problems)} problems and {len(users)} users") + + return contest + def create_through_records(self, count, problems, tags, through_model, verbose_name, verbosity): """ Creates exactly `count` distinct through-records connecting problems and tags. @@ -221,6 +451,13 @@ def remove_all_generated_data(self): Removes all mass-generated mock data created using this tool. 
""" + submission_qs = Submission.objects.filter(user__username__startswith=self.auto_prefix) + submission_count = submission_qs.count() + if submission_count: + self.stdout.write(self.style.WARNING(f"Deleting {submission_count} Submissions for generated users")) + submission_qs.delete() + self.stdout.write(self.style.SUCCESS(f"Deleted {submission_count} Submissions")) + prob_qs = Problem.objects.filter(short_name__startswith=self.auto_prefix) prob_count = prob_qs.count() prob_qs.delete() @@ -231,6 +468,12 @@ def remove_all_generated_data(self): user_qs.delete() self.stdout.write(self.style.SUCCESS(f"Deleted {user_count} Users")) + contest_qs = Contest.objects.filter(id__startswith=self.auto_prefix) + self.stdout.write(self.style.WARNING(f"Deleting the following contests: {contest_qs.values_list('id', flat=True)}")) + contest_count = contest_qs.count() + contest_qs.delete() + self.stdout.write(self.style.SUCCESS(f"Deleted {contest_count} Contests")) + algo_tag_qs = AlgorithmTag.objects.filter(name__startswith=self.auto_prefix) algo_tag_count = algo_tag_qs.count() algo_tag_qs.delete() @@ -251,8 +494,13 @@ def handle(self, *args, **options): self.auto_prefix = "_auto_" wipe = options["wipe"] + create_contest = options["createcontest"] + contest_name = options["contestname"] num_problems = options["problems"] + package_file_names = options["problempackages"] num_users = options["users"] + submission_file_names = options["submission_files"] + num_submissions_per_user = options["submissions_per_user"] num_algotags = options["algotags"] num_difftags = options["difftags"] num_algothrough = options["algothrough"] @@ -263,7 +511,8 @@ def handle(self, *args, **options): verbosity = int(options.get("verbosity", 1)) total_objects_to_create = ( - num_problems + num_users + num_algotags + num_difftags + num_algothrough + num_diffthrough + num_algoproposals + num_diffproposals + int(bool(create_contest)) + num_problems + num_users + num_algotags + + num_difftags + num_algothrough + num_diffthrough + num_algoproposals + num_diffproposals ) max_algothrough = num_problems * num_algotags max_diffthrough = num_problems @@ -311,6 +560,11 @@ def handle(self, *args, **options): verbosity=verbosity, ) + created_problems += self.create_problems_from_package( + package_file_names=package_file_names, + verbosity=verbosity, + ) + created_users = self.create_unique_objects( count=num_users, candidate_prefix="user_", @@ -321,6 +575,26 @@ def handle(self, *args, **options): verbosity=verbosity, ) + if create_contest and num_submissions_per_user > 0 and not submission_file_names: + raise CommandError( + "When creating a contest with submissions, --submission-file-names must be provided " + "and contain at least one file." 
+        submission_qs = Submission.objects.filter(user__username__startswith=self.auto_prefix)
+        submission_count = submission_qs.count()
+        if submission_count:
+            self.stdout.write(self.style.WARNING(f"Deleting {submission_count} Submissions for generated users"))
+            submission_qs.delete()
+            self.stdout.write(self.style.SUCCESS(f"Deleted {submission_count} Submissions"))
+
         prob_qs = Problem.objects.filter(short_name__startswith=self.auto_prefix)
         prob_count = prob_qs.count()
         prob_qs.delete()
@@ -231,6 +478,12 @@ def remove_all_generated_data(self):
         user_qs.delete()
         self.stdout.write(self.style.SUCCESS(f"Deleted {user_count} Users"))

+        contest_qs = Contest.objects.filter(id__startswith=self.auto_prefix)
+        self.stdout.write(self.style.WARNING(f"Deleting the following contests: {list(contest_qs.values_list('id', flat=True))}"))
+        contest_count = contest_qs.count()
+        contest_qs.delete()
+        self.stdout.write(self.style.SUCCESS(f"Deleted {contest_count} Contests"))
+
         algo_tag_qs = AlgorithmTag.objects.filter(name__startswith=self.auto_prefix)
         algo_tag_count = algo_tag_qs.count()
         algo_tag_qs.delete()
@@ -251,8 +504,13 @@ def handle(self, *args, **options):
         self.auto_prefix = "_auto_"

         wipe = options["wipe"]
+        create_contest = options["createcontest"]
+        contest_name = options["contestname"]
         num_problems = options["problems"]
+        package_file_names = options["problempackages"]
         num_users = options["users"]
+        submission_file_names = options["submission_files"]
+        num_submissions_per_user = options["submissions_per_user"]
         num_algotags = options["algotags"]
         num_difftags = options["difftags"]
         num_algothrough = options["algothrough"]
@@ -263,7 +521,8 @@ def handle(self, *args, **options):
         verbosity = int(options.get("verbosity", 1))

         total_objects_to_create = (
-            num_problems + num_users + num_algotags + num_difftags + num_algothrough + num_diffthrough + num_algoproposals + num_diffproposals
+            int(bool(create_contest)) + num_problems + num_users + num_algotags
+            + num_difftags + num_algothrough + num_diffthrough + num_algoproposals + num_diffproposals
         )
         max_algothrough = num_problems * num_algotags
         max_diffthrough = num_problems
@@ -311,6 +570,11 @@ def handle(self, *args, **options):
             verbosity=verbosity,
         )

+        created_problems += self.create_problems_from_package(
+            package_file_names=package_file_names,
+            verbosity=verbosity,
+        )
+
         created_users = self.create_unique_objects(
             count=num_users,
             candidate_prefix="user_",
@@ -321,6 +585,26 @@ def handle(self, *args, **options):
             verbosity=verbosity,
         )

+        if create_contest and num_submissions_per_user > 0 and not submission_file_names:
+            raise CommandError(
+                "When creating a contest with submissions, --submission_files must be provided "
+                "and contain at least one file."
+            )
+        if create_contest:
+            created_contest = self.create_and_populate_contest(
+                contest_name=contest_name,
+                problems=created_problems,
+                users=created_users,
+                submission_file_names=submission_file_names,
+                num_submissions_per_user=num_submissions_per_user,
+                verbose_name="Contest",
+                verbosity=verbosity,
+            )
+            if (verbosity >= 1) and created_contest:
+                self.stdout.write(
+                    self.style.SUCCESS(f"Created Contest: {created_contest.name} (ID: {created_contest.id})")
+                )
+
         created_algotags = self.create_unique_objects(
             count=num_algotags,
             candidate_prefix="algo_",
diff --git a/oioioi/problems/tests/test_commands.py b/oioioi/problems/tests/test_commands.py
index f2729a5f7..81448c858 100644
--- a/oioioi/problems/tests/test_commands.py
+++ b/oioioi/problems/tests/test_commands.py
@@ -5,6 +5,7 @@
 from django.core.management import CommandError, call_command
 from django.test import TestCase, override_settings

+from oioioi.contests.models import Contest, Submission
 from oioioi.problems.models import (
     AlgorithmTag,
     AlgorithmTagProposal,
@@ -30,19 +31,28 @@ class TestMassCreateTool(TestCase):
         "diffthrough": DifficultyTagThrough,
         "algoproposals": AlgorithmTagProposal,
         "diffproposals": DifficultyTagProposal,
+        "contests": Contest,
+        "submissions": Submission,
     }

-    def _assert_model_counts(self, expected_counts):
+    def _assert_model_counts(self, expected_counts, check_problem_names=True):
+        auto_prefix = "_auto_"
         for name, model in self.name_to_model.items():
-            count = model.objects.count()
+            # For submissions, only count those from auto-generated users
+            # (the packages create submissions with model solutions).
+            if name == "submissions":
+                count = model.objects.filter(user__username__startswith=auto_prefix).count()
+            else:
+                count = model.objects.count()
             expected_count = expected_counts.get(name, 0)
             assert count == expected_count, f"Expected {expected_count} {name}, got {count}"

-        # Validation for i18n problem names
-        problem_amount: int = expected_counts.get("problems", Problem.objects.count())
-        expected_probname_count = len(settings.LANGUAGES) * problem_amount
-        probname_count = ProblemName.objects.count()
-        assert probname_count == expected_probname_count, f"Expected {expected_probname_count} probnames, got {probname_count}"
+        # Validation for i18n problem names; can be skipped (e.g. for problems created from packages).
+        if check_problem_names:
+            problem_amount: int = expected_counts.get("problems", Problem.objects.count())
+            expected_probname_count = len(settings.LANGUAGES) * problem_amount
+            probname_count = ProblemName.objects.count()
+            assert probname_count == expected_probname_count, f"Expected {expected_probname_count} probnames, got {probname_count}"

     def test_long_flags(self):
         out = StringIO()
@@ -115,6 +125,48 @@ def test_long_flags(self):
         call_command("mass_create_tool", "--wipe")
         self._assert_model_counts({})

+    def test_long_flags_contest(self):
+        out = StringIO()
+        call_command(
+            "mass_create_tool",
+            "--contestname",
+            "demo",
+            "--createcontest",
+            "--problempackages",
+            "test_full_package.tgz",
+            "--users",
+            "10",
+            "--submission_files",
+            "sum-correct.cpp",
+            "sum-various-results.cpp",
+            "--submissions_per_user",
+            "3",
+            "--wipe",
+            stdout=out,
+        )
+        self._assert_model_counts({"users": 10, "problems": 1, "contests": 1, "submissions": 30}, check_problem_names=False)
+
+    def test_short_flags_contest(self):
+        out = StringIO()
+        call_command(
+            "mass_create_tool",
+            "-cn",
+            "demo",
+            "-cc",
+            "-pp",
+            "test_full_package.tgz",
+            "-u",
+            "10",
+            "-sf",
+            "sum-correct.cpp",
+            "sum-various-results.cpp",
+            "-spu",
+            "3",
+            "-w",
+            stdout=out,
+        )
+        self._assert_model_counts({"users": 10, "problems": 1, "contests": 1, "submissions": 30}, check_problem_names=False)
+
     def test_short_flags(self):
         out = StringIO()
         call_command(