Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

refactor: OPTIC-1621: Remove Stale Feature Flag - fflag_perf_back_lsdv_4695_update_prediction_query_to_use_direct_project_relation #7002

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion label_studio/core/feature_flags/stale_feature_flags.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
'fflag_feat_front_optic_66_lazy_chart_evaluation_19092023_short': False,
'fflag_fix_back_leap_24_tasks_api_optimization_05092023_short': False,
'fflag_feat_optic_2_ensure_draft_saved_short': True,
'fflag_perf_back_lsdv_4695_update_prediction_query_to_use_direct_project_relation': True,
'fflag_feat_front_lsdv_4620_outliner_optimization_310723_short': True,
'fflag_fix_front_lsdv_4600_lead_time_27072023_short': False,
'fflag_feat_front_lops_e_10_updated_ux_short': False,
Expand Down
8 changes: 1 addition & 7 deletions label_studio/projects/functions/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,7 @@ def annotate_finished_task_number(queryset):


def annotate_total_predictions_number(queryset):
    """Annotate each project in *queryset* with its total number of predictions.

    The stale feature flag
    ``fflag_perf_back_lsdv_4695_update_prediction_query_to_use_direct_project_relation``
    has been removed: predictions are now always looked up via the direct
    ``project`` foreign key instead of traversing ``task__project``, which
    avoids an extra JOIN through Task.

    :param queryset: Project queryset to annotate.
    :return: Queryset with an integer ``total_predictions_number`` annotation.
    """
    predictions = Prediction.objects.filter(project=OuterRef('id')).values('id')
    return queryset.annotate(total_predictions_number=SQCount(predictions))


Expand Down
18 changes: 2 additions & 16 deletions label_studio/projects/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
from typing import Any, Mapping, Optional

from annoying.fields import AutoOneToOneField
from core.feature_flags import flag_set
from core.label_config import (
check_control_in_config_by_regex,
check_toname_in_config_by_regex,
Expand Down Expand Up @@ -327,13 +326,7 @@

@property
def has_any_predictions(self):
    """Return True if at least one prediction exists for this project.

    Queries via the direct ``project`` relation on Prediction; the stale
    feature-flag fallback through ``task__project`` was removed.
    """
    return Prediction.objects.filter(Q(project=self.id)).exists()

Check warning on line 329 in label_studio/projects/models.py

View check run for this annotation

Codecov / codecov/patch

label_studio/projects/models.py#L329

Added line #L329 was not covered by tests

@property
def business(self):
Expand Down Expand Up @@ -933,14 +926,7 @@
:param extended: Boolean, if True, returns additional information. Default is False.
:return: Dict or list containing model versions and their count predictions.
"""
if flag_set(
'fflag_perf_back_lsdv_4695_update_prediction_query_to_use_direct_project_relation',
user='auto',
):
predictions = Prediction.objects.filter(project=self)
else:
predictions = Prediction.objects.filter(task__project=self)
# model_versions = set(predictions.values_list('model_version', flat=True).distinct())
predictions = Prediction.objects.filter(project=self)

if extended:
model_versions = list(
Expand Down
8 changes: 1 addition & 7 deletions label_studio/tasks/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -778,13 +778,7 @@ class PredictionAPI(viewsets.ModelViewSet):
filterset_fields = ['task', 'task__project', 'project']

def get_queryset(self):
    """Restrict predictions to the requesting user's active organization.

    Filters via the direct ``project`` foreign key on Prediction (the stale
    feature-flag fallback through ``task__project`` was removed).

    :return: Prediction queryset scoped to ``request.user.active_organization``.
    """
    return Prediction.objects.filter(project__organization=self.request.user.active_organization)


@method_decorator(name='get', decorator=swagger_auto_schema(auto_schema=None))
Expand Down
20 changes: 4 additions & 16 deletions label_studio/tasks/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -933,15 +933,8 @@ def created_ago(self):
return timesince(self.created_at)

def has_permission(self, user):
    """Check whether *user* may access this prediction.

    Delegates to the owning project's permission check, using the direct
    ``project`` relation (the stale feature-flag fallback through
    ``task.project`` was removed). As a side effect, the project is
    attached to the user for activity logging.

    :param user: User instance to check.
    :return: Whatever ``self.project.has_permission(user)`` returns.
    """
    user.project = self.project  # link for activity log
    return self.project.has_permission(user)

@classmethod
def prepare_prediction_result(cls, result, project):
Expand Down Expand Up @@ -1009,13 +1002,8 @@ def save(self, *args, update_fields=None, **kwargs):
update_fields = {'project_id'}.union(update_fields)

# "result" data can come in different forms - normalize them to JSON
if flag_set(
'fflag_perf_back_lsdv_4695_update_prediction_query_to_use_direct_project_relation',
user='auto',
):
self.result = self.prepare_prediction_result(self.result, self.project)
else:
self.result = self.prepare_prediction_result(self.result, self.task.project)
self.result = self.prepare_prediction_result(self.result, self.project)

if update_fields is not None:
update_fields = {'result'}.union(update_fields)
# set updated_at field of task to now()
Expand Down
Loading