pyproject.toml (2 changes: 1 addition, 1 deletion)

@@ -1,6 +1,6 @@
 [project]
 name = "uipath"
-version = "2.1.78"
+version = "2.1.80"
 description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
 readme = { file = "README.md", content-type = "text/markdown" }
 requires-python = ">=3.10"
samples/calculator/main.py (4 changes: 2 additions, 2 deletions)

@@ -35,15 +35,15 @@ class Wrapper:

 @traced()
 @mockable(example_calls=GET_RANDOM_OPERATOR_EXAMPLES)
-def get_random_operator() -> Wrapper:
+async def get_random_operator() -> Wrapper:
     """Get a random operator."""
     return Wrapper(result=random.choice([Operator.ADD, Operator.SUBTRACT, Operator.MULTIPLY, Operator.DIVIDE]))


 @traced()
 async def main(input: CalculatorInput) -> CalculatorOutput:
     if input.operator == Operator.RANDOM:
-        operator = get_random_operator().result
+        operator = (await get_random_operator()).result
     else:
         operator = input.operator
     match operator:
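A note on the new call site: attribute access binds tighter than `await`, so the parentheses in `(await get_random_operator()).result` are required. A minimal sketch of the pattern, using stand-in types rather than the sample's real `CalculatorInput`/`Operator` models:

import asyncio
import random
from dataclasses import dataclass


@dataclass
class Wrapper:
    result: str


async def get_random_operator() -> Wrapper:
    # Stand-in for the sample's coroutine: returns a wrapped random operator.
    return Wrapper(result=random.choice(["+", "-", "*", "/"]))


async def main() -> None:
    # Await the coroutine first, then read .result from the returned Wrapper.
    operator = (await get_random_operator()).result
    print(operator)

    # Without the parentheses, `await get_random_operator().result` parses as
    # `await (get_random_operator().result)` and raises AttributeError,
    # because the un-awaited coroutine object has no `result` attribute.


asyncio.run(main())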
src/uipath/_cli/_evals/_models/_output.py (32 changes: 17 additions, 15 deletions)

@@ -64,46 +64,49 @@ class EvaluationRunResultDto(BaseModel):
     model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)

     evaluator_name: str
+    evaluator_id: str
     result: EvaluationResultDto

+    @model_serializer(mode="wrap")
+    def serialize_model(self, serializer, info):
+        data = serializer(self)
+        if isinstance(data, dict):
+            data.pop("evaluatorId", None)
+        return data
+

 class EvaluationRunResult(BaseModel):
     model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)

-    score: float = 0.0
     evaluation_name: str
     evaluation_run_results: List[EvaluationRunResultDto]

-    def compute_average_score(self) -> None:
+    @property
+    def score(self) -> float:
         """Compute average score for this single eval_item."""
         if not self.evaluation_run_results:
-            self.score = 0.0
-            return
+            return 0.0

         total_score = sum(dto.result.score for dto in self.evaluation_run_results)
-        self.score = total_score / len(self.evaluation_run_results)
+        return total_score / len(self.evaluation_run_results)


 class UiPathEvalOutput(BaseModel):
     model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)

     evaluation_set_name: str
-    score: float
     evaluation_set_results: List[EvaluationRunResult]

-    def compute_average_score(self) -> None:
-        """Compute overall average by calling eval_item.compute_average_score()."""
+    @property
+    def score(self) -> float:
+        """Compute overall average score from evaluation results."""
         if not self.evaluation_set_results:
-            self.score = 0.0
-            return
-
-        for eval_result in self.evaluation_set_results:
-            eval_result.compute_average_score()
+            return 0.0

         eval_item_scores = [
             eval_result.score for eval_result in self.evaluation_set_results
         ]
-        self.score = sum(eval_item_scores) / len(eval_item_scores)
+        return sum(eval_item_scores) / len(eval_item_scores)

     def calculate_final_score(
         self,

@@ -181,5 +184,4 @@ def calculate_final_score(

         final_score = total_weighted_score / total_weight if total_weight > 0 else 0.0

-        self.score = final_score
         return final_score, agg_metrics_per_evaluator
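For reviewers unfamiliar with Pydantic's wrap-mode serializers: the new `serialize_model` hook runs the default serialization and then strips the camelCase `evaluatorId` key, so the id stays available on the model but is not emitted. A minimal, self-contained sketch of that behavior (Pydantic v2 assumed; `EvaluationResultDto` is reduced to a bare score field for illustration):

from pydantic import BaseModel, ConfigDict, model_serializer
from pydantic.alias_generators import to_camel


class EvaluationResultDto(BaseModel):
    # Simplified stand-in for the real result model.
    score: float


class EvaluationRunResultDto(BaseModel):
    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)

    evaluator_name: str
    evaluator_id: str
    result: EvaluationResultDto

    @model_serializer(mode="wrap")
    def serialize_model(self, serializer, info):
        # Produce the default (aliased) dict, then drop the evaluator id key.
        data = serializer(self)
        if isinstance(data, dict):
            data.pop("evaluatorId", None)
        return data


dto = EvaluationRunResultDto(
    evaluator_name="exact-match",
    evaluator_id="ev-1",
    result=EvaluationResultDto(score=1.0),
)
print(dto.model_dump(by_alias=True))  # {'evaluatorName': 'exact-match', 'result': {'score': 1.0}}
print(dto.evaluator_id)               # 'ev-1' is still accessible in code

Note that the pop targets the alias, so the key is only removed from alias-based dumps (`by_alias=True` or camelCase JSON); a plain `model_dump()` would still contain `evaluator_id`. The `score` values, now exposed as `@property` methods, are recomputed on each access instead of being stored and mutated in place.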