fix: show dismissed PR review findings in UI instead of silently dropping them #1852
Changes from all commits: d05ffd5, 900a968, 94c1420, d5ca83e
```diff
@@ -1289,12 +1289,30 @@ async def review(self, context: PRContext) -> PRReviewResult:
             f"{len(filtered_findings)} filtered"
         )
 
-        # No confidence routing - validation is binary via finding-validator
-        unique_findings = validated_findings
-        logger.info(f"[PRReview] Final findings: {len(unique_findings)} validated")
+        # Separate active findings (drive verdict) from dismissed (shown in UI only)
+        active_findings = []
+        dismissed_findings = []
+        for f in validated_findings:
+            if f.validation_status == "dismissed_false_positive":
+                dismissed_findings.append(f)
+            else:
+                active_findings.append(f)
+
+        safe_print(
+            f"[ParallelOrchestrator] Final: {len(active_findings)} active, "
+            f"{len(dismissed_findings)} disputed by validator",
+            flush=True,
+        )
+        logger.info(
+            f"[PRReview] Final findings: {len(active_findings)} active, "
+            f"{len(dismissed_findings)} disputed"
+        )
+
+        # All findings (active + dismissed) go in the result for UI display
+        all_review_findings = validated_findings
         logger.info(
-            f"[ParallelOrchestrator] Review complete: {len(unique_findings)} findings"
+            f"[ParallelOrchestrator] Review complete: {len(all_review_findings)} findings "
+            f"({len(active_findings)} active, {len(dismissed_findings)} disputed)"
         )
```
coderabbitai[bot] marked this conversation as resolved.
```diff
 
         # Fetch CI status for verdict consideration
 
@@ -1304,9 +1322,9 @@ async def review(self, context: PRContext) -> PRReviewResult:
             f"{ci_status.get('failing', 0)} failing, {ci_status.get('pending', 0)} pending"
         )
 
-        # Generate verdict (includes merge conflict check, branch-behind check, and CI status)
+        # Generate verdict from ACTIVE findings only (dismissed don't affect verdict)
         verdict, verdict_reasoning, blockers = self._generate_verdict(
-            unique_findings,
+            active_findings,
             has_merge_conflicts=context.has_merge_conflicts,
             merge_state_status=context.merge_state_status,
             ci_status=ci_status,
```
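To make the new control flow concrete, here is a minimal standalone sketch. The `Finding` stand-in and `split_findings` helper below are invented for illustration; the repository's real finding model and orchestrator have more fields and logic.

```python
from dataclasses import dataclass


@dataclass
class Finding:
    """Illustrative stand-in for the real finding model."""
    title: str
    validation_status: str = "confirmed_valid"


def split_findings(validated: list[Finding]) -> tuple[list[Finding], list[Finding]]:
    """Active findings drive the verdict; dismissed ones are kept only for UI display."""
    active, dismissed = [], []
    for f in validated:
        if f.validation_status == "dismissed_false_positive":
            dismissed.append(f)
        else:
            active.append(f)
    return active, dismissed


validated = [
    Finding("Real bug"),
    Finding("False positive", validation_status="dismissed_false_positive"),
]
active, dismissed = split_findings(validated)
assert len(active) == 1 and len(dismissed) == 1
# The verdict is generated from `active` only, while the full `validated`
# list is what ends up in the result object shown in the UI.
```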
```diff
@@ -1317,7 +1335,7 @@ async def review(self, context: PRContext) -> PRReviewResult:
             verdict=verdict,
             verdict_reasoning=verdict_reasoning,
             blockers=blockers,
-            findings=unique_findings,
+            findings=all_review_findings,
             agents_invoked=agents_invoked,
         )
```
```diff
@@ -1362,7 +1380,7 @@ async def review(self, context: PRContext) -> PRReviewResult:
             pr_number=context.pr_number,
             repo=self.config.repo,
             success=True,
-            findings=unique_findings,
+            findings=all_review_findings,
             summary=summary,
             overall_status=overall_status,
             verdict=verdict,
```
```diff
@@ -1937,12 +1955,38 @@ async def _validate_findings(
                 validated_findings.append(finding)
 
             elif validation.validation_status == "dismissed_false_positive":
-                # Dismiss - do not include
-                dismissed_count += 1
-                logger.info(
-                    f"[PRReview] Dismissed {finding.id} as false positive: "
-                    f"{validation.explanation[:100]}"
-                )
+                # Protect cross-validated findings from dismissal —
+                # if multiple specialists independently found the same issue,
+                # a single validator should not override that consensus
+                if finding.cross_validated:
+                    finding.validation_status = "confirmed_valid"
+                    finding.validation_evidence = validation.code_evidence
+                    finding.validation_explanation = (
+                        f"[Auto-kept: cross-validated by {len(finding.source_agents)} agents] "
+                        f"{validation.explanation}"
+                    )
+                    validated_findings.append(finding)
+                    safe_print(
+                        f"[FindingValidator] Kept cross-validated finding '{finding.title}' "
+                        f"despite dismissal (agents={finding.source_agents})",
+                        flush=True,
+                    )
+                else:
+                    # Keep finding but mark as dismissed (user can see it in UI)
+                    finding.validation_status = "dismissed_false_positive"
+                    finding.validation_evidence = validation.code_evidence
+                    finding.validation_explanation = validation.explanation
+                    validated_findings.append(finding)
+                    dismissed_count += 1
+                    safe_print(
+                        f"[FindingValidator] Disputed '{finding.title}': "
+                        f"{validation.explanation} (file={finding.file}:{finding.line})",
+                        flush=True,
+                    )
+                    logger.info(
+                        f"[PRReview] Disputed {finding.id}: "
+                        f"{validation.explanation[:200]}"
+                    )
```
Comment on lines 1957 to +1989
Contributor
Cross-validation protection and dismissed-finding retention look correct. Two downstream issues to address:

Proposed fixes

Fix 1 — Accurate log at line 2000:

```diff
 logger.info(
-    f"[PRReview] Validation complete: {len(validated_findings)} valid, "
+    f"[PRReview] Validation complete: {len(validated_findings)} total "
+    f"({len(validated_findings) - dismissed_count} confirmed, "
     f"{dismissed_count} dismissed, {needs_human_count} need human review"
 )
```

Fix 2 — Add label mapping at line 2205:

```diff
 status_label = {
     "confirmed_valid": "Confirmed",
+    "dismissed_false_positive": "Disputed by Validator",
     "needs_human_review": "Needs human review",
 }.get(f.validation_status, f.validation_status)
```
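For context on Fix 2: the existing mapping falls back to the raw status string via `.get()`, so without the added entry a dismissed finding would be labelled with the internal status name in the rendered summary. A standalone illustration (not the repository's code):

```python
# Without the "dismissed_false_positive" entry, .get() falls back to the raw status string.
status_label = {
    "confirmed_valid": "Confirmed",
    "needs_human_review": "Needs human review",
}.get("dismissed_false_positive", "dismissed_false_positive")

print(status_label)  # prints: dismissed_false_positive (internal status leaks to users)
```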
```diff
 
             elif validation.validation_status == "needs_human_review":
                 # Keep but flag
```
```diff
@@ -2127,11 +2171,16 @@ def _generate_summary(
             sev = f.severity.value
             emoji = severity_emoji.get(sev, "⚪")
 
+            is_disputed = f.validation_status == "dismissed_false_positive"
+
             # Finding header with location
             line_range = f"L{f.line}"
             if f.end_line and f.end_line != f.line:
                 line_range = f"L{f.line}-L{f.end_line}"
-            lines.append(f"#### {emoji} [{sev.upper()}] {f.title}")
+            if is_disputed:
+                lines.append(f"#### ⚪ [DISPUTED] ~~{f.title}~~")
+            else:
+                lines.append(f"#### {emoji} [{sev.upper()}] {f.title}")
             lines.append(f"**File:** `{f.file}` ({line_range})")
 
             # Cross-validation badge
```
```diff
@@ -2161,6 +2210,7 @@ def _generate_summary(
                 status_label = {
                     "confirmed_valid": "Confirmed",
                     "needs_human_review": "Needs human review",
+                    "dismissed_false_positive": "Disputed by validator",
                 }.get(f.validation_status, f.validation_status)
                 lines.append("")
                 lines.append(f"**Validation:** {status_label}")
```
```diff
@@ -2182,18 +2232,27 @@ def _generate_summary(
 
         lines.append("")
 
-        # Findings count summary
+        # Findings count summary (exclude dismissed from active count)
+        active_count = 0
+        dismissed_count = 0
         by_severity: dict[str, int] = {}
         for f in findings:
+            if f.validation_status == "dismissed_false_positive":
+                dismissed_count += 1
+                continue
+            active_count += 1
             sev = f.severity.value
             by_severity[sev] = by_severity.get(sev, 0) + 1
         summary_parts = []
         for sev in ["critical", "high", "medium", "low"]:
             if sev in by_severity:
                 summary_parts.append(f"{by_severity[sev]} {sev}")
-        lines.append(
-            f"**Total:** {len(findings)} finding(s) ({', '.join(summary_parts)})"
+        count_text = (
+            f"**Total:** {active_count} finding(s) ({', '.join(summary_parts)})"
         )
+        if dismissed_count > 0:
+            count_text += f" + {dismissed_count} disputed"
+        lines.append(count_text)
         lines.append("")
 
         lines.append("---")
```
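For a sense of the user-visible result, here is a hypothetical fragment of the generated summary. The finding title, file path, line, and counts are invented; the header, validation label, and total-line formats follow the `_generate_summary` changes above, and intervening detail lines are elided with "...".

```markdown
#### ⚪ [DISPUTED] ~~Possible race condition in cache refresh~~
**File:** `reviewer/cache.py` (L88)
...
**Validation:** Disputed by validator
...
**Total:** 1 finding(s) (1 high) + 1 disputed
```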
🧹 Nitpick | 🔵 Trivial
Consider extracting `"dismissed_false_positive"` to a constant.

The string `"dismissed_false_positive"` appears in 5 locations across this file (lines 1296, 1976, 2174, 2210, 2240). A single constant (e.g., `VALIDATION_DISMISSED = "dismissed_false_positive"`) would eliminate the risk of typo-induced bugs and make future status changes easier.

Suggested approach (one possible shape is sketched below):
Then replace all bare string comparisons with the constants.
Also applies to: 2174-2174, 2240-2240