import json

from dojo.models import Finding


class GithubSecretsDetectionReportParser:
    def get_scan_types(self):
        return ["Github Secrets Detection Report Scan"]

    def get_label_for_scan_types(self, scan_type):
        return "Github Secrets Detection Report Scan"

    def get_description_for_scan_types(self, scan_type):
        return "GitHub Secrets Detection reports can be imported in JSON format."

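    # The report is expected to be a JSON list of GitHub secret scanning alert
    # objects (for example, the response of GitHub's secret scanning alerts
    # REST API). Only the fields read below are used: number, state,
    # secret_type, secret_type_display_name, html_url, repository.full_name,
    # first_location_detected (path, start_line, end_line), resolution,
    # resolved_by, resolved_at, resolution_comment,
    # push_protection_bypassed(_by, _at), validity, publicly_leaked,
    # multi_repo and has_more_locations; any other fields are ignored.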
    def get_findings(self, file, test):
        data = json.load(file)

        if not isinstance(data, list):
            error_msg = "Invalid GitHub secrets detection report format, expected a JSON list of alerts."
            raise TypeError(error_msg)

        findings = []
        for alert in data:
            # Extract basic alert information
            alert_number = alert.get("number")
            state = alert.get("state", "open")
            secret_type = alert.get("secret_type", "Unknown")
            secret_type_display_name = alert.get("secret_type_display_name", secret_type)
            html_url = alert.get("html_url", "")

            # Create title
            title = f"Exposed Secret Detected: {secret_type_display_name}"

            # Build description
            desc_lines = []
            if html_url:
                desc_lines.append(f"**GitHub Alert**: [{html_url}]({html_url})")

            desc_lines.extend([f"**Secret Type**: {secret_type_display_name}", f"**Alert State**: {state}"])

            # Add repository information
            repository = alert.get("repository", {})
            if repository:
                repo_full_name = repository.get("full_name")
                if repo_full_name:
                    desc_lines.append(f"**Repository**: {repo_full_name}")

            # Add location information
            first_location = alert.get("first_location_detected", {})
            if first_location:
                file_path = first_location.get("path")
                start_line = first_location.get("start_line")
                end_line = first_location.get("end_line")

                if file_path:
                    desc_lines.append(f"**File Path**: {file_path}")
                if start_line:
                    if end_line and end_line != start_line:
                        desc_lines.append(f"**Lines**: {start_line}-{end_line}")
                    else:
                        desc_lines.append(f"**Line**: {start_line}")

            # Add resolution information
            resolution = alert.get("resolution")
            if resolution:
                desc_lines.append(f"**Resolution**: {resolution}")

            resolved_by = alert.get("resolved_by")
            if resolved_by:
                resolved_by_login = resolved_by.get("login", "Unknown")
                desc_lines.append(f"**Resolved By**: {resolved_by_login}")

            resolved_at = alert.get("resolved_at")
            if resolved_at:
                desc_lines.append(f"**Resolved At**: {resolved_at}")

            resolution_comment = alert.get("resolution_comment")
            if resolution_comment:
                desc_lines.append(f"**Resolution Comment**: {resolution_comment}")

            # Add push protection information
            push_protection_bypassed = alert.get("push_protection_bypassed", False)
            if push_protection_bypassed:
                desc_lines.append("**Push Protection Bypassed**: True")

                bypassed_by = alert.get("push_protection_bypassed_by")
                if bypassed_by:
                    bypassed_by_login = bypassed_by.get("login", "Unknown")
                    desc_lines.append(f"**Bypassed By**: {bypassed_by_login}")

                bypassed_at = alert.get("push_protection_bypassed_at")
                if bypassed_at:
                    desc_lines.append(f"**Bypassed At**: {bypassed_at}")
            else:
                desc_lines.append("**Push Protection Bypassed**: False")

            # Add additional metadata
            validity = alert.get("validity", "unknown")
            desc_lines.append(f"**Validity**: {validity}")

            publicly_leaked = alert.get("publicly_leaked", False)
            desc_lines.append(f"**Publicly Leaked**: {'Yes' if publicly_leaked else 'No'}")

            multi_repo = alert.get("multi_repo", False)
            desc_lines.append(f"**Multi-Repository**: {'Yes' if multi_repo else 'No'}")

            has_more_locations = alert.get("has_more_locations", False)
            if has_more_locations:
                desc_lines.append("**Note**: This secret has been detected in multiple locations")

            description = "\n\n".join(desc_lines)

            # Determine severity based on state and other factors
            if state == "resolved":
                severity = "Info"
            elif validity == "active" and publicly_leaked:
                severity = "Critical"
            elif validity == "active":
                severity = "High"
            else:
                severity = "Medium"

            # Create finding
            finding = Finding(
                title=title,
                test=test,
                description=description,
                severity=severity,
                static_finding=True,
                dynamic_finding=False,
                vuln_id_from_tool=str(alert_number) if alert_number is not None else None,
            )

            # Set file path and line information
            if first_location:
                finding.file_path = first_location.get("path")
                finding.line = first_location.get("start_line")

            # Set external URL
            if html_url:
                finding.url = html_url

            findings.append(finding)

        return findings
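

# Illustrative usage sketch: never called by DefectDojo itself, it only
# demonstrates how the severity mapping above behaves. `test=None` stands in
# for a dojo.models.Test instance and the alert values are invented for the
# example; running it assumes a configured DefectDojo/Django environment so
# that dojo.models imports cleanly.
def _example_usage():
    import io

    alerts = [
        {
            # Active and publicly leaked secret -> Critical
            "number": 1,
            "state": "open",
            "secret_type": "github_personal_access_token",
            "secret_type_display_name": "GitHub Personal Access Token",
            "html_url": "https://github.com/owner/repo/security/secret-scanning/1",
            "repository": {"full_name": "owner/repo"},
            "first_location_detected": {"path": "app/settings.py", "start_line": 12, "end_line": 12},
            "validity": "active",
            "publicly_leaked": True,
        },
        {
            # Resolved alert -> Info
            "number": 2,
            "state": "resolved",
            "secret_type_display_name": "Slack API Token",
            "resolution": "revoked",
        },
    ]
    parser = GithubSecretsDetectionReportParser()
    findings = parser.get_findings(io.StringIO(json.dumps(alerts)), test=None)
    assert [f.severity for f in findings] == ["Critical", "Info"]
    assert findings[0].file_path == "app/settings.py"
    assert findings[1].vuln_id_from_tool == "2"
    return findings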