-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathclean_boilerplate.py
More file actions
217 lines (180 loc) · 7.46 KB
/
clean_boilerplate.py
File metadata and controls
217 lines (180 loc) · 7.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
"""
Boilerplate cleaner - strips noise from job listing markdown files.
Removes:
1. Entire "## AI Analysis" section (Copilot reasoning, match scores)
2. Entire "## Interview Insights" section (resume match data)
3. "DESCRIPTION\nAbout the job" and standalone "About the job" headers
4. Lines matching boilerplate patterns (EEO, benefits, legal, etc.)
5. LinkedIn redirect URLs
6. Code fence markers (``` lines)
7. Separator lines (-------)
8. "REQUIREMENTS" label + standalone "N/A" lines
9. Leading whitespace artifacts from HTML scraping
10. Excessive blank lines left behind
Usage:
python clean_boilerplate.py # Clean jobs/jobs-no-boilerplate/
python clean_boilerplate.py --target-dir jobs/jobs-structured # Clean a specific directory
python clean_boilerplate.py --dry-run # Preview without writing
python clean_boilerplate.py --limit 10 # Process first 10 files only
"""
import argparse
import os
import re
import glob
from datetime import datetime
# Directory this script lives in; relative --target-dir paths are resolved from here,
# not from the caller's CWD.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Default directory cleaned when --target-dir is not supplied.
DEFAULT_TARGET = os.path.join(SCRIPT_DIR, "jobs", "jobs-no-boilerplate")
# Line-level boilerplate patterns (case-insensitive substring match).
# These are safe for single-line matching - specific enough to not hit real job content.
# Derived from BOILERPLATE_PATTERNS in rag_pipeline.py but filtered for line-level safety.
# NOTE: matched as plain substrings (not regexes) against each lowercased line.
LINE_PATTERNS = [
    # EEO / legal
    "sexual orientation", "national origin", "equal opportunity", "gender identity",
    "race, color", "without regard to", "religion, sex", "marital status",
    "protected veteran", "reasonable accommodation", "genetic information",
    "affirmative action", "we do not discriminate", "work authorization",
    "background check", "e-verify", "right to work", "accommodations due to",
    "americans with disabilities", "employment eligibility", "eeoc",
    # Benefits boilerplate
    "401k", "401(k)", "health insurance", "paid time off", "dental and vision",
    "benefits include", "compensation range", "pay range", "base salary",
    "salary range",
    # Pipeline artifacts
    "ai reasoning", "copilot:", "[copilot:", "weak_match", "strong_match",
    "gpt-4.1]",
]
def remove_sections(content):
    """Remove the '## AI Analysis' and '## Interview Insights' sections entirely.

    Each section spans from its '## <title>' header line up to (but not
    including) the next '## ' header, or to the end of the document.

    Fix: the original patterns required a newline *before* the header, so a
    section that started on the very first line of the file was never removed.
    '(?:^|\\n)' also anchors at the start of the string.

    Args:
        content: Full markdown text of a job listing.
    Returns:
        The text with both sections stripped.
    """
    for title in ("AI Analysis", "Interview Insights"):
        content = re.sub(
            # Lazy '.*?' plus the lookahead stops the match at the next
            # '## ' header without consuming it.
            r'(?:^|\n)## ' + re.escape(title) + r'\n.*?(?=\n## |\Z)',
            '',
            content,
            flags=re.DOTALL,
        )
    return content
def remove_description_header(content):
    """Strip scraper header noise: the 'DESCRIPTION\\nAbout the job' pair and
    any standalone 'About the job' line (the line's text is blanked; the
    newline itself survives for the blank-line collapse pass)."""
    without_pair = re.sub(r'DESCRIPTION\s*\nAbout the job\s*\n', '', content)
    return re.sub(r'^About the job\s*$', '', without_pair, flags=re.MULTILINE)
def remove_structural_noise(content):
    """Remove code fences, separators, REQUIREMENTS label, standalone N/A.

    Fix: the original only dropped bare '```' lines, so fence openers that
    carry an info string (e.g. '```text') slipped through and left stray
    fence lines in the output; startswith('```') catches both forms.

    Args:
        content: Full markdown text.
    Returns:
        The text with structural-noise lines removed.
    """
    kept = []
    for line in content.split('\n'):
        stripped = line.strip()
        # Code fence markers, with or without a language tag.
        if stripped.startswith('```'):
            continue
        # Separator lines made of 5+ dashes only (4 or fewer may be content).
        if re.match(r'^-{5,}$', stripped):
            continue
        # Standalone labels left behind by the extractor.
        if stripped in ('REQUIREMENTS', 'N/A'):
            continue
        kept.append(line)
    return '\n'.join(kept)
def strip_leading_whitespace(content):
    """Drop excessive leading whitespace left by HTML scraping.

    Lines indented more than 4 columns are fully dedented; lines with up to
    4 leading whitespace characters (markdown list indentation) and
    whitespace-only lines are passed through untouched.
    """
    result = []
    for raw in content.split('\n'):
        body = raw.lstrip()
        # len difference = number of leading whitespace chars on this line.
        if body and len(raw) - len(body) > 4:
            result.append(body)
        else:
            result.append(raw)
    return '\n'.join(result)
def remove_linkedin_urls(content):
    """Drop every line that contains a LinkedIn redirect URL (case-insensitive)."""
    survivors = (
        line for line in content.split('\n')
        if 'linkedin.com/redir' not in line.lower()
    )
    return '\n'.join(survivors)
def remove_boilerplate_lines(content):
    """Drop lines containing any LINE_PATTERNS substring (case-insensitive).

    Blank lines are always kept so paragraph structure survives for the
    later blank-line collapse pass.
    """
    survivors = []
    for line in content.split('\n'):
        probe = line.lower().strip()
        if probe and any(pattern in probe for pattern in LINE_PATTERNS):
            continue
        survivors.append(line)
    return '\n'.join(survivors)
def collapse_blank_lines(content):
    """Collapse 3+ consecutive blank lines down to 2 (i.e. at most 3 newlines in a row)."""
    blank_run = re.compile(r'\n{4,}')
    return blank_run.sub('\n\n\n', content)
def clean_file(filepath, dry_run=False):
    """Clean a single markdown file. Returns (original_chars, cleaned_chars, changed).

    Runs every cleaning pass over the file's text, trims trailing whitespace,
    and rewrites the file in place only when the text actually changed and
    dry_run is False.
    """
    with open(filepath, 'r', encoding='utf-8') as fh:
        before = fh.read()

    # Order matters: section/header removal first, line filters next,
    # blank-line collapse last to tidy whatever the filters left behind.
    passes = (
        remove_sections,
        remove_description_header,
        remove_structural_noise,
        strip_leading_whitespace,
        remove_linkedin_urls,
        remove_boilerplate_lines,
        collapse_blank_lines,
    )
    after = before
    for transform in passes:
        after = transform(after)
    after = after.rstrip() + '\n'

    modified = after != before
    if modified and not dry_run:
        with open(filepath, 'w', encoding='utf-8') as fh:
            fh.write(after)
    return len(before), len(after), modified
def main():
    """CLI entry point: resolve the target directory, clean each .md file, report totals."""
    parser = argparse.ArgumentParser(description="Strip boilerplate from job markdown files")
    parser.add_argument("--target-dir", type=str, default=None, help="Directory to clean (default: jobs/jobs-no-boilerplate)")
    parser.add_argument("--dry-run", action="store_true", help="Preview changes without writing")
    parser.add_argument("--limit", type=int, default=0, help="Process first N files (0 = all)")
    args = parser.parse_args()

    if args.target_dir:
        target = args.target_dir
        # Relative paths resolve against the script location, not the CWD.
        if not os.path.isabs(target):
            target = os.path.join(SCRIPT_DIR, target)
    else:
        target = DEFAULT_TARGET

    if not os.path.isdir(target):
        print(f"Target directory not found: {target}")
        return

    files = sorted(glob.glob(os.path.join(target, "*.md")))
    if args.limit > 0:
        files = files[:args.limit]

    ts = datetime.now().strftime("%H:%M:%S")
    mode = "DRY RUN" if args.dry_run else "CLEANING"
    print(f"{ts} [{mode}] {len(files)} files in {os.path.basename(target)}/")

    total_original = total_cleaned = changed_count = 0
    for filepath in files:
        orig, clean, changed = clean_file(filepath, dry_run=args.dry_run)
        total_original += orig
        total_cleaned += clean
        changed_count += changed  # bool counts as 0/1

    removed = total_original - total_cleaned
    pct = (removed / total_original * 100) if total_original > 0 else 0
    ts = datetime.now().strftime("%H:%M:%S")
    print(f"{ts} [DONE] {changed_count}/{len(files)} files modified")
    print(f"{ts} [DONE] {total_original:,} -> {total_cleaned:,} chars ({removed:,} removed, {pct:.1f}%)")


if __name__ == "__main__":
    main()