Skip to content

Commit

Permalink
hashed values in report
Browse files Browse the repository at this point in the history
  • Loading branch information
babenek committed Jul 12, 2024
1 parent 97cdc6e commit 68ea464
Show file tree
Hide file tree
Showing 13 changed files with 3,589 additions and 3,534 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/benchmark.yml
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ jobs:
- name: Run CredSweeper tool
run: |
credsweeper --banner --jobs $(nproc) --path data --save-json report.${{ github.event.pull_request.head.sha }}.json | tee credsweeper.${{ github.event.pull_request.head.sha }}.log
credsweeper --banner --jobs $(nproc) --path data --hashed --save-json report.${{ github.event.pull_request.head.sha }}.json | tee credsweeper.${{ github.event.pull_request.head.sha }}.log
- name: Run Benchmark
run: |
Expand Down Expand Up @@ -426,7 +426,7 @@ jobs:
# crc32 should be changed
python -m credsweeper --banner
# run quick scan
python -m credsweeper --log debug --path ../tests/samples --save-json
python -m credsweeper --log debug --path ../tests/samples --hashed --save-json
NEW_MODEL_FOUND_SAMPLES=$(jq '.|length' output.json)
if [ 100 -gt ${NEW_MODEL_FOUND_SAMPLES} ]; then
echo "Failure: found ${NEW_MODEL_FOUND_SAMPLES} credentials"
Expand Down
10 changes: 10 additions & 0 deletions credsweeper/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,6 +215,14 @@ def get_arguments() -> Namespace:
const="output.xlsx",
dest="xlsx_filename",
metavar="PATH")
parser.add_argument("--subtext",
help="only part of text will be outputted",
action="store_const",
const=True)
parser.add_argument("--hashed",
help="line, variable, value will be hashed in output",
action="store_const",
const=True)
parser.add_argument("--sort", help="enable output sorting", dest="sort_output", action="store_true")
parser.add_argument("--log",
"-l",
Expand Down Expand Up @@ -282,6 +290,8 @@ def scan(args: Namespace, content_provider: AbstractProvider, json_filename: Opt
api_validation=args.api_validation,
json_filename=json_filename,
xlsx_filename=xlsx_filename,
subtext=args.subtext,
hashed=args.hashed,
sort_output=args.sort_output,
use_filters=args.no_filters,
pool_count=args.jobs,
Expand Down
12 changes: 9 additions & 3 deletions credsweeper/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,8 @@ def __init__(self,
api_validation: bool = False,
json_filename: Union[None, str, Path] = None,
xlsx_filename: Union[None, str, Path] = None,
subtext: bool = False,
hashed: bool = False,
sort_output: bool = False,
use_filters: bool = True,
pool_count: int = 1,
Expand Down Expand Up @@ -70,6 +72,8 @@ def __init__(self,
to json
xlsx_filename: optional string variable, path to save result
to xlsx
subtext: use subtext of line near value, as is done in ML
hashed: use hash of line, value and variable instead of plain text
use_filters: boolean variable, specifying the need of rule filters
pool_count: int value, number of parallel processes to use
ml_batch_size: int value, size of the batch for model inference
Expand Down Expand Up @@ -104,6 +108,8 @@ def __init__(self,
self.credential_manager = CredentialManager()
self.json_filename: Union[None, str, Path] = json_filename
self.xlsx_filename: Union[None, str, Path] = xlsx_filename
self.subtext = subtext
self.hashed = hashed
self.sort_output = sort_output
self.ml_batch_size = ml_batch_size if ml_batch_size and 0 < ml_batch_size else 16
self.ml_threshold = ml_threshold
Expand Down Expand Up @@ -405,16 +411,16 @@ def export_results(self) -> None:

if self.json_filename:
is_exported = True
Util.json_dump([credential.to_json() for credential in credentials], file_path=self.json_filename)
Util.json_dump([credential.to_json(subtext=self.subtext,hashed=self.hashed) for credential in credentials], file_path=self.json_filename)

if self.xlsx_filename:
is_exported = True
data_list = []
for credential in credentials:
data_list.extend(credential.to_dict_list())
data_list.extend(credential.to_dict_list(subtext=self.subtext,hashed=self.hashed))
df = pd.DataFrame(data=data_list)
df.to_excel(self.xlsx_filename, index=False)

if is_exported is False:
for credential in credentials:
print(credential)
print(credential.to_str(subtext=self.subtext,hashed=self.hashed))
19 changes: 13 additions & 6 deletions credsweeper/credentials/candidate.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,15 +88,21 @@ def is_api_validation_available(self) -> bool:
"""
return len(self.validations) > 0

def __str__(self) -> str:
def to_str(self, subtext: bool = False, hashed: bool = False) -> str:
    """Represent the candidate as a single human-readable line.

    Args:
        subtext: passed through to LineData.to_str - shorten line text around the value
        hashed: passed through to LineData.to_str - replace sensitive text with its hash

    Return:
        string with rule name, severity, confidence, line data and validation states
    """
    # hoist the per-line-data rendering; generator avoids building an intermediate list
    line_data_text = ", ".join(x.to_str(subtext, hashed) for x in self.line_data_list)
    return f"rule: {self.rule_name}" \
           f" | severity: {self.severity.value}" \
           f" | confidence: {self.confidence.value}" \
           f" | line_data_list: [{line_data_text}]" \
           f" | api_validation: {self.api_validation.name}" \
           f" | ml_validation: {self.ml_validation.name}"

def to_json(self) -> Dict:
def __str__(self) -> str:
    # Default human-readable form: full text, no subtext trimming, no hashing.
    return self.to_str()

def __repr__(self) -> str:
    # Debug form: trim long lines to the subtext window around the value.
    return self.to_str(subtext=True)

def to_json(self, subtext: bool, hashed: bool) -> Dict:
"""Convert credential candidate object to dictionary.
Return:
Expand All @@ -113,23 +119,24 @@ def to_json(self) -> Dict:
"confidence": self.confidence.value,
"use_ml": self.use_ml,
# put the array to end to make json more readable
"line_data_list": [line_data.to_json() for line_data in self.line_data_list],
"line_data_list": [line_data.to_json(subtext=subtext, hashed=hashed) for line_data in
self.line_data_list],
}
if self.config is not None:
reported_output = {k: v for k, v in full_output.items() if k in self.config.candidate_output}
else:
reported_output = full_output
return reported_output

def to_dict_list(self) -> List[dict]:
def to_dict_list(self, subtext: bool, hashed: bool) -> List[dict]:
"""Convert credential candidate object to List[dict].
Return:
List[dict] object generated from current credential candidate
"""
reported_output = []
json_output = self.to_json()
json_output = self.to_json(subtext, hashed)
refined_data = copy.deepcopy(json_output)
del refined_data["line_data_list"]
for line_data in json_output["line_data_list"]:
Expand Down
35 changes: 26 additions & 9 deletions credsweeper/credentials/line_data.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
import contextlib
import hashlib
import re
import string
from functools import cached_property
from typing import Any, Dict, Optional, Tuple

from credsweeper.common.constants import MAX_LINE_LENGTH
from credsweeper.common.constants import MAX_LINE_LENGTH, ML_HUNK, UTF_8
from credsweeper.config import Config
from credsweeper.utils import Util
from credsweeper.utils.entropy_validator import EntropyValidator
Expand Down Expand Up @@ -282,11 +283,26 @@ def is_source_file_with_quotes(self) -> bool:
return True
return False

def __repr__(self) -> str:
return f"line: '{self.line}' | line_num: {self.line_num} | path: {self.path}" \
f" | value: '{self.value}' | entropy_validation: {EntropyValidator(self.value)}"
@staticmethod
def get_subtext_or_hash(text: Optional[str], pos: int, subtext: bool, hashed: bool) -> Optional[str]:
text = Util.subtext(text, pos, ML_HUNK) if subtext and text is not None else text
if hashed:
text = hashlib.sha256(text.encode(UTF_8, errors="replace")).hexdigest() if text is not None else None
return text

def to_json(self) -> Dict:
def to_str(self, subtext: bool = False, hashed: bool = False) -> str:
    """Build a one-line representation of the line data, honoring subtext/hashed output modes."""
    # line text is windowed around the found value; the value itself is windowed from its start
    shown_line = self.get_subtext_or_hash(self.line, self.value_start, subtext, hashed)
    shown_value = self.get_subtext_or_hash(self.value, 0, subtext, hashed)
    return (f"line: '{shown_line}'"
            f" | line_num: {self.line_num} | path: {self.path}"
            f" | value: '{shown_value}'"
            f" | entropy_validation: {EntropyValidator(self.value)}")

def __str__(self) -> str:
    # Default human-readable form: full text, no subtext trimming, no hashing.
    return self.to_str()

def __repr__(self) -> str:
    # Debug form: trim long lines to the subtext window around the value.
    return self.to_str(subtext=True)

def to_json(self, subtext: bool, hashed: bool) -> Dict:
"""Convert line data object to dictionary.
Return:
Expand All @@ -295,18 +311,19 @@ def to_json(self) -> Dict:
"""
full_output = {
"key": self.key,
"line": self.line,
"line": self.get_subtext_or_hash(self.line, self.value_start, subtext, hashed),
"line_num": self.line_num,
"path": self.path,
"info": self.info,
# info may contain variable name - so let it be hashed if requested
"info": self.get_subtext_or_hash(self.info, -1, False, hashed) if self.info else self.info,
"pattern": self.pattern.pattern,
"separator": self.separator,
"separator_start": self.separator_start,
"separator_end": self.separator_end,
"value": self.value,
"value": self.get_subtext_or_hash(self.value, 0, subtext, hashed),
"value_start": self.value_start,
"value_end": self.value_end,
"variable": self.variable,
"variable": self.get_subtext_or_hash(self.variable, 0, subtext, hashed),
"variable_start": self.variable_start,
"variable_end": self.variable_end,
"value_leftquote": self.value_leftquote,
Expand Down
12 changes: 6 additions & 6 deletions credsweeper/utils/pem_key_detector.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,13 +59,13 @@ def detect_pem_key(cls, config: Config, target: AnalysisTarget) -> List[LineData
# replace escaped line ends with real and process them - PEM does not contain '\' sign
while "\\\\" in line:
line = line.replace("\\\\", "\\")
sublines = line.replace("\\r", '\n').replace("\\n", '\n').splitlines()
for subline in sublines:
if begin_pattern_not_passed or cls.is_leading_config_line(subline):
if PEM_BEGIN_PATTERN in subline:
subtexts = line.replace("\\r", '\n').replace("\\n", '\n').splitlines()
for subtext in subtexts:
if begin_pattern_not_passed or cls.is_leading_config_line(subtext):
if PEM_BEGIN_PATTERN in subtext:
begin_pattern_not_passed = False
continue
elif PEM_END_PATTERN in subline:
elif PEM_END_PATTERN in subtext:
if "PGP" in target.line_strip:
# Check if entropy is high enough for base64 set with padding sign
entropy_validator = EntropyValidator(key_data, Chars.BASE64_CHARS)
Expand All @@ -90,7 +90,7 @@ def detect_pem_key(cls, config: Config, target: AnalysisTarget) -> List[LineData
logger.debug("Filtered with non asn1 '%s'", key_data)
return []
else:
sanitized_line = cls.sanitize_line(subline)
sanitized_line = cls.sanitize_line(subtext)
# PEM key line should not contain spaces or . (and especially not ...)
for i in sanitized_line:
if i not in cls.base64set:
Expand Down
4 changes: 3 additions & 1 deletion docs/source/guide.rst
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ Get all argument list:
usage: python -m credsweeper [-h] (--path PATH [PATH ...] | --diff_path PATH [PATH ...] | --export_config [PATH] | --export_log_config [PATH]) [--rules [PATH]] [--severity SEVERITY] [--config [PATH]]
[--log_config [PATH]] [--denylist PATH] [--find-by-ext] [--depth POSITIVE_INT] [--no-filters] [--doc] [--ml_threshold FLOAT_OR_STR] [--ml_batch_size POSITIVE_INT]
[--azure | --cuda] [--api_validation] [--jobs POSITIVE_INT] [--skip_ignored] [--save-json [PATH]] [--save-xlsx [PATH]] [--sort] [--log LOG_LEVEL] [--size_limit SIZE_LIMIT]
[--azure | --cuda] [--api_validation] [--jobs POSITIVE_INT] [--skip_ignored] [--save-json [PATH]] [--save-xlsx [PATH]] [--subtext] [--hashed] [--sort] [--log LOG_LEVEL] [--size_limit SIZE_LIMIT]
[--banner] [--version]
options:
-h, --help show this help message and exit
Expand Down Expand Up @@ -49,6 +49,8 @@ Get all argument list:
--skip_ignored parse .gitignore files and skip credentials from ignored objects
--save-json [PATH] save result to json file (default: output.json)
--save-xlsx [PATH] save result to xlsx file (default: output.xlsx)
--subtext only part of text will be outputted
--hashed line, variable, value will be hashed in output
--sort enable output sorting
--log LOG_LEVEL, -l LOG_LEVEL
provide logging level of ['DEBUG', 'INFO', 'WARN', 'WARNING', 'ERROR', 'FATAL', 'CRITICAL', 'SILENCE'](default: 'warning', case insensitive)
Expand Down
7 changes: 6 additions & 1 deletion tests/data/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from typing import Dict, Any, List

from tests import SAMPLES_POST_CRED_COUNT, SAMPLES_IN_DEEP_3, SAMPLES_CRED_COUNT, SAMPLES_IN_DOC, NEGLIGIBLE_ML_THRESHOLD
from tests import SAMPLES_POST_CRED_COUNT, SAMPLES_IN_DEEP_3, SAMPLES_CRED_COUNT, SAMPLES_IN_DOC, \
NEGLIGIBLE_ML_THRESHOLD

DATA_TEST_CFG: List[Dict[str, Any]] = [{
"__cred_count": SAMPLES_POST_CRED_COUNT,
Expand All @@ -9,16 +10,20 @@
}, {
"__cred_count": SAMPLES_CRED_COUNT,
"sort_output": True,
"subtext": True,
"hashed": True,
"json_filename": "ml_threshold.json",
"ml_threshold": NEGLIGIBLE_ML_THRESHOLD
}, {
"__cred_count": SAMPLES_IN_DOC,
"sort_output": True,
"hashed": True,
"json_filename": "doc.json",
"doc": True
}, {
"__cred_count": SAMPLES_IN_DEEP_3,
"sort_output": True,
"subtext": True,
"json_filename": "depth_3.json",
"depth": 3
}]
Loading

0 comments on commit 68ea464

Please sign in to comment.