Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix typo #17

Merged
merged 27 commits into from
Jan 2, 2024
Merged
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
small changes
  • Loading branch information
edadaltocg committed Aug 23, 2023
commit 8e83cdb0e3d392dfb13f32a5af3c5a44539f9811
3 changes: 0 additions & 3 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -140,9 +140,6 @@ dmypy.json
# data
/data

# cluster
/cluster

# logs
/results
/storage
32 changes: 28 additions & 4 deletions examples/covariate_drift.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
import argparse
import logging
import os
import random
from typing import Any, Dict

import numpy as np
import timm
import timm.data
import torch
@@ -28,6 +30,15 @@ def main(
seed=42,
debug=False,
):
# set seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
# benchmarking True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True

device = "cuda" if torch.cuda.is_available() else "cpu"
model = timm.create_model(model_name, pretrained=True)
data_config = timm.data.resolve_data_config(model.default_cfg)
@@ -53,9 +64,10 @@ def main(

if not debug:
pipeline.report(results, subsample=subsample)

fileversion = "v5"
# save results to csv file
path = os.path.join(RESULTS_DIR, pipeline_name, "results.csv")
# make unique id
path = os.path.join(RESULTS_DIR, pipeline_name, f"results_{fileversion}.csv")
save_results = {
"model": model_name,
"method": method_name,
@@ -70,6 +82,7 @@ def main(
"fpr_mistakes": results["fpr_mistakes"],
"first_drift": results["first_drift"],
"splits": results["splits"],
"window_size": results["window_size"],
"seed": seed,
}
append_results_to_csv_file(save_results, path)
@@ -78,22 +91,33 @@ def main(
labels = results["labels"]
preds = results["preds"]
targets = results["targets"]
drift_labels = results["drift_labels"]
mistakes = results["mistakes"]
moving_accuracy = results["moving_accuracy"]
moving_average = results["moving_average"]

results = {
"model": model_name,
"method": method_name,
"method_kwargs": method_kwargs,
"corruption": corruption,
"intensities": intensities,
"scores": scores.numpy().tolist(),
"labels": labels.numpy().tolist(),
"preds": preds.numpy().tolist(),
"targets": targets.numpy().tolist(),
"drift_labels": drift_labels.numpy().tolist(),
"mistakes": mistakes.numpy().tolist(),
"moving_accuracy": moving_accuracy.numpy().tolist(),
"moving_average": moving_average.numpy().tolist(),
"window_size": results["window_size"],
"seed": seed,
}
filename = os.path.join(RESULTS_DIR, args.pipeline, "scores.csv")
filename = os.path.join(RESULTS_DIR, args.pipeline, f"scores_{fileversion}.csv")
append_results_to_csv_file(results, filename)


if __name__ == "__main__":

parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="resnet18_cifar10")
parser.add_argument("--method", type=str, default="msp")
7 changes: 5 additions & 2 deletions examples/ood_benchmark.py
Original file line number Diff line number Diff line change
@@ -71,12 +71,15 @@ def main(args):
parser.add_argument("--pipeline", type=str, default="ood_benchmark_cifar10")
parser.add_argument("--model", type=str, default="resnet18_cifar10")
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument("--limit_fit", type=float, default=1)
parser.add_argument("--limit_fit", type=float, default=0.1)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()

logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
_logger.info(json.dumps(args.__dict__, indent=2))

if args.method == "react_projection":
args.method_kwargs["features_nodes"] = ["layer1", "layer2", "layer3", "clip", "fc"]
if "vit" in args.model and "projection" in args.method:
args.method_kwargs["features_nodes"] = [f"blocks.{l}" for l in range(1, 12)] + ["fc_norm", "head"]
main(args)
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
@@ -6,7 +6,7 @@
"optuna",
"scikit-learn",
"scikit-image",
"timm==0.8.19.dev0",
"timm>=0.8.19.dev0",
"torch>=1.13.1",
"torchvision",
"tqdm",
14 changes: 13 additions & 1 deletion src/detectors/aggregations/__init__.py
Original file line number Diff line number Diff line change
@@ -16,9 +16,16 @@
none_aggregation,
topk_aggregation,
)
from .blahut_arimoto import BlahutArimotoAggregation
from .cosine import CosineAggregation
from .innerprod import InnerProductAggregation
from .innerprod import (
InnerProductAggregation,
InnerProductIntegralAggregation,
InnerProductMeanAggregation,
InnerProductMinAggregation,
)
from .mahalanobis import MahalanobisAggregation
from .power import PowerAggregation
from .quantile import QuantileAggregation

_logger = logging.getLogger(__name__)
@@ -37,8 +44,13 @@
"layer_idx": layer_idx,
"mahalanobis": MahalanobisAggregation,
"innerprod": InnerProductAggregation,
"innerprod_mean": InnerProductMeanAggregation,
"innerprod_min": InnerProductMinAggregation,
"innerprod_integral": InnerProductIntegralAggregation,
"cosine": CosineAggregation,
"quantile": QuantileAggregation,
"power": PowerAggregation,
"blahut_arimoto": BlahutArimotoAggregation,
}


33 changes: 32 additions & 1 deletion src/detectors/aggregations/innerprod.py
Original file line number Diff line number Diff line change
@@ -15,7 +15,7 @@ def __init__(self, *args, **kwargs) -> None:
def fit(self, stack: Tensor, *args, **kwargs):
self.max_trajectory = stack.max(dim=0, keepdim=True)[0]
self.ref_trajectory = stack.mean(dim=0, keepdim=True) / self.max_trajectory
self.scale = torch.sum(self.ref_trajectory ** 2)
self.scale = torch.sum(self.ref_trajectory**2)

_logger.debug("InnerProductAggregation parameters")
_logger.debug(f"max_trajectory: {self.max_trajectory}")
@@ -26,3 +26,34 @@ def __call__(self, scores: Tensor, *args, **kwargs):
scores = scores / self.max_trajectory.to(scores.device)
scores = torch.sum(scores * self.ref_trajectory.to(scores.device), dim=1) / self.scale.to(scores.device)
return scores


class InnerProductIntegralAggregation(InnerProductAggregation):
    """Inner-product aggregation that integrates instead of summing.

    Reuses ``fit`` from :class:`InnerProductAggregation`; at call time the
    element-wise product with the reference trajectory is reduced across the
    layer dimension with the trapezoidal rule (``torch.trapz``) rather than
    a plain sum.
    """

    def __call__(self, scores: Tensor, *args, **kwargs):
        device = scores.device
        normalized = scores / self.max_trajectory.to(device)
        weighted = normalized * self.ref_trajectory.to(device)
        # trapezoidal integral over dim=1 (the layer axis), then rescale
        return torch.trapz(weighted, dim=1) / self.scale.to(device)


class InnerProductMeanAggregation(InnerProductAggregation):
    """Inner-product aggregation normalized by the *mean* trajectory.

    NOTE(review): ``ref_trajectory = mean / mean`` is element-wise all ones
    (NaN wherever the mean is zero — presumably scores are strictly positive;
    confirm), so ``scale`` equals the number of elements and the inherited
    ``__call__`` reduces to averaging the mean-normalized scores.
    """

    def fit(self, stack: Tensor, *args, **kwargs):
        # Normalize by the mean (not max) trajectory of the fitting stack.
        self.max_trajectory = stack.mean(dim=0, keepdim=True)
        self.ref_trajectory = stack.mean(dim=0, keepdim=True) / self.max_trajectory
        self.scale = torch.sum(self.ref_trajectory**2)

        # Log under the concrete subclass name (was hard-coded to the base
        # class), and use lazy %-args so tensors are only formatted when
        # DEBUG is enabled.
        _logger.debug("%s parameters", type(self).__name__)
        _logger.debug("max_trajectory: %s", self.max_trajectory)
        _logger.debug("ref_trajectory: %s", self.ref_trajectory)
        _logger.debug("scale: %s", self.scale)


class InnerProductMinAggregation(InnerProductAggregation):
    """Inner-product aggregation normalized by the element-wise *min* trajectory.

    Same contract as :class:`InnerProductAggregation` but the normalizer is
    the per-element minimum over the fitting stack instead of the maximum.

    NOTE(review): if any element of the min trajectory is zero or negative
    the normalization divides by it unguarded — presumably scores are
    strictly positive; confirm against the producing detector.
    """

    def fit(self, stack: Tensor, *args, **kwargs):
        # stack.min(...) returns (values, indices); keep only the values.
        self.max_trajectory = stack.min(dim=0, keepdim=True)[0]
        self.ref_trajectory = stack.mean(dim=0, keepdim=True) / self.max_trajectory
        self.scale = torch.sum(self.ref_trajectory**2)

        # Log under the concrete subclass name (was hard-coded to the base
        # class), and use lazy %-args so tensors are only formatted when
        # DEBUG is enabled.
        _logger.debug("%s parameters", type(self).__name__)
        _logger.debug("max_trajectory: %s", self.max_trajectory)
        _logger.debug("ref_trajectory: %s", self.ref_trajectory)
        _logger.debug("scale: %s", self.scale)
Loading