diff --git a/PhysicsTools/PythonAnalysis/test/BuildFile.xml b/PhysicsTools/PythonAnalysis/test/BuildFile.xml
index bbe49741d2850..0c04ef39142f7 100644
--- a/PhysicsTools/PythonAnalysis/test/BuildFile.xml
+++ b/PhysicsTools/PythonAnalysis/test/BuildFile.xml
@@ -10,9 +10,6 @@
-
-
-
@@ -45,7 +42,6 @@
-
@@ -53,7 +49,6 @@
-
diff --git a/PhysicsTools/PythonAnalysis/test/testDownhill.py b/PhysicsTools/PythonAnalysis/test/testDownhill.py
deleted file mode 100755
index 277512dc7a1e7..0000000000000
--- a/PhysicsTools/PythonAnalysis/test/testDownhill.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python3
-
-from __future__ import print_function
-import downhill
-import numpy as np
-import theano
-import theano.tensor as TT
-
-FLOAT = 'df'[theano.config.floatX == 'float32']
-
-def rand(a, b):
-    return np.random.randn(a, b).astype(FLOAT)
-
-A, B, K = 20, 5, 3
-
-# Set up a matrix factorization problem to optimize.
-u = theano.shared(rand(A, K), name='u')
-v = theano.shared(rand(K, B), name='v')
-z = TT.matrix()
-err = TT.sqr(z - TT.dot(u, v))
-loss = err.mean() + abs(u).mean() + (v * v).mean()
-
-# Minimize the regularized loss with respect to a data matrix.
-y = np.dot(rand(A, K), rand(K, B)) + rand(A, B)
-
-# Monitor during optimization.
-monitors = (('err', err.mean()),
-            ('|u|<0.1', (abs(u) < 0.1).mean()),
-            ('|v|<0.1', (abs(v) < 0.1).mean()))
-
-# Full signature of downhill.minimize, kept for reference:
-#
-#   minimize(loss,
-#            train,
-#            batch_size=32,
-#            monitor_gradients=False,
-#            monitors=(),
-#            valid=None,
-#            params=None,
-#            inputs=None,
-#            algo='rmsprop',
-#            updates=(),
-#            train_batches=None,
-#            valid_batches=None,
-#            **kwargs)
-
-downhill.minimize(
-    loss=loss,
-    train=[y],
-    patience=0,
-    batch_size=A,         # Process y as a single batch.
-    max_gradient_norm=1,  # Prevent gradient explosion!
-    learning_rate=0.1,
-    monitors=monitors,
-    monitor_gradients=True)
-
-# Print out the optimized coefficients u and basis v.
-print('u =', u.get_value())
-print('v =', v.get_value())
diff --git a/PhysicsTools/PythonAnalysis/test/testTheano.py b/PhysicsTools/PythonAnalysis/test/testTheano.py
deleted file mode 100755
index 615b901e88af5..0000000000000
--- a/PhysicsTools/PythonAnalysis/test/testTheano.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python3
-from __future__ import print_function
-import tensorflow.keras
-import theano
-
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.layers import Dense
-
-print(tensorflow.keras.__version__)
-print(theano.__version__)
-
-model = Sequential()
-model.add(Dense(units=64, activation='relu', input_dim=100))
-model.add(Dense(units=10, activation='softmax'))
diff --git a/PhysicsTools/PythonAnalysis/test/testTheano.sh b/PhysicsTools/PythonAnalysis/test/testTheano.sh
deleted file mode 100755
index dceabec41e878..0000000000000
--- a/PhysicsTools/PythonAnalysis/test/testTheano.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash -x
-
-ERR=0
-echo ">>> Create temporary directory for cache"
-TEST_TMPDIR=$(mktemp -d ${CMSSW_BASE}/tmp/cmssw_theano.XXXXXXX)
-
-echo ">>> Change default behaviour for Theano"
-export THEANO_FLAGS="device=cpu,force_device=True,base_compiledir=$TEST_TMPDIR"
-
-echo ">>> Theano configuration for testing:"
-python3 -c 'import theano; print(theano.config)' || ERR=1
-
-echo ">>> Cleaning compile cache"
-theano-cache clear || ERR=1
-
-if [ "$1" != "" ] ; then
-    ${CMSSW_BASE}/src/PhysicsTools/PythonAnalysis/test/$1 || ERR=1
-fi
-
-echo ">>> Cleaning compile cache"
-theano-cache clear || ERR=1
-
-rm -rf "$TEST_TMPDIR"
-exit $ERR
diff --git a/PhysicsTools/PythonAnalysis/test/testhep_ml.py b/PhysicsTools/PythonAnalysis/test/testhep_ml.py
deleted file mode 100755
index 783ea18b0edb9..0000000000000
--- a/PhysicsTools/PythonAnalysis/test/testhep_ml.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/env python3
-
-
-"""
-Testing all the nnet library
-"""
-from __future__ import division, print_function
-
-from builtins import range
-import numpy
-from sklearn.linear_model import LogisticRegression
-from sklearn.datasets import make_blobs
-from sklearn.metrics import roc_auc_score
-
-from hep_ml import nnet
-from hep_ml.commonutils import generate_sample
-from hep_ml.preprocessing import BinTransformer, IronTransformer
-
-__author__ = 'Alex Rogozhnikov'
-
-
-def test_nnet(n_samples=200, n_features=7, distance=0.8, complete=False):
- """
- :param complete: if True, all possible combinations will be checked, and quality is printed
- """
- X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=distance)
-
-    nn_types = [
-        nnet.SimpleNeuralNetwork,
-        nnet.MLPClassifier,
-        nnet.SoftmaxNeuralNetwork,
-        nnet.RBFNeuralNetwork,
-        nnet.PairwiseNeuralNetwork,
-        nnet.PairwiseSoftplusNeuralNetwork,
-    ]
-
-    if complete:
-        # checking all possible combinations
-        for loss in nnet.losses:
-            for NNType in nn_types:
-                for trainer in nnet.trainers:
-                    nn = NNType(layers=[5], loss=loss, trainer=trainer, random_state=42, epochs=100)
-                    nn.fit(X, y)
-                    print(roc_auc_score(y, nn.predict_proba(X)[:, 1]), nn)
-
-        lr = LogisticRegression().fit(X, y)
-        print(lr, roc_auc_score(y, lr.predict_proba(X)[:, 1]))
-
-        assert 0 == 1, "Let's see and compare results"
-    else:
-        # checking combinations of losses, nn_types, trainers; most of them are used once during tests
-        attempts = max(len(nnet.losses), len(nnet.trainers), len(nn_types))
-        losses_shift = numpy.random.randint(10)
-        trainers_shift = numpy.random.randint(10)
-        for attempt in range(attempts):
-            # each combination is tried 3 times before raising an exception
-            retry_attempts = 3
-            for retry_attempt in range(retry_attempts):
-                loss = list(nnet.losses.keys())[(attempt + losses_shift) % len(nnet.losses)]
-                trainer = list(nnet.trainers.keys())[(attempt + trainers_shift) % len(nnet.trainers)]
-
-                nn_type = nn_types[attempt % len(nn_types)]
-
-                nn = nn_type(layers=[5], loss=loss, trainer=trainer, random_state=42 + retry_attempt, epochs=200)
-                print(nn)
-                nn.fit(X, y)
-                quality = roc_auc_score(y, nn.predict_proba(X)[:, 1])
-                computed_loss = nn.compute_loss(X, y)  # exercised for coverage; value unused
-                if quality > 0.8:
-                    break
-                else:
-                    print('attempt {} : {}'.format(retry_attempt, quality))
-                    if retry_attempt == retry_attempts - 1:
-                        raise RuntimeError('quality of model is too low: {} {}'.format(quality, nn))
-
-
-def test_with_scaler(n_samples=200, n_features=15, distance=0.5):
-    X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=distance)
-    for scaler in [BinTransformer(max_bins=16), IronTransformer()]:
-        clf = nnet.SimpleNeuralNetwork(scaler=scaler, epochs=300)
-        clf.fit(X, y)
-
-        p = clf.predict_proba(X)
-        assert roc_auc_score(y, p[:, 1]) > 0.8, 'quality is too low for model: {}'.format(clf)
-
-
-print("NNet test")
-test_nnet()
-print("Scaler test")
-test_with_scaler()
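
Reference note, not part of the patch itself: the deleted testDownhill.py exercised
downhill's optimizers on a small regularized matrix-factorization loss,
mean((z - u.v)^2) + mean(|u|) + mean(v*v), with gradient-norm clipping. Should an
equivalent smoke test ever be wanted without the Theano stack, a NumPy-only sketch
of the same computation follows; the variable names, step count, and learning rate
are illustrative assumptions, not taken from any existing CMSSW test.

#!/usr/bin/env python3
# Illustrative NumPy-only analogue of the matrix factorization that
# testDownhill.py minimized via Theano/downhill (assumption: plain
# gradient descent is an acceptable stand-in for downhill's RMSProp).
import numpy as np

rng = np.random.default_rng(42)
A, B, K = 20, 5, 3

u = rng.standard_normal((A, K))
v = rng.standard_normal((K, B))
# Synthetic data matrix, as in the deleted test: low-rank signal plus noise.
y = rng.standard_normal((A, K)) @ rng.standard_normal((K, B)) + rng.standard_normal((A, B))

learning_rate = 0.1
for step in range(500):
    err = u @ v - y
    # Gradients of the mean squared error plus the |u| and v**2 penalties.
    grad_u = 2.0 * err @ v.T / err.size + np.sign(u) / u.size
    grad_v = 2.0 * u.T @ err / err.size + 2.0 * v / v.size
    # Rough analogue of the deleted test's max_gradient_norm=1 clipping.
    for g in (grad_u, grad_v):
        norm = np.linalg.norm(g)
        if norm > 1.0:
            g /= norm
    u -= learning_rate * grad_u
    v -= learning_rate * grad_v

print('final mean squared error:', np.mean((u @ v - y) ** 2))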