From 82fe4cd80eebf529cab77bdbb9259a0e6b3ccaef Mon Sep 17 00:00:00 2001 From: Alan Robertson Date: Wed, 29 Mar 2017 14:16:29 +1100 Subject: [PATCH 01/13] Added a simple particle swarm based hyper-heuristic --- src/qinfer/hyper_heuristic_optimisers.py | 202 +++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 src/qinfer/hyper_heuristic_optimisers.py diff --git a/src/qinfer/hyper_heuristic_optimisers.py b/src/qinfer/hyper_heuristic_optimisers.py new file mode 100644 index 0000000..4b51879 --- /dev/null +++ b/src/qinfer/hyper_heuristic_optimisers.py @@ -0,0 +1,202 @@ + + +## FEATURES ################################################################### + +from __future__ import absolute_import +from __future__ import division + +## IMPORTS #################################################################### + +import numpy as np +import random +from functools import partial +from qinfer.perf_testing import perf_test_multiple +from qinfer import distributions + +## CLASSES #################################################################### + +__all__ = [ + 'ParticleSwarmOptimiser' +] + +class HyperHeuristicOptimiser(object): + ''' + A generic hyper-heuristic optimiser class that is inherited by the other optimisation functions. + + :param np.ndarray param_names: The list of parameters that are being searched over. 
+ :param function fitness_function: The function that is being optimised over, defaults to perf test multiple + :param function boundary_map: Function to constrain points within some boundary regime + :param dict funct_args: Arguments to pass to the fitness function + :param dict funct_kwargs: Keyword arguments to pass to the fitness function + ''' + + def __init__( + self, + param_names, + fitness_function = None, + boundary_map=None, + *funct_args, + **funct_kwargs + ): + self._param_names = param_names + self._n_free_params = len(param_names) + self._boundary_map = boundary_map + self._funct_args = funct_args + self._funct_kwargs = funct_kwargs + + if fitness_function is None: # Default to calling perf test multiple + self._optimisable = PerfTestMultipleAbstractor( + self._param_names, + *self._funct_args, + **self._funct_kwargs + ) + else: + self._fitness_function = partial(fitness_function, *self._funct_args, **self._funct_kwargs) + + # Member function needed for parralelisation + def fitness_function(self, params): + return self._fitness_function(params) + + def parrallel(self): + raise NotImplementedError("This optimiser does not have parrallel support. 
To resolve this issue, level an appropriate criticism at the developer.") + +class ParticleSwarmOptimiser(HyperHeuristicOptimiser): + ''' + A particle swarm optimisation based hyperheuristic + :param integer n_pso_iterations: + :param integer n_pso_particles: + :param + :param + ''' + + def __call__(self, + n_pso_iterations=50, + n_pso_particles=60, + initial_position_distribution=None, + initial_velocity_distribution=None, + omega_v=0.35, + phi_p=0.25, + phi_g=0.5, + serial_map=map + ): + self._fitness_dt = np.dtype([ + ('params', np.float64, (self._n_free_params,)), + ('velocities', np.float64, (self._n_free_params,)), + ('fitness', np.float64)]) + self._fitness = np.empty([n_pso_iterations, n_pso_particles], dtype=self._fitness_dt) + local_attractors = np.empty([n_pso_particles], dtype=self._fitness_dt) + global_attractor = np.empty([1], dtype=self._fitness_dt) + + if initial_position_distribution is None: + initial_position_distribution = distributions.UniformDistribution(np.array([[ 0, 1]] * self._n_free_params)); + + if initial_velocity_distribution is None: + initial_velocity_distribution = distributions.UniformDistribution(np.array([[-1, 1]] * self._n_free_params)) + + # Initial particle positions + self._fitness[0]["params"] = initial_position_distribution.sample(n_pso_particles) + + # Apply the boundary conditions if any exist + if self._boundary_map is not None: + self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) + + # Calculate the initial particle fitnesses + self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], + serial_map=serial_map) + + # Calculate the positions of the attractors + local_attractors = self._fitness[0] + local_attractors, global_attractor = self.update_attractors( + self._fitness[0], + local_attractors, + global_attractor) + + # Initial particle velocities + self._fitness[0]["velocities"] = initial_velocity_distribution.sample(n_pso_particles) + self._fitness[0]["velocities"] = 
self.update_velocities( + self._fitness[0]["params"], + self._fitness[0]["velocities"], + local_attractors["params"], + global_attractor["params"], + omega_v, phi_p, phi_g) + + for itr in range(1, n_pso_iterations): + #Update the particle positions + self._fitness[itr]["params"] = self.update_positions( + self._fitness[itr - 1]["params"], + self._fitness[itr - 1]["velocities"]) + + # Apply the boundary conditions if any exist + if self._boundary_map is not None: + self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) + + # Recalculate the fitness function + self._fitness[itr]["fitness"] = self.evaluate_fitness( + self._fitness[itr]["params"], + serial_map=serial_map) + + # Find the new attractors + local_attractors, global_attractor = self.update_attractors( + self._fitness[itr], + local_attractors, + global_attractor) + + # Update the velocities + self._fitness[itr]["velocities"] = self.update_velocities( + self._fitness[itr]["params"], + self._fitness[itr - 1]["velocities"], + local_attractors["params"], + global_attractor["params"], + omega_v, phi_p, phi_g) + + return global_attractor + + def evaluate_fitness(self, particles, serial_map): + fitness_function = partial(self.fitness_function) + fitness = np.empty([len(particles)], dtype=np.float64) + fitness = serial_map(self.fitness_function, particles) + return fitness + + def update_positions(self, positions, velocities): + updated = positions + velocities + return updated + + def update_velocities(self, positions, velocities, local_attractors, global_attractor, omega_v, phi_p, phi_g): + random_p = np.random.random_sample(positions.shape) + random_g = np.random.random_sample(positions.shape) + updated = omega_v * velocities + phi_p * random_p * (local_attractors - positions) + phi_g * random_g * (global_attractor - positions) + return updated + + def update_attractors(self, particles, local_attractors, global_attractor): + for idx, particle in enumerate(particles): + if 
particle["fitness"] < local_attractors[idx]["fitness"]: + local_attractors[idx] = particle + global_attractor = local_attractors[np.argmin(local_attractors["fitness"])] + return local_attractors, global_attractor + +class PerfTestMultipleAbstractor: + def __init__(self, + param_names, + evaluation_function = None, + *args, + **kwargs): + self._heuristic = kwargs['heuristic_class'] + del kwargs['heuristic_class'] + self._args = args + self._kwargs = kwargs + self._param_names = param_names + if evaluation_function is None: + self._evaluation_function = lambda performance: performance['loss'][:,-1].mean(axis=0) + else: + self._evaluation_function = evaluation_function + + def __call__(self, params): + performance = perf_test_multiple( + *self._args, + heuristic_class = self._heuristic(**{ + name: param + for name, param in zip(self._param_names, params) + }), + **self._kwargs + ) + return self._evaluation_function(performance) \ No newline at end of file From 60ce342d193603af18c173674c2a056b0c460ee9 Mon Sep 17 00:00:00 2001 From: Alan Robertson Date: Wed, 5 Apr 2017 15:09:13 +1000 Subject: [PATCH 02/13] Applied edits with regards to comments provided --- src/qinfer/hyper_heuristic_optimisers.py | 43 +++++++++--------------- 1 file changed, 16 insertions(+), 27 deletions(-) diff --git a/src/qinfer/hyper_heuristic_optimisers.py b/src/qinfer/hyper_heuristic_optimisers.py index 4b51879..7586d38 100644 --- a/src/qinfer/hyper_heuristic_optimisers.py +++ b/src/qinfer/hyper_heuristic_optimisers.py @@ -1,23 +1,10 @@ - - -## FEATURES ################################################################### - -from __future__ import absolute_import from __future__ import division -## IMPORTS #################################################################### - -import numpy as np import random +import numpy as np from functools import partial -from qinfer.perf_testing import perf_test_multiple +from qinfer.perf_testing import perf_test_multiple, apply_serial from qinfer import 
distributions - -## CLASSES #################################################################### - -__all__ = [ - 'ParticleSwarmOptimiser' -] class HyperHeuristicOptimiser(object): ''' @@ -45,7 +32,7 @@ def __init__( self._funct_kwargs = funct_kwargs if fitness_function is None: # Default to calling perf test multiple - self._optimisable = PerfTestMultipleAbstractor( + self._fitness_function = PerfTestMultipleAbstractor( self._param_names, *self._funct_args, **self._funct_kwargs @@ -57,8 +44,8 @@ def __init__( def fitness_function(self, params): return self._fitness_function(params) - def parrallel(self): - raise NotImplementedError("This optimiser does not have parrallel support. To resolve this issue, level an appropriate criticism at the developer.") + def parallel(self): + raise NotImplementedError("This optimiser does not have parallel support.") class ParticleSwarmOptimiser(HyperHeuristicOptimiser): ''' @@ -77,7 +64,7 @@ def __call__(self, omega_v=0.35, phi_p=0.25, phi_g=0.5, - serial_map=map + map=map ): self._fitness_dt = np.dtype([ ('params', np.float64, (self._n_free_params,)), @@ -102,7 +89,7 @@ def __call__(self, # Calculate the initial particle fitnesses self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], - serial_map=serial_map) + map=map) # Calculate the positions of the attractors local_attractors = self._fitness[0] @@ -133,7 +120,7 @@ def __call__(self, # Recalculate the fitness function self._fitness[itr]["fitness"] = self.evaluate_fitness( self._fitness[itr]["params"], - serial_map=serial_map) + map=map) # Find the new attractors local_attractors, global_attractor = self.update_attractors( @@ -151,10 +138,9 @@ def __call__(self, return global_attractor - def evaluate_fitness(self, particles, serial_map): + def evaluate_fitness(self, particles, map=map): fitness_function = partial(self.fitness_function) - fitness = np.empty([len(particles)], dtype=np.float64) - fitness = serial_map(self.fitness_function, particles) + 
fitness = map(self.fitness_function, particles) return fitness def update_positions(self, positions, velocities): @@ -174,14 +160,17 @@ def update_attractors(self, particles, local_attractors, global_attractor): global_attractor = local_attractors[np.argmin(local_attractors["fitness"])] return local_attractors, global_attractor -class PerfTestMultipleAbstractor: +class PerfTestMultipleAbstractor(object): def __init__(self, param_names, evaluation_function = None, *args, **kwargs): - self._heuristic = kwargs['heuristic_class'] - del kwargs['heuristic_class'] + try: + self._heuristic = kwargs['heuristic_class'] + del kwargs['heuristic_class'] + except: + raise NotImplementedError("No heuristic class was passed.") self._args = args self._kwargs = kwargs self._param_names = param_names From a2762ec4b1fa15d4ffc5611f291edea105e83fb4 Mon Sep 17 00:00:00 2001 From: Alan Robertson Date: Tue, 11 Apr 2017 11:58:35 +1000 Subject: [PATCH 03/13] Moved from map to apply, along with other changes --- src/qinfer/hyper_heuristic_optimisers.py | 34 +++++++++++++----------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/src/qinfer/hyper_heuristic_optimisers.py b/src/qinfer/hyper_heuristic_optimisers.py index 7586d38..f87358a 100644 --- a/src/qinfer/hyper_heuristic_optimisers.py +++ b/src/qinfer/hyper_heuristic_optimisers.py @@ -1,4 +1,4 @@ -from __future__ import division +from __future__ import division, absolute_import, print_function import random import numpy as np @@ -6,9 +6,9 @@ from qinfer.perf_testing import perf_test_multiple, apply_serial from qinfer import distributions -class HyperHeuristicOptimiser(object): +class Optimiser(object): ''' - A generic hyper-heuristic optimiser class that is inherited by the other optimisation functions. + A generic optimiser class that is inherited by the other optimisation functions. :param np.ndarray param_names: The list of parameters that are being searched over. 
:param function fitness_function: The function that is being optimised over, defaults to perf test multiple @@ -47,7 +47,7 @@ def fitness_function(self, params): def parallel(self): raise NotImplementedError("This optimiser does not have parallel support.") -class ParticleSwarmOptimiser(HyperHeuristicOptimiser): +class ParticleSwarmOptimiser(Optimiser): ''' A particle swarm optimisation based hyperheuristic :param integer n_pso_iterations: @@ -64,7 +64,7 @@ def __call__(self, omega_v=0.35, phi_p=0.25, phi_g=0.5, - map=map + apply=apply_serial ): self._fitness_dt = np.dtype([ ('params', np.float64, (self._n_free_params,)), @@ -89,7 +89,7 @@ def __call__(self, # Calculate the initial particle fitnesses self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], - map=map) + apply=apply) # Calculate the positions of the attractors local_attractors = self._fitness[0] @@ -120,7 +120,7 @@ def __call__(self, # Recalculate the fitness function self._fitness[itr]["fitness"] = self.evaluate_fitness( self._fitness[itr]["params"], - map=map) + apply=apply) # Find the new attractors local_attractors, global_attractor = self.update_attractors( @@ -138,9 +138,11 @@ def __call__(self, return global_attractor - def evaluate_fitness(self, particles, map=map): + def evaluate_fitness(self, particles, apply=apply): fitness_function = partial(self.fitness_function) - fitness = map(self.fitness_function, particles) + #fitness = map(self.fitness_function, particles) + results = [apply(self.fitness_function, particle) for particle in particles] + fitness = [result.get() for result in results] return fitness def update_positions(self, positions, velocities): @@ -167,7 +169,7 @@ def __init__(self, *args, **kwargs): try: - self._heuristic = kwargs['heuristic_class'] + self._heuristic_class = kwargs['heuristic_class'] del kwargs['heuristic_class'] except: raise NotImplementedError("No heuristic class was passed.") @@ -182,10 +184,12 @@ def __init__(self, def __call__(self, 
params): performance = perf_test_multiple( *self._args, - heuristic_class = self._heuristic(**{ - name: param - for name, param in zip(self._param_names, params) - }), - **self._kwargs + heuristic_class = partial( + self._heuristic_class, + **{name: param + for name, param in zip(self._param_names, params) + }), + **self._kwargs ) + return self._evaluation_function(performance) \ No newline at end of file From 8d354351813c5725ccfe618a6881ada6e08bd8cf Mon Sep 17 00:00:00 2001 From: Alan Robertson Date: Wed, 19 Apr 2017 13:56:42 +1000 Subject: [PATCH 04/13] Added tests and additional PSO variants --- src/qinfer/hyper_heuristic_optimisers.py | 302 ++++++++++++++++++++++- src/qinfer/tests/test_optimiser.py | 244 ++++++++++++++++++ 2 files changed, 536 insertions(+), 10 deletions(-) create mode 100644 src/qinfer/tests/test_optimiser.py diff --git a/src/qinfer/hyper_heuristic_optimisers.py b/src/qinfer/hyper_heuristic_optimisers.py index f87358a..5b5543a 100644 --- a/src/qinfer/hyper_heuristic_optimisers.py +++ b/src/qinfer/hyper_heuristic_optimisers.py @@ -1,4 +1,34 @@ -from __future__ import division, absolute_import, print_function +#!/usr/bin/python +# -*- coding: utf-8 -*- +## +# test_models.py: Simple models for testing inference engines. +## +# © 2017 Alan Robertson (arob8086@uni.sydney.edu.au) +# +# This file is a part of the Qinfer project. +# Licensed under the AGPL version 3. +## +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. 
+# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +## + +## FEATURES ################################################################## + +from __future__ import division +from __future__ import absolute_import +from __future__ import print_function + +## IMPORTS ################################################################### import random import numpy as np @@ -6,6 +36,8 @@ from qinfer.perf_testing import perf_test_multiple, apply_serial from qinfer import distributions +## CLASSES #################################################################### + class Optimiser(object): ''' A generic optimiser class that is inherited by the other optimisation functions. @@ -52,8 +84,11 @@ class ParticleSwarmOptimiser(Optimiser): A particle swarm optimisation based hyperheuristic :param integer n_pso_iterations: :param integer n_pso_particles: - :param - :param + :param QInferDistribution initial_velocity_distribution: + :param QInferDistribution initial_velocity_distribution: + :param double + :param double + :param function ''' def __call__(self, @@ -66,13 +101,9 @@ def __call__(self, phi_g=0.5, apply=apply_serial ): - self._fitness_dt = np.dtype([ - ('params', np.float64, (self._n_free_params,)), - ('velocities', np.float64, (self._n_free_params,)), - ('fitness', np.float64)]) - self._fitness = np.empty([n_pso_iterations, n_pso_particles], dtype=self._fitness_dt) - local_attractors = np.empty([n_pso_particles], dtype=self._fitness_dt) - global_attractor = np.empty([1], dtype=self._fitness_dt) + self._fitness = np.empty([n_pso_iterations, n_pso_particles], dtype=self.fitness_dt()) + local_attractors = np.empty([n_pso_particles], dtype=self.fitness_dt()) + global_attractor = np.empty([1], dtype=self.fitness_dt()) if initial_position_distribution is None: initial_position_distribution = distributions.UniformDistribution(np.array([[ 0, 1]] * self._n_free_params)); @@ -161,7 +192,258 @@ def 
update_attractors(self, particles, local_attractors, global_attractor): local_attractors[idx] = particle global_attractor = local_attractors[np.argmin(local_attractors["fitness"])] return local_attractors, global_attractor + + def fitness_dt(self): + return np.dtype([ + ('params', np.float64, (self._n_free_params,)), + ('velocities', np.float64, (self._n_free_params,)), + ('fitness', np.float64)]) + + +class ParticleSwarmSimpleAnnealingOptimiser(ParticleSwarmOptimiser): + + def __call__(self, + n_pso_iterations=50, + n_pso_particles=60, + initial_position_distribution=None, + initial_velocity_distribution=None, + omega_v=0.35, + phi_p=0.25, + phi_g=0.5, + temperature = 0.95, + apply=apply_serial + ): + self._fitness = np.empty([n_pso_iterations, n_pso_particles], dtype=self.fitness_dt()) + local_attractors = np.empty([n_pso_particles], dtype=self.fitness_dt()) + global_attractor = np.empty([1], dtype=self.fitness_dt()) + + if initial_position_distribution is None: + initial_position_distribution = distributions.UniformDistribution(np.array([[ 0, 1]] * self._n_free_params)); + + if initial_velocity_distribution is None: + initial_velocity_distribution = distributions.UniformDistribution(np.array([[-1, 1]] * self._n_free_params)) + + # Initial particle positions + self._fitness[0]["params"] = initial_position_distribution.sample(n_pso_particles) + + # Apply the boundary conditions if any exist + if self._boundary_map is not None: + self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) + + # Calculate the initial particle fitnesses + self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], + apply=apply) + + # Calculate the positions of the attractors + local_attractors = self._fitness[0] + local_attractors, global_attractor = self.update_attractors( + self._fitness[0], + local_attractors, + global_attractor) + + # Initial particle velocities + self._fitness[0]["velocities"] = 
initial_velocity_distribution.sample(n_pso_particles) + self._fitness[0]["velocities"] = self.update_velocities( + self._fitness[0]["params"], + self._fitness[0]["velocities"], + local_attractors["params"], + global_attractor["params"], + omega_v, phi_p, phi_g) + + for itr in range(1, n_pso_iterations): + #Update the particle positions + self._fitness[itr]["params"] = self.update_positions( + self._fitness[itr - 1]["params"], + self._fitness[itr - 1]["velocities"]) + + # Apply the boundary conditions if any exist + if self._boundary_map is not None: + self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) + + # Recalculate the fitness function + self._fitness[itr]["fitness"] = self.evaluate_fitness( + self._fitness[itr]["params"], + apply=apply) + + # Find the new attractors + local_attractors, global_attractor = self.update_attractors( + self._fitness[itr], + local_attractors, + global_attractor) + + # Update the velocities + self._fitness[itr]["velocities"] = self.update_velocities( + self._fitness[itr]["params"], + self._fitness[itr - 1]["velocities"], + local_attractors["params"], + global_attractor["params"], + omega_v, phi_p, phi_g) + + # Update the PSO params + omega_v, phi_p, phi_g = self.update_pso_params( + temperature, + omega_v, + phi_p, + phi_g) + + return global_attractor + + def update_pso_params(self, temperature, omega_v, phi_p, phi_g): + omega_v, phi_p, phi_g = np.multiply(temperature, [omega_v, phi_p, phi_g]) + return omega_v, phi_p, phi_g + + +class ParticleSwarmTemperingOptimiser(ParticleSwarmOptimiser): + ''' + A particle swarm optimisation based hyperheuristic + :param integer n_pso_iterations: + :param integer n_pso_particles: + :param QInferDistribution initial_velocity_distribution: + :param QInferDistribution initial_velocity_distribution: + :param double + :param double + :param function + ''' + def __call__(self, + n_pso_iterations=50, + n_pso_particles=60, + initial_position_distribution=None, + 
initial_velocity_distribution=None, + n_temper_categories = 6, + temper_frequency = 10, + temper_params = None, + apply=apply_serial + ): + self._fitness = np.empty([n_pso_iterations, n_pso_particles], dtype=self.fitness_dt()) + local_attractors = np.empty([n_pso_particles], dtype=self.fitness_dt()) + global_attractor = np.empty([1], dtype=self.fitness_dt()) + + if initial_position_distribution is None: + initial_position_distribution = distributions.UniformDistribution(np.array([[ 0, 1]] * self._n_free_params)); + + if initial_velocity_distribution is None: + initial_velocity_distribution = distributions.UniformDistribution(np.array([[-1, 1]] * self._n_free_params)) + + if temper_params is None: + omega_v = np.random.random(n_temper_categories) + phi_p = np.random.random(n_temper_categories) + phi_g = np.random.random(n_temper_categories) + temper_params = [np.array((params), dtype=self.temper_params_dt()) for params in zip(omega_v, phi_p, phi_g)] + + # Distribute the particles into different temper categories + temper_map = self.distribute_particles(n_pso_particles, n_temper_categories) + + # Initial particle positions + self._fitness[0]["params"] = initial_position_distribution.sample(n_pso_particles) + + # Apply the boundary conditions if any exist + if self._boundary_map is not None: + self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) + + # Calculate the initial particle fitnesses + self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], + apply=apply) + + # Calculate the positions of the attractors + local_attractors = self._fitness[0] + local_attractors, global_attractor = self.update_attractors( + self._fitness[0], + local_attractors, + global_attractor) + + # Initial particle velocities + self._fitness[0]["velocities"] = initial_velocity_distribution.sample(n_pso_particles) + + # Update the velocities using the temper map + for idx, temper_category in enumerate(temper_map): + 
self._fitness[0]["velocities"][temper_category] = self.update_velocities( + self._fitness[0]["params"][temper_category], + self._fitness[0]["velocities"][temper_category], + local_attractors["params"][temper_category], + global_attractor["params"], + temper_params[idx]["omega_v"], + temper_params[idx]["phi_p"], + temper_params[idx]["phi_g"]) + + for itr in range(1, n_pso_iterations): + # Update the particle positions + self._fitness[itr]["params"] = self.update_positions( + self._fitness[itr - 1]["params"], + self._fitness[itr - 1]["velocities"]) + + # Apply the boundary conditions if any exist + if self._boundary_map is not None: + self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) + + # Recalculate the fitness function + self._fitness[itr]["fitness"] = self.evaluate_fitness( + self._fitness[itr]["params"], + apply=apply) + + # Find the new attractors + local_attractors, global_attractor = self.update_attractors( + self._fitness[itr], + local_attractors, + global_attractor) + + # Update the velocities + for idx, temper_category in enumerate(temper_map): + self._fitness[itr]["velocities"][temper_category] = self.update_velocities( + self._fitness[itr]["params"][temper_category], + self._fitness[itr - 1]["velocities"][temper_category], + local_attractors["params"][temper_category], + global_attractor["params"], + temper_params[idx]["omega_v"], + temper_params[idx]["phi_p"], + temper_params[idx]["phi_g"]) + + # Redistribute the particles into different temper categories + if itr % temper_frequency == 0: + temper_map = self.distribute_particles(n_pso_particles, n_temper_categories) + + return global_attractor + + def temper_params_dt(self): + return np.dtype([ + ('omega_v', np.float64), + ('phi_p', np.float64), + ('phi_g', np.float64)]) + + + def distribute_particles(self, n_pso_particles, n_temper_categories): + + # Distribute as many particles as evenly as possible across the categories, + # This ensures that there are no empty 
categories + n_evenly_distributable = (n_pso_particles // n_temper_categories) * n_temper_categories + n_unevenly_distributable = n_pso_particles - n_evenly_distributable + + # Generate the required indicies for the pso particles + particle_indicies = range(0, n_pso_particles) + + # Randomise the order + np.random.shuffle(particle_indicies) + + # Reshape to a 2D array indexed on the number of tempering categories + particle_map = np.reshape( + particle_indicies[:n_evenly_distributable], + (n_temper_categories, n_evenly_distributable//n_temper_categories)) + + # Transfer to the map + temper_map = {} + for i, index_category in enumerate(particle_map): + temper_map[i] = index_category + + # Transfer any excess particles that could not be evenly distributed + # This is a slow operation, so for the purposes of speed the number of + # temper categories should be a factor of the number of pso particles + if n_unevenly_distributable != 0: + for i in range(n_evenly_distributable, n_pso_particles): + temper_map[random.randrange(0, n_temper_categories)] = ( + np.append(temper_map[random.randrange(0, n_temper_categories)], [particle_indicies[i]])) + + return temper_map + class PerfTestMultipleAbstractor(object): def __init__(self, param_names, diff --git a/src/qinfer/tests/test_optimiser.py b/src/qinfer/tests/test_optimiser.py new file mode 100644 index 0000000..475465d --- /dev/null +++ b/src/qinfer/tests/test_optimiser.py @@ -0,0 +1,244 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +## +# test_distributions.py: Checks that distribution objects act as expected. +## +# © 2014 Chris Ferrie (csferrie@gmail.com) and +# Christopher E. Granade (cgranade@gmail.com) +# +# This file is a part of the Qinfer project. +# Licensed under the AGPL version 3. 
+## +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +## + +## FEATURES ################################################################### + +from __future__ import absolute_import +from __future__ import division # Ensures that a/b is always a float. + +## IMPORTS #################################################################### + +import qinfer.rb as rb +import qinfer.distributions as dist + +import numpy as np +import random as rnd + +from functools import partial + +## CLASSES #################################################################### + +class TestPSO(DerandomizedTestCase): + + def test_pso_quad(self): + f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) + hh_opt = ParticleSwarmOptimiser(['x','y','z','a'], fitness_function = f_quad) + hh_opt() + + def test_pso_sin_sq(self): + f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) + hh_opt = ParticleSwarmOptimiser(['x','y','z','a'], fitness_function = f_sin_sq) + hh_opt() + + def test_pso_rosenbrock(self): + f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) + hh_opt = ParticleSwarmOptimiser(['x','y','z','a'], fitness_function = f_rosenbrock) + hh_opt() + + + def test_pso_perf_test_multiple_short(self): + # Define our experiment + n_trials = 20 # Times we repeat the set of experiments + n_exp = 100 # Number of experiments in the set + n_particles = 4000 # Number of points we 
track during the experiment + + # Model for the experiment + model = rb.RandomizedBenchmarkingModel() + + #Ordering of RB is 'p', 'A', 'B' + # A + B < 1, 0 < p < 1 + #Prior distribution of the experiment + prior = dist.PostselectedDistribution( + dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), + model + ) + + #Heuristic used in the experiment + heuristic_class = qi.expdesign.ExpSparseHeuristic + + #Heuristic Parameters + params = ['base', 'scale'] + + #Fitness function to evaluate the performance of the experiment + EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) + + hh_opt = ParticleSwarmOptimiser(params, + n_trials = n_trials, + n_particles = n_particles, + prior = prior, + model = model, + n_exp = n_exp, + heuristic_class = heuristic_class + ) + hh_opt(n_pso_iterations=5, + n_pso_particles=6) + + + def test_pso_perf_test_multiple_long(self): + # Define our experiment + n_trials = 20 # Times we repeat the set of experiments + n_exp = 150 # Number of experiments in the set + n_particles = 4000 # Number of points we track during the experiment + + # Model for the experiment + model = rb.RandomizedBenchmarkingModel() + + #Ordering of RB is 'p', 'A', 'B' + # A + B < 1, 0 < p < 1 + #Prior distribution of the experiment + prior = dist.PostselectedDistribution( + dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), + model + ) + + #Heuristic used in the experiment + heuristic_class = qi.expdesign.ExpSparseHeuristic + + #Heuristic Parameters + params = ['base', 'scale'] + + #Fitness function to evaluate the performance of the experiment + EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) + + hh_opt = ParticleSwarmOptimiser(params, + n_trials = n_trials, + n_particles = n_particles, + prior = prior, + model = model, + n_exp = n_exp, + heuristic_class = heuristic_class + ) + hh_opt() + +def TestPSSAO(DerandomizedTestCase): + + def 
test_pssao_quad(self): + f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) + hh_opt = ParticleSwarmSimpleAnnealingOptimiser(['x','y','z','a'], fitness_function = f_quad) + hh_opt() + + def test_pssao_sin_sq(self): + f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) + hh_opt = ParticleSwarmSimpleAnnealingOptimiser(['x','y','z','a'], fitness_function = f_sin_sq) + hh_opt() + + def test_pssao_rosenbrock(self): + f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) + hh_opt = ParticleSwarmSimpleAnnealingOptimiser(['x','y','z','a'], fitness_function = f_rosenbrock) + hh_opt() + + + def test_pssao_perf_test_multiple_short(self): + # Define our experiment + n_trials = 20 # Times we repeat the set of experiments + n_exp = 150 # Number of experiments in the set + n_particles = 4000 # Number of points we track during the experiment + + # Model for the experiment + model = rb.RandomizedBenchmarkingModel() + + #Ordering of RB is 'p', 'A', 'B' + # A + B < 1, 0 < p < 1 + #Prior distribution of the experiment + prior = dist.PostselectedDistribution( + dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), + model + ) + + #Heuristic used in the experiment + heuristic_class = qi.expdesign.ExpSparseHeuristic + + #Heuristic Parameters + params = ['base', 'scale'] + + #Fitness function to evaluate the performance of the experiment + EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) + + hh_opt = ParticleSwarmSimpleAnnealingOptimiser(params, + n_trials = n_trials, + n_particles = n_particles, + prior = prior, + model = model, + n_exp = n_exp, + heuristic_class = heuristic_class + ) + hh_opt(n_pso_iterations=5, + n_pso_particles=6) + + +def TestPSTO(DerandomizedTestCase): + + def test_psto_quad(self): + f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) + hh_opt = ParticleSwarmTemperingOptimiser(['x','y','z','a'], fitness_function = f_quad) + hh_opt() + + def 
test_psto_sin_sq(self): + f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) + hh_opt = ParticleSwarmTemperingOptimiser(['x','y','z','a'], fitness_function = f_sin_sq) + hh_opt() + + def test_psto_rosenbrock(self): + f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) + hh_opt = ParticleSwarmTemperingOptimiser(['x','y','z','a'], fitness_function = f_rosenbrock) + hh_opt() + + + def test_psto_perf_test_multiple_short(self): + # Define our experiment + n_trials = 20 # Times we repeat the set of experiments + n_exp = 150 # Number of experiments in the set + n_particles = 4000 # Number of points we track during the experiment + + # Model for the experiment + model = rb.RandomizedBenchmarkingModel() + + #Ordering of RB is 'p', 'A', 'B' + # A + B < 1, 0 < p < 1 + #Prior distribution of the experiment + prior = dist.PostselectedDistribution( + dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), + model + ) + + #Heuristic used in the experiment + heuristic_class = qi.expdesign.ExpSparseHeuristic + + #Heuristic Parameters + params = ['base', 'scale'] + + #Fitness function to evaluate the performance of the experiment + EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) + + hh_opt = ParticleSwarmTemperingOptimiser(params, + n_trials = n_trials, + n_particles = n_particles, + prior = prior, + model = model, + n_exp = n_exp, + heuristic_class = heuristic_class + ) + hh_opt(n_pso_iterations=5, + n_pso_particles=6) From 0f1f598c7555eda26732e78c361e2da650ee9f35 Mon Sep 17 00:00:00 2001 From: Alan Robertson Date: Wed, 19 Apr 2017 14:47:38 +1000 Subject: [PATCH 05/13] Spelling swapped to US, renamed some classes, removed long testing for travis compatibility --- src/qinfer/hyper_heuristic_optimisers.py | 16 +++---- src/qinfer/tests/test_optimiser.py | 61 +++++------------------- 2 files changed, 20 insertions(+), 57 deletions(-) diff --git 
a/src/qinfer/hyper_heuristic_optimisers.py b/src/qinfer/hyper_heuristic_optimisers.py index 5b5543a..c1c338f 100644 --- a/src/qinfer/hyper_heuristic_optimisers.py +++ b/src/qinfer/hyper_heuristic_optimisers.py @@ -38,9 +38,9 @@ ## CLASSES #################################################################### -class Optimiser(object): +class Optimizer(object): ''' - A generic optimiser class that is inherited by the other optimisation functions. + A generic optimizer class that is inherited by the other optimisation functions. :param np.ndarray param_names: The list of parameters that are being searched over. :param function fitness_function: The function that is being optimised over, defaults to perf test multiple @@ -64,7 +64,7 @@ def __init__( self._funct_kwargs = funct_kwargs if fitness_function is None: # Default to calling perf test multiple - self._fitness_function = PerfTestMultipleAbstractor( + self._fitness_function = HeuristicPerformanceFitness( self._param_names, *self._funct_args, **self._funct_kwargs @@ -77,9 +77,9 @@ def fitness_function(self, params): return self._fitness_function(params) def parallel(self): - raise NotImplementedError("This optimiser does not have parallel support.") + raise NotImplementedError("This optimizer does not have parallel support.") -class ParticleSwarmOptimiser(Optimiser): +class ParticleSwarmOptimizer(Optimizer): ''' A particle swarm optimisation based hyperheuristic :param integer n_pso_iterations: @@ -200,7 +200,7 @@ def fitness_dt(self): ('fitness', np.float64)]) -class ParticleSwarmSimpleAnnealingOptimiser(ParticleSwarmOptimiser): +class ParticleSwarmSimpleAnnealingOptimizer(ParticleSwarmOptimizer): def __call__(self, n_pso_iterations=50, @@ -293,7 +293,7 @@ def update_pso_params(self, temperature, omega_v, phi_p, phi_g): return omega_v, phi_p, phi_g -class ParticleSwarmTemperingOptimiser(ParticleSwarmOptimiser): +class ParticleSwarmTemperingOptimizer(ParticleSwarmOptimizer): ''' A particle swarm optimisation based 
hyperheuristic :param integer n_pso_iterations: @@ -444,7 +444,7 @@ def distribute_particles(self, n_pso_particles, n_temper_categories): return temper_map -class PerfTestMultipleAbstractor(object): +class HeuristicPerformanceFitness(object): def __init__(self, param_names, evaluation_function = None, diff --git a/src/qinfer/tests/test_optimiser.py b/src/qinfer/tests/test_optimiser.py index 475465d..e773796 100644 --- a/src/qinfer/tests/test_optimiser.py +++ b/src/qinfer/tests/test_optimiser.py @@ -44,17 +44,17 @@ class TestPSO(DerandomizedTestCase): def test_pso_quad(self): f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) - hh_opt = ParticleSwarmOptimiser(['x','y','z','a'], fitness_function = f_quad) + hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_quad) hh_opt() def test_pso_sin_sq(self): f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) - hh_opt = ParticleSwarmOptimiser(['x','y','z','a'], fitness_function = f_sin_sq) + hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_sin_sq) hh_opt() def test_pso_rosenbrock(self): f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) - hh_opt = ParticleSwarmOptimiser(['x','y','z','a'], fitness_function = f_rosenbrock) + hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock) hh_opt() @@ -84,7 +84,7 @@ def test_pso_perf_test_multiple_short(self): #Fitness function to evaluate the performance of the experiment EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) - hh_opt = ParticleSwarmOptimiser(params, + hh_opt = ParticleSwarmOptimizer(params, n_trials = n_trials, n_particles = n_particles, prior = prior, @@ -95,58 +95,21 @@ def test_pso_perf_test_multiple_short(self): hh_opt(n_pso_iterations=5, n_pso_particles=6) - - def test_pso_perf_test_multiple_long(self): - # Define our experiment - n_trials = 20 # Times we repeat the set of experiments - n_exp = 150 # Number of 
experiments in the set - n_particles = 4000 # Number of points we track during the experiment - - # Model for the experiment - model = rb.RandomizedBenchmarkingModel() - - #Ordering of RB is 'p', 'A', 'B' - # A + B < 1, 0 < p < 1 - #Prior distribution of the experiment - prior = dist.PostselectedDistribution( - dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), - model - ) - - #Heuristic used in the experiment - heuristic_class = qi.expdesign.ExpSparseHeuristic - - #Heuristic Parameters - params = ['base', 'scale'] - - #Fitness function to evaluate the performance of the experiment - EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) - - hh_opt = ParticleSwarmOptimiser(params, - n_trials = n_trials, - n_particles = n_particles, - prior = prior, - model = model, - n_exp = n_exp, - heuristic_class = heuristic_class - ) - hh_opt() - def TestPSSAO(DerandomizedTestCase): def test_pssao_quad(self): f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) - hh_opt = ParticleSwarmSimpleAnnealingOptimiser(['x','y','z','a'], fitness_function = f_quad) + hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_quad) hh_opt() def test_pssao_sin_sq(self): f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) - hh_opt = ParticleSwarmSimpleAnnealingOptimiser(['x','y','z','a'], fitness_function = f_sin_sq) + hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_sin_sq) hh_opt() def test_pssao_rosenbrock(self): f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) - hh_opt = ParticleSwarmSimpleAnnealingOptimiser(['x','y','z','a'], fitness_function = f_rosenbrock) + hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock) hh_opt() @@ -176,7 +139,7 @@ def test_pssao_perf_test_multiple_short(self): #Fitness function to evaluate the performance of the experiment 
EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) - hh_opt = ParticleSwarmSimpleAnnealingOptimiser(params, + hh_opt = ParticleSwarmSimpleAnnealingOptimizer(params, n_trials = n_trials, n_particles = n_particles, prior = prior, @@ -192,17 +155,17 @@ def TestPSTO(DerandomizedTestCase): def test_psto_quad(self): f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) - hh_opt = ParticleSwarmTemperingOptimiser(['x','y','z','a'], fitness_function = f_quad) + hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_quad) hh_opt() def test_psto_sin_sq(self): f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) - hh_opt = ParticleSwarmTemperingOptimiser(['x','y','z','a'], fitness_function = f_sin_sq) + hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_sin_sq) hh_opt() def test_psto_rosenbrock(self): f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) - hh_opt = ParticleSwarmTemperingOptimiser(['x','y','z','a'], fitness_function = f_rosenbrock) + hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock) hh_opt() @@ -232,7 +195,7 @@ def test_psto_perf_test_multiple_short(self): #Fitness function to evaluate the performance of the experiment EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) - hh_opt = ParticleSwarmTemperingOptimiser(params, + hh_opt = ParticleSwarmTemperingOptimizer(params, n_trials = n_trials, n_particles = n_particles, prior = prior, From 95094e66a5ed8b358c04d0664fecb952f621bf16 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Wed, 19 Apr 2017 18:14:21 +1000 Subject: [PATCH 06/13] =?UTF-8?q?Fixed=20import,=20tabs=20=E2=86=92=20spac?= =?UTF-8?q?es.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/qinfer/tests/test_optimiser.py | 300 +++++++++++++++-------------- 1 file changed, 151 insertions(+), 149 deletions(-) 
diff --git a/src/qinfer/tests/test_optimiser.py b/src/qinfer/tests/test_optimiser.py index e773796..c0adc8d 100644 --- a/src/qinfer/tests/test_optimiser.py +++ b/src/qinfer/tests/test_optimiser.py @@ -30,178 +30,180 @@ ## IMPORTS #################################################################### -import qinfer.rb as rb -import qinfer.distributions as dist +from functools import partial import numpy as np import random as rnd -from functools import partial +import qinfer.rb as rb +import qinfer.distributions as dist + +from qinfer.tests.base_test import DerandomizedTestCase ## CLASSES #################################################################### class TestPSO(DerandomizedTestCase): - def test_pso_quad(self): - f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) - hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_quad) - hh_opt() - - def test_pso_sin_sq(self): - f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) - hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_sin_sq) - hh_opt() - - def test_pso_rosenbrock(self): - f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) - hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock) - hh_opt() - - - def test_pso_perf_test_multiple_short(self): - # Define our experiment - n_trials = 20 # Times we repeat the set of experiments - n_exp = 100 # Number of experiments in the set - n_particles = 4000 # Number of points we track during the experiment - - # Model for the experiment - model = rb.RandomizedBenchmarkingModel() - - #Ordering of RB is 'p', 'A', 'B' - # A + B < 1, 0 < p < 1 - #Prior distribution of the experiment - prior = dist.PostselectedDistribution( - dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), - model - ) - - #Heuristic used in the experiment - heuristic_class = qi.expdesign.ExpSparseHeuristic - - #Heuristic Parameters - params = ['base', 
'scale'] - - #Fitness function to evaluate the performance of the experiment - EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) - - hh_opt = ParticleSwarmOptimizer(params, - n_trials = n_trials, - n_particles = n_particles, - prior = prior, - model = model, - n_exp = n_exp, - heuristic_class = heuristic_class - ) - hh_opt(n_pso_iterations=5, - n_pso_particles=6) + def test_pso_quad(self): + f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) + hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_quad) + hh_opt() + + def test_pso_sin_sq(self): + f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) + hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_sin_sq) + hh_opt() + + def test_pso_rosenbrock(self): + f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) + hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock) + hh_opt() + + + def test_pso_perf_test_multiple_short(self): + # Define our experiment + n_trials = 20 # Times we repeat the set of experiments + n_exp = 100 # Number of experiments in the set + n_particles = 4000 # Number of points we track during the experiment + + # Model for the experiment + model = rb.RandomizedBenchmarkingModel() + + #Ordering of RB is 'p', 'A', 'B' + # A + B < 1, 0 < p < 1 + #Prior distribution of the experiment + prior = dist.PostselectedDistribution( + dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), + model + ) + + #Heuristic used in the experiment + heuristic_class = qi.expdesign.ExpSparseHeuristic + + #Heuristic Parameters + params = ['base', 'scale'] + + #Fitness function to evaluate the performance of the experiment + EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) + + hh_opt = ParticleSwarmOptimizer(params, + n_trials = n_trials, + n_particles = n_particles, + prior = prior, + model = model, + n_exp = n_exp, + 
heuristic_class = heuristic_class + ) + hh_opt(n_pso_iterations=5, + n_pso_particles=6) def TestPSSAO(DerandomizedTestCase): - def test_pssao_quad(self): - f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) - hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_quad) - hh_opt() + def test_pssao_quad(self): + f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) + hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_quad) + hh_opt() - def test_pssao_sin_sq(self): - f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) - hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_sin_sq) - hh_opt() + def test_pssao_sin_sq(self): + f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) + hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_sin_sq) + hh_opt() - def test_pssao_rosenbrock(self): - f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) - hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock) - hh_opt() + def test_pssao_rosenbrock(self): + f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) + hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock) + hh_opt() - def test_pssao_perf_test_multiple_short(self): - # Define our experiment - n_trials = 20 # Times we repeat the set of experiments - n_exp = 150 # Number of experiments in the set - n_particles = 4000 # Number of points we track during the experiment + def test_pssao_perf_test_multiple_short(self): + # Define our experiment + n_trials = 20 # Times we repeat the set of experiments + n_exp = 150 # Number of experiments in the set + n_particles = 4000 # Number of points we track during the experiment - # Model for the experiment - model = rb.RandomizedBenchmarkingModel() + # Model for the experiment + 
model = rb.RandomizedBenchmarkingModel() - #Ordering of RB is 'p', 'A', 'B' - # A + B < 1, 0 < p < 1 - #Prior distribution of the experiment - prior = dist.PostselectedDistribution( - dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), - model - ) + #Ordering of RB is 'p', 'A', 'B' + # A + B < 1, 0 < p < 1 + #Prior distribution of the experiment + prior = dist.PostselectedDistribution( + dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), + model + ) - #Heuristic used in the experiment - heuristic_class = qi.expdesign.ExpSparseHeuristic + #Heuristic used in the experiment + heuristic_class = qi.expdesign.ExpSparseHeuristic - #Heuristic Parameters - params = ['base', 'scale'] + #Heuristic Parameters + params = ['base', 'scale'] - #Fitness function to evaluate the performance of the experiment - EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) + #Fitness function to evaluate the performance of the experiment + EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) - hh_opt = ParticleSwarmSimpleAnnealingOptimizer(params, - n_trials = n_trials, - n_particles = n_particles, - prior = prior, - model = model, - n_exp = n_exp, - heuristic_class = heuristic_class - ) - hh_opt(n_pso_iterations=5, - n_pso_particles=6) + hh_opt = ParticleSwarmSimpleAnnealingOptimizer(params, + n_trials = n_trials, + n_particles = n_particles, + prior = prior, + model = model, + n_exp = n_exp, + heuristic_class = heuristic_class + ) + hh_opt(n_pso_iterations=5, + n_pso_particles=6) def TestPSTO(DerandomizedTestCase): - def test_psto_quad(self): - f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) - hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_quad) - hh_opt() - - def test_psto_sin_sq(self): - f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) - hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_sin_sq) - 
hh_opt() - - def test_psto_rosenbrock(self): - f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) - hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock) - hh_opt() - - - def test_psto_perf_test_multiple_short(self): - # Define our experiment - n_trials = 20 # Times we repeat the set of experiments - n_exp = 150 # Number of experiments in the set - n_particles = 4000 # Number of points we track during the experiment - - # Model for the experiment - model = rb.RandomizedBenchmarkingModel() - - #Ordering of RB is 'p', 'A', 'B' - # A + B < 1, 0 < p < 1 - #Prior distribution of the experiment - prior = dist.PostselectedDistribution( - dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), - model - ) - - #Heuristic used in the experiment - heuristic_class = qi.expdesign.ExpSparseHeuristic - - #Heuristic Parameters - params = ['base', 'scale'] - - #Fitness function to evaluate the performance of the experiment - EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) - - hh_opt = ParticleSwarmTemperingOptimizer(params, - n_trials = n_trials, - n_particles = n_particles, - prior = prior, - model = model, - n_exp = n_exp, - heuristic_class = heuristic_class - ) - hh_opt(n_pso_iterations=5, - n_pso_particles=6) + def test_psto_quad(self): + f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) + hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_quad) + hh_opt() + + def test_psto_sin_sq(self): + f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) + hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_sin_sq) + hh_opt() + + def test_psto_rosenbrock(self): + f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) + hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock) + hh_opt() + + 
+ def test_psto_perf_test_multiple_short(self): + # Define our experiment + n_trials = 20 # Times we repeat the set of experiments + n_exp = 150 # Number of experiments in the set + n_particles = 4000 # Number of points we track during the experiment + + # Model for the experiment + model = rb.RandomizedBenchmarkingModel() + + #Ordering of RB is 'p', 'A', 'B' + # A + B < 1, 0 < p < 1 + #Prior distribution of the experiment + prior = dist.PostselectedDistribution( + dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), + model + ) + + #Heuristic used in the experiment + heuristic_class = qi.expdesign.ExpSparseHeuristic + + #Heuristic Parameters + params = ['base', 'scale'] + + #Fitness function to evaluate the performance of the experiment + EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) + + hh_opt = ParticleSwarmTemperingOptimizer(params, + n_trials = n_trials, + n_particles = n_particles, + prior = prior, + model = model, + n_exp = n_exp, + heuristic_class = heuristic_class + ) + hh_opt(n_pso_iterations=5, + n_pso_particles=6) From 339fc5db58862c1e35c7662e0ea88bc0ace236d1 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Wed, 19 Apr 2017 18:15:55 +1000 Subject: [PATCH 07/13] PEP8 / pylint fixes --- src/qinfer/tests/test_optimiser.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/qinfer/tests/test_optimiser.py b/src/qinfer/tests/test_optimiser.py index c0adc8d..3f37452 100644 --- a/src/qinfer/tests/test_optimiser.py +++ b/src/qinfer/tests/test_optimiser.py @@ -45,18 +45,21 @@ class TestPSO(DerandomizedTestCase): def test_pso_quad(self): - f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) - hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_quad) + f_quad = lambda x: numpy.sum(10 * (x - 0.5) ** 2) + hh_opt = ParticleSwarmOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_quad) hh_opt() def test_pso_sin_sq(self): - f_sin_sq = lambda x: 
numpy.sum(np.sin(x - 0.2)**2) - hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_sin_sq) + f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2) ** 2) + hh_opt = ParticleSwarmOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_sin_sq) hh_opt() def test_pso_rosenbrock(self): - f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) - hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock) + f_rosenbrock = lambda x: numpy.sum([ + ((x[i + 1] - x[i] ** 2) ** 2 + (1 - x[i])** 2) / len(x) + for i in range(len(x) - 1) + ]) + hh_opt = ParticleSwarmOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_rosenbrock) hh_opt() From 60f3874bec9f0143541e44b7b0c72c97e683e11e Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Wed, 19 Apr 2017 18:21:23 +1000 Subject: [PATCH 08/13] Import fixes, some tests now pass. --- src/qinfer/tests/test_optimiser.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/qinfer/tests/test_optimiser.py b/src/qinfer/tests/test_optimiser.py index 3f37452..c23cbdd 100644 --- a/src/qinfer/tests/test_optimiser.py +++ b/src/qinfer/tests/test_optimiser.py @@ -35,27 +35,30 @@ import numpy as np import random as rnd +from qinfer.tests.base_test import DerandomizedTestCase + import qinfer.rb as rb import qinfer.distributions as dist -from qinfer.tests.base_test import DerandomizedTestCase +from qinfer.hyper_heuristic_optimisers import ParticleSwarmOptimizer +from qinfer.expdesign import ExpSparseHeuristic ## CLASSES #################################################################### class TestPSO(DerandomizedTestCase): def test_pso_quad(self): - f_quad = lambda x: numpy.sum(10 * (x - 0.5) ** 2) + f_quad = lambda x: np.sum(10 * (x - 0.5) ** 2) hh_opt = ParticleSwarmOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_quad) hh_opt() def test_pso_sin_sq(self): - f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2) ** 2) + f_sin_sq = lambda x: 
np.sum(np.sin(x - 0.2) ** 2) hh_opt = ParticleSwarmOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_sin_sq) hh_opt() def test_pso_rosenbrock(self): - f_rosenbrock = lambda x: numpy.sum([ + f_rosenbrock = lambda x: np.sum([ ((x[i + 1] - x[i] ** 2) ** 2 + (1 - x[i])** 2) / len(x) for i in range(len(x) - 1) ]) @@ -81,14 +84,12 @@ def test_pso_perf_test_multiple_short(self): ) #Heuristic used in the experiment - heuristic_class = qi.expdesign.ExpSparseHeuristic + heuristic_class = ExpSparseHeuristic #Heuristic Parameters params = ['base', 'scale'] #Fitness function to evaluate the performance of the experiment - EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) - hh_opt = ParticleSwarmOptimizer(params, n_trials = n_trials, n_particles = n_particles, From cab23a986131dfc61a882d65fca9fb094d28d295 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Wed, 19 Apr 2017 18:31:20 +1000 Subject: [PATCH 09/13] More import fixes, refactored to test annealing+tempering. --- src/qinfer/tests/test_optimiser.py | 160 ++++++----------------------- 1 file changed, 31 insertions(+), 129 deletions(-) diff --git a/src/qinfer/tests/test_optimiser.py b/src/qinfer/tests/test_optimiser.py index c23cbdd..957280b 100644 --- a/src/qinfer/tests/test_optimiser.py +++ b/src/qinfer/tests/test_optimiser.py @@ -30,43 +30,47 @@ ## IMPORTS #################################################################### -from functools import partial - import numpy as np -import random as rnd from qinfer.tests.base_test import DerandomizedTestCase import qinfer.rb as rb import qinfer.distributions as dist -from qinfer.hyper_heuristic_optimisers import ParticleSwarmOptimizer +from qinfer.hyper_heuristic_optimisers import ( + ParticleSwarmOptimizer, + ParticleSwarmSimpleAnnealingOptimizer, + ParticleSwarmTemperingOptimizer +) from qinfer.expdesign import ExpSparseHeuristic ## CLASSES #################################################################### -class TestPSO(DerandomizedTestCase): 
+class OptimizerTestMethods(object): + # See http://stackoverflow.com/a/1323554/267841 for why this works. + + optimizer_class = None - def test_pso_quad(self): + def test_quad(self): f_quad = lambda x: np.sum(10 * (x - 0.5) ** 2) - hh_opt = ParticleSwarmOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_quad) + hh_opt = self.optimizer_class(['x', 'y', 'z', 'a'], fitness_function=f_quad) hh_opt() - def test_pso_sin_sq(self): + def test_sin_sq(self): f_sin_sq = lambda x: np.sum(np.sin(x - 0.2) ** 2) - hh_opt = ParticleSwarmOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_sin_sq) + hh_opt = self.optimizer_class(['x', 'y', 'z', 'a'], fitness_function=f_sin_sq) hh_opt() - def test_pso_rosenbrock(self): + def test_rosenbrock(self): f_rosenbrock = lambda x: np.sum([ ((x[i + 1] - x[i] ** 2) ** 2 + (1 - x[i])** 2) / len(x) for i in range(len(x) - 1) ]) - hh_opt = ParticleSwarmOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_rosenbrock) + hh_opt = self.optimizer_class(['x', 'y', 'z', 'a'], fitness_function=f_rosenbrock) hh_opt() - def test_pso_perf_test_multiple_short(self): + def test_perf_test_multiple_short(self): # Define our experiment n_trials = 20 # Times we repeat the set of experiments n_exp = 100 # Number of experiments in the set @@ -90,124 +94,22 @@ def test_pso_perf_test_multiple_short(self): params = ['base', 'scale'] #Fitness function to evaluate the performance of the experiment - hh_opt = ParticleSwarmOptimizer(params, - n_trials = n_trials, - n_particles = n_particles, - prior = prior, - model = model, - n_exp = n_exp, - heuristic_class = heuristic_class - ) - hh_opt(n_pso_iterations=5, - n_pso_particles=6) - -def TestPSSAO(DerandomizedTestCase): - - def test_pssao_quad(self): - f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) - hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_quad) - hh_opt() - - def test_pssao_sin_sq(self): - f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) - hh_opt = 
ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_sin_sq) - hh_opt() - - def test_pssao_rosenbrock(self): - f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) - hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock) - hh_opt() - - - def test_pssao_perf_test_multiple_short(self): - # Define our experiment - n_trials = 20 # Times we repeat the set of experiments - n_exp = 150 # Number of experiments in the set - n_particles = 4000 # Number of points we track during the experiment - - # Model for the experiment - model = rb.RandomizedBenchmarkingModel() - - #Ordering of RB is 'p', 'A', 'B' - # A + B < 1, 0 < p < 1 - #Prior distribution of the experiment - prior = dist.PostselectedDistribution( - dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), - model - ) - - #Heuristic used in the experiment - heuristic_class = qi.expdesign.ExpSparseHeuristic - - #Heuristic Parameters - params = ['base', 'scale'] - - #Fitness function to evaluate the performance of the experiment - EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) - - hh_opt = ParticleSwarmSimpleAnnealingOptimizer(params, - n_trials = n_trials, - n_particles = n_particles, - prior = prior, - model = model, - n_exp = n_exp, - heuristic_class = heuristic_class - ) + hh_opt = self.optimizer_class(params, + n_trials=n_trials, + n_particles=n_particles, + prior=prior, + model=model, + n_exp=n_exp, + heuristic_class=heuristic_class + ) hh_opt(n_pso_iterations=5, - n_pso_particles=6) - - -def TestPSTO(DerandomizedTestCase): - - def test_psto_quad(self): - f_quad = lambda x: numpy.sum(10 * (x-0.5)**2) - hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_quad) - hh_opt() - - def test_psto_sin_sq(self): - f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2) - hh_opt = 
ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_sin_sq) - hh_opt() + n_pso_particles=6) - def test_psto_rosenbrock(self): - f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)]) - hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock) - hh_opt() - - - def test_psto_perf_test_multiple_short(self): - # Define our experiment - n_trials = 20 # Times we repeat the set of experiments - n_exp = 150 # Number of experiments in the set - n_particles = 4000 # Number of points we track during the experiment +class TestPSO(DerandomizedTestCase, OptimizerTestMethods): + optimizer_class = ParticleSwarmOptimizer - # Model for the experiment - model = rb.RandomizedBenchmarkingModel() +class TestPSSAO(DerandomizedTestCase, OptimizerTestMethods): + optimizer_class = ParticleSwarmSimpleAnnealingOptimizer - #Ordering of RB is 'p', 'A', 'B' - # A + B < 1, 0 < p < 1 - #Prior distribution of the experiment - prior = dist.PostselectedDistribution( - dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])), - model - ) - - #Heuristic used in the experiment - heuristic_class = qi.expdesign.ExpSparseHeuristic - - #Heuristic Parameters - params = ['base', 'scale'] - - #Fitness function to evaluate the performance of the experiment - EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0) - - hh_opt = ParticleSwarmTemperingOptimizer(params, - n_trials = n_trials, - n_particles = n_particles, - prior = prior, - model = model, - n_exp = n_exp, - heuristic_class = heuristic_class - ) - hh_opt(n_pso_iterations=5, - n_pso_particles=6) +class TestPSTO(DerandomizedTestCase, OptimizerTestMethods): + optimizer_class = ParticleSwarmTemperingOptimizer From 9dd8c1210c86ffc181baf0ce78cab61e186542cd Mon Sep 17 00:00:00 2001 From: Alan Robertson Date: Fri, 12 May 2017 15:58:00 +1000 Subject: [PATCH 10/13] Added SPSA optimiser/optimizer --- 
src/qinfer/hyper_heuristic_optimisers.py | 108 ++++++++++++++++++++--- 1 file changed, 97 insertions(+), 11 deletions(-) diff --git a/src/qinfer/hyper_heuristic_optimisers.py b/src/qinfer/hyper_heuristic_optimisers.py index c1c338f..181b09a 100644 --- a/src/qinfer/hyper_heuristic_optimisers.py +++ b/src/qinfer/hyper_heuristic_optimisers.py @@ -79,6 +79,17 @@ def fitness_function(self, params): def parallel(self): raise NotImplementedError("This optimizer does not have parallel support.") + def evaluate_fitness(self, particles, apply=apply): + fitness_function = partial(self.fitness_function) + #fitness = map(self.fitness_function, particles) + results = [apply(self.fitness_function, particle) for particle in particles] + fitness = [result.get() for result in results] + return fitness + + def update_positions(self, positions, velocities): + updated = positions + velocities + return updated + class ParticleSwarmOptimizer(Optimizer): ''' A particle swarm optimisation based hyperheuristic @@ -169,17 +180,6 @@ def __call__(self, return global_attractor - def evaluate_fitness(self, particles, apply=apply): - fitness_function = partial(self.fitness_function) - #fitness = map(self.fitness_function, particles) - results = [apply(self.fitness_function, particle) for particle in particles] - fitness = [result.get() for result in results] - return fitness - - def update_positions(self, positions, velocities): - updated = positions + velocities - return updated - def update_velocities(self, positions, velocities, local_attractors, global_attractor, omega_v, phi_p, phi_g): random_p = np.random.random_sample(positions.shape) random_g = np.random.random_sample(positions.shape) @@ -443,6 +443,92 @@ def distribute_particles(self, n_pso_particles, n_temper_categories): np.append(temper_map[random.randrange(0, n_temper_categories)], [particle_indicies[i]])) return temper_map + + +class SPSATwoSiteOptimizer(Optimizer): + + def __call__(self, + n_spsa_iterations = 60, + n_spsa_particles 
= 50, + initial_position_distribution = None, + A = 0, + s = 1/3, + t = 1, + a = 0.5, + b = 0.5, + apply=apply_serial + ): + + self._fitness = np.empty([n_spsa_iterations, n_spsa_particles], dtype=self.fitness_dt()) + + if initial_position_distribution is None: + initial_position_distribution = distributions.UniformDistribution(np.array([[ 0, 1]] * self._n_free_params)); + + # Initial particle positions + self._fitness[0]["params"] = initial_position_distribution.sample(n_spsa_particles) + + # Apply the boundary conditions if any exist + if self._boundary_map is not None: + self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) + + # Calculate the initial particle fitnesses + self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], + apply=apply) + + for itr in range(1, n_spsa_iterations): + + # Helper functions to determine the update + delta_k = self.delta(n_spsa_particles, self._n_free_params) + first_site = np.vstack( + self.evaluate_fitness( + self._fitness[itr-1]["params"] - self.alpha(itr, a, A, s)*delta_k, + apply=apply)) + second_site = np.vstack( + self.evaluate_fitness( + self._fitness[itr-1]["params"] + self.alpha(itr, a, A, s)*delta_k, + apply=apply)) + + # Determine the update velocity + self._fitness[itr - 1]["velocities"] = self.update_velocities(first_site, + second_site, + self.alpha(itr, a, A, s), + self.beta(itr, b, t), + delta_k) + + # Update the SPSA particle positions + self._fitness[itr]["params"] = self.update_positions( + self._fitness[itr - 1]["params"], + self._fitness[itr - 1]["velocities"]) + + # Apply the boundary conditions if any exist + if self._boundary_map is not None: + self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) + + # Calculate the fitness of the new positions + self._fitness[itr]["fitness"] = self.evaluate_fitness(self._fitness[itr]["params"], + apply=apply) + + return self._fitness[n_spsa_iterations - 
1][np.argmin(self._fitness[n_spsa_iterations - 1]['fitness'])] + + + def alpha(self, k, a, A, s): + return a / (1 + A + k)**s + + def beta(self, k, b, t): + return b / (1 + k)**t + + def delta(self, n_particles, n_params): + return (2 * np.round(np.random.random((n_particles, n_params)))) - 1 + + def update_velocities(self, first_site, second_site, alpha, beta, delta): + return delta * beta * (first_site - second_site) / (2* alpha) + + def fitness_dt(self): + return np.dtype([ + ('params', np.float64, (self._n_free_params,)), + ('velocities', np.float64, (self._n_free_params,)), + ('fitness', np.float64)]) + class HeuristicPerformanceFitness(object): def __init__(self, From 73875a35ea94ad43d81583b1df7e330c7523c098 Mon Sep 17 00:00:00 2001 From: Alan Robertson Date: Thu, 18 May 2017 14:39:29 +1000 Subject: [PATCH 11/13] Fixed apply issue for python 3 --- src/qinfer/hyper_heuristic_optimisers.py | 563 ----------------------- 1 file changed, 563 deletions(-) delete mode 100644 src/qinfer/hyper_heuristic_optimisers.py diff --git a/src/qinfer/hyper_heuristic_optimisers.py b/src/qinfer/hyper_heuristic_optimisers.py deleted file mode 100644 index 181b09a..0000000 --- a/src/qinfer/hyper_heuristic_optimisers.py +++ /dev/null @@ -1,563 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -## -# test_models.py: Simple models for testing inference engines. -## -# © 2017 Alan Robertson (arob8086@uni.sydney.edu.au) -# -# This file is a part of the Qinfer project. -# Licensed under the AGPL version 3. -## -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU Affero General Public License for more details. -# -# You should have received a copy of the GNU Affero General Public License -# along with this program. If not, see . -## - -## FEATURES ################################################################## - -from __future__ import division -from __future__ import absolute_import -from __future__ import print_function - -## IMPORTS ################################################################### - -import random -import numpy as np -from functools import partial -from qinfer.perf_testing import perf_test_multiple, apply_serial -from qinfer import distributions - -## CLASSES #################################################################### - -class Optimizer(object): - ''' - A generic optimizer class that is inherited by the other optimisation functions. - - :param np.ndarray param_names: The list of parameters that are being searched over. - :param function fitness_function: The function that is being optimised over, defaults to perf test multiple - :param function boundary_map: Function to constrain points within some boundary regime - :param dict funct_args: Arguments to pass to the fitness function - :param dict funct_kwargs: Keyword arguments to pass to the fitness function - ''' - - def __init__( - self, - param_names, - fitness_function = None, - boundary_map=None, - *funct_args, - **funct_kwargs - ): - self._param_names = param_names - self._n_free_params = len(param_names) - self._boundary_map = boundary_map - self._funct_args = funct_args - self._funct_kwargs = funct_kwargs - - if fitness_function is None: # Default to calling perf test multiple - self._fitness_function = HeuristicPerformanceFitness( - self._param_names, - *self._funct_args, - **self._funct_kwargs - ) - else: - self._fitness_function = partial(fitness_function, *self._funct_args, **self._funct_kwargs) - - # Member function needed for parralelisation - def fitness_function(self, params): - return 
self._fitness_function(params) - - def parallel(self): - raise NotImplementedError("This optimizer does not have parallel support.") - - def evaluate_fitness(self, particles, apply=apply): - fitness_function = partial(self.fitness_function) - #fitness = map(self.fitness_function, particles) - results = [apply(self.fitness_function, particle) for particle in particles] - fitness = [result.get() for result in results] - return fitness - - def update_positions(self, positions, velocities): - updated = positions + velocities - return updated - -class ParticleSwarmOptimizer(Optimizer): - ''' - A particle swarm optimisation based hyperheuristic - :param integer n_pso_iterations: - :param integer n_pso_particles: - :param QInferDistribution initial_velocity_distribution: - :param QInferDistribution initial_velocity_distribution: - :param double - :param double - :param function - ''' - - def __call__(self, - n_pso_iterations=50, - n_pso_particles=60, - initial_position_distribution=None, - initial_velocity_distribution=None, - omega_v=0.35, - phi_p=0.25, - phi_g=0.5, - apply=apply_serial - ): - self._fitness = np.empty([n_pso_iterations, n_pso_particles], dtype=self.fitness_dt()) - local_attractors = np.empty([n_pso_particles], dtype=self.fitness_dt()) - global_attractor = np.empty([1], dtype=self.fitness_dt()) - - if initial_position_distribution is None: - initial_position_distribution = distributions.UniformDistribution(np.array([[ 0, 1]] * self._n_free_params)); - - if initial_velocity_distribution is None: - initial_velocity_distribution = distributions.UniformDistribution(np.array([[-1, 1]] * self._n_free_params)) - - # Initial particle positions - self._fitness[0]["params"] = initial_position_distribution.sample(n_pso_particles) - - # Apply the boundary conditions if any exist - if self._boundary_map is not None: - self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) - - # Calculate the initial particle fitnesses - 
self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], - apply=apply) - - # Calculate the positions of the attractors - local_attractors = self._fitness[0] - local_attractors, global_attractor = self.update_attractors( - self._fitness[0], - local_attractors, - global_attractor) - - # Initial particle velocities - self._fitness[0]["velocities"] = initial_velocity_distribution.sample(n_pso_particles) - self._fitness[0]["velocities"] = self.update_velocities( - self._fitness[0]["params"], - self._fitness[0]["velocities"], - local_attractors["params"], - global_attractor["params"], - omega_v, phi_p, phi_g) - - for itr in range(1, n_pso_iterations): - #Update the particle positions - self._fitness[itr]["params"] = self.update_positions( - self._fitness[itr - 1]["params"], - self._fitness[itr - 1]["velocities"]) - - # Apply the boundary conditions if any exist - if self._boundary_map is not None: - self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) - - # Recalculate the fitness function - self._fitness[itr]["fitness"] = self.evaluate_fitness( - self._fitness[itr]["params"], - apply=apply) - - # Find the new attractors - local_attractors, global_attractor = self.update_attractors( - self._fitness[itr], - local_attractors, - global_attractor) - - # Update the velocities - self._fitness[itr]["velocities"] = self.update_velocities( - self._fitness[itr]["params"], - self._fitness[itr - 1]["velocities"], - local_attractors["params"], - global_attractor["params"], - omega_v, phi_p, phi_g) - - return global_attractor - - def update_velocities(self, positions, velocities, local_attractors, global_attractor, omega_v, phi_p, phi_g): - random_p = np.random.random_sample(positions.shape) - random_g = np.random.random_sample(positions.shape) - updated = omega_v * velocities + phi_p * random_p * (local_attractors - positions) + phi_g * random_g * (global_attractor - positions) - return updated - - def update_attractors(self, 
particles, local_attractors, global_attractor): - for idx, particle in enumerate(particles): - if particle["fitness"] < local_attractors[idx]["fitness"]: - local_attractors[idx] = particle - global_attractor = local_attractors[np.argmin(local_attractors["fitness"])] - return local_attractors, global_attractor - - def fitness_dt(self): - return np.dtype([ - ('params', np.float64, (self._n_free_params,)), - ('velocities', np.float64, (self._n_free_params,)), - ('fitness', np.float64)]) - - -class ParticleSwarmSimpleAnnealingOptimizer(ParticleSwarmOptimizer): - - def __call__(self, - n_pso_iterations=50, - n_pso_particles=60, - initial_position_distribution=None, - initial_velocity_distribution=None, - omega_v=0.35, - phi_p=0.25, - phi_g=0.5, - temperature = 0.95, - apply=apply_serial - ): - self._fitness = np.empty([n_pso_iterations, n_pso_particles], dtype=self.fitness_dt()) - local_attractors = np.empty([n_pso_particles], dtype=self.fitness_dt()) - global_attractor = np.empty([1], dtype=self.fitness_dt()) - - if initial_position_distribution is None: - initial_position_distribution = distributions.UniformDistribution(np.array([[ 0, 1]] * self._n_free_params)); - - if initial_velocity_distribution is None: - initial_velocity_distribution = distributions.UniformDistribution(np.array([[-1, 1]] * self._n_free_params)) - - # Initial particle positions - self._fitness[0]["params"] = initial_position_distribution.sample(n_pso_particles) - - # Apply the boundary conditions if any exist - if self._boundary_map is not None: - self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) - - # Calculate the initial particle fitnesses - self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], - apply=apply) - - # Calculate the positions of the attractors - local_attractors = self._fitness[0] - local_attractors, global_attractor = self.update_attractors( - self._fitness[0], - local_attractors, - global_attractor) - - # Initial particle 
velocities - self._fitness[0]["velocities"] = initial_velocity_distribution.sample(n_pso_particles) - self._fitness[0]["velocities"] = self.update_velocities( - self._fitness[0]["params"], - self._fitness[0]["velocities"], - local_attractors["params"], - global_attractor["params"], - omega_v, phi_p, phi_g) - - for itr in range(1, n_pso_iterations): - #Update the particle positions - self._fitness[itr]["params"] = self.update_positions( - self._fitness[itr - 1]["params"], - self._fitness[itr - 1]["velocities"]) - - # Apply the boundary conditions if any exist - if self._boundary_map is not None: - self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) - - # Recalculate the fitness function - self._fitness[itr]["fitness"] = self.evaluate_fitness( - self._fitness[itr]["params"], - apply=apply) - - # Find the new attractors - local_attractors, global_attractor = self.update_attractors( - self._fitness[itr], - local_attractors, - global_attractor) - - # Update the velocities - self._fitness[itr]["velocities"] = self.update_velocities( - self._fitness[itr]["params"], - self._fitness[itr - 1]["velocities"], - local_attractors["params"], - global_attractor["params"], - omega_v, phi_p, phi_g) - - # Update the PSO params - omega_v, phi_p, phi_g = self.update_pso_params( - temperature, - omega_v, - phi_p, - phi_g) - - return global_attractor - - def update_pso_params(self, temperature, omega_v, phi_p, phi_g): - omega_v, phi_p, phi_g = np.multiply(temperature, [omega_v, phi_p, phi_g]) - return omega_v, phi_p, phi_g - - -class ParticleSwarmTemperingOptimizer(ParticleSwarmOptimizer): - ''' - A particle swarm optimisation based hyperheuristic - :param integer n_pso_iterations: - :param integer n_pso_particles: - :param QInferDistribution initial_velocity_distribution: - :param QInferDistribution initial_velocity_distribution: - :param double - :param double - :param function - ''' - - def __call__(self, - n_pso_iterations=50, - n_pso_particles=60, - 
initial_position_distribution=None, - initial_velocity_distribution=None, - n_temper_categories = 6, - temper_frequency = 10, - temper_params = None, - apply=apply_serial - ): - self._fitness = np.empty([n_pso_iterations, n_pso_particles], dtype=self.fitness_dt()) - local_attractors = np.empty([n_pso_particles], dtype=self.fitness_dt()) - global_attractor = np.empty([1], dtype=self.fitness_dt()) - - if initial_position_distribution is None: - initial_position_distribution = distributions.UniformDistribution(np.array([[ 0, 1]] * self._n_free_params)); - - if initial_velocity_distribution is None: - initial_velocity_distribution = distributions.UniformDistribution(np.array([[-1, 1]] * self._n_free_params)) - - if temper_params is None: - omega_v = np.random.random(n_temper_categories) - phi_p = np.random.random(n_temper_categories) - phi_g = np.random.random(n_temper_categories) - temper_params = [np.array((params), dtype=self.temper_params_dt()) for params in zip(omega_v, phi_p, phi_g)] - - # Distribute the particles into different temper categories - temper_map = self.distribute_particles(n_pso_particles, n_temper_categories) - - # Initial particle positions - self._fitness[0]["params"] = initial_position_distribution.sample(n_pso_particles) - - # Apply the boundary conditions if any exist - if self._boundary_map is not None: - self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) - - # Calculate the initial particle fitnesses - self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], - apply=apply) - - # Calculate the positions of the attractors - local_attractors = self._fitness[0] - local_attractors, global_attractor = self.update_attractors( - self._fitness[0], - local_attractors, - global_attractor) - - # Initial particle velocities - self._fitness[0]["velocities"] = initial_velocity_distribution.sample(n_pso_particles) - - # Update the velocities using the temper map - for idx, temper_category in 
enumerate(temper_map): - self._fitness[0]["velocities"][temper_category] = self.update_velocities( - self._fitness[0]["params"][temper_category], - self._fitness[0]["velocities"][temper_category], - local_attractors["params"][temper_category], - global_attractor["params"], - temper_params[idx]["omega_v"], - temper_params[idx]["phi_p"], - temper_params[idx]["phi_g"]) - - for itr in range(1, n_pso_iterations): - # Update the particle positions - self._fitness[itr]["params"] = self.update_positions( - self._fitness[itr - 1]["params"], - self._fitness[itr - 1]["velocities"]) - - # Apply the boundary conditions if any exist - if self._boundary_map is not None: - self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) - - # Recalculate the fitness function - self._fitness[itr]["fitness"] = self.evaluate_fitness( - self._fitness[itr]["params"], - apply=apply) - - # Find the new attractors - local_attractors, global_attractor = self.update_attractors( - self._fitness[itr], - local_attractors, - global_attractor) - - # Update the velocities - for idx, temper_category in enumerate(temper_map): - self._fitness[itr]["velocities"][temper_category] = self.update_velocities( - self._fitness[itr]["params"][temper_category], - self._fitness[itr - 1]["velocities"][temper_category], - local_attractors["params"][temper_category], - global_attractor["params"], - temper_params[idx]["omega_v"], - temper_params[idx]["phi_p"], - temper_params[idx]["phi_g"]) - - # Redistribute the particles into different temper categories - if itr % temper_frequency == 0: - temper_map = self.distribute_particles(n_pso_particles, n_temper_categories) - - return global_attractor - - def temper_params_dt(self): - return np.dtype([ - ('omega_v', np.float64), - ('phi_p', np.float64), - ('phi_g', np.float64)]) - - - def distribute_particles(self, n_pso_particles, n_temper_categories): - - # Distribute as many particles as evenly as possible across the categories, - # This ensures that 
there are no empty categories - n_evenly_distributable = (n_pso_particles // n_temper_categories) * n_temper_categories - n_unevenly_distributable = n_pso_particles - n_evenly_distributable - - # Generate the required indicies for the pso particles - particle_indicies = range(0, n_pso_particles) - - # Randomise the order - np.random.shuffle(particle_indicies) - - # Reshape to a 2D array indexed on the number of tempering categories - particle_map = np.reshape( - particle_indicies[:n_evenly_distributable], - (n_temper_categories, n_evenly_distributable//n_temper_categories)) - - # Transfer to the map - temper_map = {} - for i, index_category in enumerate(particle_map): - temper_map[i] = index_category - - # Transfer any excess particles that could not be evenly distributed - # This is a slow operation, so for the purposes of speed the number of - # temper categories should be a factor of the number of pso particles - if n_unevenly_distributable != 0: - for i in range(n_evenly_distributable, n_pso_particles): - temper_map[random.randrange(0, n_temper_categories)] = ( - np.append(temper_map[random.randrange(0, n_temper_categories)], [particle_indicies[i]])) - - return temper_map - - -class SPSATwoSiteOptimizer(Optimizer): - - def __call__(self, - n_spsa_iterations = 60, - n_spsa_particles = 50, - initial_position_distribution = None, - A = 0, - s = 1/3, - t = 1, - a = 0.5, - b = 0.5, - apply=apply_serial - ): - - self._fitness = np.empty([n_spsa_iterations, n_spsa_particles], dtype=self.fitness_dt()) - - if initial_position_distribution is None: - initial_position_distribution = distributions.UniformDistribution(np.array([[ 0, 1]] * self._n_free_params)); - - # Initial particle positions - self._fitness[0]["params"] = initial_position_distribution.sample(n_spsa_particles) - - # Apply the boundary conditions if any exist - if self._boundary_map is not None: - self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) - - # Calculate the initial 
particle fitnesses - self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], - apply=apply) - - for itr in range(1, n_spsa_iterations): - - # Helper functions to determine the update - delta_k = self.delta(n_spsa_particles, self._n_free_params) - first_site = np.vstack( - self.evaluate_fitness( - self._fitness[itr-1]["params"] - self.alpha(itr, a, A, s)*delta_k, - apply=apply)) - second_site = np.vstack( - self.evaluate_fitness( - self._fitness[itr-1]["params"] + self.alpha(itr, a, A, s)*delta_k, - apply=apply)) - - # Determine the update velocity - self._fitness[itr - 1]["velocities"] = self.update_velocities(first_site, - second_site, - self.alpha(itr, a, A, s), - self.beta(itr, b, t), - delta_k) - - # Update the SPSA particle positions - self._fitness[itr]["params"] = self.update_positions( - self._fitness[itr - 1]["params"], - self._fitness[itr - 1]["velocities"]) - - # Apply the boundary conditions if any exist - if self._boundary_map is not None: - self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"]) - - # Calculate the fitness of the new positions - self._fitness[itr]["fitness"] = self.evaluate_fitness(self._fitness[itr]["params"], - apply=apply) - - return self._fitness[n_spsa_iterations - 1][np.argmin(self._fitness[n_spsa_iterations - 1]['fitness'])] - - - def alpha(self, k, a, A, s): - return a / (1 + A + k)**s - - def beta(self, k, b, t): - return b / (1 + k)**t - - def delta(self, n_particles, n_params): - return (2 * np.round(np.random.random((n_particles, n_params)))) - 1 - - def update_velocities(self, first_site, second_site, alpha, beta, delta): - return delta * beta * (first_site - second_site) / (2* alpha) - - def fitness_dt(self): - return np.dtype([ - ('params', np.float64, (self._n_free_params,)), - ('velocities', np.float64, (self._n_free_params,)), - ('fitness', np.float64)]) - - -class HeuristicPerformanceFitness(object): - def __init__(self, - param_names, - evaluation_function = None, - 
*args, - **kwargs): - try: - self._heuristic_class = kwargs['heuristic_class'] - del kwargs['heuristic_class'] - except: - raise NotImplementedError("No heuristic class was passed.") - self._args = args - self._kwargs = kwargs - self._param_names = param_names - if evaluation_function is None: - self._evaluation_function = lambda performance: performance['loss'][:,-1].mean(axis=0) - else: - self._evaluation_function = evaluation_function - - def __call__(self, params): - performance = perf_test_multiple( - *self._args, - heuristic_class = partial( - self._heuristic_class, - **{name: param - for name, param in zip(self._param_names, params) - }), - **self._kwargs - ) - - return self._evaluation_function(performance) \ No newline at end of file From b87a69cfb0256805a230c289bfbc495677270a38 Mon Sep 17 00:00:00 2001 From: Alan Robertson Date: Fri, 19 May 2017 14:47:39 +1000 Subject: [PATCH 12/13] Fixed issue with boundary map --- src/qinfer/hyper_heuristic_optimizers.py | 573 +++++++++++++++++++++++ 1 file changed, 573 insertions(+) create mode 100644 src/qinfer/hyper_heuristic_optimizers.py diff --git a/src/qinfer/hyper_heuristic_optimizers.py b/src/qinfer/hyper_heuristic_optimizers.py new file mode 100644 index 0000000..4691446 --- /dev/null +++ b/src/qinfer/hyper_heuristic_optimizers.py @@ -0,0 +1,573 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +## +# test_models.py: Simple models for testing inference engines. +## +# © 2017 Alan Robertson (arob8086@uni.sydney.edu.au) +# +# This file is a part of the Qinfer project. +# Licensed under the AGPL version 3. +## +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +## + +## FEATURES ################################################################## + +from __future__ import division +from __future__ import absolute_import +from __future__ import print_function + +## EXPORTS ################################################################### + +__all__ = [ + 'ParticleSwarmOptimizer', + 'ParticleSwarmSimpleAnnealingOptimizer', + 'ParticleSwarmTemperingOptimizer', + 'SPSATwoSiteOptimizer', + 'HeuristicPerformanceFitness' +] + +## IMPORTS ################################################################### + +import random +import numpy as np +from functools import partial +from qinfer.perf_testing import perf_test_multiple, apply_serial +from qinfer import distributions + +## CLASSES #################################################################### + +class Optimizer(object): + ''' + A generic optimizer class that is inherited by the other optimisation functions. + + :param np.ndarray param_names: The list of parameters that are being searched over. 
+ :param function fitness_function: The function that is being optimised over, defaults to perf test multiple + :param function boundary_map: Function to constrain points within some boundary regime + :param dict funct_args: Arguments to pass to the fitness function + :param dict funct_kwargs: Keyword arguments to pass to the fitness function + ''' + + def __init__( + self, + param_names, + fitness_function = None, + boundary_map=None, + *funct_args, + **funct_kwargs + ): + self._param_names = param_names + self._n_free_params = len(param_names) + self._boundary_map = boundary_map + self._funct_args = funct_args + self._funct_kwargs = funct_kwargs + + if fitness_function is None: # Default to calling perf test multiple + self._fitness_function = HeuristicPerformanceFitness( + self._param_names, + *self._funct_args, + **self._funct_kwargs + ) + else: + self._fitness_function = partial(fitness_function, *self._funct_args, **self._funct_kwargs) + + # Member function needed for parralelisation + def fitness_function(self, params): + return self._fitness_function(params) + + def parallel(self): + raise NotImplementedError("This optimizer does not have parallel support.") + + def evaluate_fitness(self, particles, apply=apply_serial): + fitness_function = partial(self.fitness_function) + #fitness = map(self.fitness_function, particles) + results = [apply(self.fitness_function, particle) for particle in particles] + fitness = [result.get() for result in results] + return fitness + + def update_positions(self, positions, velocities): + updated = positions + velocities + return updated + +class ParticleSwarmOptimizer(Optimizer): + ''' + A particle swarm optimisation based hyperheuristic + :param integer n_pso_iterations: + :param integer n_pso_particles: + :param QInferDistribution initial_velocity_distribution: + :param QInferDistribution initial_velocity_distribution: + :param double + :param double + :param function + ''' + + def __call__(self, + n_pso_iterations=50, + 
n_pso_particles=60, + initial_position_distribution=None, + initial_velocity_distribution=None, + omega_v=0.35, + phi_p=0.25, + phi_g=0.5, + apply=apply_serial + ): + self._fitness = np.empty([n_pso_iterations, n_pso_particles], dtype=self.fitness_dt()) + local_attractors = np.empty([n_pso_particles], dtype=self.fitness_dt()) + global_attractor = np.empty([1], dtype=self.fitness_dt()) + + if initial_position_distribution is None: + initial_position_distribution = distributions.UniformDistribution(np.array([[ 0, 1]] * self._n_free_params)); + + if initial_velocity_distribution is None: + initial_velocity_distribution = distributions.UniformDistribution(np.array([[-1, 1]] * self._n_free_params)) + + # Initial particle positions + self._fitness[0]["params"] = initial_position_distribution.sample(n_pso_particles) + + # Apply the boundary conditions if any exist + if self._boundary_map is not None: + self._fitness[0]["params"] = self._boundary_map(self._fitness[itr]["params"]) + + # Calculate the initial particle fitnesses + self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], + apply=apply) + + # Calculate the positions of the attractors + local_attractors = self._fitness[0] + local_attractors, global_attractor = self.update_attractors( + self._fitness[0], + local_attractors, + global_attractor) + + # Initial particle velocities + self._fitness[0]["velocities"] = initial_velocity_distribution.sample(n_pso_particles) + self._fitness[0]["velocities"] = self.update_velocities( + self._fitness[0]["params"], + self._fitness[0]["velocities"], + local_attractors["params"], + global_attractor["params"], + omega_v, phi_p, phi_g) + + for itr in range(1, n_pso_iterations): + #Update the particle positions + self._fitness[itr]["params"] = self.update_positions( + self._fitness[itr - 1]["params"], + self._fitness[itr - 1]["velocities"]) + + # Apply the boundary conditions if any exist + if self._boundary_map is not None: + self._fitness[itr]["params"] = 
self._boundary_map(self._fitness[itr]["params"]) + + # Recalculate the fitness function + self._fitness[itr]["fitness"] = self.evaluate_fitness( + self._fitness[itr]["params"], + apply=apply) + + # Find the new attractors + local_attractors, global_attractor = self.update_attractors( + self._fitness[itr], + local_attractors, + global_attractor) + + # Update the velocities + self._fitness[itr]["velocities"] = self.update_velocities( + self._fitness[itr]["params"], + self._fitness[itr - 1]["velocities"], + local_attractors["params"], + global_attractor["params"], + omega_v, phi_p, phi_g) + + return global_attractor + + def update_velocities(self, positions, velocities, local_attractors, global_attractor, omega_v, phi_p, phi_g): + random_p = np.random.random_sample(positions.shape) + random_g = np.random.random_sample(positions.shape) + updated = omega_v * velocities + phi_p * random_p * (local_attractors - positions) + phi_g * random_g * (global_attractor - positions) + return updated + + def update_attractors(self, particles, local_attractors, global_attractor): + for idx, particle in enumerate(particles): + if particle["fitness"] < local_attractors[idx]["fitness"]: + local_attractors[idx] = particle + global_attractor = local_attractors[np.argmin(local_attractors["fitness"])] + return local_attractors, global_attractor + + def fitness_dt(self): + return np.dtype([ + ('params', np.float64, (self._n_free_params,)), + ('velocities', np.float64, (self._n_free_params,)), + ('fitness', np.float64)]) + + +class ParticleSwarmSimpleAnnealingOptimizer(ParticleSwarmOptimizer): + + def __call__(self, + n_pso_iterations=50, + n_pso_particles=60, + initial_position_distribution=None, + initial_velocity_distribution=None, + omega_v=0.35, + phi_p=0.25, + phi_g=0.5, + temperature = 0.95, + apply=apply_serial + ): + self._fitness = np.empty([n_pso_iterations, n_pso_particles], dtype=self.fitness_dt()) + local_attractors = np.empty([n_pso_particles], dtype=self.fitness_dt()) + 
class ParticleSwarmSimpleAnnealingOptimizer(ParticleSwarmOptimizer):
    """
    Particle swarm optimiser whose PSO parameters (``omega_v``, ``phi_p``,
    ``phi_g``) are all geometrically cooled by ``temperature`` after every
    iteration, shifting the swarm from exploration toward exploitation.
    """

    def __call__(self,
        n_pso_iterations=50,
        n_pso_particles=60,
        initial_position_distribution=None,
        initial_velocity_distribution=None,
        omega_v=0.35,
        phi_p=0.25,
        phi_g=0.5,
        temperature = 0.95,
        apply=apply_serial
    ):
        """
        Run the annealed particle swarm optimisation.

        Parameters are as for :class:`ParticleSwarmOptimizer`, plus:

        :param float temperature: Multiplicative cooling factor in (0, 1]
            applied to all three PSO parameters once per iteration.
        :return: Structured record of the best parameters found and their
            fitness (the global attractor).
        """
        self._fitness = np.empty([n_pso_iterations, n_pso_particles], dtype=self.fitness_dt())
        local_attractors = np.empty([n_pso_particles], dtype=self.fitness_dt())
        global_attractor = np.empty([1], dtype=self.fitness_dt())

        if initial_position_distribution is None:
            initial_position_distribution = distributions.UniformDistribution(
                np.array([[0, 1]] * self._n_free_params))

        if initial_velocity_distribution is None:
            initial_velocity_distribution = distributions.UniformDistribution(
                np.array([[-1, 1]] * self._n_free_params))

        # Initial particle positions.
        self._fitness[0]["params"] = initial_position_distribution.sample(n_pso_particles)

        # Apply the boundary conditions if any exist.
        # BUG FIX: the original indexed self._fitness[itr] before itr was
        # bound; the initial positions live at index 0.
        if self._boundary_map is not None:
            self._fitness[0]["params"] = self._boundary_map(self._fitness[0]["params"])

        # Calculate the initial particle fitnesses.
        self._fitness[0]["fitness"] = self.evaluate_fitness(
            self._fitness[0]["params"], apply=apply)

        # Calculate the positions of the attractors.
        local_attractors = self._fitness[0]
        local_attractors, global_attractor = self.update_attractors(
            self._fitness[0], local_attractors, global_attractor)

        # Initial particle velocities.
        self._fitness[0]["velocities"] = initial_velocity_distribution.sample(n_pso_particles)
        self._fitness[0]["velocities"] = self.update_velocities(
            self._fitness[0]["params"],
            self._fitness[0]["velocities"],
            local_attractors["params"],
            global_attractor["params"],
            omega_v, phi_p, phi_g)

        for itr in range(1, n_pso_iterations):
            # Update the particle positions.
            self._fitness[itr]["params"] = self.update_positions(
                self._fitness[itr - 1]["params"],
                self._fitness[itr - 1]["velocities"])

            # Apply the boundary conditions if any exist.
            if self._boundary_map is not None:
                self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"])

            # Recalculate the fitness function.
            self._fitness[itr]["fitness"] = self.evaluate_fitness(
                self._fitness[itr]["params"], apply=apply)

            # Find the new attractors.
            local_attractors, global_attractor = self.update_attractors(
                self._fitness[itr], local_attractors, global_attractor)

            # Update the velocities.
            self._fitness[itr]["velocities"] = self.update_velocities(
                self._fitness[itr]["params"],
                self._fitness[itr - 1]["velocities"],
                local_attractors["params"],
                global_attractor["params"],
                omega_v, phi_p, phi_g)

            # Anneal (cool) the PSO parameters for the next iteration.
            omega_v, phi_p, phi_g = self.update_pso_params(
                temperature, omega_v, phi_p, phi_g)

        return global_attractor

    def update_pso_params(self, temperature, omega_v, phi_p, phi_g):
        """Cool all three PSO parameters by the annealing factor ``temperature``."""
        omega_v, phi_p, phi_g = np.multiply(temperature, [omega_v, phi_p, phi_g])
        return omega_v, phi_p, phi_g
class ParticleSwarmTemperingOptimizer(ParticleSwarmOptimizer):
    '''
    A particle swarm optimisation based hyperheuristic in which the swarm is
    split into categories, each evolving under its own randomly-drawn PSO
    parameters, with particles periodically redistributed ("tempered")
    between the categories.

    :param integer n_pso_iterations: Number of swarm update iterations.
    :param integer n_pso_particles: Number of particles in the swarm.
    :param QInferDistribution initial_position_distribution: Distribution
        sampled for the initial particle positions.
    :param QInferDistribution initial_velocity_distribution: Distribution
        sampled for the initial particle velocities.
    :param integer n_temper_categories: Number of tempering categories the
        swarm is partitioned into.
    :param integer temper_frequency: Number of iterations between random
        redistributions of particles across categories.
    :param temper_params: Optional list of per-category (omega_v, phi_p,
        phi_g) records; drawn uniformly at random when omitted.
    :param function apply: Applier used to evaluate the fitness function.
    '''

    def __call__(self,
        n_pso_iterations=50,
        n_pso_particles=60,
        initial_position_distribution=None,
        initial_velocity_distribution=None,
        n_temper_categories = 6,
        temper_frequency = 10,
        temper_params = None,
        apply=apply_serial
    ):
        self._fitness = np.empty([n_pso_iterations, n_pso_particles], dtype=self.fitness_dt())
        local_attractors = np.empty([n_pso_particles], dtype=self.fitness_dt())
        global_attractor = np.empty([1], dtype=self.fitness_dt())

        if initial_position_distribution is None:
            initial_position_distribution = distributions.UniformDistribution(
                np.array([[0, 1]] * self._n_free_params))

        if initial_velocity_distribution is None:
            initial_velocity_distribution = distributions.UniformDistribution(
                np.array([[-1, 1]] * self._n_free_params))

        # Draw per-category PSO parameters if none were supplied.
        if temper_params is None:
            omega_v = np.random.random(n_temper_categories)
            phi_p = np.random.random(n_temper_categories)
            phi_g = np.random.random(n_temper_categories)
            temper_params = [
                np.array(params, dtype=self.temper_params_dt())
                for params in zip(omega_v, phi_p, phi_g)]

        # Distribute the particles into different temper categories.
        temper_map = self.distribute_particles(n_pso_particles, n_temper_categories)

        # Initial particle positions.
        self._fitness[0]["params"] = initial_position_distribution.sample(n_pso_particles)

        # Apply the boundary conditions if any exist.
        # BUG FIX: the original indexed self._fitness[itr] before itr was
        # bound; the initial positions live at index 0.
        if self._boundary_map is not None:
            self._fitness[0]["params"] = self._boundary_map(self._fitness[0]["params"])

        # Calculate the initial particle fitnesses.
        self._fitness[0]["fitness"] = self.evaluate_fitness(
            self._fitness[0]["params"], apply=apply)

        # Calculate the positions of the attractors.
        local_attractors = self._fitness[0]
        local_attractors, global_attractor = self.update_attractors(
            self._fitness[0], local_attractors, global_attractor)

        # Initial particle velocities.
        self._fitness[0]["velocities"] = initial_velocity_distribution.sample(n_pso_particles)

        # Update the velocities category by category, each with its own params.
        for idx, temper_category in enumerate(temper_map):
            self._fitness[0]["velocities"][temper_category] = self.update_velocities(
                self._fitness[0]["params"][temper_category],
                self._fitness[0]["velocities"][temper_category],
                local_attractors["params"][temper_category],
                global_attractor["params"],
                temper_params[idx]["omega_v"],
                temper_params[idx]["phi_p"],
                temper_params[idx]["phi_g"])

        for itr in range(1, n_pso_iterations):
            # Update the particle positions.
            self._fitness[itr]["params"] = self.update_positions(
                self._fitness[itr - 1]["params"],
                self._fitness[itr - 1]["velocities"])

            # Apply the boundary conditions if any exist.
            if self._boundary_map is not None:
                self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"])

            # Recalculate the fitness function.
            self._fitness[itr]["fitness"] = self.evaluate_fitness(
                self._fitness[itr]["params"], apply=apply)

            # Find the new attractors.
            local_attractors, global_attractor = self.update_attractors(
                self._fitness[itr], local_attractors, global_attractor)

            # Update the velocities per temper category.
            for idx, temper_category in enumerate(temper_map):
                self._fitness[itr]["velocities"][temper_category] = self.update_velocities(
                    self._fitness[itr]["params"][temper_category],
                    self._fitness[itr - 1]["velocities"][temper_category],
                    local_attractors["params"][temper_category],
                    global_attractor["params"],
                    temper_params[idx]["omega_v"],
                    temper_params[idx]["phi_p"],
                    temper_params[idx]["phi_g"])

            # Periodically redistribute the particles across the categories.
            if itr % temper_frequency == 0:
                temper_map = self.distribute_particles(n_pso_particles, n_temper_categories)

        return global_attractor

    def temper_params_dt(self):
        """Structured dtype holding one category's PSO parameters."""
        return np.dtype([
            ('omega_v', np.float64),
            ('phi_p', np.float64),
            ('phi_g', np.float64)])

    def distribute_particles(self, n_pso_particles, n_temper_categories):
        """
        Randomly partition the particle indices into ``n_temper_categories``
        groups of as-equal-as-possible size.

        :return dict: Map from category index to an array of particle indices.
        """
        # Distribute as many particles as evenly as possible across the
        # categories; this ensures that there are no empty categories.
        n_evenly_distributable = (n_pso_particles // n_temper_categories) * n_temper_categories
        n_unevenly_distributable = n_pso_particles - n_evenly_distributable

        # Generate the required indicies for the pso particles.
        particle_indicies = list(range(0, n_pso_particles))

        # Randomise the order.
        np.random.shuffle(particle_indicies)

        # Reshape to a 2D array indexed on the number of tempering categories.
        particle_map = np.reshape(
            particle_indicies[:n_evenly_distributable],
            (n_temper_categories, n_evenly_distributable // n_temper_categories))

        # Transfer to the map.
        temper_map = {}
        for i, index_category in enumerate(particle_map):
            temper_map[i] = index_category

        # Transfer any excess particles that could not be evenly distributed.
        # This is a slow operation, so for the purposes of speed the number of
        # temper categories should be a factor of the number of pso particles.
        if n_unevenly_distributable != 0:
            for i in range(n_evenly_distributable, n_pso_particles):
                # BUG FIX: the original drew two independent random
                # categories — reading from one and assigning the appended
                # array to another, silently replacing that category's
                # particles. Draw the category once and append to it.
                category = random.randrange(0, n_temper_categories)
                temper_map[category] = np.append(
                    temper_map[category], [particle_indicies[i]])

        return temper_map
class SPSATwoSiteOptimizer(Optimizer):
    """
    Simultaneous Perturbation Stochastic Approximation (SPSA) optimiser.

    Each iteration perturbs every particle in a random +/-1 direction,
    evaluates the fitness at the two perturbed sites, and steps along the
    resulting two-site finite-difference estimate.
    """

    def __call__(self,
        n_spsa_iterations = 60,
        n_spsa_particles = 50,
        initial_position_distribution = None,
        A = 0,
        s = 1/3,
        t = 1,
        a = 0.5,
        b = 0.5,
        apply=apply_serial
    ):
        """
        Run the SPSA optimisation.

        :param int n_spsa_iterations: Number of update iterations.
        :param int n_spsa_particles: Number of independent SPSA particles.
        :param initial_position_distribution: Distribution sampled for the
            initial positions; defaults to uniform over [0, 1].
        :param A, s, a: Gain-sequence constants for the perturbation size
            ``alpha(k) = a / (1 + A + k)**s``.
        :param t, b: Gain-sequence constants for the step size
            ``beta(k) = b / (1 + k)**t``.
        :param apply: Applier used to evaluate the fitness function.
        :return: The final-iteration record with the lowest fitness.
        """
        self._fitness = np.empty([n_spsa_iterations, n_spsa_particles], dtype=self.fitness_dt())

        if initial_position_distribution is None:
            initial_position_distribution = distributions.UniformDistribution(
                np.array([[0, 1]] * self._n_free_params))

        # Initial particle positions.
        self._fitness[0]["params"] = initial_position_distribution.sample(n_spsa_particles)

        # Apply the boundary conditions if any exist.
        # BUG FIX: the original indexed self._fitness[itr] before itr was
        # bound; the initial positions live at index 0.
        if self._boundary_map is not None:
            self._fitness[0]["params"] = self._boundary_map(self._fitness[0]["params"])

        # Calculate the initial particle fitnesses.
        self._fitness[0]["fitness"] = self.evaluate_fitness(
            self._fitness[0]["params"], apply=apply)

        for itr in range(1, n_spsa_iterations):
            # Random +/-1 perturbation directions, and the fitness at the
            # two perturbed sites for each particle.
            delta_k = self.delta(n_spsa_particles, self._n_free_params)
            first_site = np.vstack(
                self.evaluate_fitness(
                    self._fitness[itr - 1]["params"] - self.alpha(itr, a, A, s) * delta_k,
                    apply=apply))
            second_site = np.vstack(
                self.evaluate_fitness(
                    self._fitness[itr - 1]["params"] + self.alpha(itr, a, A, s) * delta_k,
                    apply=apply))

            # Determine the update velocity from the two-site estimate.
            self._fitness[itr - 1]["velocities"] = self.update_velocities(
                first_site,
                second_site,
                self.alpha(itr, a, A, s),
                self.beta(itr, b, t),
                delta_k)

            # Update the SPSA particle positions.
            self._fitness[itr]["params"] = self.update_positions(
                self._fitness[itr - 1]["params"],
                self._fitness[itr - 1]["velocities"])

            # Apply the boundary conditions if any exist.
            if self._boundary_map is not None:
                self._fitness[itr]["params"] = self._boundary_map(self._fitness[itr]["params"])

            # Calculate the fitness of the new positions.
            self._fitness[itr]["fitness"] = self.evaluate_fitness(
                self._fitness[itr]["params"], apply=apply)

        # Best (lowest-fitness) particle of the final iteration.
        return self._fitness[n_spsa_iterations - 1][
            np.argmin(self._fitness[n_spsa_iterations - 1]['fitness'])]

    def alpha(self, k, a, A, s):
        """Perturbation-size gain sequence at iteration ``k``."""
        return a / (1 + A + k)**s

    def beta(self, k, b, t):
        """Step-size gain sequence at iteration ``k``."""
        return b / (1 + k)**t

    def delta(self, n_particles, n_params):
        """Random matrix of +/-1 perturbation directions."""
        return (2 * np.round(np.random.random((n_particles, n_params)))) - 1

    def update_velocities(self, first_site, second_site, alpha, beta, delta):
        """Two-site finite-difference update step along ``delta``."""
        return delta * beta * (first_site - second_site) / (2 * alpha)

    def fitness_dt(self):
        """Structured dtype holding each particle's position, velocity and fitness."""
        return np.dtype([
            ('params', np.float64, (self._n_free_params,)),
            ('velocities', np.float64, (self._n_free_params,)),
            ('fitness', np.float64)])


class HeuristicPerformanceFitness(object):
    """
    Fitness-function adaptor: instantiates the heuristic class with the
    candidate hyperparameters, runs ``perf_test_multiple`` on it and reduces
    the performance record to a scalar (lower is better).
    """

    def __init__(self,
        param_names,
        evaluation_function = None,
        *args,
        **kwargs):
        """
        :param param_names: Names of the hyperparameters being optimised;
            matched positionally against the candidate parameter vector.
        :param evaluation_function: Reduction of the performance record to a
            scalar; defaults to the mean final loss across trials.
        :param kwargs: Must contain ``heuristic_class``, the heuristic to
            test; all other args/kwargs are forwarded to perf_test_multiple.
        """
        # BUG FIX (narrowing): the original used a bare ``except:``, which
        # would also swallow KeyboardInterrupt/SystemExit. Only a missing
        # key is expected here. NotImplementedError is kept so existing
        # callers' exception handling is unchanged.
        try:
            self._heuristic_class = kwargs.pop('heuristic_class')
        except KeyError:
            raise NotImplementedError("No heuristic class was passed.")
        self._args = args
        self._kwargs = kwargs
        self._param_names = param_names
        if evaluation_function is None:
            # Mean of the final-timestep loss across trials.
            self._evaluation_function = lambda performance: performance['loss'][:, -1].mean(axis=0)
        else:
            self._evaluation_function = evaluation_function

    def __call__(self, params):
        """Evaluate the candidate hyperparameter vector ``params``."""
        performance = perf_test_multiple(
            *self._args,
            heuristic_class = partial(
                self._heuristic_class,
                **{name: param
                   for name, param in zip(self._param_names, params)}),
            **self._kwargs
        )

        return self._evaluation_function(performance)
10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/qinfer/hyper_heuristic_optimizers.py b/src/qinfer/hyper_heuristic_optimizers.py index 4691446..3f7ad08 100644 --- a/src/qinfer/hyper_heuristic_optimizers.py +++ b/src/qinfer/hyper_heuristic_optimizers.py @@ -137,7 +137,7 @@ def __call__(self, # Apply the boundary conditions if any exist if self._boundary_map is not None: - self._fitness[0]["params"] = self._boundary_map(self._fitness[itr]["params"]) + self._fitness[0]["params"] = self._boundary_map(self._fitness[0]["params"]) # Calculate the initial particle fitnesses self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], @@ -238,7 +238,7 @@ def __call__(self, # Apply the boundary conditions if any exist if self._boundary_map is not None: - self._fitness[0]["params"] = self._boundary_map(self._fitness[itr]["params"]) + self._fitness[0]["params"] = self._boundary_map(self._fitness[0]["params"]) # Calculate the initial particle fitnesses self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], @@ -349,7 +349,7 @@ def __call__(self, # Apply the boundary conditions if any exist if self._boundary_map is not None: - self._fitness[0]["params"] = self._boundary_map(self._fitness[itr]["params"]) + self._fitness[0]["params"] = self._boundary_map(self._fitness[0]["params"]) # Calculate the initial particle fitnesses self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"], @@ -472,14 +472,14 @@ def __call__(self, self._fitness = np.empty([n_spsa_iterations, n_spsa_particles], dtype=self.fitness_dt()) if initial_position_distribution is None: - initial_position_distribution = distributions.UniformDistribution(np.array([[ 0, 1]] * self._n_free_params)); + initial_position_distribution = distributions.UniformDistribution(np.array([[0, 1]] * self._n_free_params)); # Initial particle positions self._fitness[0]["params"] = initial_position_distribution.sample(n_spsa_particles) # Apply 
the boundary conditions if any exist if self._boundary_map is not None: - self._fitness[0]["params"] = self._boundary_map(self._fitness[itr]["params"]) + self._fitness[0]["params"] = self._boundary_map(self._fitness[0]["params"]) # Calculate the initial particle fitnesses self._fitness[0]["fitness"] = self.evaluate_fitness(self._fitness[0]["params"],