diff --git a/Documentation/Settings_in_Optimization_Dict.md b/Documentation/Settings_in_Optimization_Dict.md index 1a95138..0d0e9cb 100644 --- a/Documentation/Settings_in_Optimization_Dict.md +++ b/Documentation/Settings_in_Optimization_Dict.md @@ -13,6 +13,7 @@ Assuming you define the settings in the form of a .json file, the general struct "create_logfile": true, # determines if you want to save the log-file "console_info": true, # determines if you want the optimization output printed to the console "dump_format": "npz", # format of the results file + "continuation_datetime": "YYYYmmdd_HHMMSS", # date of optimization to be continued "algorithm_settings": {...}, # settings related to the algorithm "pulses": [{...}, {...}, ...], # list of pulses and their settings "parameters": [{...}, {...}, ...], # list of parameters and their settings @@ -30,6 +31,8 @@ The `console_info` key determines if the optimization output is shown in the ter The `dump_format` key specifies the format of the results file (best controls and some meta data). Currently you can choose between "npz" and "json". The default (if you do not give this key) is "npz". +The `continuation_datetime` key determines whether an existing optimization job should be continued and the results saved in the corresponding folder. If intended, the corresponding date and time should be provided in the format "YYYYmmdd_HHMMSS" and the corresponding job name specified in `optimization_client_name`. QuOCS will then try to import the best controls found in the previous run as initial values for the continuation. Here, make sure to use the same names for pulses and parameters in the `optimization_dictionary` as in the previous run. The default of this key is "no" and will create a new folder with a timestamp of the point of creation. 
+ **Tip:** You can also change specific entries in the code after reading in the .json file if you, e.g., want to sweep certain parameters or have the name of the optimization defined at runtime. diff --git a/Examples/execute_dCRAB.py b/Examples/execute_dCRAB.py index 0cc2429..08a903b 100644 --- a/Examples/execute_dCRAB.py +++ b/Examples/execute_dCRAB.py @@ -149,5 +149,6 @@ def main(optimization_dictionary: dict): print("\nBest FoM: {}".format(optimization_obj.opt_alg_obj.best_FoM)) + if __name__ == "__main__": main(readjson(os.path.join(os.getcwd(), "settings_dCRAB.json"))) diff --git a/src/quocslib/Optimizer.py b/src/quocslib/Optimizer.py index 3529fed..165073c 100644 --- a/src/quocslib/Optimizer.py +++ b/src/quocslib/Optimizer.py @@ -21,6 +21,7 @@ from quocslib.communication.AllInOneCommunication import AllInOneCommunication from quocslib.utils.BestDump import BestDump from quocslib.utils.AbstractFoM import AbstractFoM +from quocslib.utils.Import_previous_results import update_opti_dict class Optimizer: @@ -58,6 +59,8 @@ def __init__(self, self.dump_format = optimization_dict.setdefault("dump_format", "npz") self.optimization_direction = optimization_dict["algorithm_settings"].setdefault("optimization_direction", "minimization") + self.continuation_datetime = optimization_dict.setdefault("continuation_datetime", "no") + self.communication_obj = AllInOneCommunication(interface_job_name=self.interface_job_name, FoM_obj=FoM_object, handle_exit_obj=handle_exit_obj, @@ -66,7 +69,11 @@ def __init__(self, create_logfile=self.create_logfile, console_info=self.console_info, dump_format=self.dump_format, - optimization_direction=self.optimization_direction) + optimization_direction=self.optimization_direction, + continuation_datetime=self.continuation_datetime) + + if self.communication_obj.is_continuation: + optimization_dict = update_opti_dict(optimization_dict, self.communication_obj) self.results_path = self.communication_obj.results_path diff --git 
a/src/quocslib/communication/AllInOneCommunication.py b/src/quocslib/communication/AllInOneCommunication.py index 0db4b34..fd6e98f 100644 --- a/src/quocslib/communication/AllInOneCommunication.py +++ b/src/quocslib/communication/AllInOneCommunication.py @@ -36,7 +36,8 @@ def __init__(self, create_logfile: bool = True, console_info: bool = True, dump_format: str = "npz", - optimization_direction: str = "minimization"): + optimization_direction: str = "minimization", + continuation_datetime: str = "no"): """ In case the user chooses to run the optimization in his device, this class is used by the OptimizationAlgorithm. The objects to dump the results, calculate the figure of merit, and the logger are created here. @@ -58,25 +59,43 @@ def __init__(self, (self.message_signal, self.FoM_plot_signal, self.controls_update_signal) = comm_signals_list # Pre job name pre_job_name = interface_job_name + # Optimization folder name + optimization_folder = "QuOCS_Results" # Datetime for 1-1 association - self.date_time = str(time.strftime("%Y%m%d_%H%M%S")) + # Check, if optimization is continuation + self.is_continuation = False + queued_logger_info = None + if continuation_datetime == "no": + self.date_time = str(time.strftime("%Y%m%d_%H%M%S")) + else: + continuation_job_folder = os.path.join(os.getcwd(), optimization_folder, continuation_datetime + "_" + pre_job_name) + if not os.path.isdir(continuation_job_folder): + queued_logger_info = "Continuation attempt: No Folder " + continuation_job_folder + " found, new folder created" + self.date_time = str(time.strftime("%Y%m%d_%H%M%S")) + else: + self.date_time = continuation_datetime + self.is_continuation = True + queued_logger_info = "Continue optimization from " + continuation_datetime + "_" + pre_job_name # Client job name to send to the Server self.client_job_name = self.date_time + "_" + pre_job_name ### # Logging, Results, Figure of merit evaluation ... 
### - # Optimization folder - optimization_folder = "QuOCS_Results" + # Optimization folder self.results_path = os.path.join(os.getcwd(), optimization_folder, self.client_job_name) if not os.path.isdir(os.path.join(os.getcwd(), optimization_folder)): os.makedirs(os.path.join(os.getcwd(), optimization_folder)) # Create the folder for logging and results - os.makedirs(self.results_path) + if not os.path.isdir(self.results_path): + os.makedirs(self.results_path) # Write the current quocs lib version in the file with open(os.path.join(self.results_path, "quocs_version.txt"), "w") as version_file: version_file.write("QuOCS library version: {0}".format(quocslib_version)) # Create logging object self.logger = create_logger(self.results_path, self.date_time, create_logfile=create_logfile, console_info=console_info) + # print queued logger info + if not queued_logger_info == None: + self.logger.info(queued_logger_info) # Print function evaluation and figure of merit self.print_general_log = True # Figure of merit object diff --git a/src/quocslib/tools/logger.py b/src/quocslib/tools/logger.py index ff8deb4..ba80c6c 100644 --- a/src/quocslib/tools/logger.py +++ b/src/quocslib/tools/logger.py @@ -52,7 +52,8 @@ def create_logger(results_path, date_time, create_logfile=True, console_info=Tru console_handler.setFormatter(logging.Formatter(print_format)) # Log file handler if create_logfile: - file_handler = logging.FileHandler(log_filename) + # file_handler = logging.FileHandler(log_filename) + file_handler = logging.FileHandler(log_filename, mode='a') file_handler.setLevel(logging.INFO) file_handler.setFormatter(logging.Formatter(log_format, date_format)) # Add handler for logfile to the logger diff --git a/src/quocslib/utils/Import_previous_results.py b/src/quocslib/utils/Import_previous_results.py new file mode 100644 index 0000000..91a2aa8 --- /dev/null +++ b/src/quocslib/utils/Import_previous_results.py @@ -0,0 +1,57 @@ +# 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# Copyright 2021- QuOCS Team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +import os +import numpy as np +from quocslib.utils.inputoutput import readjson + + +def update_opti_dict(optimization_dict: dict, comm_obj: object) -> dict: + """ + Load optimal results of previous run and initial guess into an optimization dictionary + :param dict optimization_dictionary: optimization_dictionary to be updated + :comm_obj: communication object of optimization + :return dict: updated optimization dictionary + """ + + try: + if optimization_dict["dump_format"] == "json": + best_res_path = os.path.join(comm_obj.results_path, comm_obj.date_time + "_best_controls.json") + best_res = readjson(best_res_path) + else: + best_res_path = os.path.join(comm_obj.results_path, comm_obj.date_time + "_best_controls.npz") + best_res = np.load(best_res_path) + + for pulse in optimization_dict["pulses"]: # use same optimization dictionary as in previous optimization + pulse_name = pulse["pulse_name"] # make sure to use the same pulse names in opti_dict as in best_controls + prev_opt_pulse = best_res[pulse_name] + initial_guess = {"function_type": "list_function", "list_function": prev_opt_pulse} + pulse["initial_guess"] = initial_guess + comm_obj.logger.info(f"Initial guess for pulse {pulse_name} imported from 
previous results") + + for param in optimization_dict["parameters"]: + param_name = param["parameter_name"] + prev_opt_param = best_res[param_name] + param["initial_value"] = prev_opt_param + comm_obj.logger.info(f"Initial guess for parameter {param_name} imported from previous results") + + except: + comm_obj.logger.warn("Previous optimal controls could not be imported for continuation." + " Check if ..._best_controls...-file exists and pulse/paremeter names coincide with optimization dictionary") + + return optimization_dict + + diff --git a/tests/test_continuation.py b/tests/test_continuation.py new file mode 100644 index 0000000..2aae363 --- /dev/null +++ b/tests/test_continuation.py @@ -0,0 +1,231 @@ +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# Copyright 2021- QuOCS Team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +import os, platform +import matplotlib.pyplot as plt +import numpy as np +from quocslib.optimalcontrolproblems.OneQubitProblem import OneQubit +from quocslib.optimalcontrolproblems.IsingModelProblem import IsingModel +from quocslib.Optimizer import Optimizer +import pytest + + +def plot_FoM(result_path, FoM): + + save_name = "FoM_plot" + + iterations = range(1, len(FoM) + 1) + min_FoM = min(FoM) + max_FoM = max(FoM) + difference = abs(max_FoM - min_FoM) + + fig = plt.figure(figsize=(11, 7)) + ax = fig.add_subplot(111) + plt.subplots_adjust(bottom=0.15, top=0.9, right=0.98, left=0.1) + + plt.plot(iterations, FoM, color='darkblue', linewidth=1.5, zorder=10) + + plt.grid(True, which="both") + plt.ylim(min_FoM - 0.05 * difference, max_FoM + 0.05 * difference) + plt.xlabel('Iteration', fontsize=20) + plt.ylabel('FoM', fontsize=20) + plt.savefig(os.path.join(result_path, save_name + '.png')) + +def run_dCRAB_opti(optimization_dictionary): + # define some parameters for the optimization + args_dict = { + "initial_state": "[1.0 , 0.0]", + "target_state": "[1.0/np.sqrt(2), -1j/np.sqrt(2)]", + "optimization_factor": -1.0 + } + # Create FoM object + FoM_object = OneQubit(args_dict=args_dict) + + # Define Optimizer + optimization_obj = Optimizer(optimization_dictionary, FoM_object) + + initial_guess = optimization_obj.get_optimization_algorithm().controls.pulse_objs_list[0].initial_guess_pulse + initial_param = optimization_obj.get_optimization_algorithm().controls.parameter_objs_list[0].value + + # Run optimization + + optimization_obj.execute() + + optimal_pulse = optimization_obj.get_optimization_algorithm().get_best_controls()["pulses"][0] + optimal_param = optimization_obj.get_optimization_algorithm().get_best_controls()["parameters"][0] + + fomlist = optimization_obj.get_optimization_algorithm().FoM_list + + res_path = optimization_obj.results_path + datetime = 
optimization_obj.communication_obj.date_time + + return res_path, datetime, initial_guess, optimal_pulse, initial_param, optimal_param, fomlist + + + +def test_dCRAB_continuation(): + + optimization_dictionary = { + "optimization_client_name": "continuation_test", + "optimization_direction": "minimization", + "continuation_datetime": "no", + "dump_format": "json", + "algorithm_settings": { + "algorithm_name": "dCRAB", + "super_iteration_number": 2, + "max_eval_total": 100, + "FoM_goal": 0.00001, + "dsm_settings": { + "general_settings": { + "dsm_algorithm_name": "NelderMead", + "is_adaptive": True + }, + "stopping_criteria": { + "max_eval": 50, + } + }, + "random_number_generator": { + "seed_number": 42 + } + }, + "pulses": [{ + "pulse_name": "Pulse_1", + "upper_limit": 5.0, + "lower_limit": -5.0, + "bins_number": 101, + "time_name": "time_1", + "amplitude_variation": 5.0, + "basis": { + "basis_name": "Fourier", + "basis_vector_number": 2, + "random_super_parameter_distribution": { + "distribution_name": "Uniform", + "lower_limit": 0.1, + "upper_limit": 5.0 + } + }, + "initial_guess": { + "function_type": "lambda_function", + "lambda_function": "lambda t: np.pi/3.0 + 0.0*t" + } + }], + "parameters": [{"parameter_name": "Parameter0", + "lower_limit": -2.0, + "upper_limit": 2.0, + "initial_value": 0.4, + "amplitude_variation": 0.5}], + "times": [{ + "time_name": "time_1", + "initial_value": 3.0 + }] + } + res_path1, datetime1, _ , optimal_pulse1, _, optimal_param1, fomlist1 = run_dCRAB_opti(optimization_dictionary) + + optimization_dictionary["continuation_datetime"] = datetime1 + + # optimization_dictionary["pulses"][0]["pulse_name"] = "new_name" + + res_path2, datetime2, inital_guess2, optimal_pulse2, initial_param2, optimal_param2, fomlist2 = run_dCRAB_opti(optimization_dictionary) + + res_path3, datetime3, inital_guess3 , _ ,initial_param3, _ , fomlist3 = run_dCRAB_opti(optimization_dictionary) + + plot_FoM(res_path2, fomlist1 + fomlist2 + fomlist3) + + + assert 
res_path1 == res_path2 == res_path3 # test, if similar result path for both optimizations + assert datetime1 == datetime2 == datetime3 # test for similar datetime + assert np.array_equal(inital_guess2, optimal_pulse1) # test, if optimal pulse is given imported as initial guess + assert np.array_equal(inital_guess3, optimal_pulse2) + assert initial_param2 == optimal_param1 + assert initial_param3 == optimal_param2 + assert min(fomlist3) <= min(fomlist2) <= min(fomlist1) # test, if second optimization improved the results + + + + +def run_GRAPE_opti(optimization_dictionary): + + FoM_object = IsingModel(args_dict={}) + + optimization_obj = Optimizer(optimization_dictionary, FoM_object) + + initial_guess = optimization_obj.get_optimization_algorithm().controls.pulse_objs_list[0].initial_guess_pulse + # Run optimization + + optimization_obj.execute() + + optimal_pulse = optimization_obj.get_optimization_algorithm().get_best_controls()["pulses"][0] + fomlist = optimization_obj.get_optimization_algorithm().FoM_list + + res_path = optimization_obj.results_path + datetime = optimization_obj.communication_obj.date_time + + return res_path, datetime, initial_guess, optimal_pulse, fomlist + + +def test_GRAPE_continuation(): + optimization_dictionary = { + "optimization_client_name": "continuation_test", + "optimization_direction": "minimization", + "continuation_datetime": "no", + "dump_format": "json", + "algorithm_settings": { + "algorithm_name": "GRAPE", + "stopping_criteria": {"max_eval_total": 150} + }, + "pulses": [{ + "pulse_name": "Pulse_1", + "upper_limit": 100.0, + "lower_limit": -100.0, + "bins_number": 100, + "amplitude_variation": 20.0, + "time_name": "time_1", + "basis": { + "basis_name": "PiecewiseBasis", + "bins_number": 100 + }, + "initial_guess": { + "function_type": "lambda_function", + "lambda_function": "lambda t: 0.0 + 0.0*t" + } + }], + "parameters": [], + "times": [{ + "time_name": "time_1", + "initial_value": 1.0 + }] + } + + res_path1, datetime1, _ , 
optimal_pulse1, fomlist1 = run_GRAPE_opti(optimization_dictionary) + + optimization_dictionary["continuation_datetime"] = datetime1 + + # optimization_dictionary["pulses"][0]["pulse_name"] = "new_name" + + res_path2, datetime2, inital_guess2, optimal_pulse2, fomlist2 = run_GRAPE_opti(optimization_dictionary) + + res_path3, datetime3, inital_guess3 , _ , fomlist3 = run_GRAPE_opti(optimization_dictionary) + + plot_FoM(res_path2, fomlist1 + fomlist2 + fomlist3) + + + assert res_path1 == res_path2 == res_path3 # test, if similar result path for both optimizations + assert datetime1 == datetime2 == datetime3 # test for similar datetime + assert np.array_equal(inital_guess2, optimal_pulse1) # test, if optimal pulse is given imported as initial guess + assert np.array_equal(inital_guess3, optimal_pulse2) + assert min(fomlist3) <= min(fomlist2) <= min(fomlist1) # test, if second optimization improved the results + +test_dCRAB_continuation() \ No newline at end of file