diff --git a/herbert_core/classes/MPIFramework/@ClusterMPI/ClusterMPI.m b/herbert_core/classes/MPIFramework/@ClusterMPI/ClusterMPI.m
index 55c7b5f10..f340daafd 100644
--- a/herbert_core/classes/MPIFramework/@ClusterMPI/ClusterMPI.m
+++ b/herbert_core/classes/MPIFramework/@ClusterMPI/ClusterMPI.m
@@ -172,9 +172,18 @@
         function check_availability(obj)
             rootpath = fileparts(which('herbert_init'));
             external_dll_dir = fullfile(rootpath, 'DLL','external');
             if ispc()
-                % only one version of mpiexec is used now. May change in the
-                % future.
-                mpi_exec = fullfile(external_dll_dir, 'mpiexec.exe');
+                [rs, rv] = system('where mpiexec');
+                mpis = splitlines(strip(rv));
+                % Ignore Matlab-bundled mpiexec (firewall issues)
+                mpis(cellfun(@(x) contains(x, matlabroot), mpis)) = [];
+                if rs == 0 && ~isempty(mpis)
+                    % If multiple mpiexec on path, prefer user installed MS MPI
+                    mpi_id = [1 find(cellfun(@(x) contains(x,'Microsoft'), mpis), 1)];
+                    mpi_exec = mpis{max(mpi_id)};
+                else
+                    % No mpiexec on path, use pre-packaged version
+                    mpi_exec = fullfile(external_dll_dir, 'mpiexec.exe');
+                end
             else
                 mpi_exec = fullfile(external_dll_dir, 'mpiexec');
diff --git a/herbert_core/classes/MPIFramework/@ClusterParpoolWrapper/ClusterParpoolWrapper.m b/herbert_core/classes/MPIFramework/@ClusterParpoolWrapper/ClusterParpoolWrapper.m
index 65203ef7b..49f8a539a 100644
--- a/herbert_core/classes/MPIFramework/@ClusterParpoolWrapper/ClusterParpoolWrapper.m
+++ b/herbert_core/classes/MPIFramework/@ClusterParpoolWrapper/ClusterParpoolWrapper.m
@@ -94,6 +94,9 @@
             end
             obj = init@ClusterWrapper(obj,n_workers,mess_exchange_framework,log_level);
+            assert(~obj.is_compiled_script_, ...
+                'HERBERT:ClusterParpoolWrapper:invalid_argument', ...
+                'Parpool cluster does not work with compiled workers')
             % delete interactive parallel cluster if any exist
             cl = gcp('nocreate');
diff --git a/herbert_core/classes/MPIFramework/@ClusterWrapper/ClusterWrapper.m b/herbert_core/classes/MPIFramework/@ClusterWrapper/ClusterWrapper.m
index 46b75c56a..42953ef7c 100644
--- a/herbert_core/classes/MPIFramework/@ClusterWrapper/ClusterWrapper.m
+++ b/herbert_core/classes/MPIFramework/@ClusterWrapper/ClusterWrapper.m
@@ -213,11 +213,9 @@
                 obj.matlab_starter_= fullfile(obj.matlab_starter_,'matlab');
             end
             if obj.is_compiled_script_
-                % TODO -- need checking and may be expansion when compiled
-                % horace ticket is executed.
-                obj.common_env_var_('HERBERT_PARALLEL_EXECUTOR')= obj.worker_name_;
+                obj.common_env_var_('HERBERT_PARALLEL_EXECUTOR') = obj.worker_name_;
             else
-                obj.common_env_var_('HERBERT_PARALLEL_EXECUTOR') = obj.matlab_starter_;
+                obj.common_env_var_('HERBERT_PARALLEL_EXECUTOR') = obj.matlab_starter_;
             end
             % additional Matlab m-files search path to be available to
             % workers
@@ -461,12 +459,10 @@ function check_availability(~)
            % Should throw PARALLEL_CONFIG:not_avalable exception
            % if the particular framework is not available.
            worker = config_store.instance.get_value('parallel_config','worker');
-           pkp = which(worker);
-           if isempty(pkp)
-               error('HERBERT:ClusterWrapper:not_available',...
-                   'Parallel worker %s is not on Matlab path. Parallel extensions are not available',...
-                   worker);
-           end
+           assert(~isempty(which(worker)) || exist(worker, 'file'), ...
+               'HERBERT:ClusterWrapper:not_available',...
+               'Parallel worker %s is not on Matlab path. Parallel extensions are not available',...
+               worker);
        end
        % The property returns the list of the configurations, available for
        % usage by the
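
The availability check above, reused in parallel_config below, treats a worker as available when it is either an m-file on the MATLAB path or an existing file on disk (a compiled worker). A minimal sketch of that test, where 'my_worker' is a purely hypothetical name and not part of this change:

    % Sketch only: 'my_worker' is a hypothetical worker name.
    worker = 'my_worker';
    % which() finds an m-file worker on the MATLAB path; exist(...,'file')
    % additionally accepts a path to a compiled worker executable.
    assert(~isempty(which(worker)) || exist(worker, 'file'), ...
        'HERBERT:ClusterWrapper:not_available', ...
        'Parallel worker %s is not on Matlab path. Parallel extensions are not available', ...
        worker);
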
diff --git a/herbert_core/classes/MPIFramework/@parallel_config/parallel_config.m b/herbert_core/classes/MPIFramework/@parallel_config/parallel_config.m
index 885e4e1c7..68d0b732d 100644
--- a/herbert_core/classes/MPIFramework/@parallel_config/parallel_config.m
+++ b/herbert_core/classes/MPIFramework/@parallel_config/parallel_config.m
@@ -215,33 +215,30 @@
        %-----------------------------------------------------------------
        % overloaded getters
        function wrkr = get.worker(obj)
-           wrkr= get_or_restore_field(obj,'worker');
+           wrkr = obj.get_or_restore_field('worker');
        end
        function wrkr = get.is_compiled(obj)
-           % incomplete! Should be derived from worker
-           wrkr= obj.is_compiled_;
+           wrkr = obj.get_or_restore_field('is_compiled');
        end
-       function frmw =get.parallel_cluster(obj)
+       function frmw = get.parallel_cluster(obj)
            %
            wrkr = config_store.instance.get_value(obj,'worker');
-           pkp = which(wrkr);
-           if isempty(pkp)
-               frmw = 'none';
-               return
+           frmw = 'none';
+           if ~isempty(which(wrkr)) || exist(wrkr, 'file')
+               frmw = obj.get_or_restore_field('parallel_cluster');
            end
-           frmw = get_or_restore_field(obj,'parallel_cluster');
        end
        function conf = get.cluster_config(obj)
-           conf = get_or_restore_field(obj,'cluster_config');
+           conf = obj.get_or_restore_field('cluster_config');
        end
        %
-       function folder =get.shared_folder_on_local(obj)
-           folder = get_or_restore_field(obj,'shared_folder_on_local');
+       function folder = get.shared_folder_on_local(obj)
+           folder = obj.get_or_restore_field('shared_folder_on_local');
            if isempty(folder)
                is_depl = MPI_State.instance().is_deployed;
                if is_depl
-                   folder = get_or_restore_field(obj,'working_directory');
+                   folder = obj.get_or_restore_field('working_directory');
                    if isempty(folder)
                        folder = tmp_dir;
                    end
@@ -249,8 +246,8 @@
            end
        end
        %
-       function folder =get.shared_folder_on_remote(obj)
-           folder = get_or_restore_field(obj,'shared_folder_on_remote');
+       function folder = get.shared_folder_on_remote(obj)
+           folder = obj.get_or_restore_field('shared_folder_on_remote');
            if isempty(folder)
                folder = obj.shared_folder_on_local;
            end
@@ -261,7 +258,7 @@
            if is_depl
                work_dir = obj.shared_folder_on_remote;
            else
-               work_dir = get_or_restore_field(obj,'working_directory');
+               work_dir = obj.get_or_restore_field('working_directory');
            end
            if isempty(work_dir)
                work_dir = tmp_dir;
@@ -275,7 +272,7 @@
            if is_depl
                work_dir = obj.shared_folder_on_remote;
            else
-               work_dir = get_or_restore_field(obj,'working_directory');
+               work_dir = obj.get_or_restore_field('working_directory');
            end
            if isempty(work_dir)
                is = true;
@@ -411,7 +408,7 @@
        end
        %
        function mpirunner = get.external_mpiexec(obj)
-           mpirunner = get_or_restore_field(obj,'external_mpiexec');
+           mpirunner = obj.get_or_restore_field('external_mpiexec');
        end
        %
        function obj=set.external_mpiexec(obj,val)
@@ -452,4 +449,4 @@
                the_opt = select_option_(opt,arg);
            end
        end
-end
\ No newline at end of file
+end
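
With is_compiled now stored through config_store rather than derived on the fly, the configuration can be queried directly. A hedged usage sketch, assuming the usual parallel_config accessor style (the snippet itself is not part of the diff):

    % Sketch only; property names follow the getters defined above.
    pc = parallel_config;        % configuration object
    disp(pc.worker)              % currently configured worker
    disp(pc.is_compiled)         % true when the stored worker was recognised as compiled
    disp(pc.parallel_cluster)    % falls back to 'none' when the worker cannot be found
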
diff --git a/herbert_core/classes/MPIFramework/@parallel_config/private/check_and_set_cluster_.m b/herbert_core/classes/MPIFramework/@parallel_config/private/check_and_set_cluster_.m
index cc0277334..dff423785 100644
--- a/herbert_core/classes/MPIFramework/@parallel_config/private/check_and_set_cluster_.m
+++ b/herbert_core/classes/MPIFramework/@parallel_config/private/check_and_set_cluster_.m
@@ -7,13 +7,11 @@
 % The cluster name (can be defined by single symbol)
 % or by a cluster number in the list of clusters
 %
-wrkr = which(obj.worker_);
-mff = MPI_clusters_factory.instance();
+assert(~isempty(which(obj.worker)) || exist(obj.worker, 'file'), ...
+    'HERBERT:parallel_config:not_available', ...
+    'Parallel worker is not on the Matlab path so parallel features are not available');
-if isempty(wrkr)
-    error('HERBERT:parallel_config:not_available',...
-        'Parallel worker is not on the Matlab path so parallel features are not available')
-else
+mff = MPI_clusters_factory.instance();
 known_clusters = mff.known_cluster_names;
 full_cl_name = obj.select_option(known_clusters,cluster_name);
 mff.parallel_cluster = full_cl_name;
diff --git a/herbert_core/classes/MPIFramework/@parallel_config/private/check_and_set_worker_.m b/herbert_core/classes/MPIFramework/@parallel_config/private/check_and_set_worker_.m
index e173e45f7..0d3bf9542 100644
--- a/herbert_core/classes/MPIFramework/@parallel_config/private/check_and_set_worker_.m
+++ b/herbert_core/classes/MPIFramework/@parallel_config/private/check_and_set_worker_.m
@@ -8,7 +8,16 @@
        'The worker property needs the executable script name')
 end
 scr_path = which(new_wrkr);
+config_instance = config_store.instance();
 if isempty(scr_path)
+    % Check if it is a compiled worker
+    compiled_wrkr = check_compiled_(new_wrkr);
+    if ~isempty(compiled_wrkr)
+        config_instance.store_config(obj, 'worker', new_wrkr);
+        config_instance.store_config(obj, 'is_compiled', true);
+        return
+    end
+
    def_wrkr = obj.worker_;
    if strcmp(new_wrkr,def_wrkr)
        cur_fmw = get_or_restore_field(obj,'parallel_cluster');
@@ -18,12 +27,12 @@
                'to all running Matlab sessions but parallel config can not find it.',...
                ' Parallel extensions are disabled'],...
                new_wrkr)
-            config_store.instance().store_config(obj,...
+            config_instance.store_config(obj,...
                'parallel_cluster','none','cluster_config','none');
        end
    else
-        config_store.instance().store_config(obj,'worker',def_wrkr);
+        config_instance.store_config(obj,'worker',def_wrkr);
        error('PARALLEL_CONFIG:invalid_argument',...
            ['The script to run in parallel (%s) should be available ',...
            'to all running Matlab sessions but parallel config can not find it.',...
@@ -32,6 +41,27 @@
    end
 else % worker function is available.
-    config_store.instance().store_config(obj,'worker',new_wrkr);
+    config_instance.store_config(obj, 'worker', new_wrkr);
+    config_instance.store_config(obj, 'is_compiled', false);
 end
+end % function
+function out = check_compiled_(worker)
+out = '';
+if is_file(worker) && ~endsWith(worker, '.m')
+    % Assume if input is full path to file, then it is a compiled worker
+    out = worker;
+else
+    if ispc()
+        cmd = 'where';
+    else
+        cmd = 'which';
+    end
+    [rs, rv] = system([cmd ' ' worker]);
+    if rs == 0
+        % Assume if it is on the system path, then it is a compiled worker
+        out = splitlines(strip(rv));
+        out = out{1}; % Only take first path if there is more than one
+    end
+end
+end
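
Taken together, these changes let the worker property point at a compiled executable instead of an m-file: check_and_set_worker_ falls back to check_compiled_ when which() finds nothing, and records is_compiled accordingly. An illustrative configuration sketch (the executable path is hypothetical, and the setter is assumed to route through check_and_set_worker_, as the file layout suggests):

    % Hypothetical compiled worker: a full path to an existing non-.m file
    % is accepted directly; a bare name is resolved via the system PATH
    % ('where' on Windows, 'which' elsewhere).
    pc = parallel_config;
    pc.worker = '/opt/horace/compiled_worker';
    disp(pc.is_compiled)         % expected to report true for a compiled worker
    % Note: the parpool cluster now rejects compiled workers (see the
    % ClusterParpoolWrapper assert above), so pick a different cluster type.
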