diff --git a/commit/__init__.py b/commit/__init__.py index 3362d71..6d0f46c 100755 --- a/commit/__init__.py +++ b/commit/__init__.py @@ -7,4 +7,4 @@ try: __version__ = metadata.version('dmri-commit') except metadata.PackageNotFoundError: - __version__ = 'not installed' + __version__ = 'not installed' \ No newline at end of file diff --git a/commit/core.pyx b/commit/core.pyx index 479816b..6a2e672 100644 --- a/commit/core.pyx +++ b/commit/core.pyx @@ -2,36 +2,28 @@ #cython: language_level=3, boundscheck=False, wraparound=False, nonecheck=False, cdivision=True, initializedcheck=False, binding=False cimport numpy as np - import glob from os import makedirs, remove, listdir from os.path import exists, join as pjoin, isfile, isdir import pyximport import sys import time - import nibabel - import numpy as np - import pickle - -from pkg_resources import get_distribution - +from importlib import metadata import amico.scheme import amico.lut - from dicelib.ui import ProgressBar, setup_logger from dicelib import ui from dicelib.utils import format_time - import commit.models import commit.solvers from commit.operator import operator - logger = setup_logger('core') + def setup( lmax=12 ) : """General setup/initialization of the COMMIT framework. @@ -110,14 +102,14 @@ cdef class Evaluation : self.regularisation_params = None # set by "set_regularisation" method self.x = None # set by "fit" method self.confidence_map_img = None # set by "fit" method - self.debias_mask = None # set by "fit" method + self.debias_mask = None # set by "fit" method self.x_nnls = None # set by "fit" method (coefficients of IC compartment estimated without regularization) self.verbose = 3 # store all the parameters of an evaluation with COMMIT self.CONFIG = {} self.temp_data = {} - self.set_config('version', get_distribution('dmri-commit').version) + self.set_config('version', metadata.version('dmri-commit')) self.set_config('study_path', study_path) self.set_config('subject', subject) self.set_config('dictionary_path', dictionary_path) @@ -151,6 +143,7 @@ cdef class Evaluation : self.verbose = verbose ui.set_verbose( 'core', verbose ) + def set_config( self, key, value ) : self.CONFIG[ key ] = value self.temp_data[ key ] = value @@ -193,7 +186,7 @@ cdef class Evaluation : self.set_config('b0_thr', b0_thr) logger.subinfo('diffusion-weighted signal', indent_char='-', indent_lvl=2) self.scheme = amico.scheme.Scheme( pjoin( self.get_config('DATA_path'), scheme_filename), b0_thr ) - logger.subinfo('%d samples, %d shells' % ( self.scheme.nS, len(self.scheme.shells) ), indent_lvl=2, indent_char='-' ) + logger.subinfo( f'{self.scheme.nS} samples, {len(self.scheme.shells)} shells', indent_lvl=2, indent_char='-' ) scheme_string = f'{self.scheme.b0_count} @ b=0' for i in xrange(len(self.scheme.shells)) : scheme_string += f', {len(self.scheme.shells[i]["idx"])} @ b={self.scheme.shells[i]["b"]:.1f}' @@ -216,7 +209,7 @@ cdef class Evaluation : self.set_config('pixdim', tuple( hdr.get_zooms()[:3] )) logger.subinfo('dim : %d x %d x %d x %d' % self.niiDWI_img.shape, indent_lvl=2, indent_char='-' ) logger.subinfo('pixdim : %.3f x %.3f x %.3f' % self.get_config('pixdim'), indent_lvl=2, indent_char='-' ) - logger.subinfo('values : min=%.2f, max=%.2f, mean=%.2f' % ( self.niiDWI_img.min(), self.niiDWI_img.max(), self.niiDWI_img.mean() ), indent_lvl=2, indent_char='-' ) + logger.subinfo(f'values : min={self.niiDWI_img.min():.2f}, max={self.niiDWI_img.max():.2f}, mean={self.niiDWI_img.mean():.2f}', indent_lvl=2, indent_char='-' ) if self.scheme.nS 
!= self.niiDWI_img.shape[3] : logger.error( 'Scheme does not match with input data' ) @@ -241,9 +234,8 @@ cdef class Evaluation : if self.get_config('doNormalizeSignal') : if self.scheme.b0_count > 0: - log_list = [] ret_subinfo = logger.subinfo('Normalizing to b0:', with_progress=True, indent_char='*', indent_lvl=1) - with ProgressBar(disable=self.verbose < 3, hide_on_exit=True, subinfo=ret_subinfo, log_list=log_list): + with ProgressBar(disable=self.verbose < 3, hide_on_exit=True, subinfo=ret_subinfo): b0 = np.mean( self.niiDWI_img[:,:,:,self.scheme.b0_idx], axis=3 ) idx = b0 <= b0_min_signal * b0[b0>0].mean() b0[ idx ] = 1 @@ -251,7 +243,7 @@ cdef class Evaluation : b0[ idx ] = 0 for i in xrange(self.scheme.nS) : self.niiDWI_img[:,:,:,i] *= b0 - logger.subinfo( '[ min=%.2f, max=%.2f, mean=%.2f ]' % ( self.niiDWI_img.min(), self.niiDWI_img.max(), self.niiDWI_img.mean() ), indent_lvl=2 ) + logger.subinfo( f'[ min={self.niiDWI_img.min():.2f}, max={self.niiDWI_img.max():.2f}, mean={self.niiDWI_img.mean():.2f} ]', indent_lvl=2 ) del idx, b0 else : logger.warning( 'There are no b0 volumes for normalization' ) @@ -270,7 +262,7 @@ cdef class Evaluation : mean = np.repeat( np.expand_dims(np.mean(self.niiDWI_img,axis=3),axis=3), self.niiDWI_img.shape[3], axis=3 ) self.niiDWI_img = self.niiDWI_img - mean logger.subinfo('Demeaning signal:', indent_char='*', indent_lvl=1) - logger.subinfo('[ min=%.2f, max=%.2f, mean=%.2f ]' % ( self.niiDWI_img.min(), self.niiDWI_img.max(), self.niiDWI_img.mean() ), indent_lvl=2 ) + logger.subinfo(f'[ min={self.niiDWI_img.min():.2f}, max={self.niiDWI_img.max():.2f}, mean={self.niiDWI_img.mean():.2f} ]', indent_lvl=2 ) # Check for Nan or Inf values in pre-processed data if np.isnan(self.niiDWI_img).any() or np.isinf(self.niiDWI_img).any(): @@ -298,7 +290,7 @@ cdef class Evaluation : logger.warning( 'Model "VolumeFractions" is deprecated. "ScalarMap" will be used instead (also for the name of the folder containing the results).' 
) self.model = getattr(commit.models, 'ScalarMap')() else: - logger.error( 'Model "%s" not recognized' % model_name ) + logger.error( f'Model "{model_name}" not recognized' ) # Check if a lesion mask is provided and if the model supports it if self.dictionary_info['lesion_mask']: @@ -326,18 +318,18 @@ cdef class Evaluation : Number of directions on the half of the sphere representing the possible orientations of the response functions (default : 500) """ logger.subinfo('') - logger.info( 'Simulating with "%s" model' % self.model.name ) + logger.info( f'Simulating with "{self.model.name}" model' ) if not amico.lut.is_valid( ndirs ): - logger.error( 'Unsupported value for ndirs.\nNote: Supported values for ndirs are [1, 500 (default), 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500, 7000, 7500, 8000, 8500, 9000, 9500, 10000, 32761]' ) + logger.error( 'Unsupported value for "ndirs".\nNote: Supported values for "ndirs" are [1, 500 (default), 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500, 7000, 7500, 8000, 8500, 9000, 9500, 10000, 32761]' ) if self.scheme is None : logger.error( 'Scheme not loaded; call "load_data()" first' ) if self.model is None : logger.error( 'Model not set; call "set_model()" method first' ) - if self.model.id=='VolumeFractions' or self.model.id=='ScalarMap' and ndirs!=1: + if self.model.id=='ScalarMap' and ndirs!=1: ndirs = 1 logger.subinfo('Forcing "ndirs" to 1 because model is isotropic', indent_char='*', indent_lvl=1) - + # store some values for later use self.set_config('lmax', lmax) self.set_config('ndirs', ndirs) @@ -380,7 +372,7 @@ cdef class Evaluation : logger.subinfo('') logger.info( 'Loading the kernels' ) log_list = [] - ret_subinfo = logger.subinfo( 'Resampling LUT for subject "%s":' % self.get_config('subject'), indent_char='*', indent_lvl=1, with_progress=True ) # TODO: check why not printed + ret_subinfo = logger.subinfo( f'Resampling LUT for subject "{self.get_config("subject")}":', indent_char='*', indent_lvl=1, with_progress=True ) with ProgressBar(disable=self.verbose < 3, hide_on_exit=True, subinfo=ret_subinfo, log_list=log_list): # auxiliary data structures idx_OUT, Ylm_OUT = amico.lut.aux_structures_resample( self.scheme, self.get_config('lmax') ) @@ -464,13 +456,13 @@ cdef class Evaluation : # check that ndirs of dictionary matches with that of the kernels if self.dictionary_info['ndirs'] != self.get_config('ndirs'): - logger.error( '"ndirs" of the dictionary (%d) does not match with the kernels (%d)' % (self.dictionary_info['ndirs'], self.get_config('ndirs')) ) + logger.error( f'"ndirs" of the dictionary ({self.dictionary_info["ndirs"]}) does not match with kernels ({self.get_config("ndirs")})' ) self.DICTIONARY['ndirs'] = self.dictionary_info['ndirs'] self.DICTIONARY['n_threads'] = self.dictionary_info['n_threads'] # load mask self.set_config('dictionary_mask', 'mask' if use_all_voxels_in_mask else 'tdi' ) - mask_filename = pjoin(self.get_config('TRACKING_path'),'dictionary_%s.nii'%self.get_config('dictionary_mask')) + mask_filename = pjoin(self.get_config('TRACKING_path'),f'dictionary_{self.get_config("dictionary_mask")}.nii') if not exists( mask_filename ) : mask_filename += '.gz' if not exists( mask_filename ) : @@ -554,7 +546,7 @@ cdef class Evaluation : self.DICTIONARY['ISO']['v'] = np.fromfile( pjoin(self.get_config('TRACKING_path'),'dictionary_ISO_v.dict'), dtype=np.uint32 ) self.DICTIONARY['ISO']['nV'] = self.DICTIONARY['ISO']['v'].size - + self.DICTIONARY['nV'] = 
self.DICTIONARY['MASK'].sum() # reorder the segments based on the "v" field @@ -563,7 +555,7 @@ cdef class Evaluation : del idx logger.subinfo( f"{self.DICTIONARY['ISO']['nV']} voxels", indent_char='-', indent_lvl=2 ) - + # post-processing # --------------- log_list = [] @@ -591,7 +583,7 @@ cdef class Evaluation : ---------- n : integer Number of threads to use (default : number of CPUs in the system) - """ + """ if n is None : # Use the same number of threads used in trk2dictionary n = self.DICTIONARY['n_threads'] @@ -615,7 +607,7 @@ cdef class Evaluation : tic = time.time() logger.subinfo('') logger.info( 'Distributing workload to different threads' ) - logger.subinfo('Number of threads: %d' % n , indent_char='*', indent_lvl=1 ) + logger.subinfo(f'Number of threads: {n}', indent_char='*', indent_lvl=1 ) # Distribute load for the computation of A*x product log_list = [] @@ -661,7 +653,7 @@ cdef class Evaluation : for i in xrange(n) : self.THREADS['ISO'][i] = np.searchsorted( self.DICTIONARY['ISO']['v'], self.DICTIONARY['IC']['v'][ self.THREADS['IC'][i] ] ) self.THREADS['ISO'][n] = self.DICTIONARY['ISO']['nV'] - + else : self.THREADS['ISO'] = None @@ -708,11 +700,11 @@ cdef class Evaluation : if self.DICTIONARY['ISO']['nV'] > 0 : self.THREADS['ISOt'] = np.zeros( n+1, dtype=np.uint32 ) N = np.floor( self.DICTIONARY['ISO']['nV']/n ) - + for i in xrange(1,n) : self.THREADS['ISOt'][i] = self.THREADS['ISOt'][i-1] + N self.THREADS['ISOt'][n] = self.DICTIONARY['ISO']['nV'] - + # check if some threads are not assigned any segment if np.count_nonzero( np.diff( self.THREADS['ISOt'].astype(np.int32) ) <= 0 ) : self.THREADS = None @@ -800,7 +792,7 @@ cdef class Evaluation : lambdas[2] corresponds to the Isotropic compartment The lambdas correspond to the ones described in the mathematical formulation of the regularisation term $\Omega(x) = lambdas[0]*regularisers[0](x) + lambdas[1]*regularisers[1](x) + lambdas[2]*regularisers[2](x)$ - The maximum value of the regularisation parameter is the value of lambda above which it is guaranteed that + The maximum value of the regularisation parameter is the value of lambda above which it is guaranteed that the optimal solution is zero (computed as in [3] for lasso and as in [4] for group lasso). NB: if regularisers[k] is None, then lambdas[k] is ignored. NB: lambdas[k] must be a float greater than 0. @@ -842,14 +834,14 @@ cdef class Evaluation : group_weights_extra[k] - additional information associated to group k, only if group_weights_extra is not None ||x_nnls[g[k]]||_2 - l2 norm of the streamline weights in group k, only if group_weights_adaptive is True 'group_weights_cardinality' - boolean : - if True, the weight of a group is multiplied by the square root of its size in order to penalize + if True, the weight of a group is multiplied by the square root of its size in order to penalize all groups in the same manner regardless their cardinality. This field can be specified only if regularisers[0] is 'group_lasso' or 'sparse_group_lasso'. 'group_weights_adaptive' - boolean: - if True, the weights of the groups are scaled by the l2 norm of the streamline weights obtained + if True, the weights of the groups are scaled by the l2 norm of the streamline weights obtained by solving the NNLS problem without regularisation. This field can be specified only if regularisers[0] is 'group_lasso' or 'sparse_group_lasso'. 
- NB: if both 'group_weights_cardinality' and 'group_weights_adaptive' are True, then the weights + NB: if both 'group_weights_cardinality' and 'group_weights_adaptive' are True, then the weights are computed as in [2]. 'group_weights_extra' - np.array(np.float64) : additional information associated to each group of the IC compartment, based on prior knowledge. @@ -862,7 +854,7 @@ References: [1] Jenatton et al. - 'Proximal Methods for Hierarchical Sparse Coding', https://www.jmlr.org/papers/volume12/jenatton11a/jenatton11a.pdf - [2] Schiavi et al. - 'A new method for accurate in vivo mapping of human brain connections using + [2] Schiavi et al. - 'A new method for accurate in vivo mapping of human brain connections using microstructural and anatomical information', https://doi.org/10.1126/sciadv.aba8245 [3] Kim et al. - 'An interior-point method for large-scale l1-regularized least squares', https://doi.org/10.1109/JSTSP.2007.910971 [4] Yuan, Lin - 'Model selection and estimation in regression with grouped variables', https://doi.org/10.1111/j.1467-9868.2005.00532.x @@ -870,8 +862,8 @@ # functions to compute the maximum value of the regularisation parameter (lambda) - def compute_lambda_max_lasso(start, size, w_coeff): - # Ref. Kim et al. - 'An interior-point method for large-scale l1-regularized least squares' https://doi.org/10.1109/JSTSP.2007.910971 + def compute_lambda_max_lasso(start, size, w_coeff): + # Ref. Kim et al. - 'An interior-point method for large-scale l1-regularized least squares' https://doi.org/10.1109/JSTSP.2007.910971 At = self.A.T y = self.get_y() Aty = np.asarray(At.dot(y)) @@ -886,7 +878,7 @@ for g in range(w_group.size): norm_group[g] = np.sqrt(np.sum(Aty[idx_group[g]]**2)) / w_group[g] return np.max(norm_group) - + if self.niiDWI is None : logger.error( 'Data not loaded; call "load_data()" first' ) if self.DICTIONARY is None : @@ -925,12 +917,12 @@ tr = time.time() logger.subinfo('') logger.info( 'Setting regularisation' ) - + ############################ # INTRACELLULAR COMPARTMENT# ############################ logger.subinfo( 'IC compartment:', indent_char='*', indent_lvl=1) - + if regularisation['regIC'] == 'lasso': if lambdas[0] is None: logger.error('Missing regularisation parameter for the IC compartment') @@ -1030,7 +1022,7 @@ all_idx_in = np.sort(np.unique(np.concatenate(dictIC_params['group_idx']))) all_idx = np.arange(self.DICTIONARY['IC']['nF'], dtype=np.int32) if np.any(all_idx_in != all_idx): - logger.error('Group indices must contain all the indices of the input streamlines') + logger.error('Group indices must contain all the indices of the input streamlines') # check if group_weights_extra is consistent with the number of groups if (regularisation['regIC'] == 'group_lasso' or regularisation['regIC'] == 'sparse_group_lasso') and 'group_weights_extra' in dictIC_params: @@ -1136,9 +1128,9 @@ logger.debug( str_weights ) - ########################### - # EXTRCELLULAR COMPARTMENT# - ########################### + ############################# + # EXTRACELLULAR COMPARTMENT # + ############################# logger.subinfo( 'EC compartment:', indent_char='*', indent_lvl=1 ) if regularisation['regEC'] == 'lasso': @@ -1238,7 +1230,7 @@ logger.subinfo( f'Regularisation type: {regularisation["regISO"]}', indent_lvl=2, indent_char='-' ) logger.subinfo( f'Non-negativity constraint:
{regularisation["nnISO"]}', indent_char='-', indent_lvl=2 ) - if regularisation['regISO'] is not None: + if regularisation['regISO'] is not None: logger.debug( f'Lambda max: {regularisation["lambdaISO_max"]}' ) logger.debug( f'% lambda: {regularisation["lambdaISO_perc"]}' ) logger.debug( f'Lambda used: {regularisation["lambdaISO"]}' ) @@ -1347,7 +1339,7 @@ cdef class Evaluation : elif(confidence_map_rescale): confidence_array = ( confidence_array - cMIN ) / ( cMAX - cMIN ) - logger.subinfo ( '[Confidence map interval was scaled from the original [%.1f, %.1f] to the intended [%.1f, %.1f] linearly]' % ( cMIN, cMAX, np.min(confidence_array), np.max(confidence_array) ), indent_lvl=3 ) + logger.subinfo ( f'[Confidence map interval was scaled from the original [{cMIN:.1f}, {cMAX:.1f}] to the intended [{np.min(confidence_array):.1f}, {np.max(confidence_array):.1f}] linearly]', indent_lvl=3 ) confidence_array_changed = True if(confidence_array_changed): @@ -1484,7 +1476,7 @@ cdef class Evaluation : RESULTS_path = RESULTS_path + path_suffix logger.subinfo('') - logger.info( 'Saving results to "%s/*"' % RESULTS_path ) + logger.info( f'Saving output to "{RESULTS_path}/*"' ) tic = time.time() if self.x is None : @@ -1494,8 +1486,7 @@ cdef class Evaluation : nE = self.DICTIONARY['EC']['nE'] nV = self.DICTIONARY['nV'] norm_fib = np.ones( nF ) - # x is the x of the original problem - # self.x is the x preconditioned + # NB: x correspond to the original problem, self.x are the preconditioned ones if self.get_config('doNormalizeKernels') : # renormalize the coefficients norm1 = np.repeat(self.KERNELS['wmr_norm'],nF) @@ -1522,7 +1513,7 @@ cdef class Evaluation : affine = self.niiDWI.affine if nibabel.__version__ >= '2.0.0' else self.niiDWI.get_affine() niiMAP = nibabel.Nifti1Image( niiMAP_img, affine ) niiMAP_hdr = niiMAP.header if nibabel.__version__ >= '2.0.0' else niiMAP.get_header() - niiMAP_hdr['descrip'] = 'Created with COMMIT %s'%self.get_config('version') + niiMAP_hdr['descrip'] = f'Created with COMMIT {self.get_config("version")}' niiMAP_hdr['db_name'] = '' if self.debias_mask is not None: @@ -1663,22 +1654,18 @@ cdef class Evaluation : nibabel.save( niiIC , pjoin(RESULTS_path,'compartment_IC.nii.gz') ) nibabel.save( niiEC , pjoin(RESULTS_path,'compartment_EC.nii.gz') ) - - if hasattr(self.model, 'lesion_mask') and self.model.lesion_mask: - niiLesion_img = niiISO_img.copy() - niiLesion = nibabel.Nifti1Image( niiLesion_img, affine, header=niiMAP_hdr ) - nibabel.save( niiLesion , pjoin(RESULTS_path,'compartment_lesion.nii.gz') ) - niiISO_img = np.zeros( self.get_config('dim'), dtype=np.float32 ) - niiISO = nibabel.Nifti1Image( niiISO_img, affine, header=niiMAP_hdr ) - nibabel.save( niiISO , pjoin(RESULTS_path,'compartment_ISO.nii.gz') ) - # Configuration and results - logger.subinfo('Configuration and results:', indent_char='*', indent_lvl=1) - log_list = [] - ret_subinfo = logger.subinfo('streamline_weights.txt', indent_lvl=2, indent_char='-', with_progress=False if hasattr(self.model, '_postprocess') else True) - with ProgressBar(disable=self.verbose < 3 or hasattr(self.model, '_postprocess'), hide_on_exit=True, subinfo=ret_subinfo, log_list=log_list): - xic, _, _ = self.get_coeffs() + # process and save streamline_weights.txt + logger.subinfo('Streamline weights:', indent_char='*', indent_lvl=1) + xic, _, _ = self.get_coeffs() + + ret_subinfo = logger.subinfo(f'Handling multiple coeffs per streamline: "{stat_coeffs}"', indent_lvl=2, indent_char='-', with_progress=True) + with 
ProgressBar(disable=self.verbose<3, hide_on_exit=True, subinfo=ret_subinfo): + self.set_config('stat_coeffs', stat_coeffs) + if stat_coeffs == 'all' : + if self.dictionary_info['blur_gauss_extent'] > 0 or self.dictionary_info['blur_core_extent'] > 0 : + logger.error( 'Not yet implemented. Unable to account for blur in case of multiple streamline contributions.' ) if stat_coeffs != 'all' and xic.size > 0 : xic = np.reshape( xic, (-1,self.DICTIONARY['TRK']['kept'].size) ) if stat_coeffs == 'sum' : @@ -1695,14 +1682,11 @@ logger.error( 'Stat not allowed. Possible values: sum, mean, median, min, max, all' ) # scale output weights if blur was used - if self.dictionary_info['blur_gauss_extent'] > 0 or self.dictionary_info['blur_core_extent'] > 0 : - if stat_coeffs == 'all' : - logger.error( 'Not yet implemented. Unable to account for blur in case of multiple streamline constributions.' ) if "tractogram_centr_idx" in self.dictionary_info.keys(): ordered_idx = self.dictionary_info["tractogram_centr_idx"].astype(np.int64) unravel_weights = np.zeros( self.dictionary_info['n_count'], dtype=np.float64) unravel_weights[ordered_idx] = self.DICTIONARY['TRK']['kept'].astype(np.float64) - temp_weights = unravel_weights[ordered_idx] + temp_weights = unravel_weights[ordered_idx] if self.dictionary_info['blur_gauss_extent'] > 0 or self.dictionary_info['blur_core_extent'] > 0: temp_weights[temp_weights>0] = xic[self.DICTIONARY['TRK']['kept']>0] * self.DICTIONARY['TRK']['lenTot'] / self.DICTIONARY['TRK']['len'] unravel_weights[ordered_idx] = temp_weights @@ -1711,43 +1695,37 @@ temp_weights[temp_weights>0] = xic[self.DICTIONARY['TRK']['kept']>0] unravel_weights[ordered_idx] = temp_weights xic = unravel_weights - else: if self.dictionary_info['blur_gauss_extent'] > 0 or self.dictionary_info['blur_core_extent'] > 0: xic[ self.DICTIONARY['TRK']['kept']==1 ] *= self.DICTIONARY['TRK']['lenTot'] / self.DICTIONARY['TRK']['len'] - - self.temp_data['DICTIONARY'] = self.DICTIONARY - self.temp_data['niiIC_img'] = niiIC_img - self.temp_data['niiEC_img'] = niiEC_img - self.temp_data['niiISO_img'] = niiLesion_img if hasattr(self.model, 'lesion_mask') and self.model.lesion_mask else niiISO_img - self.temp_data['streamline_weights'] = xic - self.temp_data['RESULTS_path'] = RESULTS_path - self.temp_data['affine'] = self.niiDWI.affine if nibabel.__version__ >= '2.0.0' else self.niiDWI.get_affine() - - if hasattr(self.model, '_postprocess') and (hasattr(self.model, 'lesion_mask') and self.model.lesion_mask): - self.model._postprocess(self.temp_data, verbose=self.verbose) - else: - np.savetxt( pjoin(RESULTS_path,'streamline_weights.txt'), xic, fmt=coeffs_format ) + # call the (optional) postprocessing required by the specific model + if hasattr(self.model, '_postprocess') : + ret_subinfo = logger.subinfo('Calling model-specific postprocessing', indent_lvl=2, indent_char='-', with_progress=True) + with ProgressBar(disable=self.verbose<3, hide_on_exit=True, subinfo=ret_subinfo): + #TODO: make this part "model independent" passing all variables of this context + xic = self.model._postprocess(self, xic) - self.set_config('stat_coeffs', stat_coeffs) + # Save xic to streamline_weights.txt + ret_subinfo = logger.subinfo('streamline_weights.txt', indent_lvl=2, indent_char='-', with_progress=True) + with ProgressBar(disable=self.verbose<3, hide_on_exit=True, subinfo=ret_subinfo): + np.savetxt( pjoin(RESULTS_path,'streamline_weights.txt'), xic, fmt=coeffs_format ) # Save to a pickle file the
following items: # item 0: dictionary with all the configuration details # item 1: np.array obtained through the optimisation process with the normalised kernels # item 2: np.array renormalisation of coeffs in item 1 - log_list = [] - ret_subinfo = logger.subinfo('results.pickle', indent_char='-', indent_lvl=2, with_progress=True) - with ProgressBar(disable=self.verbose < 3, hide_on_exit=True, subinfo=ret_subinfo, log_list=log_list): + logger.subinfo('Configuration settings:', indent_lvl=1, indent_char='*') + ret_subinfo = logger.subinfo('results.pickle', indent_lvl=2, indent_char='-', with_progress=True) + with ProgressBar(disable=self.verbose < 3, hide_on_exit=True, subinfo=ret_subinfo): with open( pjoin(RESULTS_path,'results.pickle'), 'wb+' ) as fid : self.CONFIG['optimization']['regularisation'].pop('omega', None) self.CONFIG['optimization']['regularisation'].pop('prox', None) pickle.dump( [self.CONFIG, self.x, x], fid, protocol=2 ) if save_est_dwi: - log_list = [] - ret_subinfo = logger.subinfo('Estimated signal:', indent_char='-', indent_lvl=2, with_progress=True) - with ProgressBar(disable=self.verbose < 3, hide_on_exit=True, subinfo=ret_subinfo, log_list=log_list): + ret_subinfo = logger.subinfo('Estimated signal', indent_lvl=2, indent_char='-', with_progress=True) + with ProgressBar(disable=self.verbose < 3, hide_on_exit=True, subinfo=ret_subinfo): self.niiDWI_img[ self.DICTIONARY['MASK_ix'], self.DICTIONARY['MASK_iy'], self.DICTIONARY['MASK_iz'], : ] = y_est nibabel.save( nibabel.Nifti1Image( self.niiDWI_img , affine ), pjoin(RESULTS_path,'fit_signal_estimated.nii.gz') ) self.niiDWI_img[ self.DICTIONARY['MASK_ix'], self.DICTIONARY['MASK_iy'], self.DICTIONARY['MASK_iz'], : ] = y_mea diff --git a/commit/models.pyx b/commit/models.pyx index 6038d69..6793997 100755 --- a/commit/models.pyx +++ b/commit/models.pyx @@ -1,30 +1,20 @@ #!python #cython: language_level=3, boundscheck=False, wraparound=False, profile=False - -from os import cpu_count as num_cpu from os.path import join as pjoin - -from setuptools import Extension - -from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np -import nibabel as nib - +import nibabel from amico.models import BaseModel, StickZeppelinBall as _StickZeppelinBall, CylinderZeppelinBall as _CylinderZeppelinBall import amico.util as util - from dicelib.ui import setup_logger, set_verbose, ProgressBar - - logger = setup_logger('models') - class StickZeppelinBall(_StickZeppelinBall): """Simulate the response functions according to the Stick-Zeppelin-Ball model. See the AMICO.model module for details. """ def resample(self, in_path, idx_out, Ylm_out, doMergeB0, ndirs): + #FIXME: use logger util.set_verbose(2) return super().resample(in_path, idx_out, Ylm_out, doMergeB0, ndirs) @@ -34,60 +24,15 @@ class CylinderZeppelinBall(_CylinderZeppelinBall): See the AMICO.model module for details. """ def resample(self, in_path, idx_out, Ylm_out, doMergeB0, ndirs): + #FIXME: use logger util.set_verbose(2) return super().resample(in_path, idx_out, Ylm_out, doMergeB0, ndirs) -# class VolumeFractions(BaseModel): -# """Implements a simple model where each compartment contributes only with -# its own volume fraction. This model has been created to test there -# ability to remove false positive fibers with COMMIT. 
-# """ -# def __init__(self): -# self.id = 'VolumeFractions' -# self.name = 'Volume fractions' -# self.maps_name = [] -# self.maps_descr = [] -# self.nolut = True - -# def set(self): -# return - -# def get_params(self): -# params = {} -# params['id'] = self.id -# params['name'] = self.name -# return params - -# def set_solver(self): -# logger.error('Not implemented') - -# def generate(self, out_path, aux, idx_in, idx_out, ndirs): -# return - -# def resample(self, in_path, idx_out, Ylm_out, doMergeB0, ndirs): -# if doMergeB0: -# nS = 1 + self.scheme.dwi_count -# merge_idx = np.hstack((self.scheme.b0_idx[0], self.scheme.dwi_idx)) -# else: -# nS = self.scheme.nS -# merge_idx = np.arange(nS) - -# KERNELS = {} -# KERNELS['model'] = self.id -# KERNELS['wmr'] = np.ones((1, ndirs, nS), dtype=np.float32) -# KERNELS['wmh'] = np.ones((0, ndirs, nS), dtype=np.float32) -# KERNELS['iso'] = np.ones((0, nS), dtype=np.float32) -# return KERNELS - -# def fit(self, evaluation): -# logger.error('Not implemented') - - class ScalarMap( BaseModel ) : - """Implements a simple model where each compartment contributes only with - its own volume fraction. This model has been created to test there - ability to remove false positive fibers with COMMIT. + """Implements a simple model where each compartment contributes to a scalar map, + e.g. intra-axonsl signal fraction or myelin water fraction, proportionally to + its local length inside each voxel. """ def __init__( self ) : @@ -111,7 +56,7 @@ class ScalarMap( BaseModel ) : def generate( self, out_path, aux, idx_in, idx_out, ndirs ) : return - + def fit(self, evaluation): """Placeholder implementation for the abstract method.""" logger.error('Not implemented') @@ -134,115 +79,74 @@ class ScalarMap( BaseModel ) : KERNELS['iso'] = np.ones( (0,nS), dtype=np.float32 ) return KERNELS - - def find_idx(self, ic_v, ic_les, dict_icf, max_size, progress_bar, chunk_num): - cdef unsigned int [::1]ic_les_view = ic_les - cdef long les_shape = ic_les.shape[0] - cdef long ic_v_shape = ic_v.shape[0] - cdef int chunk_num_mem = chunk_num - cdef unsigned int [::1]progress_bar_mem = progress_bar - - - # indices_vox = np.zeros(les_shape*ic_v_shape, dtype=np.uint32) - indices_vox = np.zeros(max_size, dtype=np.uint32) - cdef unsigned int [::1]indices_vox_view = indices_vox - # indices_str = np.zeros(les_shape*ic_v_shape, dtype=np.uint32) - indices_str = np.zeros(max_size, dtype=np.uint32) - cdef unsigned int [::1]indices_str_view = indices_str - cdef unsigned int[::1] ic_v_view = ic_v - - cdef int count = 0 - - cdef unsigned int [::1]dict_icf_view = dict_icf - - cdef unsigned int ic_les_val = 0 - cdef size_t i, j = 0 - - with nogil: - for i in range(les_shape): - ic_les_val = ic_les_view[i] - for j in range(ic_v_shape): - if ic_v_view[j] == ic_les_val:# and ic_les_val > 0: - indices_vox_view[count] = ic_v_view[j] - indices_str_view[count] = dict_icf_view[j] - count += 1 - progress_bar_mem[chunk_num_mem] += 1 - - return indices_vox[:count], indices_str[:count] - - def _postprocess(self, preproc_dict, verbose=1): - ISO_map = np.array(preproc_dict['niiISO_img'], dtype=np.float32) - IC_map = np.array(preproc_dict['niiIC_img'], dtype=np.float32) - - dictionary = preproc_dict['DICTIONARY'] - - kept = dictionary['TRK']['kept'] - x_weights = preproc_dict["streamline_weights"][kept==1].copy() - x_weights_scaled = x_weights.copy() - new_weights = np.zeros_like(kept, dtype=np.float32) - - - IC = IC_map[dictionary['MASK_ix'], dictionary['MASK_iy'], dictionary['MASK_iz']] - ISO = 
ISO_map[dictionary['MASK_ix'], dictionary['MASK_iy'], dictionary['MASK_iz']] - ISO_scaled = np.zeros_like(ISO) + def _postprocess(self, evaluation, xic): + """Rescale the streamline weights using the local tissue damage estimated + in all imaging voxels with the new COMMIT_lesion model. + + Parameters + ---------- + evaluation : object + Evaluation object, to enable accessing the whole content of the + evaluation + + xic : np.array + Streamline weights + + Returns + ------- + np.array + The rescaled streamline weights accounting for lesions + """ + if not self.lesion_mask: + # nothing to do if lesion mask is not given + return xic + + RESULTS_path = evaluation.get_config('RESULTS_path') + niiISO_img = np.asanyarray( nibabel.load( pjoin(RESULTS_path,'compartment_ISO.nii.gz') ).dataobj ).astype(np.float32) + ISO = niiISO_img[evaluation.DICTIONARY['MASK_ix'], evaluation.DICTIONARY['MASK_iy'], evaluation.DICTIONARY['MASK_iz']] + if np.count_nonzero(ISO>0) == 0: + logger.warning('No lesions found') + return xic + + # rescale the input scalar map in each voxel according to estimated lesion contributions + niiIC_img = np.asanyarray( nibabel.load( pjoin(RESULTS_path,'compartment_IC.nii.gz') ).dataobj ).astype(np.float32) + IC = niiIC_img[evaluation.DICTIONARY['MASK_ix'], evaluation.DICTIONARY['MASK_iy'], evaluation.DICTIONARY['MASK_iz']] + ISO_scaled = np.zeros_like(ISO, dtype=np.float32) ISO_scaled[ISO>0] = (IC[ISO>0] - ISO[ISO>0]) / IC[ISO>0] + ISO_scaled_save = np.zeros_like(niiISO_img, dtype=np.float32) + ISO_scaled_save[evaluation.DICTIONARY['MASK_ix'], evaluation.DICTIONARY['MASK_iy'], evaluation.DICTIONARY['MASK_iz']] = ISO_scaled + affine = evaluation.niiDWI.affine if nibabel.__version__ >= '2.0.0' else evaluation.niiDWI.get_affine() + nibabel.save(nibabel.Nifti1Image(ISO_scaled_save, affine), pjoin(RESULTS_path,'compartment_IC_lesion_scaled.nii.gz')) - ISO_scaled_save = np.zeros_like(ISO_map) - ISO_scaled_save[dictionary['MASK_ix'], dictionary['MASK_iy'], dictionary['MASK_iz']] = ISO_scaled - nib.save(nib.Nifti1Image(ISO_scaled_save, preproc_dict['affine']), pjoin(preproc_dict["RESULTS_path"],'compartment_IC_lesion_scaled.nii.gz')) - - idx_les = np.argwhere(ISO_scaled > 0)[:,0].astype(np.uint32) - if idx_les.shape[0] == 0: - logger.error('No lesion found in the input image.') - return - - - result = [] - dict_idx_v = dictionary['IC']['v'] - cdef unsigned int [::1]dict_idx_v_view = dict_idx_v - - cdef unsigned int cpu_count = num_cpu() - cdef unsigned int[:] find_idx_progress = np.zeros(cpu_count, dtype=np.uint32) - - n = idx_les.shape[0] - c = n // cpu_count - max_size = int(3e9/cpu_count) - chunks = [] - for ii, jj in zip(range(0, n, c), range(c, n+1, c)): - chunks.append((ii, jj)) - if chunks[len(chunks)-1][1] != n: - chunks[len(chunks)-1] = (chunks[len(chunks)-1][0], n) - logger - logger.subinfo('Recomputing streamlines weights accounting for lesions', indent_lvl=3, indent_char='->', with_progress=True) - with ProgressBar(multithread_progress=find_idx_progress, total=n, - disable=verbose<3, hide_on_exit=True, subinfo=True) as pbar: - with ThreadPoolExecutor(max_workers=cpu_count) as executor: - futures = [executor.submit(self.find_idx, dict_idx_v_view, idx_les[ii:jj], dictionary['IC']['fiber'], max_size, find_idx_progress, chunk_num) for chunk_num, (ii, jj) in enumerate(chunks)] - for future in as_completed(futures): - result.append(future.result()) - - idx_vox = [] - idx_str = [] - for r in result: - idx_vox.extend(r[0].tolist()) - idx_str.extend(r[1].tolist()) - - cdef
double [::1] x_weights_view = x_weights - cdef double [::1] x_weights_scaled_view = x_weights_scaled - cdef double x_weight = 0 - cdef float [::1] ISO_scaled_view = ISO_scaled - - cdef size_t vox = 0 - cdef size_t str_i = 0 - - for vox, str_i in zip(idx_vox, idx_str): - x_weight = x_weights_view[str_i] * ISO_scaled_view[vox] - if x_weight < x_weights_scaled_view[str_i]: - x_weights_scaled_view[str_i] = x_weight + # save the map of local tissue damage estimated in each voxel + nibabel.save( nibabel.Nifti1Image( niiISO_img, affine ), pjoin(RESULTS_path,'compartment_lesion.nii.gz') ) - new_weights[kept==1] = x_weights_scaled - coeffs_format='%.5e' + # override ISO map and set it to 0 + nibabel.save( nibabel.Nifti1Image( 0*niiISO_img, affine), pjoin(RESULTS_path,'compartment_ISO.nii.gz') ) - np.savetxt( pjoin(preproc_dict["RESULTS_path"],'streamline_weights.txt'), new_weights, fmt=coeffs_format ) \ No newline at end of file + # rescale each streamline weight + kept = evaluation.DICTIONARY['TRK']['kept'] + cdef double [::1] xic_view = xic[kept==1] + cdef double [::1] xic_scaled_view = xic[kept==1].copy() + cdef float [::1] ISO_scaled_view = ISO_scaled + cdef unsigned int [::1] idx_v_view = evaluation.DICTIONARY['IC']['v'] + cdef unsigned int [::1] idx_f_view = evaluation.DICTIONARY['IC']['fiber'] + cdef size_t i, idx_v, idx_f + cdef double val + + # Rescaling streamline weights accounting for lesions + for i in range(evaluation.DICTIONARY['IC']['v'].shape[0]): + idx_v = idx_v_view[i] + val = ISO_scaled_view[idx_v] + if val > 0: + idx_f = idx_f_view[i] + #TODO: allow considering values other than the min + if xic_view[idx_f] * val < xic_scaled_view[idx_f]: + xic_scaled_view[idx_f] = xic_view[idx_f] * val + + # return rescaled streamline weights + xic_scaled = np.zeros_like(kept, dtype=np.float32) + xic_scaled[kept==1] = xic_scaled_view + return xic_scaled \ No newline at end of file diff --git a/commit/solvers.py b/commit/solvers.py index 730f06d..5171595 100755 --- a/commit/solvers.py +++ b/commit/solvers.py @@ -24,7 +24,7 @@ def init_regularisation(regularisation_params): # check if regularisations are in the list if regularisation_params['regIC'] not in list_regularizers or regularisation_params['regEC'] not in list_regularizers or regularisation_params['regISO'] not in list_regularizers: logger.error('Regularisation not in the list') - + startIC = regularisation_params.get('startIC') sizeIC = regularisation_params.get('sizeIC') startEC = regularisation_params.get('startEC') @@ -97,7 +97,7 @@ def init_regularisation(regularisation_params): proxIC = lambda x, scaling: non_negativity(prox_group_lasso(x,groupIdxIC,groupSizeIC,dictIC_params['group_weights'],scaling*lambda_group_IC),startIC,sizeIC) else: proxIC = lambda x, scaling: prox_group_lasso(x,groupIdxIC,groupSizeIC,dictIC_params['group_weights'],scaling*lambda_group_IC) - + elif regularisation_params['regIC'] == 'sparse_group_lasso': if not len(dictIC_params['group_idx_kept']) == len(dictIC_params['group_weights']): logger.error('Number of groups and weights do not match') @@ -132,12 +132,10 @@ proxIC = lambda x, scaling: prox_group_lasso(soft_thresholding(x,scaling*lambdaIC,startIC,sizeIC),groupIdxIC,groupSizeIC,dictIC_params['group_weights'],scaling*lambda_group_IC) - ########################### - # EXTRCELLULAR COMPARTMENT# - ########################### - - dictEC_params = regularisation_params.get('dictEC_params') - + ############################# + # EXTRACELLULAR COMPARTMENT # +
############################# + # dictEC_params = regularisation_params.get('dictEC_params') if regularisation_params['regEC'] is None: omegaEC = lambda x: 0.0 if regularisation_params.get('nnEC')==True: @@ -167,12 +165,10 @@ # proxEC = lambda x: projection_onto_l2_ball(x, lambdaEC, startEC, sizeEC) - ######################## - # ISOTROPIC COMPARTMENT# - ######################## - - dictISO_params = regularisation_params.get('dictISO_params') - + ######################### + # ISOTROPIC COMPARTMENT # + ######################### + # dictISO_params = regularisation_params.get('dictISO_params') if regularisation_params['regISO'] is None: omegaISO = lambda x: 0.0 if regularisation_params.get('nnISO')==True: diff --git a/commit/trk2dictionary/trk2dictionary.pyx b/commit/trk2dictionary/trk2dictionary.pyx index b167134..1f58e5d 100644 --- a/commit/trk2dictionary/trk2dictionary.pyx +++ b/commit/trk2dictionary/trk2dictionary.pyx @@ -1,36 +1,27 @@ #!python # cython: language_level=3, c_string_type=str, c_string_encoding=ascii, boundscheck=False, wraparound=False, profile=False - from libc.stdlib cimport malloc, free from libcpp cimport bool cimport numpy as np - import nibabel import numpy as np - import os from os.path import join, exists, splitext, dirname, isdir, isfile from os import makedirs, remove - import amico - from dicelib.clustering import run_clustering from dicelib.ui import _in_notebook from dicelib.ui import ProgressBar, setup_logger from dicelib import ui from dicelib.utils import format_time - import pickle - -from pkg_resources import get_distribution - +from importlib import metadata import shutil - import time - logger = setup_logger('trk2dictionary') + # Interface to actual C code cdef extern from "trk2dictionary_c.cpp": int trk2dictionary( @@ -99,7 +90,7 @@ cpdef run( filename_tractogram=None, path_out=None, filename_peaks=None, filenam the mask is created from all voxels intersected by the tracts. filename_lesion_mask : string - Path to a binary mask that defines the position(s) of the lesion(s). + Path to a binary mask that defines the position(s) of the lesion(s). do_intersect : boolean If True then streamline segments that intersect voxel boundaries are split (default).
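The hunk above documents the lesion-aware input of the dictionary-building step, which is what later enables the ScalarMap._postprocess rescaling in models.pyx. A minimal usage sketch of the updated entry point (the file paths are hypothetical placeholders; all other arguments keep their documented defaults):

    from commit import trk2dictionary
    trk2dictionary.run(
        filename_tractogram='tractogram.tck',      # streamlines used to build the dictionary
        path_out='COMMIT_dictionary',              # folder where the dictionary_* files are written
        filename_mask='wm_mask.nii.gz',            # binary mask restricting the analysis
        filename_lesion_mask='lesion_mask.nii.gz'  # binary mask with the position(s) of the lesion(s)
    )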
@@ -431,7 +422,7 @@ cpdef run( filename_tractogram=None, path_out=None, filename_peaks=None, filenam niiMASK_hdr = _get_header( niiMASK ) logger.subinfo( f'{niiMASK.shape[0]} x {niiMASK.shape[1]} x {niiMASK.shape[2]}', indent_lvl=3, indent_char='-' ) logger.subinfo( f'{niiMASK_hdr["pixdim"][1]:.4f} x {niiMASK_hdr["pixdim"][2]:.4f} x {niiMASK_hdr["pixdim"][3]:.4f}', indent_lvl=3, indent_char='-' ) - if ( Nx!=niiMASK.shape[0] or Ny!=niiMASK.shape[1] or Nz!=niiMASK.shape[2] or + if ( Nx!=niiMASK.shape[0] or Ny!=niiMASK.shape[1] or Nz!=niiMASK.shape[2] or abs(Px-niiMASK_hdr['pixdim'][1])>1e-3 or abs(Py-niiMASK_hdr['pixdim'][2])>1e-3 or abs(Pz-niiMASK_hdr['pixdim'][3])>1e-3 ) : logger.warning( 'Dataset does not have the same geometry as the tractogram' ) niiMASK_img = np.ascontiguousarray( np.asanyarray( niiMASK.dataobj ).astype(np.float32) ) @@ -458,7 +449,7 @@ cpdef run( filename_tractogram=None, path_out=None, filename_peaks=None, filenam niiPEAKS_hdr = _get_header( niiPEAKS ) logger.subinfo( f'{niiPEAKS.shape[0]} x {niiPEAKS.shape[1]} x {niiPEAKS.shape[2]} x {niiPEAKS.shape[3]}', indent_lvl=3, indent_char='-' ) logger.subinfo( f'{niiPEAKS_hdr["pixdim"][1]:.4f} x {niiPEAKS_hdr["pixdim"][2]:.4f} x {niiPEAKS_hdr["pixdim"][3]:.4f}', indent_lvl=3, indent_char='-' ) - + logger.subinfo( f'ignoring peaks < {vf_THR:.2f} * MaxPeak', indent_lvl=3, indent_char='-' ) logger.subinfo( f'{"" if peaks_use_affine else "not "}using affine matrix', indent_lvl=3, indent_char='-' ) logger.subinfo( f'flipping axes : [ x={flip_peaks[0]}, y={flip_peaks[1]}, z={flip_peaks[2]} ]', indent_lvl=3, indent_char='-' ) @@ -484,7 +475,7 @@ cpdef run( filename_tractogram=None, path_out=None, filename_peaks=None, filenam Np = 0 ptrPEAKS = NULL ptrPeaksAffine = NULL - + # ISO map for isotropic compartment cdef float* ptrISO cdef float [:, :, ::1] niiISO_img @@ -548,14 +539,14 @@ cpdef run( filename_tractogram=None, path_out=None, filename_peaks=None, filenam # free memory free(ptrTDI) - + # NOTE: this is to ensure flushing all the output from the cpp code print(end='', flush=True) time.sleep(0.5) # Concatenate files together log_list = [] - ret_subinfo = logger.subinfo( f'Saving data structure in {path_out} ', indent_char='*', indent_lvl=1, with_progress=verbose>2 ) + ret_subinfo = logger.subinfo( f'Saving data structure in "{path_out}" ', indent_char='*', indent_lvl=1, with_progress=verbose>2 ) cdef int discarded = 0 with ProgressBar(disable=verbose<3, hide_on_exit=True, subinfo=ret_subinfo, log_list=log_list): for j in range(n_threads-1): @@ -636,13 +627,13 @@ cpdef run( filename_tractogram=None, path_out=None, filename_peaks=None, filenam with ProgressBar(disable=verbose<3, hide_on_exit=True, subinfo=ret_subinfo, log_list=log_list): v = np.fromfile( join(path_out, 'dictionary_IC_v.dict'), dtype=np.uint32 ) l = np.fromfile( join(path_out, 'dictionary_IC_len.dict'), dtype=np.float32 ) - + niiTDI_mem = compute_tdi( v, l, Nx, Ny, Nz, verbose=0 ) niiTDI_img_save = np.reshape( niiTDI_mem, (Nx,Ny,Nz), order='F' ) niiTDI = nibabel.Nifti1Image( niiTDI_img_save, TDI_affine ) niiTDI_hdr = _get_header( niiTDI ) - niiTDI_hdr['descrip'] = f'Created with COMMIT {get_distribution("dmri-commit").version}' + niiTDI_hdr['descrip'] = f'Created with COMMIT {metadata.version("dmri-commit")}' nibabel.save( niiTDI, join(path_out,'dictionary_tdi.nii.gz') ) if filename_mask is not None : diff --git a/setup.py b/setup.py index 3804ce6..587d86d 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,5 @@ import os import sys - from setuptools import Extension, 
setup from setuptools.command.build_ext import build_ext @@ -26,7 +25,7 @@ def get_extensions(): models = Extension(name=f'{package_name}.models', sources=[f'{package_name}/models.pyx'], extra_compile_args=[] if sys.platform == 'win32' else ['-w', '-std=c++11'], - language='c++') + language='c++') # NOTE: Windows requires the pthread-win32 static library to compile the operator extension # The library can be downloaded from https://github.com/GerHobbelt/pthread-win32 # The PTHREAD_WIN_INCLUDE and PTHREAD_WIN_LIB environment variables must be set to the include and lib directories @@ -43,7 +42,7 @@ def get_extensions(): library_dirs=[pthread_win_lib] if sys.platform == 'win32' else [], extra_compile_args=['/fp:fast', '/DHAVE_STRUCT_TIMESPEC'] if sys.platform == 'win32' else ['-w', '-O3', '-Ofast'], language='c') - + return [trk2dictionary, core, proximals, operator, models] class CustomBuildExtCommand(build_ext):
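A recurring change across this diff is the migration away from the deprecated pkg_resources API for version lookups. A minimal sketch of the adopted pattern, mirroring commit/__init__.py above (importlib.metadata is part of the standard library from Python 3.8 onward):

    from importlib import metadata
    try:
        # resolve the version of the installed distribution
        __version__ = metadata.version('dmri-commit')
    except metadata.PackageNotFoundError:
        # package metadata not available, e.g. running from a source checkout
        __version__ = 'not installed'

The same metadata.version('dmri-commit') call replaces pkg_resources.get_distribution('dmri-commit').version in core.pyx and trk2dictionary.pyx, removing the implicit runtime dependency on setuptools.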