@@ -1012,10 +1012,12 @@ def pca(data=None, dim=-1, var_cutoff=0.95, stride=1, mean=None, skip=0, chunksi
         warnings.warn("provided mean ignored", DeprecationWarning)

     res = PCA(dim=dim, var_cutoff=var_cutoff, mean=None, skip=skip, stride=stride)
+    from pyemma.util.reflection import get_default_args
+    cs = _check_old_chunksize_arg(chunksize, get_default_args(pca)['chunksize'], **kwargs)
     if data is not None:
-        from pyemma.util.reflection import get_default_args
-        cs = _check_old_chunksize_arg(chunksize, get_default_args(pca)['chunksize'], **kwargs)
         res.estimate(data, chunksize=cs)
+    else:
+        res.chunksize = cs
     return res


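The hunk above moves the chunksize resolution out of the `if data is not None:` branch, so an estimator created without data still records the requested chunk size. A minimal usage sketch of that deferred-estimation pattern, assuming a synthetic NumPy array as input:

import numpy as np
import pyemma.coordinates as coor

X = np.random.randn(1000, 10)   # synthetic data, for illustration only

# Without data, pca() returns an unestimated PCA with its chunksize already set ...
pca_est = coor.pca(dim=2, chunksize=500)

# ... so a later estimate() call uses the same chunking.
pca_est.estimate(X)
Y = pca_est.get_output()        # project the data after estimation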
@@ -1256,6 +1258,8 @@ def tica(data=None, lag=10, dim=-1, var_cutoff=0.95, kinetic_map=True, commute_m
                weights=weights, reversible=reversible, ncov_max=ncov_max)
     if data is not None:
         res.estimate(data, chunksize=cs)
+    else:
+        res.chunksize = cs
     return res


@@ -1267,14 +1271,13 @@ def vamp(data=None, lag=10, dim=None, scaling=None, right=True, ncov_max=float('
     ----------
     lag : int
         lag time
-    dim : float or int
+    dim : float or int, default=None
         Number of dimensions to keep:

-        * if dim is not set all available ranks are kept:
+        * if dim is not set (None) all available ranks are kept:
          `n_components == min(n_samples, n_features)`
        * if dim is an integer >= 1, this number specifies the number
-         of dimensions to keep. By default this will use the kinetic
-         variance.
+         of dimensions to keep.
        * if dim is a float with ``0 < dim < 1``, select the number
          of dimensions such that the amount of kinetic variance
          that needs to be explained is greater than the percentage
@@ -1406,6 +1409,8 @@ def vamp(data=None, lag=10, dim=None, scaling=None, right=True, ncov_max=float('
     res = VAMP(lag, dim=dim, scaling=scaling, right=right, skip=skip, ncov_max=ncov_max)
     if data is not None:
         res.estimate(data, stride=stride, chunksize=chunksize)
+    else:
+        res.chunksize = chunksize
     return res


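The docstring change above pins down the three accepted forms of `dim` for vamp(), and the code change lets a data-free call keep its chunksize. A brief sketch of both behaviours, assuming synthetic data:

import numpy as np
import pyemma.coordinates as coor

X = np.random.randn(5000, 20)          # synthetic data, for illustration only

v_all = coor.vamp(X, lag=10)           # dim=None: keep all available ranks
v_int = coor.vamp(X, lag=10, dim=5)    # integer >= 1: keep exactly 5 dimensions
v_var = coor.vamp(X, lag=10, dim=0.9)  # float in (0, 1): keep enough dimensions to
                                       # explain 90% of the kinetic variance

# Without data, the unestimated VAMP comes back with its chunksize already set.
v_lazy = coor.vamp(lag=10, dim=5, chunksize=1000)
v_lazy.estimate(X)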
@@ -1502,6 +1507,8 @@ def covariance_lagged(data=None, c00=True, c0t=True, ctt=False, remove_constant_
                             weights=weights, stride=stride, skip=skip, ncov_max=ncov_max)
     if data is not None:
         lc.estimate(data, chunksize=chunksize)
+    else:
+        lc.chunksize = chunksize
     return lc


@@ -1552,10 +1559,12 @@ def cluster_mini_batch_kmeans(data=None, k=100, max_iter=10, batch_size=0.2, met
     from pyemma.coordinates.clustering.kmeans import MiniBatchKmeansClustering
     res = MiniBatchKmeansClustering(n_clusters=k, max_iter=max_iter, metric=metric, init_strategy=init_strategy,
                                     batch_size=batch_size, n_jobs=n_jobs, skip=skip, clustercenters=clustercenters)
+    from pyemma.util.reflection import get_default_args
+    cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_mini_batch_kmeans)['chunksize'], **kwargs)
     if data is not None:
-        from pyemma.util.reflection import get_default_args
-        cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_mini_batch_kmeans)['chunksize'], **kwargs)
         res.estimate(data, chunksize=cs)
+    else:
+        res.chunksize = chunksize
     return res


@@ -1687,10 +1696,12 @@ def cluster_kmeans(data=None, k=None, max_iter=10, tolerance=1e-5, stride=1,
     res = KmeansClustering(n_clusters=k, max_iter=max_iter, metric=metric, tolerance=tolerance,
                            init_strategy=init_strategy, fixed_seed=fixed_seed, n_jobs=n_jobs, skip=skip,
                            keep_data=keep_data, clustercenters=clustercenters, stride=stride)
+    from pyemma.util.reflection import get_default_args
+    cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_kmeans)['chunksize'], **kwargs)
     if data is not None:
-        from pyemma.util.reflection import get_default_args
-        cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_kmeans)['chunksize'], **kwargs)
         res.estimate(data, chunksize=cs)
+    else:
+        res.chunksize = cs
     return res


@@ -1764,10 +1775,12 @@ def cluster_uniform_time(data=None, k=None, stride=1, metric='euclidean',
     """
     from pyemma.coordinates.clustering.uniform_time import UniformTimeClustering
     res = UniformTimeClustering(k, metric=metric, n_jobs=n_jobs, skip=skip, stride=stride)
+    from pyemma.util.reflection import get_default_args
+    cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_uniform_time)['chunksize'], **kwargs)
     if data is not None:
-        from pyemma.util.reflection import get_default_args
-        cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_uniform_time)['chunksize'], **kwargs)
         res.estimate(data, chunksize=cs)
+    else:
+        res.chunksize = cs
     return res


@@ -1863,10 +1876,12 @@ def cluster_regspace(data=None, dmin=-1, max_centers=1000, stride=1, metric='euc
     from pyemma.coordinates.clustering.regspace import RegularSpaceClustering as _RegularSpaceClustering
     res = _RegularSpaceClustering(dmin, max_centers=max_centers, metric=metric,
                                   n_jobs=n_jobs, stride=stride, skip=skip)
+    from pyemma.util.reflection import get_default_args
+    cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_regspace)['chunksize'], **kwargs)
     if data is not None:
-        from pyemma.util.reflection import get_default_args
-        cs = _check_old_chunksize_arg(chunksize, get_default_args(cluster_regspace)['chunksize'], **kwargs)
         res.estimate(data, chunksize=cs)
+    else:
+        res.chunksize = cs
     return res


@@ -1952,11 +1967,13 @@ def assign_to_centers(data=None, centers=None, stride=1, return_dtrajs=True,
                         ' or NumPy array or a reader created by source function')
     from pyemma.coordinates.clustering.assign import AssignCenters
     res = AssignCenters(centers, metric=metric, n_jobs=n_jobs, skip=skip, stride=stride)
+    from pyemma.util.reflection import get_default_args
+    cs = _check_old_chunksize_arg(chunksize, get_default_args(assign_to_centers)['chunksize'], **kwargs)
     if data is not None:
-        from pyemma.util.reflection import get_default_args
-        cs = _check_old_chunksize_arg(chunksize, get_default_args(assign_to_centers)['chunksize'], **kwargs)
         res.estimate(data, chunksize=cs)
         if return_dtrajs:
             return res.dtrajs
+    else:
+        res.chunksize = cs

     return res
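For assign_to_centers() the deferred branch sits after the early `return res.dtrajs`, so the two call modes split as in this short sketch, using hypothetical random centers:

import numpy as np
import pyemma.coordinates as coor

X = np.random.randn(1000, 2)      # synthetic data, for illustration only
centers = np.random.randn(10, 2)  # hypothetical cluster centers

# With data (and return_dtrajs=True), the discrete trajectories are returned directly.
dtrajs = coor.assign_to_centers(X, centers=centers)

# Without data, the AssignCenters estimator is returned with its chunksize set
# and can be estimated later.
assigner = coor.assign_to_centers(centers=centers, chunksize=1000)
assigner.estimate(X)
dtrajs = assigner.dtrajs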