From 20a04eca5c2ddf53201958848df1f7227557fcbb Mon Sep 17 00:00:00 2001 From: HajkD Date: Thu, 5 Dec 2024 13:32:58 +0000 Subject: [PATCH] add new citing papers --- README.md | 39 ++- docs/404.html | 14 +- docs/LICENSE-text.html | 14 +- docs/articles/Distances.html | 19 +- docs/articles/Information_Theory.html | 59 ++-- docs/articles/Introduction.html | 304 +++++++++--------- docs/articles/Many_Distances.html | 49 ++- docs/articles/index.html | 14 +- docs/authors.html | 16 +- docs/index.html | 155 +++++---- docs/news/index.html | 32 +- docs/paper.html | 178 ++++------ docs/pkgdown.yml | 5 +- docs/reference/CE.html | 26 +- docs/reference/H.html | 24 +- docs/reference/JE.html | 24 +- docs/reference/JSD.html | 28 +- docs/reference/KL.html | 34 +- docs/reference/MI.html | 28 +- docs/reference/additive_symm_chi_sq.html | 22 +- docs/reference/avg.html | 22 +- docs/reference/bhattacharyya.html | 30 +- docs/reference/binned.kernel.est.html | 34 +- docs/reference/canberra.html | 22 +- docs/reference/chebyshev.html | 22 +- docs/reference/clark_sq.html | 22 +- docs/reference/cosine_dist.html | 22 +- docs/reference/czekanowski.html | 22 +- docs/reference/dice_dist.html | 22 +- docs/reference/dist.diversity.html | 24 +- docs/reference/dist_many_many.html | 38 +-- docs/reference/dist_one_many.html | 38 +-- docs/reference/dist_one_one.html | 38 +-- docs/reference/distance.html | 50 ++- docs/reference/divergence_sq.html | 22 +- docs/reference/estimate.probability.html | 31 +- docs/reference/euclidean.html | 22 +- docs/reference/fidelity.html | 22 +- docs/reference/gJSD.html | 28 +- docs/reference/getDistMethods.html | 18 +- docs/reference/gower.html | 22 +- docs/reference/harmonic_mean_dist.html | 22 +- docs/reference/hellinger.html | 22 +- docs/reference/index.html | 16 +- docs/reference/inner_product.html | 22 +- docs/reference/intersection_dist.html | 22 +- docs/reference/jaccard.html | 22 +- docs/reference/jeffreys.html | 30 +- docs/reference/jensen_difference.html | 24 +- docs/reference/jensen_shannon.html | 24 +- docs/reference/k_divergence.html | 24 +- docs/reference/kulczynski_d.html | 28 +- docs/reference/kullback_leibler_distance.html | 30 +- docs/reference/kumar_hassebrook.html | 22 +- docs/reference/kumar_johnson.html | 28 +- docs/reference/lin.cor.html | 28 +- docs/reference/lorentzian.html | 24 +- docs/reference/manhattan.html | 22 +- docs/reference/matusita.html | 22 +- docs/reference/minkowski.html | 24 +- docs/reference/motyka.html | 22 +- docs/reference/neyman_chi_sq.html | 28 +- docs/reference/pearson_chi_sq.html | 28 +- docs/reference/prob_symm_chi_sq.html | 22 +- docs/reference/ruzicka.html | 22 +- docs/reference/soergel.html | 22 +- docs/reference/sorensen.html | 22 +- docs/reference/squared_chi_sq.html | 22 +- docs/reference/squared_chord.html | 22 +- docs/reference/squared_euclidean.html | 22 +- docs/reference/taneja.html | 30 +- docs/reference/tanimoto.html | 22 +- docs/reference/topsoe.html | 24 +- docs/reference/wave_hedges.html | 22 +- docs/sitemap.xml | 292 +++++------------ 75 files changed, 1337 insertions(+), 1396 deletions(-) diff --git a/README.md b/README.md index ac023f9..99cbc5d 100644 --- a/README.md +++ b/README.md @@ -175,6 +175,8 @@ The current status of the package as well as a detailed history of the functiona ### Studies that successfully applied the `philentropy` package +> - __A transcriptomic hourglass in brown algae__ JS Lotharukpong, M Zheng, R Luthringer et al. 
- __Nature__, 2024 +> > - __Annelid functional genomics reveal the origins of bilaterian life cycles__ FM Martín-Zamora, Y Liang, K Guynes et al.- __Nature__, 2023 > > - __An atlas of gene regulatory elements in adult mouse cerebrum__ YE Li, S Preissl, X Hou, Z Zhang, K Zhang et al.- __Nature__, 2021 @@ -191,7 +193,7 @@ YE Li, S Preissl, M Miller, ND Johnson, Z Wang et al. - __Science__, 2023 > - __Extinction at the end-Cretaceous and the origin of modern Neotropical rainforests__ MR Carvalho, C Jaramillo et al. - __Science__, 2021 > > - __sciCSR infers B cell state transition and predicts class-switch recombination dynamics using single-cell transcriptomic data__ -JCF Ng, G Montamat Garcia, AT Stewart et al. - __Nature Methods__, 2023 +JCF Ng, G Montamat Garcia, AT Stewart et al. - __Nature Methods__, 2024 > > - __HERMES: a molecular-formula-oriented method to target the metabolome__ R Giné, J Capellades, JM Badia et al. - __Nature Methods__, 2021 @@ -209,6 +211,8 @@ R Giné, J Capellades, JM Badia et al. - __Nature Methods__, 2021 > - __A full-body transcription factor expression atlas with completely resolved cell identities in C. elegans__ > Y Li, S Chen, W Liu, D Zhao, Y Gao, S Hu, H Liu, Y Li… - __Nature Communications__, 2024 > +> - __Comprehensive mapping and modelling of the rice regulome landscape unveils the regulatory architecture underlying complex traits__ T Zhu, C Xia, R Yu, X Zhou, X Xu, L Wang et al. - __Nature Communications__, 2024 +> > - __Transcriptional vulnerabilities of striatal neurons in human and rodent models of Huntington's disease__ A Matsushima, SS Pineda, JR Crittenden et al. - __Nature Communications__, 2023 > > - __Resolving the structure of phage–bacteria interactions in the context of natural diversity__ KM Kauffman, WK Chang, JM Brown et al. - __Nature Communications__, 2022 @@ -233,15 +237,24 @@ M Anglada-Tort, PMC Harrison, H Lee, N Jacoby - __Current Biology__, 2023 > > - __Detecting and diagnosing prior and likelihood sensitivity with power-scaling__ > N Kallioinen, T Paananen, PC Bürkner, A Vehtari - __Statistics and Computing__, 2024 +> +> - __SLAPSHOT reveals rapid dynamics of extracellularly exposed proteome in response to calcium-activated plasma membrane phospholipid scrambling__ ST Tuomivaara, CF Teo, YN Jan, AP Wiita et al. - __Communications Biology__, 2024 +> > - __TAS-Seq is a robust and sensitive amplification method for bead-based scRNA-seq__ S Shichino, S Ueha, S Hashimoto, T Ogawa et al. - __Communications biology__, 2022 > -> - Mapping hormone-regulated cell-cell interaction networks in the human breast at single-cell resolution +> - __Mapping hormone-regulated cell-cell interaction networks in the human breast at single-cell resolution__ LM Murrow, RJ Weber, JA Caruso et al. - __Cell Systems__, 2022 > +> - __Gene module reconstruction identifies cellular differentiation processes and the regulatory logic of specialized secretion in zebrafish__ Y Wang, J Liu, LY Du, JL Wyss, JA Farrell, AF Schier - __Developmental Cell__, 2024 +> +> - __The temporal progression of lung immune remodeling during breast cancer metastasis__ CS McGinnis, Z Miao, D Superville, W Yao, et al. - __Cancer Cell__, 2024 +> > - __Large-scale chromatin reorganization reactivates placenta-specific genes that drive cellular aging__ Z Liu, Q Ji, J Ren, P Yan, Z Wu, S Wang, L Sun, Z Wang et al. 
- __Developmental Cell__, 2022 > > - __Direct epitranscriptomic regulation of mammalian translation initiation through N4-acetylcytidine__ D Arango, D Sturgill, R Yang, T Kanai, P Bauer et al. - __Molecular Cell__, 2022 > +> - __Spotless, a reproducible pipeline for benchmarking cell type deconvolution in spatial transcriptomics__ C Sang-Aram, R Browaeys, R Seurinck, Y Saeys - __eLife__, 2024 +> > - __Loss of adaptive capacity in asthmatic patients revealed by biomarker fluctuation dynamics after rhinovirus challenge__ A Sinha et al. - __eLife__, 2019 > > - __Sex and hatching order modulate the association between MHC‐II diversity and fitness in early‐life stages of a wild seabird__ @@ -253,6 +266,8 @@ O Sarala, T Pyhäjärvi, MJ Sillanpää - __Bioinformatics__, 2023 > - __Cellsig plug-in enhances CIBERSORTx signature selection for multi-dataset transcriptomes with sparse multilevel modelling__ MA Al Kamran Khan, J Wu, S Yuhan et al. - __Bioinformatics__, 2023 > +> - __Neutrality in plant–herbivore interactions__ VS Pan, WC Wetzel - __Proceedings of the Royal Society B__, 2024 +> > - __How the Choice of Distance Measure Influences the Detection of Prior-Data Conflict__ K Lek, R Van De Schoot - __Entropy__, 2019 > @@ -266,6 +281,26 @@ EF Davis-Marcisak, TD Sherman et al. - __Cancer research__, 2019 > - __Tumor-associated neutrophils upregulate PANoptosis to foster an immunosuppressive microenvironment of non-small cell lung cancer__ > Q Hu, R Wang, J Zhang, Q Xue, B Ding - __Cancer Immunology, Immunotherapy__, 2023 > +> - __Spatial and Temporal Relationships Between Atrophy and Hypometabolism in Behavioral-Variant Frontotemporal Dementia__ J Stocks, E Gibson, K Popuri, MF Beg et al. - __Alzheimer Disease & Associated Disorders__, 2024 +> +> - __Unveiling Dynamic Changes and Regulatory Mechanisms of T Cell Subsets in Sepsis Pathogenesis__ C Jiang, J Chen, T Sun, J Xu, H Zhu, J Chen - __ImmunoTargets and Therapy__, 2024 +> +> - __Integrated analysis reveals NLRC4 as a potential biomarker in sepsis pathogenesis__ C Jiang, J Chen, J Xu, C Chen, H Zhu, Y Xu, H Zhao et al. - __Genes & Immunity__, 2024 +> +> - __Temporal composition of the cervicovaginal microbiome associates with hrHPV infection outcomes in a longitudinal study__ MA Molina, WPJ Leenders, MA Huynen, WJG Melchers et al. - __BMC Infectious Diseases__, 2024 +> +> - __PlantFUNCO: Integrative functional genomics database reveals clues into duplicates divergence evolution__ V Roces, S Guerrero, A Álvarez, J Pascual, M Meijón - __Molecular Biology and Evolution__, 2024 +> +> - __Cross-tissue human fibroblast atlas reveals myofibroblast subtypes with distinct roles in immune modulation__ Y Gao, J Li, W Cheng, T Diao, H Liu, Y Bo, C Liu et al. - __Cancer Cell__, 2024 +> +> - __Enhancing Immunotherapy Response Prediction in Metastatic Lung Adenocarcinoma: Leveraging Shallow and Deep Learning with CT-Based Radiomics__ C Masson-Grehaigne, M Lafon, J Palussière, L Leroy et al. - __Cancers__, 2024 +> +> - __A general framework for implementing distances for categorical variables__ M van de Velden, AI D'Enza, A Markos, C Cavicchia - __Pattern Recognition__, 2024 +> +> - __Early transcriptional similarities between two distinct neural lineages during ascidian embryogenesis__ RR Copley, J Buttin, MJ Arguel, G Williaume et al. 
- __Developmental Biology__, 2024 +> +> - __The power of visualizing distributional differences: formal graphical n-sample tests__ K Konstantinou, T Mrkvička, M Myllymäki - __Computational Statistics__, 2024 +> > - __cisDynet: An integrated platform for modeling gene‐regulatory dynamics and networks__ > T Zhu, X Zhou, Y You, L Wang, Z He, D Chen - __iMeta__, 2023 > diff --git a/docs/404.html b/docs/404.html index f543697..88b9635 100644 --- a/docs/404.html +++ b/docs/404.html @@ -18,7 +18,7 @@ - +
- +
@@ -109,16 +109,16 @@

Page not found (404)

-

Site built with pkgdown 2.0.9.

+

Site built with pkgdown 2.1.0.

- - + + diff --git a/docs/LICENSE-text.html b/docs/LICENSE-text.html index c74bde4..e3e08ee 100644 --- a/docs/LICENSE-text.html +++ b/docs/LICENSE-text.html @@ -3,7 +3,7 @@ - +
- +
@@ -421,15 +421,15 @@

License

-

Site built with pkgdown 2.0.9.

+

Site built with pkgdown 2.1.0.

- - + + diff --git a/docs/articles/Distances.html b/docs/articles/Distances.html index a37dc29..a9625ee 100644 --- a/docs/articles/Distances.html +++ b/docs/articles/Distances.html @@ -12,14 +12,13 @@ - - +
- +
@@ -297,9 +296,7 @@

Simple Example - -

+
@@ -312,16 +309,16 @@

Simple Example

-

Site built with pkgdown 2.0.9.

+

Site built with pkgdown 2.1.0.

- - + + diff --git a/docs/articles/Information_Theory.html b/docs/articles/Information_Theory.html index 0539e21..611b2c2 100644 --- a/docs/articles/Information_Theory.html +++ b/docs/articles/Information_Theory.html @@ -12,14 +12,13 @@ - - +
- +
@@ -120,8 +119,7 @@

Information Theory measures

Shannon’s Entropy H(X)

-

\(H(X) = -\sum\limits_{i=1}^n P(x_i) * log_b(P(x_i))\)

+

\(H(X) = -\sum\limits_{i=1}^n P(x_i) * log_b(P(x_i))\)

 # define probabilities P(X)
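 # a minimal sketch of the complete chunk, assuming the toy vector used throughout this vignette;
 # philentropy::H() measures in bit, i.e. unit = "log2", by default
 P <- 1:10 / sum(1:10)
 H(P)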
@@ -134,9 +132,7 @@ 

Shannon’s Entropy H(X)Shannon’s Joint-Entropy H(X,Y)

-

\(H(X,Y) = -\sum\limits_{i=1}^n\sum\limits_{j=1}^m P(x_i, y_j) * log_b(P(x_i, y_j))\)

+

\(H(X,Y) = -\sum\limits_{i=1}^n\sum\limits_{j=1}^m P(x_i, y_j) * log_b(P(x_i, y_j))\)

 # define the joint distribution P(X,Y)
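 # a sketch of the corresponding call, assuming a hypothetical toy joint distribution;
 # philentropy::JE() takes the joint probabilities P(X,Y) as a single numeric vector
 P_xy <- 1:100 / sum(1:100)
 JE(P_xy)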
@@ -149,9 +145,7 @@ 

Shannon’s Joint-Entropy H(X,Y)Shannon’s Conditional-Entropy H(X | Y)

-

\(H(Y|X) = \sum\limits_{i=1}^n\sum\limits_{j=1}^m P(x_i, y_j) * log_b( P(x_i) / P(x_i, y_j) )\)

+

\(H(Y|X) = \sum\limits_{i=1}^n\sum\limits_{j=1}^m P(x_i, y_j) * log_b( P(x_i) / P(x_i, y_j) )\)

 # define the distribution P(X)
@@ -167,9 +161,7 @@ 

Shannon’s Conditional-Entropy H(X |

Mutual Information I(X,Y)

-

\(MI(X,Y) = \sum\limits_{i=1}^n\sum\limits_{j=1}^m P(x_i, y_j) * log_b( P(x_i, y_j) / ( P(x_i) * P(y_j) ) )\)

+

\(MI(X,Y) = \sum\limits_{i=1}^n\sum\limits_{j=1}^m P(x_i, y_j) * log_b( P(x_i, y_j) / ( P(x_i) * P(y_j) ) )\)

 # define the distribution P(X)
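 # a sketch of the full chunk under hypothetical marginals and joint distribution;
 # philentropy::MI() expects the marginals P(X), P(Y) and the joint P(X,Y)
 Px  <- 1:10 / sum(1:10)
 Py  <- 20:29 / sum(20:29)
 Pxy <- 1:10 / sum(1:10)
 MI(Px, Py, Pxy)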
@@ -187,8 +179,7 @@ 

Mutual Information I(X,Y)Kullback-Leibler Divergence

-

\(KL(P || Q) = \sum\limits_{i=1}^n P(p_i) * -log_2(P(p_i) / P(q_i)) = H(P, Q) - H(P)\)

+

KL(P||Q)=i=1nP(pi)*log2(P(pi)/P(qi))=H(P,Q)H(P)KL(P || Q) = \sum\limits_{i=1}^n P(p_i) * log_2(P(p_i) / P(q_i)) = H(P, Q) - H(P)

where H(P, Q) denotes the joint entropy of the probability distributions P and Q and @@ -237,8 +228,7 @@
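A minimal usage sketch, assuming the toy probability vectors used throughout this vignette; philentropy::KL() expects the two distributions stacked row-wise:

 P <- 1:10 / sum(1:10)
 Q <- 20:29 / sum(20:29)
 KL(rbind(P, Q), unit = "log2")  # Kullback-Leibler divergence KL(P || Q) in bit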

Jensen-Shannon DivergenceThe Jensen-Shannon Divergence JSD(P || Q) between two probability distributions P and Q is defined as:

-

\(JSD(P || Q) = 0.5 * (KL(P || R) + KL(Q || R))\)

+

\(JSD(P || Q) = 0.5 * (KL(P || R) + KL(Q || R))\)

where R = 0.5 * (P + Q) denotes the mid-point of the probability vectors P and Q, and @@ -304,16 +294,21 @@
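A minimal usage sketch with the same toy vectors; philentropy::JSD() also takes the two distributions stacked row-wise:

 P <- 1:10 / sum(1:10)
 Q <- 20:29 / sum(20:29)
 JSD(rbind(P, Q))  # Jensen-Shannon divergence JSD(P || Q)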

Properties of the Jen

Generalized Jensen-Shannon Divergence

-

The generalized Jensen-Shannon Divergence \(gJSD_{\pi_1,...,\pi_n}(P_1, ..., P_n)\) enables distance comparisons between multiple probability distributions \(P_1,...,P_n\):

The generalized Jensen-Shannon Divergence \(gJSD_{\pi_1,...,\pi_n}(P_1, ..., P_n)\) enables distance comparisons between multiple probability distributions \(P_1,...,P_n\):

+P1,...,PnP_1,...,P_n:

-

\(gJSD_{\pi_1,...,\pi_n}(P_1, ..., P_n) = H(\sum_{i = 1}^n \pi_i*P_i) - \sum_{i = 1}^n \pi_i*H(P_i)\)

+

\(gJSD_{\pi_1,...,\pi_n}(P_1, ..., P_n) = H(\sum_{i = 1}^n \pi_i*P_i) - \sum_{i = 1}^n \pi_i*H(P_i)\)

-

where \(\pi_1,...,\pi_n\) denote the weights selected for the probability vectors \(P_1,...,P_n\) and \(H(P_i)\) denotes the Shannon Entropy of probability vector \(P_i\).

+

where \(\pi_1,...,\pi_n\) denote the weights selected for the probability vectors \(P_1,...,P_n\) and \(H(P_i)\) denotes the Shannon Entropy of probability vector \(P_i\).

 # generate example probability matrix for comparing three probability functions
 Prob <- rbind(1:10/sum(1:10), 20:29/sum(20:29), 30:39/sum(30:39))
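 # the corresponding call; gJSD() falls back to equal weights when no weights are supplied
 gJSD(Prob)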
@@ -358,9 +353,7 @@ 

Generalized Jensen-Shannon Diverg +

@@ -373,16 +366,16 @@

Generalized Jensen-Shannon Diverg

-

Site built with pkgdown 2.0.9.

+

Site built with pkgdown 2.1.0.

- - + + diff --git a/docs/articles/Introduction.html b/docs/articles/Introduction.html index b7fbdfe..b080a60 100644 --- a/docs/articles/Introduction.html +++ b/docs/articles/Introduction.html @@ -12,14 +12,13 @@ - - +
- +
@@ -167,45 +166,55 @@

Distance and Similarity Measures

\(L_p\) Minkowski Family

    -
  • Euclidean : \(d = \sqrt{\sum_{i = 1}^N | P_i - Q_i |^2}\)
  • Manhattan : \(d = \sum_{i = 1}^N | P_i - Q_i |\)
  • Minkowski : \(d = ( \sum_{i = 1}^N | P_i - Q_i |^p)^{1/p}\)
  • Chebyshev : \(d = max | P_i - Q_i |\)

\(L_1\) Family

    -
  • Sorensen : \(d = \frac{\sum_{i = 1}^N | P_i - Q_i |}{\sum_{i = 1}^N (P_i + Q_i)}\)
  • Gower : \(d = \frac{1}{N} \cdot \sum_{i = 1}^N | P_i - Q_i |\), where \(N\) is the total number of elements \(i\) in \(P_i\) and \(Q_i\)
  • Soergel : \(d = \frac{\sum_{i = 1}^N | P_i - Q_i |}{\sum_{i = 1}^N max(P_i , Q_i)}\)
  • Kulczynski d : \(d = \frac{\sum_{i = 1}^N | P_i - Q_i |}{\sum_{i = 1}^N min(P_i , Q_i)}\)
  • Canberra : \(d = \sum_{i = 1}^N \frac{| P_i - Q_i |}{P_i + Q_i}\)
  • Lorentzian : \(d = \sum_{i = 1}^N ln(1 + | P_i - Q_i |)\)
@@ -213,55 +222,53 @@

Intersection Family

    -
  • Intersection : \(s = \sum_{i = 1}^N min(P_i , Q_i)\)
  • Non-Intersection : \(d = 1 - \sum_{i = 1}^N min(P_i , Q_i)\)
  • Wave Hedges : \(d = \sum_{i = 1}^N \frac{| P_i - Q_i |}{max(P_i , Q_i)}\)
  • Czekanowski : \(d = \frac{\sum_{i = 1}^N | P_i - Q_i |}{\sum_{i = 1}^N | P_i + Q_i |}\)
  • Motyka : \(d = \frac{\sum_{i = 1}^N min(P_i , Q_i)}{\sum_{i = 1}^N (P_i + Q_i)}\)
  • Kulczynski s : \(d = \frac{\sum_{i = 1}^N min(P_i , Q_i)}{\sum_{i = 1}^N | P_i - Q_i |}\)
  • Tanimoto : \(d = \frac{\sum_{i = 1}^N (max(P_i , Q_i) - min(P_i , Q_i))}{\sum_{i = 1}^N max(P_i , Q_i)}\) ; equivalent to Soergel
  • Ruzicka : \(s = \frac{\sum_{i = 1}^N min(P_i , Q_i)}{\sum_{i = 1}^N max(P_i , Q_i)}\) ; equivalent to 1 - Tanimoto = 1 - Soergel

Inner Product Family

    -
  • Inner Product : \(s = \sum_{i = 1}^N P_i \cdot Q_i\)
  • Harmonic mean : \(s = 2 \cdot \sum_{i = 1}^N \frac{P_i \cdot Q_i}{P_i + Q_i}\)
  • Cosine : \(s = \frac{\sum_{i = 1}^N P_i \cdot Q_i}{\sqrt{\sum_{i = 1}^N P_i^2} \cdot \sqrt{\sum_{i = 1}^N Q_i^2}}\)
  • Kumar-Hassebrook (PCE) : \(s = \frac{\sum_{i = 1}^N (P_i \cdot Q_i)}{(\sum_{i = 1}^N P_i^2 + \sum_{i = 1}^N Q_i^2 - \sum_{i = 1}^N (P_i \cdot Q_i))}\)
  • Jaccard : \(d = 1 - \frac{\sum_{i = 1}^N P_i \cdot Q_i}{\sum_{i = 1}^N P_i^2 + \sum_{i = 1}^N Q_i^2 - \sum_{i = 1}^N P_i \cdot Q_i}\) ; equivalent to 1 - Kumar-Hassebrook
  • Dice : \(d = \frac{\sum_{i = 1}^N (P_i - Q_i)^2}{(\sum_{i = 1}^N P_i^2 + \sum_{i = 1}^N Q_i^2)}\)
@@ -269,51 +276,66 @@

Inner Product FamilySquared-chord Family

    -
  • Fidelity : \(s = \sum_{i = 1}^N \sqrt{P_i \cdot Q_i}\)
  • Bhattacharyya : \(d = - ln \sum_{i = 1}^N \sqrt{P_i \cdot Q_i}\)
  • Hellinger : \(d = 2 \cdot \sqrt{1 - \sum_{i = 1}^N \sqrt{P_i \cdot Q_i}}\)
  • Matusita : \(d = \sqrt{2 - 2 \cdot \sum_{i = 1}^N \sqrt{P_i \cdot Q_i}}\)
  • Squared-chord : \(d = \sum_{i = 1}^N ( \sqrt{P_i} - \sqrt{Q_i} )^2\)
-

Squared \(L_2\) family (\(X^2\) squared family)

Squared \(L_2\) family (\(X^2\) squared family)

    -
  • Squared Euclidean : \(d = \sum_{i = 1}^N ( P_i - Q_i )^2\)
  • Pearson \(X^2\) : \(d = \sum_{i = 1}^N ( \frac{(P_i - Q_i )^2}{Q_i} )\)
  • Neyman \(X^2\) : \(d = \sum_{i = 1}^N ( \frac{(P_i - Q_i )^2}{P_i} )\)
  • Squared \(X^2\) : \(d = \sum_{i = 1}^N ( \frac{(P_i - Q_i )^2}{(P_i + Q_i)} )\)
  • Probabilistic Symmetric \(X^2\) : \(d = 2 \cdot \sum_{i = 1}^N ( \frac{(P_i - Q_i )^2}{(P_i + Q_i)} )\)
  • Divergence \(X^2\) : \(d = 2 \cdot \sum_{i = 1}^N ( \frac{(P_i - Q_i )^2}{(P_i + Q_i)^2} )\)
  • Clark : \(d = \sqrt{\sum_{i = 1}^N ( \frac{| P_i - Q_i |}{P_i + Q_i} )^2}\)
  • Additive Symmetric \(X^2\) : \(d = \sum_{i = 1}^N ( \frac{((P_i - Q_i)^2 \cdot (P_i + Q_i))}{(P_i \cdot Q_i)} )\)
@@ -321,26 +343,23 @@

Squared

Shannon’s Entropy Family

    -
  • Kullback-Leibler : \(d = \sum_{i = 1}^N P_i \cdot log(\frac{P_i}{Q_i})\)
  • Jeffreys : \(d = \sum_{i = 1}^N (P_i - Q_i) \cdot log(\frac{P_i}{Q_i})\)
  • K divergence : \(d = \sum_{i = 1}^N P_i \cdot log(\frac{2 \cdot P_i}{P_i + Q_i})\)
  • Topsoe : \(d = \sum_{i = 1}^N ( P_i \cdot log(\frac{2 \cdot P_i}{P_i + Q_i}) ) + ( Q_i \cdot log(\frac{2 \cdot Q_i}{P_i + Q_i}) )\)
  • Jensen-Shannon : \(d = 0.5 \cdot ( \sum_{i = 1}^N P_i \cdot log(\frac{2 \cdot P_i}{P_i + Q_i}) + \sum_{i = 1}^N Q_i \cdot log(\frac{2 \cdot Q_i}{P_i + Q_i}))\)
  • Jensen difference : \(d = \sum_{i = 1}^N ( (\frac{P_i \cdot log(P_i) + Q_i \cdot log(Q_i)}{2}) - (\frac{P_i + Q_i}{2}) \cdot log(\frac{P_i + Q_i}{2}) )\)

@@ -348,21 +367,22 @@

Shannon’s Entropy FamilyCombinations

    -
  • Taneja : \(d = \sum_{i = 1}^N ( \frac{P_i + Q_i}{2}) \cdot log( \frac{P_i + Q_i}{( 2 \cdot \sqrt{P_i \cdot Q_i})} )\)
  • Kumar-Johnson : \(d = \sum_{i = 1}^N \frac{(P_i^2 - Q_i^2)^2}{2 \cdot (P_i \cdot Q_i)^{\frac{3}{2}}}\)
  • Avg(\(L_1\), \(L_n\)) : \(d = \frac{\sum_{i = 1}^N | P_i - Q_i| + max{ | P_i - Q_i |}}{2}\)
-

Note: \(d\) refers to distance measures, whereas \(s\) denotes similarity measures.

Note: \(d\) refers to distance measures, whereas \(s\) denotes similarity measures.
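Any of the measures above can be computed through the distance() function; a minimal sketch, assuming two toy probability vectors:

 library(philentropy)
 P <- 1:10 / sum(1:10)
 Q <- 20:29 / sum(20:29)
 distance(rbind(P, Q), method = "sorensen")  # any method name returned by getDistMethods()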

@@ -375,30 +395,26 @@

Information Theory
  • Shannon’s Entropy H(X) : \(H(X) = -\sum\limits_{i=1}^n P(x_i) \cdot log_b(P(x_i))\)
  • Shannon’s Joint-Entropy H(X,Y) : \(H(X,Y) = -\sum\limits_{i=1}^n\sum\limits_{j=1}^m P(x_i, y_j) \cdot log_b(P(x_i, y_j))\)
  • Shannon’s Conditional-Entropy H(X | Y) : \(H(Y|X) = \sum\limits_{i=1}^n\sum\limits_{j=1}^m P(x_i, y_j) \cdot log_b( \frac{P(x_i)}{P(x_i, y_j)})\)
  • Mutual Information I(X,Y) : \(MI(X,Y) = \sum\limits_{i=1}^n\sum\limits_{j=1}^m P(x_i, y_j) \cdot log_b( \frac{P(x_i, y_j)}{( P(x_i) * P(y_j) )})\)
  • Kullback-Leibler Divergence : \(KL(P || Q) = \sum\limits_{i=1}^n P(p_i) \cdot log_2(\frac{P(p_i)}{P(q_i)}) = H(P, Q) - H(P)\)
  • Jensen-Shannon Divergence : \(JSD(P || Q) = 0.5 * (KL(P || R) + KL(Q || R))\)
  • Generalized Jensen-Shannon Divergence : \(gJSD_{\pi_1,...,\pi_n}(P_1, ..., P_n) = H(\sum_{i = 1}^n \pi_i \cdot P_i) - \sum_{i = 1}^n \pi_i \cdot H(P_i)\)
  • @@ -407,9 +423,7 @@

    Information Theory - -

    +
    @@ -422,16 +436,16 @@

    Information Theory

    -

    Site built with pkgdown 2.0.9.

    +

    Site built with pkgdown 2.1.0.

    - - + + diff --git a/docs/articles/Many_Distances.html b/docs/articles/Many_Distances.html index 68ab5b3..dc651e3 100644 --- a/docs/articles/Many_Distances.html +++ b/docs/articles/Many_Distances.html @@ -12,14 +12,13 @@ - - +
    - +
    @@ -176,11 +175,11 @@

## distance(rbind(P, Q), method = "euclidean", test.na = FALSE, mute.message = TRUE)
## euclidean(P, Q, FALSE)
## dist_one_one(P, Q, method = "euclidean", testNA = FALSE)
-##  min   lq     mean median     uq    max neval
-## 5166 5371  6431.26   5658 5883.5  75563   100
-## 7585 7913 12703.03   8036 8241.0 459200   100
-##  492  533   771.62    574  615.0  19352   100
-##  820  861  1093.88    943 1025.0  15375   100
+##  min   lq     mean median    uq    max neval
+## 6232 6683  8489.87   6970  7216 144853   100
+## 9266 9881 16504.96  10127 10496 627382   100
+##  574  615  1068.05    656   779  36531   100
+##  902 1025  1340.29   1107  1189  20746   100

    All of them return the same, single value. However, as you can see in the benchmark above, some are more flexible, and others are faster.
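For instance, a minimal sketch using two toy probability vectors:

 P <- 1:10 / sum(1:10)
 Q <- 20:29 / sum(20:29)
 dist_one_one(P, Q, method = "euclidean", testNA = FALSE)  # returns one numeric value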

    @@ -219,10 +218,10 @@

## as.matrix(dist(rbind(P, M), method = "euclidean"))[1, ][-1]
## distance(rbind(P, M), method = "euclidean", test.na = FALSE, mute.message = TRUE)[1, ][-1]
## dist_one_many(P, M, method = "euclidean", testNA = FALSE)
-##      min        lq       mean    median        uq      max neval
-##   65.313   71.4425   77.49861   74.3330   80.8315  152.028   100
-## 6430.850 6534.4570 6748.67462 6621.6845 6945.7895 8020.297   100
-##    7.749    8.8970    9.69158    9.2455    9.6350   30.381   100
+##      min        lq       mean    median        uq       max neval
+##   98.605   113.570  131.39516  124.1480  144.5045   235.914   100
+## 7580.367  8021.588 8546.28272 8299.1380 9249.8460 10280.217   100
+##   10.086    11.562   14.45168   13.8785   15.6620    56.867   100

    The dist_one_many() returns a vector of values. It is, in this case, much faster than distance(), and visibly faster than dist() while allowing for more possible @@ -264,12 +263,12 @@
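A usage sketch, where M is a hypothetical matrix whose rows are probability distributions:

 P <- 1:10 / sum(1:10)
 M <- t(replicate(100, sample(P)))  # 100 row-wise permutations of P
 dist_one_many(P, M, method = "euclidean", testNA = FALSE)  # numeric vector of length nrow(M)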

    dist_many_many(M1, M2, method = "euclidean", testNA = FALSE) )

    ## Unit: microseconds
    -##                                                          expr     min       lq
    -##                                            many_dists(M1, M2) 815.982 859.6265
    -##  dist_many_many(M1, M2, method = "euclidean", testNA = FALSE)  12.669  13.7965
    -##        mean  median       uq      max neval
    -##  1018.45107 900.114 992.6305 3761.668   100
    -##    15.36475  14.719  16.1745   32.062   100
+##                                                          expr     min        lq
+##                                            many_dists(M1, M2) 953.455 1017.5585
+##  dist_many_many(M1, M2, method = "euclidean", testNA = FALSE)  15.785   17.0765
+##        mean   median       uq      max neval
+##  1193.19717 1054.418 1135.003 4861.534   100
+##    20.65908   18.204   21.156   80.647   100

Both many_dists() and dist_many_many() return a matrix. The above benchmark concludes that dist_many_many() is about 30 times faster than our custom @@ -279,9 +278,7 @@
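A usage sketch under the same assumptions, reusing the toy vector P from above:

 M1 <- t(replicate(10, sample(P)))
 M2 <- t(replicate(10, sample(P)))
 dist_many_many(M1, M2, method = "euclidean", testNA = FALSE)  # returns a 10 x 10 matrix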

    +

    @@ -294,16 +291,16 @@

    -

    Site built with pkgdown 2.0.9.

    +

    Site built with pkgdown 2.1.0.

    - - + + diff --git a/docs/articles/index.html b/docs/articles/index.html index 2f170dd..ea67940 100644 --- a/docs/articles/index.html +++ b/docs/articles/index.html @@ -3,7 +3,7 @@ - +
    - +
    @@ -87,15 +87,15 @@

    All vignettes

    -

    Site built with pkgdown 2.0.9.

    +

    Site built with pkgdown 2.1.0.

    - - + + diff --git a/docs/authors.html b/docs/authors.html index 83be8d6..a33d02a 100644 --- a/docs/authors.html +++ b/docs/authors.html @@ -3,7 +3,7 @@ - +
    - +
    @@ -66,7 +66,7 @@

    Authors and Citation

    - +
    • Hajk-Georg Drost. Author, maintainer.

      @@ -107,15 +107,15 @@

      Citation

    -

    Site built with pkgdown 2.0.9.

    +

    Site built with pkgdown 2.1.0.

    - - + + diff --git a/docs/index.html b/docs/index.html index 3b24f29..a88e45e 100644 --- a/docs/index.html +++ b/docs/index.html @@ -19,7 +19,7 @@ - +
    - +
    @@ -132,21 +132,21 @@

    Exampleslibrary(philentropy) # retrieve available distance metrics philentropy::getDistMethods()

    -
     [1] "euclidean"         "manhattan"         "minkowski"        
    - [4] "chebyshev"         "sorensen"          "gower"            
    - [7] "soergel"           "kulczynski_d"      "canberra"         
    -[10] "lorentzian"        "intersection"      "non-intersection" 
    -[13] "wavehedges"        "czekanowski"       "motyka"           
    -[16] "kulczynski_s"      "tanimoto"          "ruzicka"          
    -[19] "inner_product"     "harmonic_mean"     "cosine"           
    -[22] "hassebrook"        "jaccard"           "dice"             
    -[25] "fidelity"          "bhattacharyya"     "hellinger"        
    +
     [1] "euclidean"         "manhattan"         "minkowski"
    + [4] "chebyshev"         "sorensen"          "gower"
    + [7] "soergel"           "kulczynski_d"      "canberra"
    +[10] "lorentzian"        "intersection"      "non-intersection"
    +[13] "wavehedges"        "czekanowski"       "motyka"
    +[16] "kulczynski_s"      "tanimoto"          "ruzicka"
    +[19] "inner_product"     "harmonic_mean"     "cosine"
    +[22] "hassebrook"        "jaccard"           "dice"
    +[25] "fidelity"          "bhattacharyya"     "hellinger"
     [28] "matusita"          "squared_chord"     "squared_euclidean"
    -[31] "pearson"           "neyman"            "squared_chi"      
    -[34] "prob_symm"         "divergence"        "clark"            
    -[37] "additive_symm"     "kullback-leibler"  "jeffreys"         
    -[40] "k_divergence"      "topsoe"            "jensen-shannon"   
    -[43] "jensen_difference" "taneja"            "kumar-johnson"    
    +[31] "pearson"           "neyman"            "squared_chi"
    +[34] "prob_symm"         "divergence"        "clark"
    +[37] "additive_symm"     "kullback-leibler"  "jeffreys"
    +[40] "k_divergence"      "topsoe"            "jensen-shannon"
    +[43] "jensen_difference" "taneja"            "kumar-johnson"
     [46] "avg"
     # define a probability density function P
    @@ -161,56 +161,56 @@ 

    Examples# probability density functions P and Q philentropy::distance(x, method = "jensen-shannon")

    jensen-shannon using unit 'log'.
    -jensen-shannon 
    +jensen-shannon
         0.02628933

    Alternatively, users can also retrieve values from all available distance/similarity metrics using philentropy::dist.diversity():

     philentropy::dist.diversity(x, p = 2, unit = "log2")
    -
            euclidean         manhattan 
    -       0.12807130        0.35250464 
    -        minkowski         chebyshev 
    -       0.12807130        0.06345083 
    -         sorensen             gower 
    -       0.17625232        0.03525046 
    -          soergel      kulczynski_d 
    -       0.29968454        0.42792793 
    -         canberra        lorentzian 
    -       2.09927095        0.49712136 
    -     intersection  non-intersection 
    -       0.82374768        0.17625232 
    -       wavehedges       czekanowski 
    -       3.16657887        0.17625232 
    -           motyka      kulczynski_s 
    -       0.58812616        2.33684211 
    -         tanimoto           ruzicka 
    -       0.29968454        0.70031546 
    -    inner_product     harmonic_mean 
    -       0.10612245        0.94948528 
    -           cosine        hassebrook 
    -       0.93427641        0.86613103 
    -          jaccard              dice 
    -       0.13386897        0.07173611 
    -         fidelity     bhattacharyya 
    -       0.97312397        0.03930448 
    -        hellinger          matusita 
    -       0.32787819        0.23184489 
    -    squared_chord squared_euclidean 
    -       0.05375205        0.01640226 
    -          pearson            neyman 
    -       0.16814418        0.36742465 
    -      squared_chi         prob_symm 
    -       0.10102943        0.20205886 
    -       divergence             clark 
    -       1.49843905        0.86557468 
    -    additive_symm  kullback-leibler 
    -       0.53556883        0.13926288 
    -         jeffreys      k_divergence 
    -       0.31761069        0.04216273 
    -           topsoe    jensen-shannon 
    -       0.07585498        0.03792749 
    -jensen_difference            taneja 
    -       0.03792749        0.04147518 
    -    kumar-johnson               avg 
    +
            euclidean         manhattan
    +       0.12807130        0.35250464
    +        minkowski         chebyshev
    +       0.12807130        0.06345083
    +         sorensen             gower
    +       0.17625232        0.03525046
    +          soergel      kulczynski_d
    +       0.29968454        0.42792793
    +         canberra        lorentzian
    +       2.09927095        0.49712136
    +     intersection  non-intersection
    +       0.82374768        0.17625232
    +       wavehedges       czekanowski
    +       3.16657887        0.17625232
    +           motyka      kulczynski_s
    +       0.58812616        2.33684211
    +         tanimoto           ruzicka
    +       0.29968454        0.70031546
    +    inner_product     harmonic_mean
    +       0.10612245        0.94948528
    +           cosine        hassebrook
    +       0.93427641        0.86613103
    +          jaccard              dice
    +       0.13386897        0.07173611
    +         fidelity     bhattacharyya
    +       0.97312397        0.03930448
    +        hellinger          matusita
    +       0.32787819        0.23184489
    +    squared_chord squared_euclidean
    +       0.05375205        0.01640226
    +          pearson            neyman
    +       0.16814418        0.36742465
    +      squared_chi         prob_symm
    +       0.10102943        0.20205886
    +       divergence             clark
    +       1.49843905        0.86557468
    +    additive_symm  kullback-leibler
    +       0.53556883        0.13926288
    +         jeffreys      k_divergence
    +       0.31761069        0.04216273
    +           topsoe    jensen-shannon
    +       0.07585498        0.03792749
    +jensen_difference            taneja
    +       0.03792749        0.04147518
    +    kumar-johnson               avg
            0.62779644        0.20797774

    Install Developer Version @@ -269,6 +269,7 @@

    Studies that

      +
    • A transcriptomic hourglass in brown algae JS Lotharukpong, M Zheng, R Luthringer et al. - Nature, 2024

    • Annelid functional genomics reveal the origins of bilaterian life cycles FM Martín-Zamora, Y Liang, K Guynes et al.- Nature, 2023

    • An atlas of gene regulatory elements in adult mouse cerebrum YE Li, S Preissl, X Hou, Z Zhang, K Zhang et al.- Nature, 2021

  • Convergent somatic mutations in metabolism genes in chronic liver disease S Ng, F Rouhani, S Brunner, N Brzozowska et al. - Nature, 2021

    • @@ -276,13 +277,14 @@

      Studies that
    • High-content single-cell combinatorial indexing R Mulqueen et al. - Nature Biotechnology, 2021

    • A comparative atlas of single-cell chromatin accessibility in the human brain YE Li, S Preissl, M Miller, ND Johnson, Z Wang et al. - Science, 2023

    • Extinction at the end-Cretaceous and the origin of modern Neotropical rainforests MR Carvalho, C Jaramillo et al. - Science, 2021

    • -
    • sciCSR infers B cell state transition and predicts class-switch recombination dynamics using single-cell transcriptomic data JCF Ng, G Montamat Garcia, AT Stewart et al. - Nature Methods, 2023

    • +
    • sciCSR infers B cell state transition and predicts class-switch recombination dynamics using single-cell transcriptomic data JCF Ng, G Montamat Garcia, AT Stewart et al. - Nature Methods, 2024

    • HERMES: a molecular-formula-oriented method to target the metabolome R Giné, J Capellades, JM Badia et al. - Nature Methods, 2021

    • Epithelial zonation along the mouse and human small intestine defines five discrete metabolic domains RK Zwick, P Kasparek, B Palikuqi, et al. - Nature Cell Biology, 2024

    • The genetic architecture of temperature adaptation is shaped by population ancestry and not by selection regime KA Otte, V Nolte, F Mallard et al. - Genome Biology, 2021

    • The Tug1 lncRNA locus is essential for male fertility JP Lewandowski et al. - Genome Biology, 2020

    • Decoding the gene regulatory network of endosperm differentiation in maize Y Yuan, Q Huo, Z Zhang, Q Wang, J Wang, et al. - Nature Communications, 2024

    • A full-body transcription factor expression atlas with completely resolved cell identities in C. elegans Y Li, S Chen, W Liu, D Zhao, Y Gao, S Hu, H Liu, Y Li… - Nature Communications, 2024

    • +
    • Comprehensive mapping and modelling of the rice regulome landscape unveils the regulatory architecture underlying complex traits T Zhu, C Xia, R Yu, X Zhou, X Xu, L Wang et al. - Nature Communications, 2024

    • Transcriptional vulnerabilities of striatal neurons in human and rodent models of Huntington’s disease A Matsushima, SS Pineda, JR Crittenden et al. - Nature Communications, 2023

    • Resolving the structure of phage–bacteria interactions in the context of natural diversity KM Kauffman, WK Chang, JM Brown et al. - Nature Communications, 2022

    • Gut microbiome-mediated metabolism effects on immunity in rural and urban African populations M Stražar, GS Temba, H Vlamakis et al. - Nature Communications, 2021

    • @@ -293,19 +295,34 @@

      Studies that
    • Single-cell deletion analyses show control of pro–T cell developmental speed and pathways by Tcf7, Spi1, Gata3, Bcl11a, Erg, and Bcl11b W Zhou, F Gao, M Romero-Wolf, S Jo, EV Rothenberg - Science Immunology, 2022

    • Large-scale iterated singing experiments reveal oral transmission mechanisms underlying music evolution M Anglada-Tort, PMC Harrison, H Lee, N Jacoby - Current Biology, 2023

    • Detecting and diagnosing prior and likelihood sensitivity with power-scaling N Kallioinen, T Paananen, PC Bürkner, A Vehtari - Statistics and Computing, 2024

    • +
    • SLAPSHOT reveals rapid dynamics of extracellularly exposed proteome in response to calcium-activated plasma membrane phospholipid scrambling ST Tuomivaara, CF Teo, YN Jan, AP Wiita et al. - Communications Biology, 2024

    • TAS-Seq is a robust and sensitive amplification method for bead-based scRNA-seq S Shichino, S Ueha, S Hashimoto, T Ogawa et al. - Communications biology, 2022

    • -
    • Mapping hormone-regulated cell-cell interaction networks in the human breast at single-cell resolution LM Murrow, RJ Weber, JA Caruso et al. - Cell Systems, 2022

    • +
    • Mapping hormone-regulated cell-cell interaction networks in the human breast at single-cell resolution LM Murrow, RJ Weber, JA Caruso et al. - Cell Systems, 2022

    • +
    • Gene module reconstruction identifies cellular differentiation processes and the regulatory logic of specialized secretion in zebrafish Y Wang, J Liu, LY Du, JL Wyss, JA Farrell, AF Schier - Developmental Cell, 2024

    • +
    • The temporal progression of lung immune remodeling during breast cancer metastasis CS McGinnis, Z Miao, D Superville, W Yao, et al. - Cancer Cell, 2024

    • Large-scale chromatin reorganization reactivates placenta-specific genes that drive cellular aging Z Liu, Q Ji, J Ren, P Yan, Z Wu, S Wang, L Sun, Z Wang et al. - Developmental Cell, 2022

    • Direct epitranscriptomic regulation of mammalian translation initiation through N4-acetylcytidine D Arango, D Sturgill, R Yang, T Kanai, P Bauer et al. - Molecular Cell, 2022

    • +
    • Spotless, a reproducible pipeline for benchmarking cell type deconvolution in spatial transcriptomics C Sang-Aram, R Browaeys, R Seurinck, Y Saeys - eLife, 2024

    • Loss of adaptive capacity in asthmatic patients revealed by biomarker fluctuation dynamics after rhinovirus challenge A Sinha et al. - eLife, 2019

    • Sex and hatching order modulate the association between MHC‐II diversity and fitness in early‐life stages of a wild seabird M Pineaux et al - Molecular Ecology, 2020

    • BELMM: Bayesian model selection and random walk smoothing in time-series clustering O Sarala, T Pyhäjärvi, MJ Sillanpää - Bioinformatics, 2023

    • Cellsig plug-in enhances CIBERSORTx signature selection for multi-dataset transcriptomes with sparse multilevel modelling MA Al Kamran Khan, J Wu, S Yuhan et al. - Bioinformatics, 2023

    • +
    • Neutrality in plant–herbivore interactions VS Pan, WC Wetzel - Proceedings of the Royal Society B, 2024

    • How the Choice of Distance Measure Influences the Detection of Prior-Data Conflict K Lek, R Van De Schoot - Entropy, 2019

    • Differential variation analysis enables detection of tumor heterogeneity using single-cell RNA-sequencing data EF Davis-Marcisak, TD Sherman et al. - Cancer research, 2019

    • Multi-Omics Investigation of Innate Navitoclax Resistance in Triple-Negative Breast Cancer Cells M Marczyk et al. - Cancers, 2020

    • Impact of Gut Microbiome on Hypertensive Patients with Low-Salt Intake: Shika Study Results S Nagase et al. - Frontiers in Medicine, 2020

    • Tumor-associated neutrophils upregulate PANoptosis to foster an immunosuppressive microenvironment of non-small cell lung cancer Q Hu, R Wang, J Zhang, Q Xue, B Ding - Cancer Immunology, Immunotherapy, 2023

    • +
    • Spatial and Temporal Relationships Between Atrophy and Hypometabolism in Behavioral-Variant Frontotemporal Dementia J Stocks, E Gibson, K Popuri, MF Beg et al. - Alzheimer Disease & Associated Disorders, 2024

    • +
    • Unveiling Dynamic Changes and Regulatory Mechanisms of T Cell Subsets in Sepsis Pathogenesis C Jiang, J Chen, T Sun, J Xu, H Zhu, J Chen - ImmunoTargets and Therapy, 2024

    • +
    • Integrated analysis reveals NLRC4 as a potential biomarker in sepsis pathogenesis C Jiang, J Chen, J Xu, C Chen, H Zhu, Y Xu, H Zhao et al. - Genes & Immunity, 2024

    • +
    • Temporal composition of the cervicovaginal microbiome associates with hrHPV infection outcomes in a longitudinal study MA Molina, WPJ Leenders, MA Huynen, WJG Melchers et al. - BMC Infectious Diseases, 2024

    • +
    • PlantFUNCO: Integrative functional genomics database reveals clues into duplicates divergence evolution V Roces, S Guerrero, A Álvarez, J Pascual, M Meijón - Molecular Biology and Evolution, 2024

    • +
    • Cross-tissue human fibroblast atlas reveals myofibroblast subtypes with distinct roles in immune modulation Y Gao, J Li, W Cheng, T Diao, H Liu, Y Bo, C Liu et al. - Cancer Cell, 2024

    • +
    • Enhancing Immunotherapy Response Prediction in Metastatic Lung Adenocarcinoma: Leveraging Shallow and Deep Learning with CT-Based Radiomics C Masson-Grehaigne, M Lafon, J Palussière, L Leroy et al. - Cancers, 2024

    • +
    • A general framework for implementing distances for categorical variables M van de Velden, AI D’Enza, A Markos, C Cavicchia - Pattern Recognition, 2024

    • +
    • Early transcriptional similarities between two distinct neural lineages during ascidian embryogenesis RR Copley, J Buttin, MJ Arguel, G Williaume et al. - Developmental Biology, 2024

    • +
    • The power of visualizing distributional differences: formal graphical n-sample tests K Konstantinou, T Mrkvička, M Myllymäki - Computational Statistics, 2024

    • cisDynet: An integrated platform for modeling gene‐regulatory dynamics and networks T Zhu, X Zhou, Y You, L Wang, Z He, D Chen - iMeta, 2023

    • Children’s social networks in developmental psychology: A network approach to capture and describe early social environments N Burke, N Brezack, A Woodward - Frontiers in psychology, 2022

    • Whole Genome Sequencing and Morphological Trait-Based Evaluation of UPOV Option 2 for DUS Testing in Rice H Liu, D Rao, T Guo, SS Gangurde, Y Hong, et al. - Frontiers in Genetics, 2022

    • @@ -421,16 +438,16 @@

      Developers

      -

      Site built with pkgdown 2.0.9.

      +

      Site built with pkgdown 2.1.0.

    - - + + diff --git a/docs/news/index.html b/docs/news/index.html index 13475f1..5e2e848 100644 --- a/docs/news/index.html +++ b/docs/news/index.html @@ -3,7 +3,7 @@ - +
    - +
    @@ -77,7 +77,7 @@

\(L_1\) Family

    -
  • Sorensen : \(d = \frac{\sum_{i = 1}^N | P_i - Q_i |}{\sum_{i = 1}^N (P_i + Q_i)}\)
  • Gower : \(d = \frac{1}{N} \cdot \sum_{i = 1}^N | P_i - Q_i |\), where \(N\) is the total number of elements \(i\) in \(P_i\) and \(Q_i\)
  • Soergel : \(d = \frac{\sum_{i = 1}^N | P_i - Q_i |}{\sum_{i = 1}^N max(P_i , Q_i)}\)
  • Kulczynski d : \(d = \frac{\sum_{i = 1}^N | P_i - Q_i |}{\sum_{i = 1}^N min(P_i , Q_i)}\)
  • Canberra : \(d = \sum_{i = 1}^N \frac{| P_i - Q_i |}{P_i + Q_i}\)
  • Lorentzian : \(d = \sum_{i = 1}^N ln(1 + | P_i - Q_i |)\)

    Intersection Family

    -
  • Intersection : \(s = \sum_{i = 1}^N min(P_i , Q_i)\)
  • Non-Intersection : \(d = 1 - \sum_{i = 1}^N min(P_i , Q_i)\)
  • Wave Hedges : \(d = \sum_{i = 1}^N \frac{| P_i - Q_i |}{max(P_i , Q_i)}\)
  • Czekanowski : \(d = \frac{\sum_{i = 1}^N | P_i - Q_i |}{\sum_{i = 1}^N | P_i + Q_i |}\)
  • Motyka : \(d = \frac{\sum_{i = 1}^N min(P_i , Q_i)}{\sum_{i = 1}^N (P_i + Q_i)}\)
  • Kulczynski s : \(d = \frac{\sum_{i = 1}^N min(P_i , Q_i)}{\sum_{i = 1}^N | P_i - Q_i |}\)
  • Tanimoto : \(d = \frac{\sum_{i = 1}^N (max(P_i , Q_i) - min(P_i , Q_i))}{\sum_{i = 1}^N max(P_i , Q_i)}\) ; equivalent to Soergel
  • Ruzicka : \(s = \frac{\sum_{i = 1}^N min(P_i , Q_i)}{\sum_{i = 1}^N max(P_i , Q_i)}\) ; equivalent to 1 - Tanimoto = 1 - Soergel

    Inner Product Family

    -
  • Inner Product : \(s = \sum_{i = 1}^N P_i \cdot Q_i\)
  • Harmonic mean : \(s = 2 \cdot \sum_{i = 1}^N \frac{P_i \cdot Q_i}{P_i + Q_i}\)
  • Cosine : \(s = \frac{\sum_{i = 1}^N P_i \cdot Q_i}{\sqrt{\sum_{i = 1}^N P_i^2} \cdot \sqrt{\sum_{i = 1}^N Q_i^2}}\)
  • Kumar-Hassebrook (PCE) : \(s = \frac{\sum_{i = 1}^N (P_i \cdot Q_i)}{(\sum_{i = 1}^N P_i^2 + \sum_{i = 1}^N Q_i^2 - \sum_{i = 1}^N (P_i \cdot Q_i))}\)
  • Jaccard : \(d = 1 - \frac{\sum_{i = 1}^N P_i \cdot Q_i}{\sum_{i = 1}^N P_i^2 + \sum_{i = 1}^N Q_i^2 - \sum_{i = 1}^N P_i \cdot Q_i}\) ; equivalent to 1 - Kumar-Hassebrook
  • Dice : \(d = \frac{\sum_{i = 1}^N (P_i - Q_i)^2}{(\sum_{i = 1}^N P_i^2 + \sum_{i = 1}^N Q_i^2)}\)

    Squared-chord Family

    -
  • Fidelity : \(s = \sum_{i = 1}^N \sqrt{P_i \cdot Q_i}\)
  • Bhattacharyya : \(d = - ln \sum_{i = 1}^N \sqrt{P_i \cdot Q_i}\)
  • Hellinger : \(d = 2 \cdot \sqrt{1 - \sum_{i = 1}^N \sqrt{P_i \cdot Q_i}}\)
  • Matusita : \(d = \sqrt{2 - 2 \cdot \sum_{i = 1}^N \sqrt{P_i \cdot Q_i}}\)
  • Squared-chord : \(d = \sum_{i = 1}^N ( \sqrt{P_i} - \sqrt{Q_i} )^2\)
    -

Squared \(L_2\) family (\(X^2\) squared family)
  • Squared Euclidean : \(d = \sum_{i = 1}^N ( P_i - Q_i )^2\)
  • Pearson \(X^2\) : \(d = \sum_{i = 1}^N ( \frac{(P_i - Q_i )^2}{Q_i} )\)
  • Neyman \(X^2\) : \(d = \sum_{i = 1}^N ( \frac{(P_i - Q_i )^2}{P_i} )\)
  • Squared \(X^2\) : \(d = \sum_{i = 1}^N ( \frac{(P_i - Q_i )^2}{(P_i + Q_i)} )\)
  • Probabilistic Symmetric \(X^2\) : \(d = 2 \cdot \sum_{i = 1}^N ( \frac{(P_i - Q_i )^2}{(P_i + Q_i)} )\)
  • Divergence \(X^2\) : \(d = 2 \cdot \sum_{i = 1}^N ( \frac{(P_i - Q_i )^2}{(P_i + Q_i)^2} )\)
  • Clark : \(d = \sqrt{\sum_{i = 1}^N ( \frac{| P_i - Q_i |}{P_i + Q_i} )^2}\)
  • Additive Symmetric \(X^2\) : \(d = \sum_{i = 1}^N ( \frac{((P_i - Q_i)^2 \cdot (P_i + Q_i))}{(P_i \cdot Q_i)} )\)

    Shannon’s Entropy Family

    -
  • Kullback-Leibler : \(d = \sum_{i = 1}^N P_i \cdot log(\frac{P_i}{Q_i})\)
  • Jeffreys : \(d = \sum_{i = 1}^N (P_i - Q_i) \cdot log(\frac{P_i}{Q_i})\)
  • K divergence : \(d = \sum_{i = 1}^N P_i \cdot log(\frac{2 \cdot P_i}{P_i + Q_i})\)
  • Topsoe : \(d = \sum_{i = 1}^N ( P_i \cdot log(\frac{2 \cdot P_i}{P_i + Q_i}) ) + ( Q_i \cdot log(\frac{2 \cdot Q_i}{P_i + Q_i}) )\)
  • Jensen-Shannon : \(d = 0.5 \cdot ( \sum_{i = 1}^N P_i \cdot log(\frac{2 \cdot P_i}{P_i + Q_i}) + \sum_{i = 1}^N Q_i \cdot log(\frac{2 \cdot Q_i}{P_i + Q_i}))\)
  • Jensen difference : \(d = \sum_{i = 1}^N ( (\frac{P_i \cdot log(P_i) + Q_i \cdot log(Q_i)}{2}) - (\frac{P_i + Q_i}{2}) \cdot log(\frac{P_i + Q_i}{2}) )\)

    Combinations

    • Taneja : $d = \sum_{i = 1}^N \frac{P_i + Q_i}{2} \cdot log( \frac{P_i + Q_i}{2 \cdot \sqrt{P_i \cdot Q_i}} )$

    • Kumar-Johnson : $d = \sum_{i = 1}^N \frac{(P_i^2 - Q_i^2)^2}{2 \cdot (P_i \cdot Q_i)^{\frac{3}{2}}}$

    • Avg($L_1$, $L_n$) : $d = \frac{\sum_{i = 1}^N | P_i - Q_i | + \max_{i}{ | P_i - Q_i |}}{2}$

    Note: $d$ refers to distance measures, whereas $s$ denotes similarity measures.
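
    All of the measures above are available through a single interface; a minimal sketch (the probability vectors below are illustrative toy data, not from the package documentation):

    library(philentropy)
    P <- 1:10 / sum(1:10)
    Q <- 20:29 / sum(20:29)
    # list the names of all implemented distance/similarity measures
    getDistMethods()
    # compute one member of the squared-chord family
    distance(rbind(P, Q), method = "squared_chord")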

    Information Theory Measures

    • Shannon’s Entropy H(X) : $H(X) = -\sum\limits_{i=1}^n P(x_i) \cdot log_b(P(x_i))$

    • Shannon’s Joint-Entropy H(X,Y) : $H(X,Y) = -\sum\limits_{i=1}^n \sum\limits_{j=1}^m P(x_i, y_j) \cdot log_b(P(x_i, y_j))$

    • Shannon’s Conditional-Entropy H(X | Y) : $H(Y|X) = \sum\limits_{i=1}^n \sum\limits_{j=1}^m P(x_i, y_j) \cdot log_b( \frac{P(x_i)}{P(x_i, y_j)} )$

    • Mutual Information I(X,Y) : $MI(X,Y) = \sum\limits_{i=1}^n \sum\limits_{j=1}^m P(x_i, y_j) \cdot log_b( \frac{P(x_i, y_j)}{P(x_i) \cdot P(y_j)} )$

    • Kullback-Leibler Divergence : $KL(P || Q) = \sum\limits_{i=1}^n P(p_i) \cdot log_2( \frac{P(p_i)}{P(q_i)} ) = H(P, Q) - H(P)$

    • Jensen-Shannon Divergence : $JSD(P || Q) = 0.5 \cdot (KL(P || R) + KL(Q || R))$, where $R = 0.5 \cdot (P + Q)$

    • Generalized Jensen-Shannon Divergence : $gJSD_{\pi_1,...,\pi_n}(P_1, ..., P_n) = H(\sum_{i = 1}^n \pi_i \cdot P_i) - \sum_{i = 1}^n \pi_i \cdot H(P_i)$
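
    As a usage sketch, the chain rule H(X,Y) = H(Y) + H(X|Y) can be checked directly with the corresponding functions (the joint and marginal vectors are illustrative only):

    library(philentropy)
    P.xy <- 1:10 / sum(1:10)   # joint distribution P(X,Y)
    P.y  <- c(0.4, 0.6)        # marginal distribution P(Y)
    JE(P.xy)                   # H(X,Y)
    H(P.y)                     # H(Y)
    CE(P.xy, P.y)              # H(X|Y) = H(X,Y) - H(Y)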

      Philentropy already enabled the robust comparison of similarity measures in analogy-based software effort estimation [@Phannachitta2017] as well as in evolutionary transcriptomics applications [@Drost2018]. The package aims to assist efforts to determine optimal similarity or distance measures when developing new (statistical) models or algorithms. In addition, Philentropy is implemented to be applicable to large-scale datasets that were previously inaccessible using other R packages. The software is open source and currently available on GitHub (https://github.com/HajkD/philentropy) and CRAN (https://cran.r-project.org/web/packages/philentropy/index.html). A comprehensive documentation of Philentropy can be found at https://hajkd.github.io/philentropy/.

    References


diff --git a/docs/pkgdown.yml b/docs/pkgdown.yml
index e9d6dfb..91d1aa9 100644
--- a/docs/pkgdown.yml
+++ b/docs/pkgdown.yml
@@ -1,10 +1,9 @@
 pandoc: '3.2'
-pkgdown: 2.0.9
+pkgdown: 2.1.0
 pkgdown_sha: ~
 articles:
   Distances: Distances.html
   Information_Theory: Information_Theory.html
   Introduction: Introduction.html
   Many_Distances: Many_Distances.html
-last_built: 2024-11-12T20:43Z
-
+last_built: 2024-12-05T13:32Z
diff --git a/docs/reference/CE.html b/docs/reference/CE.html
index eca2096..57991bb 100644

    Shannon's Conditional-Entropy \(H(X | Y)\)

    Arguments

    xy

    a numeric joint-probability vector \(P(X,Y)\) for which Shannon's Joint-Entropy \(H(X,Y)\) shall be computed.

    y

    a numeric probability vector \(P(Y)\) for which Shannon's Entropy \(H(Y)\) (as part of the chain rule) shall be computed. It is important to note that this probability vector must be the probability distribution of the random variable Y (the P(Y) for which H(Y) is computed).

    unit

    a character string specifying the logarithm unit that shall be used to compute distances that depend on log computations.

    Value

    Shannon's Conditional-Entropy in bit.

    Details

    Examples

diff --git a/docs/reference/H.html b/docs/reference/H.html
index c14e295..f3f38b1 100644

    Shannon's Entropy \(H(X)\)

    Arguments

    x

    a numeric probability vector \(P(X)\) for which Shannon's Entropy \(H(X)\) shall be computed.

    unit

    a character string specifying the logarithm unit that shall be used to compute distances that depend on log computations.

    Value

    a numeric value representing Shannon's Entropy in bit.

    Details

    Examples

diff --git a/docs/reference/JE.html b/docs/reference/JE.html
index 70aa053..006e3d2 100644

    Shannon's Joint-Entropy \(H(X,Y)\)

    Arguments

    x

    a numeric joint-probability vector \(P(X,Y)\) for which Shannon's Joint-Entropy \(H(X,Y)\) shall be computed.

    unit

    a character string specifying the logarithm unit that shall be used to compute distances that depend on log computations.

    Value

    a numeric value representing Shannon's Joint-Entropy in bit.

    References

    Examples

diff --git a/docs/reference/JSD.html b/docs/reference/JSD.html
index 89703b0..0b3fd99 100644

    Jensen-Shannon Divergence

    Arguments

    x

    a numeric data.frame or matrix (storing probability vectors) or a numeric data.frame or matrix storing counts (if est.prob = TRUE). See distance for details.

    test.na

    a boolean value specifying whether input vectors shall be tested for NA values.

    unit

    a character string specifying the logarithm unit that shall be used to compute distances that depend on log computations.

    est.prob

    method to estimate probabilities from input count vectors such as non-probability vectors. Default: est.prob = NULL. Options are:

    • est.prob = "empirical": The relative frequencies of each vector are computed internally. For example an input matrix rbind(1:10, 11:20) will be transformed to a probability vector rbind(1:10 / sum(1:10), 11:20 / sum(11:20))

    Value

    a divergence value or matrix based on JSD computations.

    Details

    Examples
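
    A minimal usage sketch (the input vectors are illustrative toy data):

    library(philentropy)
    x <- rbind(1:10 / sum(1:10), 20:29 / sum(20:29))
    JSD(x, unit = "log2")
    # count vectors can be converted to probabilities internally
    JSD(rbind(1:10, 20:29), est.prob = "empirical")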

diff --git a/docs/reference/KL.html b/docs/reference/KL.html
index a371da0..633b809 100644

    Kullback-Leibler Divergence

    Arguments

    x

    a numeric data.frame or matrix (storing probability vectors) or a numeric data.frame or matrix storing counts (if est.prob = TRUE). See distance for details.

    test.na

    a boolean value indicating whether input vectors should be tested for NA values.

    unit

    a character string specifying the logarithm unit that shall be used to compute distances that depend on log computations.

    est.prob

    method to estimate probabilities from a count vector. Default: est.prob = NULL.

    epsilon

    a small value to address cases in the KL computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, when dealing with very large input vectors which are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or the distributions are very divergent, a larger epsilon may be needed to avoid the technical issues of computing x / 0 or 0 / 0 cases.

    Value

    The Kullback-Leibler divergence of probability vectors.

    Details

    Examples
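
    A short sketch of the epsilon argument in practice (toy probability vectors; the second call uses the smaller epsilon setting suggested above):

    library(philentropy)
    P <- 1:10 / sum(1:10)
    Q <- 20:29 / sum(20:29)
    KL(rbind(P, Q), unit = "log2")
    KL(rbind(P, Q), unit = "log2", epsilon = 0.000000001)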

diff --git a/docs/reference/MI.html b/docs/reference/MI.html
index 1f58649..2402f58 100644

    Shannon's Mutual Information \(I(X,Y)\)

    Arguments

    x

    a numeric probability vector \(P(X)\).

    y

    a numeric probability vector \(P(Y)\).

    xy

    a numeric joint-probability vector \(P(X,Y)\).

    unit

    a character string specifying the logarithm unit that shall be used to compute distances that depend on log computations.

    Value

    Shannon's Mutual Information in bit.

    Details

    Examples
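
    A minimal sketch (the marginal and joint vectors are illustrative only):

    library(philentropy)
    MI(x = 1:10 / sum(1:10), y = 20:29 / sum(20:29), xy = 1:10 / sum(1:10))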

diff --git a/docs/reference/additive_symm_chi_sq.html b/docs/reference/additive_symm_chi_sq.html
index af9a7d1..ef3d548 100644

    Additive symmetric chi-squared distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/avg.html b/docs/reference/avg.html
index 544da83..4acff7f 100644

    AVG distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/bhattacharyya.html b/docs/reference/bhattacharyya.html
index 50e66d4..6a24c53 100644

    Bhattacharyya distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    unit

    type of log function. Options are: unit = "log", unit = "log2", unit = "log10".

    epsilon

    a small value to address cases in the distance computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, when dealing with very large input vectors which are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or the distributions are very divergent, a larger epsilon may be needed to avoid the technical issues of computing x / 0 or 0 / 0 cases.

    Examples
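
    A minimal sketch of the lowlevel call with all arguments spelled out (toy vectors):

    library(philentropy)
    P <- 1:10 / sum(1:10)
    Q <- 20:29 / sum(20:29)
    bhattacharyya(P, Q, testNA = FALSE, unit = "log2", epsilon = 0.00001)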

diff --git a/docs/reference/binned.kernel.est.html b/docs/reference/binned.kernel.est.html
index 657d534..b9fc800 100644

    Kernel Density Estimation

    Arguments

    data

    a numeric vector containing the sample on which the kernel density estimate is to be constructed.

    kernel

    character string specifying the smoothing kernel.

    bandwidth

    the kernel bandwidth smoothing parameter.

    canonical

    a logical value indicating whether canonically scaled kernels should be used.

    scalest

    estimate of scale.

    • "stdev" - standard deviation is used.

    • "iqr" - inter-quartile range divided by 1.349 is used.

    • "minim" - minimum of "stdev" and "iqr" is used.

    level

    number of levels of functional estimation used in the plug-in rule.

    gridsize

    the number of equally-spaced points over which binning is performed to obtain kernel functional approximation.

    range.data

    vector containing the minimum and maximum values of data at which to compute the estimate. The default is the minimum and maximum data values.

    truncate

    logical value indicating whether data with x values outside the range specified by range.data should be ignored.

    Author
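
    A usage sketch under the defaults listed above (the input sample is simulated; kernel = "normal" is assumed to be an accepted kernel name):

    library(philentropy)
    kde <- binned.kernel.est(data = rnorm(1000), kernel = "normal", gridsize = 401L)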

diff --git a/docs/reference/canberra.html b/docs/reference/canberra.html
index 104893d..203f232 100644

    Canberra distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/chebyshev.html b/docs/reference/chebyshev.html
index a12bc9d..371a7b1 100644

    Chebyshev distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/clark_sq.html b/docs/reference/clark_sq.html
index 09e0cea..1b126f1 100644

    Clark squared distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/cosine_dist.html b/docs/reference/cosine_dist.html
index a52a572..bbcfa4a 100644

    Cosine distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/czekanowski.html b/docs/reference/czekanowski.html
index 3c09167..aa46c66 100644

    Czekanowski distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/dice_dist.html b/docs/reference/dice_dist.html
index 4d9df52..3fdca2a 100644

    Dice distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/dist.diversity.html b/docs/reference/dist.diversity.html
index bf390ac..caa6d7a 100644

    Distance Diversity between Probability Density Functions

    Arguments

    x

    a numeric data.frame or matrix (storing probability vectors) or a numeric data.frame or matrix storing counts (if est.prob is specified).

    p

    power of the Minkowski distance.

    test.na

    a boolean value indicating whether input vectors should be tested for NA values. Faster computations if test.na = FALSE.

    unit

    a character string specifying the logarithm unit that should be used to compute distances that depend on log computations. Options are:

    • unit = "log"

    • unit = "log2"

    • unit = "log10"

      Examples
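
    A minimal sketch comparing two toy distributions across all implemented measures:

    library(philentropy)
    x <- rbind(1:10 / sum(1:10), 20:29 / sum(20:29))
    dist.diversity(x, p = 2, unit = "log2")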

diff --git a/docs/reference/dist_many_many.html b/docs/reference/dist_many_many.html
index d9e9d8a..b6e01f5 100644

    Distances and Similarities between Many Probability Density Functions

    Arguments

    dists1

    a numeric matrix storing distributions in its rows.

    dists2

    a numeric matrix storing distributions in its rows.

    method

    a character string indicating which distance measure should be computed.

    p

    power of the Minkowski distance.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    unit

    type of log function. Options are:

    • unit = "log"

    • unit = "log2"

    • unit = "log10"

    epsilon

    a small value to address cases in the distance computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, when dealing with very large input vectors which are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or the distributions are very divergent, a larger epsilon may be needed to avoid the technical issues of computing x / 0 or 0 / 0 cases.

    Value

    A matrix of distance values.

    Examples
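
    A sketch with all arguments passed explicitly (the rows of each matrix are toy probability vectors; a permutation of 1:10 sums to 55, so each row sums to 1):

    library(philentropy)
    set.seed(2020)
    M1 <- t(replicate(5, sample(1:10, size = 10) / 55))
    M2 <- t(replicate(5, sample(1:10, size = 10) / 55))
    dist_many_many(M1, M2, method = "euclidean", p = 2, testNA = FALSE, unit = "log2", epsilon = 0.00001)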

diff --git a/docs/reference/dist_one_many.html b/docs/reference/dist_one_many.html
index 4db596b..f6533e4 100644

    Distances and Similarities between One and Many Probability Density Functions

    Arguments

    P

    a numeric vector storing the first distribution.

    dists

    a numeric matrix storing distributions in its rows.

    method

    a character string indicating which distance measure should be computed.

    p

    power of the Minkowski distance.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    unit

    type of log function. Options are:

    • unit = "log"

    • unit = "log2"

    • unit = "log10"

    epsilon

    a small value to address cases in the distance computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, when dealing with very large input vectors which are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or the distributions are very divergent, a larger epsilon may be needed to avoid the technical issues of computing x / 0 or 0 / 0 cases.

    Value

    A vector of distance values.

    Examples
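
    A minimal sketch (toy distributions; all arguments passed explicitly):

    library(philentropy)
    set.seed(2020)
    P <- 1:10 / sum(1:10)
    M <- t(replicate(3, sample(1:10, size = 10) / 55))
    dist_one_many(P, M, method = "euclidean", p = 2, testNA = FALSE, unit = "log2", epsilon = 0.00001)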

diff --git a/docs/reference/dist_one_one.html b/docs/reference/dist_one_one.html
index 684dc1f..0cdb887 100644

    Distances and Similarities between Two Probability Density Functions

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    method

    a character string indicating which distance measure should be computed.

    p

    power of the Minkowski distance.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    unit

    type of log function. Options are:

    • unit = "log"

    • unit = "log2"

    • unit = "log10"

    epsilon

    a small value to address cases in the distance computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, when dealing with very large input vectors which are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or the distributions are very divergent, a larger epsilon may be needed to avoid the technical issues of computing x / 0 or 0 / 0 cases.

    Value

    A single distance value.

    Examples
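
    A minimal sketch (toy distributions):

    library(philentropy)
    P <- 1:10 / sum(1:10)
    Q <- 20:29 / sum(20:29)
    dist_one_one(P, Q, method = "euclidean", p = 2, testNA = FALSE, unit = "log2", epsilon = 0.00001)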

diff --git a/docs/reference/distance.html b/docs/reference/distance.html
index 6b95cbb..3b36ebc 100644

    Distances and Similarities between Probability Density Functions

    Arguments

    x

    a numeric data.frame or matrix (storing probability vectors) or a numeric data.frame or matrix storing counts (if est.prob is specified).

    method

    a character string indicating which distance measure should be computed.

    p

    power of the Minkowski distance.

    test.na

    a boolean value indicating whether input vectors should be tested for NA values. Faster computations if test.na = FALSE.

    unit

    a character string specifying the logarithm unit that should be used to compute distances that depend on log computations.

    epsilon

    a small value to address cases in the distance computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, when dealing with very large input vectors which are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or the distributions are very divergent, a larger epsilon may be needed to avoid the technical issues of computing x / 0 or 0 / 0 cases.

    est.prob

    method to estimate probabilities from input count vectors such as non-probability vectors. Default: est.prob = NULL. Options are:

    • est.prob = "empirical": The relative frequencies of each vector are computed internally. For example an input matrix rbind(1:10, 11:20) will be transformed to a probability vector rbind(1:10 / sum(1:10), 11:20 / sum(11:20))

    use.row.names

    a logical value indicating whether or not row names from the input matrix shall be used as rownames and colnames of the output distance matrix. Default value is use.row.names = FALSE.

    as.dist.obj

    shall the return value or matrix be an object of class stats::dist? Default is as.dist.obj = FALSE.

    diag

    if as.dist.obj = TRUE, then this value indicates whether the diagonal of the distance matrix should be printed.

    upper

    if as.dist.obj = TRUE, then this value indicates whether the upper triangle of the distance matrix should be printed.

    mute.message

    a logical value indicating whether or not messages printed by distance shall be muted. Default is mute.message = FALSE.

    Value

    The following results are returned depending on the dimension of x:

    • in case nrow(x) = 2 : a single distance value.

    • in case nrow(x) > 2 : a distance matrix storing distance values for all pairwise probability vector comparisons.

    Examples
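
    A sketch of the matrix interface with row names carried over (toy probability vectors):

    library(philentropy)
    ProbMatrix <- rbind(1:10 / sum(1:10), 20:29 / sum(20:29), 30:39 / sum(30:39))
    rownames(ProbMatrix) <- paste0("vec", 1:3)
    # returns a 3 x 3 distance matrix labelled vec1..vec3
    distance(ProbMatrix, method = "jensen-shannon", use.row.names = TRUE)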

diff --git a/docs/reference/divergence_sq.html b/docs/reference/divergence_sq.html
index 597a0e0..046a600 100644

    Divergence squared distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/estimate.probability.html b/docs/reference/estimate.probability.html
index fac2942..5a1d0d1 100644

    Estimate Probability Vectors From Count Vectors

    This function takes a numeric count vector and returns the estimated probabilities of the corresponding counts.

    The following probability estimation methods are implemented in this function:

    • method = "empirical" : generates the relative frequency of the data x/sum(x).

    Arguments

    x

    a numeric vector storing count values.

    method

    a character string specifying the estimation method that should be used to estimate probabilities from input counts.

    Value

    a numeric probability vector.

    Author
    Examples

    # generate a count vector
    x <- runif(100)
    # generate a probability vector from corresponding counts
    x.prob <- estimate.probability(x, method = 'empirical')


diff --git a/docs/reference/euclidean.html b/docs/reference/euclidean.html
index 10ba169..58ddf17 100644

    Euclidean distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples
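
    A minimal sketch of the lowlevel call pattern (it applies analogously to the other lowlevel distance functions documented on these pages; the vectors are toy data):

    library(philentropy)
    euclidean(P = 1:10 / sum(1:10), Q = 20:29 / sum(20:29), testNA = FALSE)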

diff --git a/docs/reference/fidelity.html b/docs/reference/fidelity.html
index 1ada4df..ea05ef7 100644

    Fidelity distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/gJSD.html b/docs/reference/gJSD.html
index bb73703..308c230 100644

    Generalized Jensen-Shannon Divergence

    Arguments

    x

    a probability matrix.

    unit

    a character string specifying the logarithm unit that shall be used to compute distances that depend on log computations.

    weights

    a numeric vector specifying the weights for each distribution in x. Default: weights = NULL; in this case all distributions are weighted equally (= uniform distribution of weights). In case users wish to specify non-uniform weights for e.g. 3 distributions, they can specify weights = c(0.5, 0.25, 0.25), which denotes that vec1 is weighted by 0.5, vec2 is weighted by 0.25, and vec3 is weighted by 0.25 as well.

    est.prob

    method to estimate probabilities from input count vectors such as non-probability vectors. Default: est.prob = NULL. Options are:

    • est.prob = "empirical": The relative frequencies of each vector are computed internally. For example an input matrix rbind(1:10, 11:20) will be transformed to a probability vector rbind(1:10 / sum(1:10), 11:20 / sum(11:20))

    Value

    The Jensen-Shannon divergence between all possible combinations of comparisons.

    Details

    Examples
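
    A minimal sketch of uniform versus explicit weights (toy probability matrix):

    library(philentropy)
    Prob <- rbind(1:10 / sum(1:10), 20:29 / sum(20:29), 30:39 / sum(30:39))
    gJSD(Prob)                                  # uniform weights
    gJSD(Prob, weights = c(0.5, 0.25, 0.25))    # non-uniform weights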

diff --git a/docs/reference/getDistMethods.html b/docs/reference/getDistMethods.html
index 62fd3a2..7a20370 100644

    Get method names for distance

    This function returns the names of the methods that can be applied to compute distances between probability density functions using the distance function.

    Examples

diff --git a/docs/reference/gower.html b/docs/reference/gower.html
index 95e9367..f85bd9d 100644

    Gower distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/harmonic_mean_dist.html b/docs/reference/harmonic_mean_dist.html
index 74d6368..b1790c6 100644

    Harmonic mean distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/hellinger.html b/docs/reference/hellinger.html
index 1b29a72..0111776 100644

    Hellinger distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/index.html b/docs/reference/index.html
index fc23a81..b502d2b 100644
-Function reference • philentropy
+Package index • philentropy

    Other

diff --git a/docs/reference/inner_product.html b/docs/reference/inner_product.html
index 47d3480..924174b 100644

    Inner product distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/intersection_dist.html b/docs/reference/intersection_dist.html
index c890da3..a2b659f 100644

    Intersection distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/jaccard.html b/docs/reference/jaccard.html
index 2dd5aee..c1cab47 100644

    Jaccard distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/jeffreys.html b/docs/reference/jeffreys.html
index b842063..e18299d 100644

    Jeffreys distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    unit

    type of log function. Options are:

    • unit = "log"

    • unit = "log2"

    • unit = "log10"

    epsilon

    a small value to address cases in the distance computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, when dealing with very large input vectors which are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or the distributions are very divergent, a larger epsilon may be needed to avoid the technical issues of computing x / 0 or 0 / 0 cases.

    Examples

diff --git a/docs/reference/jensen_difference.html b/docs/reference/jensen_difference.html
index 851f47e..e208e42 100644

    Jensen difference (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    unit

    type of log function. Options are:

    • unit = "log"

    • unit = "log2"

    • unit = "log10"

      Examples

diff --git a/docs/reference/jensen_shannon.html b/docs/reference/jensen_shannon.html
index 3b31c6c..0642eba 100644

    Jensen-Shannon distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    unit

    type of log function. Options are:

    • unit = "log"

    • unit = "log2"

    • unit = "log10"

      Examples

diff --git a/docs/reference/k_divergence.html b/docs/reference/k_divergence.html
index 01b895c..3bdf5d1 100644

    K-Divergence (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    unit

    type of log function. Options are:

    • unit = "log"

    • unit = "log2"

    • unit = "log10"

      Examples

diff --git a/docs/reference/kulczynski_d.html b/docs/reference/kulczynski_d.html
index d4de785..c898198 100644

    Kulczynski_d distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    epsilon

    a small value to address cases in the distance computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, when dealing with very large input vectors which are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or the distributions are very divergent, a larger epsilon may be needed to avoid the technical issues of computing x / 0 or 0 / 0 cases.

    Examples

diff --git a/docs/reference/kullback_leibler_distance.html b/docs/reference/kullback_leibler_distance.html
index 269de13..ff25799 100644

    Kullback-Leibler distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    unit

    type of log function. Options are:

    • unit = "log"

    • unit = "log2"

    • unit = "log10"

    epsilon

    a small value to address cases in the distance computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, when dealing with very large input vectors which are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or the distributions are very divergent, a larger epsilon may be needed to avoid the technical issues of computing x / 0 or 0 / 0 cases.

    Examples

diff --git a/docs/reference/kumar_hassebrook.html b/docs/reference/kumar_hassebrook.html
index ffd46e1..eef23d1 100644

    Kumar-Hassebrook distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/kumar_johnson.html b/docs/reference/kumar_johnson.html
index 8932862..3c3d565 100644

    Kumar-Johnson distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    epsilon

    a small value to address cases in the distance computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, when dealing with very large input vectors which are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or the distributions are very divergent, a larger epsilon may be needed to avoid the technical issues of computing x / 0 or 0 / 0 cases.

    Examples

diff --git a/docs/reference/lin.cor.html b/docs/reference/lin.cor.html
index 303061a..73c22f7 100644

    Linear Correlation

    Arguments

    x

    a numeric vector, matrix, or data.frame.

    y

    a numeric vector that should be correlated with x.

    method

    the method to compute the linear correlation between x and y.

    test.na

    a boolean value indicating whether input data should be checked for NA values.

    Details

    • method = "pearson" : Pearson's correlation coefficient (centred).

    • method = "pearson2" : Pearson's uncentred correlation coefficient.

    • method = "sq_pearson" : Squared Pearson's correlation coefficient.

    • method = "kendall" : Kendall's correlation coefficient.

    • method = "spearman" : Spearman's correlation coefficient.

    Further Details:

    • Pearson's correlation coefficient (centred) :

    Author
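
    A minimal sketch (simulated data; the method names follow the Details list above):

    library(philentropy)
    x <- 1:20
    y <- x + rnorm(20, sd = 2)
    lin.cor(x, y, method = "pearson")
    lin.cor(x, y, method = "spearman")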

diff --git a/docs/reference/lorentzian.html b/docs/reference/lorentzian.html
index afe6d94..227e8ea 100644

    Lorentzian distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    unit

    type of log function. Options are:

    • unit = "log"

    • unit = "log2"

    • unit = "log10"

      Examples

diff --git a/docs/reference/manhattan.html b/docs/reference/manhattan.html
index e665ee7..df501f5 100644

    Manhattan distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples

diff --git a/docs/reference/matusita.html b/docs/reference/matusita.html
index aa559e7..7bc60fc 100644

    Matusita distance (lowlevel function)

    Arguments

    P

    a numeric vector storing the first distribution.

    Q

    a numeric vector storing the second distribution.

    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    Examples
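For illustration, a runnable call on hypothetical input distributions:

```r
library(philentropy)
P <- 1:10 / sum(1:10)   # hypothetical probability vector
Q <- 20:29 / sum(20:29) # hypothetical probability vector
matusita(P, Q, testNA = FALSE)
```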

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/minkowski.html b/docs/reference/minkowski.html
index 6284b61..dbdaf11 100644
--- a/docs/reference/minkowski.html
+++ b/docs/reference/minkowski.html
@@ -3,7 +3,7 @@
@@ -77,19 +77,21 @@

    Minkowski distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    n
    +
    n

index for the Minkowski exponent.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    @@ -116,15 +118,15 @@

    Examples
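A minimal illustrative sketch; P and Q below are hypothetical probability vectors:

```r
library(philentropy)
P <- 1:10 / sum(1:10)
Q <- 20:29 / sum(20:29)
# n = 2 corresponds to the euclidean distance
minkowski(P, Q, n = 2, testNA = FALSE)
```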

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/motyka.html b/docs/reference/motyka.html
index 8df784d..3e922de 100644
--- a/docs/reference/motyka.html
+++ b/docs/reference/motyka.html
@@ -3,7 +3,7 @@
@@ -77,15 +77,17 @@

    Motyka distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    @@ -112,15 +114,15 @@

    Examples
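An illustrative call; the input distributions below are hypothetical:

```r
library(philentropy)
P <- 1:10 / sum(1:10)   # hypothetical probability vector
Q <- 20:29 / sum(20:29) # hypothetical probability vector
motyka(P, Q, testNA = FALSE)
```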

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/neyman_chi_sq.html b/docs/reference/neyman_chi_sq.html
index 874906f..727b8a1 100644
--- a/docs/reference/neyman_chi_sq.html
+++ b/docs/reference/neyman_chi_sq.html
@@ -3,7 +3,7 @@
@@ -77,25 +77,27 @@

    Neyman chi-squared distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    -
    epsilon
    +
    epsilon

a small value to address cases in the distance computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, we suggest that when dealing with very large input vectors that are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or distributions very divergent then
@@ -129,15 +131,15 @@

    Examples
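A minimal illustrative sketch; P and Q below are hypothetical probability vectors:

```r
library(philentropy)
P <- 1:10 / sum(1:10)
Q <- 20:29 / sum(20:29)
# epsilon replaces x / 0 and 0 / 0 terms (see the argument description above)
neyman_chi_sq(P, Q, testNA = FALSE, epsilon = 0.00001)
```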

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/pearson_chi_sq.html b/docs/reference/pearson_chi_sq.html
index c43d4a3..c5e6fe8 100644
--- a/docs/reference/pearson_chi_sq.html
+++ b/docs/reference/pearson_chi_sq.html
@@ -3,7 +3,7 @@
@@ -77,25 +77,27 @@

    Pearson chi-squared distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    -
    epsilon
    +
    epsilon

a small value to address cases in the distance computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, we suggest that when dealing with very large input vectors that are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or distributions very divergent then
@@ -129,15 +131,15 @@

    Examples
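For illustration, a runnable call on hypothetical input distributions:

```r
library(philentropy)
P <- 1:10 / sum(1:10)
Q <- 20:29 / sum(20:29)
# epsilon guards the division terms against zero denominators
pearson_chi_sq(P, Q, testNA = FALSE, epsilon = 0.00001)
```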

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/prob_symm_chi_sq.html b/docs/reference/prob_symm_chi_sq.html
index 20ea75e..b890722 100644
--- a/docs/reference/prob_symm_chi_sq.html
+++ b/docs/reference/prob_symm_chi_sq.html
@@ -3,7 +3,7 @@
@@ -77,15 +77,17 @@

    Probability symmetric chi-squared distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    @@ -112,15 +114,15 @@

    Examples
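A minimal usage sketch with made-up input distributions:

```r
library(philentropy)
P <- 1:10 / sum(1:10)   # hypothetical probability vector
Q <- 20:29 / sum(20:29) # hypothetical probability vector
prob_symm_chi_sq(P, Q, testNA = FALSE)
```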

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/ruzicka.html b/docs/reference/ruzicka.html
index b2eb4b1..896d7df 100644
--- a/docs/reference/ruzicka.html
+++ b/docs/reference/ruzicka.html
@@ -3,7 +3,7 @@
@@ -77,15 +77,17 @@

    Ruzicka distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    @@ -112,15 +114,15 @@

    Examples
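An illustrative call; the input distributions below are hypothetical:

```r
library(philentropy)
P <- 1:10 / sum(1:10)   # hypothetical probability vector
Q <- 20:29 / sum(20:29) # hypothetical probability vector
ruzicka(P, Q, testNA = FALSE)
```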

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/soergel.html b/docs/reference/soergel.html
index eee7183..6cbf17c 100644
--- a/docs/reference/soergel.html
+++ b/docs/reference/soergel.html
@@ -3,7 +3,7 @@
@@ -77,15 +77,17 @@

    Soergel distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    @@ -112,15 +114,15 @@

    Examples
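A minimal illustrative sketch; P and Q below are hypothetical probability vectors:

```r
library(philentropy)
P <- 1:10 / sum(1:10)
Q <- 20:29 / sum(20:29)
soergel(P, Q, testNA = FALSE)
```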

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/sorensen.html b/docs/reference/sorensen.html
index a42c8c9..7172fee 100644
--- a/docs/reference/sorensen.html
+++ b/docs/reference/sorensen.html
@@ -3,7 +3,7 @@
@@ -77,15 +77,17 @@

    Sorensen distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    @@ -112,15 +114,15 @@

    Examples
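For illustration, a runnable call on hypothetical input distributions:

```r
library(philentropy)
P <- 1:10 / sum(1:10)   # hypothetical probability vector
Q <- 20:29 / sum(20:29) # hypothetical probability vector
sorensen(P, Q, testNA = FALSE)
```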

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/squared_chi_sq.html b/docs/reference/squared_chi_sq.html
index 3c2ae24..047d604 100644
--- a/docs/reference/squared_chi_sq.html
+++ b/docs/reference/squared_chi_sq.html
@@ -3,7 +3,7 @@
@@ -77,15 +77,17 @@

    Squared chi-squared distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    @@ -112,15 +114,15 @@

    Examples
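A minimal usage sketch with made-up input distributions:

```r
library(philentropy)
P <- 1:10 / sum(1:10)
Q <- 20:29 / sum(20:29)
squared_chi_sq(P, Q, testNA = FALSE)
```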

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/squared_chord.html b/docs/reference/squared_chord.html
index 80a1d81..e16d4b1 100644
--- a/docs/reference/squared_chord.html
+++ b/docs/reference/squared_chord.html
@@ -3,7 +3,7 @@
@@ -77,15 +77,17 @@

    Squared chord distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    @@ -112,15 +114,15 @@

    Examples
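An illustrative call; the input distributions below are hypothetical:

```r
library(philentropy)
P <- 1:10 / sum(1:10)   # hypothetical probability vector
Q <- 20:29 / sum(20:29) # hypothetical probability vector
squared_chord(P, Q, testNA = FALSE)
```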

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/squared_euclidean.html b/docs/reference/squared_euclidean.html
index 7db57fe..5f44a69 100644
--- a/docs/reference/squared_euclidean.html
+++ b/docs/reference/squared_euclidean.html
@@ -3,7 +3,7 @@
@@ -77,15 +77,17 @@

    Squared euclidean distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    @@ -112,15 +114,15 @@

    Examples
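A minimal illustrative sketch; P and Q below are hypothetical probability vectors:

```r
library(philentropy)
P <- 1:10 / sum(1:10)
Q <- 20:29 / sum(20:29)
squared_euclidean(P, Q, testNA = FALSE)
```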

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/taneja.html b/docs/reference/taneja.html
index 6ba928b..c845e0b 100644
--- a/docs/reference/taneja.html
+++ b/docs/reference/taneja.html
@@ -3,7 +3,7 @@
@@ -77,32 +77,34 @@

    Taneja difference (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    -
    unit
    +
    unit

type of log function. Options are

    • unit = "log"

    • unit = "log2"

    • unit = "log10"

    -
    epsilon
    +
    epsilon

a small value to address cases in the distance computation where division by zero occurs. In these cases, x / 0 or 0 / 0 will be replaced by epsilon. The default is epsilon = 0.00001. However, we recommend choosing a custom epsilon value depending on the size of the input vectors, the expected similarity between the compared probability density functions, and whether or not many 0 values are present within the compared vectors. As a rough rule of thumb, we suggest that when dealing with very large input vectors that are very similar and contain many 0 values, the epsilon value should be set even smaller (e.g. epsilon = 0.000000001), whereas when vector sizes are small or distributions very divergent then
@@ -136,15 +138,15 @@

    Examples
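A minimal illustrative sketch; P and Q below are hypothetical probability vectors:

```r
library(philentropy)
P <- 1:10 / sum(1:10)
Q <- 20:29 / sum(20:29)
# unit selects the log base; epsilon guards terms where division by zero occurs
taneja(P, Q, testNA = FALSE, unit = "log2", epsilon = 0.00001)
```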

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/tanimoto.html b/docs/reference/tanimoto.html
index cee92f1..f3ad563 100644
--- a/docs/reference/tanimoto.html
+++ b/docs/reference/tanimoto.html
@@ -3,7 +3,7 @@
@@ -77,15 +77,17 @@

    Tanimoto distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    @@ -112,15 +114,15 @@

    Examples
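For illustration, a runnable call on hypothetical input distributions:

```r
library(philentropy)
P <- 1:10 / sum(1:10)   # hypothetical probability vector
Q <- 20:29 / sum(20:29) # hypothetical probability vector
tanimoto(P, Q, testNA = FALSE)
```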

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/topsoe.html b/docs/reference/topsoe.html
index e359a23..8dc6670 100644
--- a/docs/reference/topsoe.html
+++ b/docs/reference/topsoe.html
@@ -3,7 +3,7 @@
@@ -77,19 +77,21 @@

    Topsoe distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    -
    unit
    +
    unit

type of log function. Options are

    • unit = "log"

    • unit = "log2"

    • unit = "log10"

@@ -119,15 +121,15 @@

      Examples
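A minimal usage sketch with made-up input distributions:

```r
library(philentropy)
P <- 1:10 / sum(1:10)
Q <- 20:29 / sum(20:29)
topsoe(P, Q, testNA = FALSE, unit = "log2")  # unit selects the log base
```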

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/reference/wave_hedges.html b/docs/reference/wave_hedges.html
index bc62841..4be36e2 100644
--- a/docs/reference/wave_hedges.html
+++ b/docs/reference/wave_hedges.html
@@ -3,7 +3,7 @@
@@ -77,15 +77,17 @@

    Wave hedges distance (lowlevel function)

    Arguments

    -
    P
    + + +
    P

    a numeric vector storing the first distribution.

    -
    Q
    +
    Q

    a numeric vector storing the second distribution.

    -
    testNA
    +
    testNA

    a logical value indicating whether or not distributions shall be checked for NA values.

    @@ -112,15 +114,15 @@

    Examples
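An illustrative call; the input distributions below are hypothetical:

```r
library(philentropy)
P <- 1:10 / sum(1:10)   # hypothetical probability vector
Q <- 20:29 / sum(20:29) # hypothetical probability vector
wave_hedges(P, Q, testNA = FALSE)
```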

-Site built with pkgdown 2.0.9.
+Site built with pkgdown 2.1.0.

diff --git a/docs/sitemap.xml b/docs/sitemap.xml
index f9aef54..f0db57b 100644
--- a/docs/sitemap.xml
+++ b/docs/sitemap.xml
@@ -1,219 +1,75 @@
-/404.html
-/LICENSE-text.html
-/articles/Distances.html
-/articles/Information_Theory.html
-/articles/Introduction.html
-/articles/Many_Distances.html
-/articles/index.html
-/authors.html
-/index.html
-/news/index.html
-/paper.html
-/reference/CE.html
-/reference/H.html
-/reference/JE.html
-/reference/JSD.html
-/reference/KL.html
-/reference/MI.html
-/reference/additive_symm_chi_sq.html
-/reference/avg.html
-/reference/bhattacharyya.html
-/reference/binned.kernel.est.html
-/reference/canberra.html
-/reference/chebyshev.html
-/reference/clark_sq.html
-/reference/cosine_dist.html
-/reference/czekanowski.html
-/reference/dice_dist.html
-/reference/dist.diversity.html
-/reference/dist_many_many.html
-/reference/dist_one_many.html
-/reference/dist_one_one.html
-/reference/distance.html
-/reference/divergence_sq.html
-/reference/estimate.probability.html
-/reference/euclidean.html
-/reference/fidelity.html
-/reference/gJSD.html
-/reference/getDistMethods.html
-/reference/gower.html
-/reference/harmonic_mean_dist.html
-/reference/hellinger.html
+/reference/index.html
+/reference/inner_product.html
+/reference/intersection_dist.html
+/reference/jaccard.html
+/reference/jeffreys.html
+/reference/jensen_difference.html
+/reference/jensen_shannon.html
+/reference/k_divergence.html
+/reference/kulczynski_d.html
+/reference/kullback_leibler_distance.html
+/reference/kumar_hassebrook.html
+/reference/kumar_johnson.html
+/reference/lin.cor.html
+/reference/lorentzian.html
+/reference/manhattan.html
+/reference/matusita.html
+/reference/minkowski.html
+/reference/motyka.html
+/reference/neyman_chi_sq.html
+/reference/pearson_chi_sq.html
+/reference/prob_symm_chi_sq.html
+/reference/ruzicka.html
+/reference/soergel.html
+/reference/sorensen.html
+/reference/squared_chi_sq.html
+/reference/squared_chord.html
+/reference/squared_euclidean.html
+/reference/taneja.html
+/reference/tanimoto.html
+/reference/topsoe.html
+/reference/wave_hedges.html