No hierarchical #1

Merged: 8 commits, May 7, 2015
56 changes: 56 additions & 0 deletions histograms.py
@@ -0,0 +1,56 @@
from pylab import *
from scipy import stats
import numpy as np
import numpy.ma as ma

max_planets = 3

sample_data = loadtxt('sample.txt', skiprows=2)
posterior_data = loadtxt('posterior_sample.txt')

## periods
periods_sample = []
periods_posterior = []
for i in range(max_planets):
    periods_sample.append(sample_data[:,10+i])
    periods_posterior.append(posterior_data[:,10+i])

p_s = np.concatenate(periods_sample)
p_p = np.concatenate(periods_posterior)
# mask 0 values, should use these arrays in plots
P_sample = np.ma.masked_equal(p_s, 0)
P_posterior = np.ma.masked_equal(p_p, 0)


## amplitudes
amplitudes_sample = []
amplitudes_posterior = []
for i in range(max_planets):
    # amplitude columns start after the max_planets period columns
    amplitudes_sample.append(sample_data[:,10+max_planets+i])
    amplitudes_posterior.append(posterior_data[:,10+max_planets+i])

k_s = np.concatenate(amplitudes_sample)
k_p = np.concatenate(amplitudes_posterior)
# mask 0 values, should use these arrays in plots
K_sample = np.ma.masked_equal(k_s, 0)
K_posterior = np.ma.masked_equal(k_p, 0)



## eccentricities
eccentricities_sample = []
eccentricities_posterior = []
for i in range(max_planets):
    eccentricities_sample.append(sample_data[:,10+3*max_planets+i])
    eccentricities_posterior.append(posterior_data[:,10+3*max_planets+i])

ecc_s = np.concatenate(eccentricities_sample)
ecc_p = np.concatenate(eccentricities_posterior)
# mask 0 values, should use these arrays in plots
ECC_sample = np.ma.masked_equal(ecc_s, 0)
ECC_posterior = np.ma.masked_equal(ecc_p, 0)


posterior = np.zeros((len(P_posterior), 3))
posterior[:,0] = ma.log(P_posterior)
posterior[:,1] = ma.log(K_posterior)
posterior[:,2] = ECC_posterior
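histograms.py stops after assembling the masked arrays; the plots themselves are left implicit ("should use these arrays in plots"). A minimal sketch of what that plotting step could look like, using matplotlib as already pulled in through pylab; the helper name plot_marginals and the bin count are illustrative, not part of the PR:

import numpy as np
import matplotlib.pyplot as plt

def plot_marginals(P, K, ECC, bins=50):
    # P, K, ECC are masked arrays; compressed() drops the masked (zero-padded) entries
    fig, axes = plt.subplots(1, 3, figsize=(12, 3))
    axes[0].hist(np.log(P.compressed()), bins=bins)
    axes[0].set_xlabel('log period')
    axes[1].hist(np.log(K.compressed()), bins=bins)
    axes[1].set_xlabel('log semi-amplitude')
    axes[2].hist(ECC.compressed(), bins=bins)
    axes[2].set_xlabel('eccentricity')
    fig.tight_layout()
    plt.show()

# e.g. plot_marginals(P_posterior, K_posterior, ECC_posterior)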
11 changes: 9 additions & 2 deletions include/MTSamplerImpl.h
@@ -315,7 +315,14 @@ void MTSampler<ModelType>::createLevel()
template<class ModelType>
void MTSampler<ModelType>::saveParticle(int iWhich, int jWhich)
{
std::cout<<"# Saving a particle to disk. N = "<<(++saves)<<"."<<std::endl;

++saves;
if(saves % 10 == 0)
{
std::cout<<"# Saving particle N = "<<(saves)<<" to disk."<<std::endl;
//std::cout<<"# Saving a particle to disk. N = "<<(++saves)<<"."<<std::endl;
}


// Save the particle to file
std::fstream fout;
@@ -332,7 +339,7 @@ void MTSampler<ModelType>::saveParticle(int iWhich, int jWhich)
fout.close();

// Save the particle's info
-    if(saves == 1)
+    if(saves == 1 and not restart)
{
fout.open(options.sampleInfoFile.c_str(), std::ios::out);
fout<<"# index, logLikelihood, tieBreaker, ID."<<std::endl;
6 changes: 4 additions & 2 deletions showresults.py
@@ -1,5 +1,7 @@
import postprocess
-postprocess.postprocess(cut=0., plot=True, moreSamples=1.)
+postprocess.postprocess(cut=0., plot=False, moreSamples=1.)
# import display

-import make_plots
+import make_plots

+import histograms
3 changes: 3 additions & 0 deletions src/Data.cpp
@@ -25,6 +25,9 @@ void Data::load(const char* filename)
y.clear();
sig.clear();

+    while(fin.peek() == '#')
+        fin.ignore(1000000, '\n');

double temp1, temp2, temp3;
while(fin>>temp1 && fin>>temp2 && fin>>temp3)
{
72 changes: 12 additions & 60 deletions src/MyDistribution.cpp
@@ -14,51 +14,13 @@ MyDistribution::MyDistribution()
// Sample prior hyperparameters from their respective priors
void MyDistribution::fromPrior()
{
-    // Median orbital period, mu_P
-    // Cauchy prior centered on 5.901 = log(365 days), scale = 1
-    // general form for location=x0, scale=c
-    // cauchy_rand = c * tan(M_PI * (randomU()-0.5)) + x0
-    // the distribution is truncated to the interval -15.3, 27.1, hence
-    // the 0.97 and 0.485 values
-    center = 5.901 + tan(M_PI*(0.97*randomU() - 0.485));
-    // Diversity of orbital periods, w_P
-    // uniform prior between 0.1 and 3
-    width = 0.1 + 2.9*randomU();
-    // Mean amplitude (metres per second)
-    // standard Cauchy prior
-    // again truncated to the interval -21.2, 21.2
-    mu = exp(tan(M_PI*(0.97*randomU() - 0.485)));
+    // no hyperparameters
}


double MyDistribution::perturb_parameters()
{
-    double logH = 0.;

-    int which = randInt(3);

-    if(which == 0)
-    {
-        center = (atan(center - 5.901)/M_PI + 0.485)/0.97;
-        center += randh();
-        wrap(center, 0., 1.);
-        center = 5.901 + tan(M_PI*(0.97*center - 0.485));
-    }
-    else if(which == 1)
-    {
-        width += 2.9*randh();
-        wrap(width, 0.1, 3.);
-    }
-    else
-    {
-        mu = log(mu);
-        mu = (atan(mu)/M_PI + 0.485)/0.97;
-        mu += randh();
-        wrap(mu, 0., 1.);
-        mu = tan(M_PI*(0.97*mu - 0.485));
-        mu = exp(mu);
-    }
-    return logH;
+    return 0.;
}
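The hyperpriors removed above were drawn with an inverse-CDF (tan) transform of a truncated Cauchy, as the deleted comments describe. A small numerical illustration of that transform; the function name and sample sizes are ours, not the repository's:

import numpy as np

def truncated_cauchy(u, x0=5.901, c=1.0):
    # Cauchy inverse CDF x0 + c*tan(pi*(u - 0.5)); mapping u -> 0.97*u - 0.485
    # restricts the quantile to (0.015, 0.985), i.e. truncates to roughly x0 +/- 21.2
    return x0 + c * np.tan(np.pi * (0.97 * u - 0.485))

u = np.random.rand(100000)
draws = truncated_cauchy(u)                       # median log-period, centred on log(365) ~ 5.901
print(draws.min(), draws.max())                   # stays inside about (-15.3, 27.1)
print(np.exp(truncated_cauchy(u, x0=0.0)).max())  # mean-amplitude style draw: exp of a truncated Cauchy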

// vec[0] = "position" (log-period)
@@ -75,22 +37,16 @@ double MyDistribution::log_pdf(const std::vector<double>& vec) const
vec[4] < 0. || vec[4] > 2.*M_PI)
return -1E300;

-    return -log(2.*width) - abs(vec[0] - center)/width
-        -log(mu) - vec[1]/mu
-        + 2.1*log(1. - vec[3]/0.995);
+    return 0.;
}

void MyDistribution::from_uniform(std::vector<double>& vec) const
{
-    // inverse CDF of the Laplace distribution
-    // http://en.wikipedia.org/wiki/Laplace_distribution#Cumulative_distribution_function
-    if(vec[0] < 0.5)
-        vec[0] = center + width*log(2.*vec[0]);
-    else
-        vec[0] = center - width*log(2. - 2.*vec[0]);
+    // inverse CDF of the Jeffreys distribution [0.2d - 365000d]
+    vec[0] = 0.2*pow(365000./0.2, vec[0]);

-    // inverse CDF of the Exponential distribution
-    vec[1] = -mu*log(1. - vec[1]);
+    // inverse CDF of the Modified Jeffreys distribution [1, 2129]
+    vec[1] = 1.*( pow(1.+2129./1., vec[1]) - 1.);

// inverse CDF of the Uniform distribution [0, 2pi]
vec[2] = 2.*M_PI*vec[2];
@@ -105,15 +61,11 @@ void MyDistribution::from_uniform(std::vector<double>& vec) const

void MyDistribution::to_uniform(std::vector<double>& vec) const
{
-    // CDF of the Laplace distribution
-    // http://en.wikipedia.org/wiki/Laplace_distribution#Cumulative_distribution_function
-    if(vec[0] < center)
-        vec[0] = 0.5*exp((vec[0] - center)/width);
-    else
-        vec[0] = 1. - 0.5*exp((center - vec[0])/width);

-    // CDF of the Exponential distribution
-    vec[1] = 1. - exp(-vec[1]/mu);
+    // CDF of the Jeffreys distribution [0.2d - 365000d]
+    vec[0] = log(vec[0]/0.2) / log(365000./0.2);

+    // CDF of the Modified Jeffreys distribution [1, 2129]
+    vec[1] = log(1.+vec[1]/1.) / log(1.+2129./1.);

// CDF of the Uniform distribution [0, 2pi]
vec[2] = vec[2]/(2.*M_PI);
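The new from_uniform/to_uniform pair replaces the Laplace and Exponential transforms with a Jeffreys (log-uniform) prior on the period over [0.2, 365000] days and a modified Jeffreys prior on the semi-amplitude with knee 1 and upper limit 2129 (metres per second, per the earlier comments). A quick consistency check of the two transforms, written in numpy with illustrative function names:

import numpy as np

def period_from_uniform(u):
    # inverse CDF of the Jeffreys prior on [0.2, 365000] days
    return 0.2 * (365000.0 / 0.2) ** u

def period_to_uniform(P):
    return np.log(P / 0.2) / np.log(365000.0 / 0.2)

def amp_from_uniform(u):
    # inverse CDF of the modified Jeffreys prior, knee 1, upper limit 2129
    return 1.0 * ((1.0 + 2129.0 / 1.0) ** u - 1.0)

def amp_to_uniform(K):
    return np.log(1.0 + K / 1.0) / np.log(1.0 + 2129.0 / 1.0)

u = np.random.rand(1000)
assert np.allclose(period_to_uniform(period_from_uniform(u)), u)
assert np.allclose(amp_to_uniform(amp_from_uniform(u)), u)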
25 changes: 17 additions & 8 deletions src/MyModel.cpp
@@ -10,7 +10,7 @@ using namespace std;
using namespace DNest3;

MyModel::MyModel()
-:objects(5, 10, false, MyDistribution())
+:objects(5, 3, false, MyDistribution())
,mu(Data::get_instance().get_t().size())
{

@@ -27,7 +27,7 @@ void MyModel::fromPrior()
extra_sigma = exp(tan(M_PI*(0.97*randomU() - 0.485)));
// prior for shape parameter of sampling (likelihood) t-distribution
// uniform between log(0.1) and log(1000)
-    nu = exp(log(0.1) + log(1000.)*randomU());
+    //nu = exp(log(0.1) + log(1000.)*randomU());
calculate_mu();
}

@@ -57,7 +57,7 @@ void MyModel::calculate_mu()
vector<double> arg, evaluations;
for(size_t j=0; j<components.size(); j++)
{
-        T = exp(components[j][0]);
+        T = components[j][0];
A = components[j][1];
phi = components[j][2];
v0 = sqrt(1. - components[j][3]);
@@ -90,10 +90,10 @@ double MyModel::perturb()
extra_sigma = tan(M_PI*(0.97*extra_sigma - 0.485));
extra_sigma = exp(extra_sigma);

-        nu = log(nu);
+        /* nu = log(nu);
nu += log(1000.)*randh();
wrap(nu, log(0.1), log(1000.));
-        nu = exp(nu);
+        nu = exp(nu);*/
}
else
{
@@ -125,14 +125,23 @@ double MyModel::logLikelihood() const
// http://en.wikipedia.org/wiki/Student%27s_t-distribution#In_terms_of_scaling_parameter_.CF.83.2C_or_.CF.832
// Note that the variance of this distribution depends not only
// on s but also on the degrees of freedom nu
-    for(size_t i=0; i<y.size(); i++)
+    /* for(size_t i=0; i<y.size(); i++)
{
var = sig[i]*sig[i] + extra_sigma*extra_sigma;
logL += gsl_sf_lngamma(0.5*(nu + 1.)) - gsl_sf_lngamma(0.5*nu)
- 0.5*log(M_PI*nu) - 0.5*log(var)
- 0.5*(nu + 1.)*log(1. + pow(y[i] - mu[i], 2)/var/nu);
*/


+    // the likelihood is a Gaussian
+    for(size_t i=0; i<y.size(); i++)
+    {
+        var = sig[i]*sig[i] + extra_sigma*extra_sigma;
+        logL += - 0.5*log(2.*M_PI) - 0.5*log(var) - 0.5*pow(y[i] - mu[i], 2)/var;
+    }

/*cout<<logL<<'\n';*/
return logL;
}
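The Student-t likelihood (with its nu degrees-of-freedom parameter) is commented out above and replaced by a Gaussian whose per-point variance adds the extra_sigma "jitter" term in quadrature to each quoted uncertainty; this is also why nu disappears from fromPrior(), perturb() and print(). For reference, the same Gaussian log-likelihood written in numpy (array names are illustrative):

import numpy as np

def gaussian_loglike(y, mu, sig, extra_sigma):
    # per-point variance: quoted error plus jitter, added in quadrature
    var = sig**2 + extra_sigma**2
    return np.sum(-0.5*np.log(2.0*np.pi) - 0.5*np.log(var) - 0.5*(y - mu)**2/var)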

@@ -177,14 +186,14 @@ void MyModel::print(std::ostream& out) const

// for(size_t i=0; i<signal.size(); i++)
// out<<signal[i]<<' ';
-    out<<extra_sigma<<' '<<nu<<' ';
+    out<<extra_sigma<<' '<< 0. <<' ';
out<<' '<<staleness<<' ';
out<<background<<' ';
objects.print(out);
}

string MyModel::description() const
{
return string("extra_sigma nu staleness m0 num_dimensions max_num_components dist.print() num_components components");
return string("extra_sigma nu' staleness m0 num_dimensions max_num_components dist.print() num_components components");
}