New version (#2181)
* Fix style with clang-format (before releasing 8.8.0)

* Update version.txt for release 8.8.0

* Revert "Fix style with clang-format (before releasing 8.8.0)"

This reverts commit df08035.

* Successfully ran clang-format
JohnLangford authored Dec 7, 2019
1 parent 237add8 commit cb914a0
Showing 101 changed files with 1,599 additions and 1,830 deletions.
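
Almost everything below is mechanical reformatting. The style visible in the reformatted lines (120-column limit, two-space indents, access modifiers flush with the class, constructor initializer lists broken before the comma, namespace bodies unindented with `// namespace` closing comments, short functions and loops collapsed onto a single line, two spaces before trailing comments) is consistent with a clang-format configuration roughly like the following sketch. This is inferred from the diffs, not copied from the repository's actual .clang-format file, so the real keys and values may differ:

# Hypothetical .clang-format sketch, inferred from the changes in this commit;
# the repository's real configuration may differ.
Language: Cpp
ColumnLimit: 120
IndentWidth: 2
AccessModifierOffset: -2                   # 'private:' / 'public:' flush with the class
NamespaceIndentation: None                 # namespace bodies are no longer indented
FixNamespaceComments: true                 # adds the '} // namespace VW' closers
BreakConstructorInitializers: BeforeComma  # leading-comma initializer lists
AllowShortFunctionsOnASingleLine: All      # e.g. the collapsed first() and ~bs()
AllowShortLoopsOnASingleLine: true         # e.g. the collapsed set_default loops
SpacesBeforeTrailingComments: 2

With a file like this at the repository root, reformatting the tree is just a matter of running clang-format -i over the vowpalwabbit/*.cc and vowpalwabbit/*.h sources.
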
2 changes: 1 addition & 1 deletion version.txt
@@ -1 +1 @@
-8.7.0
+8.8.0
2 changes: 1 addition & 1 deletion vowpalwabbit/accumulate.cc
@@ -121,7 +121,7 @@ void accumulate_weighted_avg(vw& all, parameters& weights)
all.trace_message << "Weighted averaging is implemented only for adaptive gradient, use accumulate_avg instead\n";
return;
}

uint32_t length = 1 << all.num_bits; // This is the number of parameters
float* local_weights = new float[length];

Expand Down
10 changes: 2 additions & 8 deletions vowpalwabbit/array_parameters.h
@@ -99,10 +99,7 @@ class sparse_parameters
sparse_parameters(const sparse_parameters& other) { shallow_copy(other); }
sparse_parameters(sparse_parameters&&) = delete;

-  weight* first()
-  {
-    THROW_OR_RETURN("Allreduce currently not supported in sparse", nullptr);
-  }
+  weight* first() { THROW_OR_RETURN("Allreduce currently not supported in sparse", nullptr); }

// iterator with stride
iterator begin()
@@ -209,10 +206,7 @@ class sparse_parameters
}

#ifndef _WIN32
-  void share(size_t /* length */)
-  {
-    THROW_OR_RETURN("Operation not supported on Windows");
-  }
+  void share(size_t /* length */) { THROW_OR_RETURN("Operation not supported on Windows"); }
#endif

~sparse_parameters()
59 changes: 29 additions & 30 deletions vowpalwabbit/array_parameters_dense.h
@@ -8,21 +8,19 @@ typedef float weight;
template <typename T>
class dense_iterator
{
- private:
+private:
T* _current;
T* _begin;
uint32_t _stride;

- public:
+public:
typedef std::forward_iterator_tag iterator_category;
typedef T value_type;
typedef std::ptrdiff_t difference_type;
typedef T* pointer;
typedef T& reference;

-  dense_iterator(T* current, T* begin, uint32_t stride)
-      : _current(current), _begin(begin), _stride(stride)
-  { }
+  dense_iterator(T* current, T* begin, uint32_t stride) : _current(current), _begin(begin), _stride(stride) {}

T& operator*() { return *_current; }

@@ -40,37 +38,39 @@ class dense_iterator

class dense_parameters
{
- private:
+private:
weight* _begin;
uint64_t _weight_mask; // (stride*(1 << num_bits) -1)
uint32_t _stride_shift;
-  bool _seeded; // whether the instance is sharing model state with others
+  bool _seeded;  // whether the instance is sharing model state with others

- public:
+public:
typedef dense_iterator<weight> iterator;
typedef dense_iterator<const weight> const_iterator;
dense_parameters(size_t length, uint32_t stride_shift = 0)
-      : _begin(calloc_mergable_or_throw<weight>(length << stride_shift)),
-        _weight_mask((length << stride_shift) - 1),
-        _stride_shift(stride_shift),
-        _seeded(false)
-  { }
+      : _begin(calloc_mergable_or_throw<weight>(length << stride_shift))
+      , _weight_mask((length << stride_shift) - 1)
+      , _stride_shift(stride_shift)
+      , _seeded(false)
+  {
+  }

-  dense_parameters()
-      : _begin(nullptr), _weight_mask(0), _stride_shift(0), _seeded(false)
-  {}
+  dense_parameters() : _begin(nullptr), _weight_mask(0), _stride_shift(0), _seeded(false) {}

bool not_null() { return (_weight_mask > 0 && _begin != nullptr); }

dense_parameters(const dense_parameters& other) { shallow_copy(other); }
dense_parameters(dense_parameters&&) = delete;

-  weight* first() { return _begin; } //TODO: Temporary fix for allreduce.
-  //iterator with stride
+  weight* first()
+  {
+    return _begin;
+  }  // TODO: Temporary fix for allreduce.
+  // iterator with stride
iterator begin() { return iterator(_begin, _begin, stride()); }
iterator end() { return iterator(_begin + _weight_mask + 1, _begin, stride()); }

-  //const iterator
+  // const iterator
const_iterator cbegin() { return const_iterator(_begin, _begin, stride()); }
const_iterator cend() { return const_iterator(_begin + _weight_mask + 1, _begin, stride()); }

@@ -87,27 +87,26 @@ class dense_parameters

inline weight& strided_index(size_t index) { return operator[](index << _stride_shift); }

-  template<class R, class T> void set_default(R& info)
+  template <class R, class T>
+  void set_default(R& info)
{
iterator iter = begin();
-    for (size_t i = 0; iter != end(); ++iter, i += stride())
-      T::func(*iter, info, iter.index());
+    for (size_t i = 0; iter != end(); ++iter, i += stride()) T::func(*iter, info, iter.index());
}

-  template<class T> void set_default()
+  template <class T>
+  void set_default()
{
iterator iter = begin();
-    for (size_t i = 0; iter != end(); ++iter, i += stride())
-      T::func(*iter, iter.index());
+    for (size_t i = 0; iter != end(); ++iter, i += stride()) T::func(*iter, iter.index());
}

void set_zero(size_t offset)
{
-    for (iterator iter = begin(); iter != end(); ++iter)
-      (&(*iter))[offset] = 0;
+    for (iterator iter = begin(); iter != end(); ++iter) (&(*iter))[offset] = 0;
}

-  uint64_t mask() const { return _weight_mask; }
+  uint64_t mask() const { return _weight_mask; }

uint64_t seeded() const { return _seeded; }

@@ -121,8 +120,8 @@ class dense_parameters
#ifndef DISABLE_SHARED_WEIGHTS
void share(size_t length)
{
-    float* shared_weights = (float*)mmap(0, (length << _stride_shift) * sizeof(float),
-        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+    float* shared_weights = (float*)mmap(
+        0, (length << _stride_shift) * sizeof(float), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
size_t float_count = length << _stride_shift;
weight* dest = shared_weights;
memcpy(dest, _begin, float_count * sizeof(float));
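
One note on the weight-table arithmetic in this file: `_weight_mask` is documented as `(stride*(1 << num_bits) - 1)`, so masking keeps any raw index inside the allocated buffer, and `strided_index(i)` shifts a parameter index up to the first of its stride slots. A minimal standalone sketch of that arithmetic, using made-up sizes rather than anything from this commit:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative only: mirrors the dense_parameters indexing shown above,
// with hypothetical sizes (length corresponds to 1 << all.num_bits).
int main()
{
  const size_t length = size_t(1) << 18;  // 1 << num_bits parameter slots
  const uint32_t stride_shift = 2;        // stride = 1 << 2 = 4 floats per parameter
  const uint64_t weight_mask = (uint64_t(length) << stride_shift) - 1;

  // operator[] masks a raw (already strided) index into the buffer:
  const uint64_t raw = 0xDEADBEEFULL;
  assert((raw & weight_mask) < (uint64_t(length) << stride_shift));

  // strided_index(i) addresses the first of the stride slots for parameter i:
  const size_t i = 12345;
  const uint64_t first_slot = (uint64_t(i) << stride_shift) & weight_mask;
  assert(first_slot % (uint64_t(1) << stride_shift) == 0);
  return 0;
}
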
42 changes: 20 additions & 22 deletions vowpalwabbit/autolink.cc
@@ -10,26 +10,27 @@ using namespace VW::config;

namespace VW
{
-  struct autolink
-  {
-    autolink(uint32_t d, uint32_t stride_shift);
-    void predict(LEARNER::single_learner& base, example& ec);
-    void learn(LEARNER::single_learner& base, example& ec);
-
-   private:
-    void prepare_example(LEARNER::single_learner& base, example& ec);
-    void reset_example(example& ec);
-
-    // degree of the polynomial
-    const uint32_t _poly_degree;
-    const uint32_t _stride_shift;
-    static constexpr int AUTOCONSTANT = 524267083;
-  };
-}
+struct autolink
+{
+  autolink(uint32_t d, uint32_t stride_shift);
+  void predict(LEARNER::single_learner& base, example& ec);
+  void learn(LEARNER::single_learner& base, example& ec);
+
+private:
+  void prepare_example(LEARNER::single_learner& base, example& ec);
+  void reset_example(example& ec);
+
+  // degree of the polynomial
+  const uint32_t _poly_degree;
+  const uint32_t _stride_shift;
+  static constexpr int AUTOCONSTANT = 524267083;
+};
+}  // namespace VW

VW::autolink::autolink(uint32_t poly_degree, uint32_t stride_shift)
-    : _poly_degree(poly_degree), _stride_shift(stride_shift)
-{}
+    : _poly_degree(poly_degree), _stride_shift(stride_shift)
+{
+}

void VW::autolink::predict(LEARNER::single_learner& base, example& ec)
{
@@ -93,8 +94,5 @@ LEARNER::base_learner* autolink_setup(options_i& options, vw& all)

auto autolink_reduction = scoped_calloc_or_throw<VW::autolink>(d, all.weights.stride_shift());
return make_base(init_learner(
-      autolink_reduction,
-      as_singleline(setup_base(options, all)),
-      predict_or_learn<true>,
-      predict_or_learn<false>));
+      autolink_reduction, as_singleline(setup_base(options, all)), predict_or_learn<true>, predict_or_learn<false>));
}
3 changes: 2 additions & 1 deletion vowpalwabbit/beam.h
@@ -252,7 +252,8 @@ class beam
if (is_equivalent(A[i].data, A[j].data))
{
A[j].active = false; // TODO: if kbest is on, do recomb_friends
-        // std::cerr << "equivalent " << i << "," << j << ": " << ((size_t)A[i].data) << " and " << ((size_t)A[j].data)
+        // std::cerr << "equivalent " << i << "," << j << ": " << ((size_t)A[i].data) << " and " <<
+        // ((size_t)A[j].data)
// << std::endl;
}
}
9 changes: 5 additions & 4 deletions vowpalwabbit/bfgs.cc
@@ -1049,10 +1049,11 @@ void save_load(bfgs& b, io_buf& model_file, bool read, bool text)

if (!all->quiet)
std::cerr << "m = " << m << std::endl
<< "Allocated "
<< ((long unsigned int)all->length() * (sizeof(float) * (b.mem_stride) + (sizeof(weight) << stride_shift)) >>
20)
<< "M for weights and mem" << std::endl;
<< "Allocated "
<< ((long unsigned int)all->length() *
(sizeof(float) * (b.mem_stride) + (sizeof(weight) << stride_shift)) >>
20)
<< "M for weights and mem" << std::endl;

b.net_time = 0.0;
ftime(&b.t_start_global);
2 changes: 1 addition & 1 deletion vowpalwabbit/binary.cc
@@ -19,7 +19,7 @@ void predict_or_learn(char&, LEARNER::single_learner& base, example& ec)
if (ec.l.simple.label != FLT_MAX)
{
if (fabs(ec.l.simple.label) != 1.f)
-    std::cout << "You are using label " << ec.l.simple.label << " not -1 or 1 as loss function expects!" << std::endl;
+      std::cout << "You are using label " << ec.l.simple.label << " not -1 or 1 as loss function expects!" << std::endl;
else if (ec.l.simple.label == ec.pred.scalar)
ec.loss = 0.;
else
5 changes: 1 addition & 4 deletions vowpalwabbit/bs.cc
@@ -30,10 +30,7 @@ struct bs
vw* all; // for raw prediction and loss
std::shared_ptr<rand_state> _random_state;

-  ~bs()
-  {
-    delete pred_vec;
-  }
+  ~bs() { delete pred_vec; }
};

void bs_predict_mean(vw& all, example& ec, std::vector<double>& pred_vec)
2 changes: 1 addition & 1 deletion vowpalwabbit/cache.h
@@ -19,5 +19,5 @@ void output_features(io_buf& cache, unsigned char index, features& fs, uint64_t

namespace VW
{
-  uint32_t convert(size_t number);
+uint32_t convert(size_t number);
}
14 changes: 8 additions & 6 deletions vowpalwabbit/cb.cc
@@ -49,9 +49,10 @@ size_t read_cached_label(shared_data*, void* v, io_buf& cache)
return total;
}

-float weight(void* v) {
-  CB::label* ld = (CB::label*)v;
-  return ld->weight;
+float weight(void* v)
+{
+  CB::label* ld = (CB::label*)v;
+  return ld->weight;
}

char* bufcache_label(CB::label* ld, char* c)
@@ -217,7 +218,8 @@ void print_update(vw& all, bool is_test, example& ec, multi_ex* ec_seq, bool act

namespace CB_EVAL
{
-float weight(void* v) {
+float weight(void* v)
+{
CB_EVAL::label* ld = (CB_EVAL::label*)v;
return ld->event.weight;
}
@@ -287,6 +289,6 @@ void parse_label(parser* p, shared_data* sd, void* v, v_array<substring>& words)
words.begin()--;
}

-label_parser cb_eval = {default_label, parse_label, cache_label, read_cached_label, delete_label, weight,
-    copy_label, test_label, sizeof(CB_EVAL::label)};
+label_parser cb_eval = {default_label, parse_label, cache_label, read_cached_label, delete_label, weight, copy_label,
+    test_label, sizeof(CB_EVAL::label)};
} // namespace CB_EVAL
2 changes: 1 addition & 1 deletion vowpalwabbit/cb.h
@@ -28,7 +28,7 @@ struct label
float weight;
};

-extern label_parser cb_label;                 // for learning
+extern label_parser cb_label;  // for learning
bool ec_is_example_header(example const& ec); // example headers look like "shared"

void print_update(vw& all, bool is_test, example& ec, std::vector<example*>* ec_seq, bool action_scores);