17 changes: 11 additions & 6 deletions src/hotspot/share/utilities/concurrentHashTable.hpp
@@ -70,7 +70,7 @@ class ConcurrentHashTable : public CHeapObj<MT> {
class Node {
private:
DEBUG_ONLY(size_t _saved_hash);
Node * volatile _next;
Atomic<Node*> _next;
VALUE _value;
public:
Node(const VALUE& value, Node* next = nullptr)
@@ -80,8 +80,8 @@ class ConcurrentHashTable : public CHeapObj<MT> {
}

Node* next() const;
void set_next(Node* node) { _next = node; }
Node* const volatile * next_ptr() { return &_next; }
void set_next(Node* node) { _next.store_relaxed(node); }
const Atomic<Node*>* next_ptr() const { return &_next; }
#ifdef ASSERT
size_t saved_hash() const { return _saved_hash; }
void set_saved_hash(size_t hash) { _saved_hash = hash; }
@@ -123,7 +123,7 @@ class ConcurrentHashTable : public CHeapObj<MT> {
// unlocked -> locked -> redirect
// Locked state only applies to an updater.
// Readers only check for redirect.
Node * volatile _first;
Atomic<Node*> _first;

static const uintptr_t STATE_LOCK_BIT = 0x1;
static const uintptr_t STATE_REDIRECT_BIT = 0x2;
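
The comment and constants above show that a bucket keeps its lock/redirect state in the two low-order bits of the _first pointer. As an illustrative sketch (assumed shapes, not taken from this patch), the helpers the diff relies on — set_state, clear_state and clear_set_state — amount to plain bit operations on the pointer value, which only works because Node objects are aligned strictly enough that the low bits are always zero:

  // Sketch only: assumed bodies of the state helpers, built from the
  // STATE_LOCK_BIT/STATE_REDIRECT_BIT constants declared above.
  static const uintptr_t STATE_MASK = STATE_LOCK_BIT | STATE_REDIRECT_BIT;

  static Node* set_state(Node* n, uintptr_t state) {
    // OR the requested state bit(s) into the low bits of the pointer.
    return reinterpret_cast<Node*>(reinterpret_cast<uintptr_t>(n) | state);
  }

  static Node* clear_state(Node* n) {
    // Strip the state bits to recover the real Node pointer.
    return reinterpret_cast<Node*>(reinterpret_cast<uintptr_t>(n) & ~STATE_MASK);
  }

  static Node* clear_set_state(Node* n, Node* state) {
    // Take the pointer from 'n' but carry over the state bits from 'state'.
    return reinterpret_cast<Node*>(reinterpret_cast<uintptr_t>(clear_state(n)) |
                                   (reinterpret_cast<uintptr_t>(state) & STATE_MASK));
  }
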
@@ -157,20 +157,25 @@ class ConcurrentHashTable : public CHeapObj<MT> {
// A bucket is only one pointer with the embedded state.
Bucket() : _first(nullptr) {};

NONCOPYABLE(Bucket);

// Copy bucket's first entry to this.
void assign(const Bucket& bucket);

// Get the first pointer unmasked.
Node* first() const;

// Get a pointer to the const first pointer. Do not dereference this
// pointer; the pointer pointed to _may_ contain an embedded state. Such a
// pointer should only be used as input to release_assign_node_ptr.
Node* const volatile * first_ptr() { return &_first; }
const Atomic<Node*>* first_ptr() const { return &_first; }

// This is the only place where a pointer to a Node pointer that potentially
// is _first should be changed. Otherwise we destroy the embedded state. We
// only give out pointer to const Node pointer to avoid accidental
// assignment, thus here we must cast const part away. Method is not static
// due to an assert.
void release_assign_node_ptr(Node* const volatile * dst, Node* node) const;
void release_assign_node_ptr(const Atomic<Node*>* dst, Node* node) const;
Comment on lines 175 to +178

Member:
The const part of the comment no longer seems relevant.

Author:
The const is still relevant, for the reason described. It's at least arguable that it's kind of sketchy to do this. It certainly took me a bit of study to understand. Note that I moved the position of the const qualifier to its more usual location (in our code) for declaring a constant object (the Atomic<Node*> is atomic). It could instead be written as Atomic<Node*> const*, retaining the ordering from the original. Also see the implementation, where we need to cast away the const qualifier, which is now being done with const_cast rather than a C-style cast (that was also stripping off the volatile qualifier, which the use of AtomicAccess implicitly reapplied).


// This method assigns this bucket's last Node's next ptr to the input Node.
void release_assign_last_node_next(Node* node);
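The review thread above discusses why first_ptr() hands out a pointer to a const Atomic<Node*> and why release_assign_node_ptr must then cast the const away. A minimal standalone sketch of that pattern follows; MyAtomic is a stand-in for the Atomic<T> wrapper and only the load_relaxed/release_store members used here are assumed (the real release_assign_node_ptr is additionally const, and non-static only because of an assert, a detail the sketch drops):

  #include <atomic>

  struct Node;

  // Stand-in for the Atomic<T> wrapper; only the members used below are assumed.
  template <typename T>
  class MyAtomic {
    std::atomic<T> _value;
   public:
    MyAtomic() : _value(T()) {}
    T load_relaxed() const { return _value.load(std::memory_order_relaxed); }
    void release_store(T v) { _value.store(v, std::memory_order_release); }
  };

  struct Bucket {
    MyAtomic<Node*> _first;

    // Callers only ever get a pointer-to-const, so they cannot store through
    // it by accident.
    const MyAtomic<Node*>* first_ptr() const { return &_first; }

    // The single place allowed to write through such a handle. Casting the
    // const away is safe here because Bucket objects are never actually
    // created const; the const-ness exists only to funnel writes through
    // this method.
    void release_assign_node_ptr(const MyAtomic<Node*>* dst, Node* node) {
      MyAtomic<Node*>* writable = const_cast<MyAtomic<Node*>*>(dst);
      writable->release_store(node);
    }
  };

With that shape, writers take the bucket lock, walk the first_ptr()/next_ptr() handles, and perform the actual store only through release_assign_node_ptr, which is the pattern visible in release_assign_last_node_next and internal_remove in the diff below.
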
51 changes: 29 additions & 22 deletions src/hotspot/share/utilities/concurrentHashTable.inline.hpp
@@ -63,7 +63,7 @@ inline typename ConcurrentHashTable<CONFIG, MT>::Node*
ConcurrentHashTable<CONFIG, MT>::
Node::next() const
{
return AtomicAccess::load_acquire(&_next);
return _next.load_acquire();
}

// Bucket
@@ -72,19 +72,26 @@ inline typename ConcurrentHashTable<CONFIG, MT>::Node*
ConcurrentHashTable<CONFIG, MT>::
Bucket::first_raw() const
{
return AtomicAccess::load_acquire(&_first);
return _first.load_acquire();
}

template <typename CONFIG, MemTag MT>
inline void ConcurrentHashTable<CONFIG, MT>::
Bucket::release_assign_node_ptr(
typename ConcurrentHashTable<CONFIG, MT>::Node* const volatile * dst,
const Atomic<typename ConcurrentHashTable<CONFIG, MT>::Node*>* dst,
typename ConcurrentHashTable<CONFIG, MT>::Node* node) const
{
// Due to this assert this method is not static.
assert(is_locked(), "Must be locked.");
Node** tmp = (Node**)dst;
AtomicAccess::release_store(tmp, clear_set_state(node, *dst));
Atomic<Node*>* tmp = const_cast<Atomic<Node*>*>(dst);
tmp->release_store(clear_set_state(node, dst->load_relaxed()));
}

template <typename CONFIG, MemTag MT>
inline void ConcurrentHashTable<CONFIG, MT>::
Bucket::assign(const Bucket& bucket)
{
_first.store_relaxed(bucket._first.load_relaxed());
}

template <typename CONFIG, MemTag MT>
@@ -93,7 +100,7 @@ ConcurrentHashTable<CONFIG, MT>::
Bucket::first() const
{
// We strip the states bit before returning the ptr.
return clear_state(AtomicAccess::load_acquire(&_first));
return clear_state(_first.load_acquire());
}

template <typename CONFIG, MemTag MT>
@@ -134,9 +141,9 @@ inline void ConcurrentHashTable<CONFIG, MT>::
typename ConcurrentHashTable<CONFIG, MT>::Node* node)
{
assert(is_locked(), "Must be locked.");
Node* const volatile * ret = first_ptr();
while (clear_state(*ret) != nullptr) {
ret = clear_state(*ret)->next_ptr();
const Atomic<Node*>* ret = first_ptr();
while (clear_state(ret->load_relaxed()) != nullptr) {
ret = clear_state(ret->load_relaxed())->next_ptr();
}
release_assign_node_ptr(ret, node);
}
@@ -150,7 +157,7 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
if (is_locked()) {
return false;
}
if (AtomicAccess::cmpxchg(&_first, expect, node) == expect) {
if (_first.compare_exchange(expect, node) == expect) {
return true;
}
return false;
@@ -165,7 +172,7 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
}
// We will expect a clean first pointer.
Node* tmp = first();
if (AtomicAccess::cmpxchg(&_first, tmp, set_state(tmp, STATE_LOCK_BIT)) == tmp) {
if (_first.compare_exchange(tmp, set_state(tmp, STATE_LOCK_BIT)) == tmp) {
return true;
}
return false;
@@ -178,15 +185,15 @@ inline void ConcurrentHashTable<CONFIG, MT>::
assert(is_locked(), "Must be locked.");
assert(!have_redirect(),
"Unlocking a bucket after it has reached terminal state.");
AtomicAccess::release_store(&_first, clear_state(first()));
_first.release_store(clear_state(first()));
}

template <typename CONFIG, MemTag MT>
inline void ConcurrentHashTable<CONFIG, MT>::
Bucket::redirect()
{
assert(is_locked(), "Must be locked.");
AtomicAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
_first.release_store(set_state(_first.load_relaxed(), STATE_REDIRECT_BIT));
}

// InternalTable
@@ -413,8 +420,8 @@ inline void ConcurrentHashTable<CONFIG, MT>::
bucket->lock();

size_t odd_index = even_index + _table->_size;
_new_table->get_buckets()[even_index] = *bucket;
_new_table->get_buckets()[odd_index] = *bucket;
_new_table->get_buckets()[even_index].assign(*bucket);
_new_table->get_buckets()[odd_index].assign(*bucket);

// Moves lockers go to new table, where they will wait until unlock() below.
bucket->redirect(); /* Must release stores above */
@@ -445,7 +452,7 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
{
Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
assert(bucket->is_locked(), "Must be locked.");
Node* const volatile * rem_n_prev = bucket->first_ptr();
const Atomic<Node*>* rem_n_prev = bucket->first_ptr();
Node* rem_n = bucket->first();
while (rem_n != nullptr) {
if (lookup_f.equals(rem_n->value())) {
@@ -534,7 +541,7 @@ inline void ConcurrentHashTable<CONFIG, MT>::

size_t dels = 0;
Node* ndel[StackBufferSize];
Node* const volatile * rem_n_prev = bucket->first_ptr();
const Atomic<Node*>* rem_n_prev = bucket->first_ptr();
Node* rem_n = bucket->first();
while (rem_n != nullptr) {
if (lookup_f.is_dead(rem_n->value())) {
@@ -643,8 +650,8 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
return false;
}
Node* delete_me = nullptr;
Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
const Atomic<Node*>* even = new_table->get_bucket(even_index)->first_ptr();
const Atomic<Node*>* odd = new_table->get_bucket(odd_index)->first_ptr();
while (aux != nullptr) {
bool dead_hash = false;
size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
@@ -742,11 +749,11 @@ inline void ConcurrentHashTable<CONFIG, MT>::
b_old_even->lock();
b_old_odd->lock();

_new_table->get_buckets()[bucket_it] = *b_old_even;
_new_table->get_buckets()[bucket_it].assign(*b_old_even);

// Put chains together.
_new_table->get_bucket(bucket_it)->
release_assign_last_node_next(*(b_old_odd->first_ptr()));
release_assign_last_node_next(b_old_odd->first_ptr()->load_relaxed());

b_old_even->redirect();
b_old_odd->redirect();
@@ -981,7 +988,7 @@ inline size_t ConcurrentHashTable<CONFIG, MT>::
size_t num_del, Node** ndel, GrowableArrayCHeap<Node*, MT>& extra)
{
size_t dels = 0;
Node* const volatile * rem_n_prev = bucket->first_ptr();
const Atomic<Node*>* rem_n_prev = bucket->first_ptr();
Node* rem_n = bucket->first();
while (rem_n != nullptr) {
if (eval_f(rem_n->value())) {