From 0278f373166ca5eac1ab9fe3b7afc2562d4aef92 Mon Sep 17 00:00:00 2001
From: Kim Barrett
Date: Sun, 14 Dec 2025 09:07:42 +0000
Subject: [PATCH] atomic _first & _next

---
 .../share/utilities/concurrentHashTable.hpp  | 17 ++++---
 .../utilities/concurrentHashTable.inline.hpp | 51 +++++++++++--------
 2 files changed, 40 insertions(+), 28 deletions(-)

diff --git a/src/hotspot/share/utilities/concurrentHashTable.hpp b/src/hotspot/share/utilities/concurrentHashTable.hpp
index 1a50ecabc3e07..676089abfec2f 100644
--- a/src/hotspot/share/utilities/concurrentHashTable.hpp
+++ b/src/hotspot/share/utilities/concurrentHashTable.hpp
@@ -70,7 +70,7 @@ class ConcurrentHashTable : public CHeapObj<MT> {
   class Node {
    private:
     DEBUG_ONLY(size_t _saved_hash);
-    Node * volatile _next;
+    Atomic<Node*> _next;
     VALUE _value;
    public:
     Node(const VALUE& value, Node* next = nullptr)
@@ -80,8 +80,8 @@ class ConcurrentHashTable : public CHeapObj<MT> {
     }
 
     Node* next() const;
-    void set_next(Node* node)         { _next = node; }
-    Node* const volatile * next_ptr() { return &_next; }
+    void set_next(Node* node)             { _next.store_relaxed(node); }
+    const Atomic<Node*>* next_ptr() const { return &_next; }
 #ifdef ASSERT
     size_t saved_hash() const         { return _saved_hash; }
     void set_saved_hash(size_t hash)  { _saved_hash = hash; }
@@ -123,7 +123,7 @@ class ConcurrentHashTable : public CHeapObj<MT> {
     // unlocked -> locked -> redirect
     // Locked state only applies to an updater.
     // Reader only check for redirect.
-    Node * volatile _first;
+    Atomic<Node*> _first;
 
     static const uintptr_t STATE_LOCK_BIT = 0x1;
     static const uintptr_t STATE_REDIRECT_BIT = 0x2;
@@ -157,20 +157,25 @@ class ConcurrentHashTable : public CHeapObj<MT> {
     // A bucket is only one pointer with the embedded state.
     Bucket() : _first(nullptr) {};
 
+    NONCOPYABLE(Bucket);
+
+    // Copy bucket's first entry to this.
+    void assign(const Bucket& bucket);
+
     // Get the first pointer unmasked.
     Node* first() const;
 
     // Get a pointer to the const first pointer. Do not deference this
     // pointer, the pointer pointed to _may_ contain an embedded state. Such
     // pointer should only be used as input to release_assign_node_ptr.
-    Node* const volatile * first_ptr() { return &_first; }
+    const Atomic<Node*>* first_ptr() const { return &_first; }
 
     // This is the only place where a pointer to a Node pointer that potentially
     // is _first should be changed. Otherwise we destroy the embedded state. We
     // only give out pointer to const Node pointer to avoid accidental
     // assignment, thus here we must cast const part away. Method is not static
     // due to an assert.
-    void release_assign_node_ptr(Node* const volatile * dst, Node* node) const;
+    void release_assign_node_ptr(const Atomic<Node*>* dst, Node* node) const;
 
     // This method assigns this buckets last Node next ptr to input Node.
     void release_assign_last_node_next(Node* node);
diff --git a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
index b96a487324c0a..52a821f09a94e 100644
--- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
+++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
@@ -63,7 +63,7 @@ inline typename ConcurrentHashTable<CONFIG, MT>::Node*
 ConcurrentHashTable<CONFIG, MT>::
   Node::next() const
 {
-  return AtomicAccess::load_acquire(&_next);
+  return _next.load_acquire();
 }
 
 // Bucket
@@ -72,19 +72,26 @@ inline typename ConcurrentHashTable<CONFIG, MT>::Node*
 ConcurrentHashTable<CONFIG, MT>::
   Bucket::first_raw() const
 {
-  return AtomicAccess::load_acquire(&_first);
+  return _first.load_acquire();
 }
 
 template <typename CONFIG, MemTag MT>
 inline void ConcurrentHashTable<CONFIG, MT>::
   Bucket::release_assign_node_ptr(
-    typename ConcurrentHashTable<CONFIG, MT>::Node* const volatile * dst,
+    const Atomic<typename ConcurrentHashTable<CONFIG, MT>::Node*>* dst,
     typename ConcurrentHashTable<CONFIG, MT>::Node* node) const
 {
   // Due to this assert this methods is not static.
   assert(is_locked(), "Must be locked.");
-  Node** tmp = (Node**)dst;
-  AtomicAccess::release_store(tmp, clear_set_state(node, *dst));
+  Atomic<Node*>* tmp = const_cast<Atomic<Node*>*>(dst);
+  tmp->release_store(clear_set_state(node, dst->load_relaxed()));
+}
+
+template <typename CONFIG, MemTag MT>
+inline void ConcurrentHashTable<CONFIG, MT>::
+  Bucket::assign(const Bucket& bucket)
+{
+  _first.store_relaxed(bucket._first.load_relaxed());
 }
 
 template <typename CONFIG, MemTag MT>
@@ -93,7 +100,7 @@ ConcurrentHashTable<CONFIG, MT>::
   Bucket::first() const
 {
   // We strip the states bit before returning the ptr.
-  return clear_state(AtomicAccess::load_acquire(&_first));
+  return clear_state(_first.load_acquire());
 }
 
 template <typename CONFIG, MemTag MT>
@@ -134,9 +141,9 @@ inline void ConcurrentHashTable<CONFIG, MT>::
     typename ConcurrentHashTable<CONFIG, MT>::Node* node)
 {
   assert(is_locked(), "Must be locked.");
-  Node* const volatile * ret = first_ptr();
-  while (clear_state(*ret) != nullptr) {
-    ret = clear_state(*ret)->next_ptr();
+  const Atomic<Node*>* ret = first_ptr();
+  while (clear_state(ret->load_relaxed()) != nullptr) {
+    ret = clear_state(ret->load_relaxed())->next_ptr();
   }
   release_assign_node_ptr(ret, node);
 }
@@ -150,7 +157,7 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   if (is_locked()) {
     return false;
   }
-  if (AtomicAccess::cmpxchg(&_first, expect, node) == expect) {
+  if (_first.compare_exchange(expect, node) == expect) {
     return true;
   }
   return false;
@@ -165,7 +172,7 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   }
   // We will expect a clean first pointer.
   Node* tmp = first();
-  if (AtomicAccess::cmpxchg(&_first, tmp, set_state(tmp, STATE_LOCK_BIT)) == tmp) {
+  if (_first.compare_exchange(tmp, set_state(tmp, STATE_LOCK_BIT)) == tmp) {
     return true;
   }
   return false;
@@ -178,7 +185,7 @@ inline void ConcurrentHashTable<CONFIG, MT>::
   assert(is_locked(), "Must be locked.");
   assert(!have_redirect(),
          "Unlocking a bucket after it has reached terminal state.");
-  AtomicAccess::release_store(&_first, clear_state(first()));
+  _first.release_store(clear_state(first()));
 }
 
 template <typename CONFIG, MemTag MT>
@@ -186,7 +193,7 @@ inline void ConcurrentHashTable<CONFIG, MT>::
   Bucket::redirect()
 {
   assert(is_locked(), "Must be locked.");
-  AtomicAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
+  _first.release_store(set_state(_first.load_relaxed(), STATE_REDIRECT_BIT));
 }
 
 // InternalTable
@@ -413,8 +420,8 @@ inline void ConcurrentHashTable<CONFIG, MT>::
     bucket->lock();
 
     size_t odd_index = even_index + _table->_size;
-    _new_table->get_buckets()[even_index] = *bucket;
-    _new_table->get_buckets()[odd_index] = *bucket;
+    _new_table->get_buckets()[even_index].assign(*bucket);
+    _new_table->get_buckets()[odd_index].assign(*bucket);
 
     // Moves lockers go to new table, where they will wait until unlock() below.
     bucket->redirect(); /* Must release stores above */
@@ -445,7 +452,7 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
 {
   Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
   assert(bucket->is_locked(), "Must be locked.");
-  Node* const volatile * rem_n_prev = bucket->first_ptr();
+  const Atomic<Node*>* rem_n_prev = bucket->first_ptr();
   Node* rem_n = bucket->first();
   while (rem_n != nullptr) {
     if (lookup_f.equals(rem_n->value())) {
@@ -534,7 +541,7 @@ inline void ConcurrentHashTable<CONFIG, MT>::
 
   size_t dels = 0;
   Node* ndel[StackBufferSize];
-  Node* const volatile * rem_n_prev = bucket->first_ptr();
+  const Atomic<Node*>* rem_n_prev = bucket->first_ptr();
   Node* rem_n = bucket->first();
   while (rem_n != nullptr) {
     if (lookup_f.is_dead(rem_n->value())) {
@@ -643,8 +650,8 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
     return false;
   }
   Node* delete_me = nullptr;
-  Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
-  Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
+  const Atomic<Node*>* even = new_table->get_bucket(even_index)->first_ptr();
+  const Atomic<Node*>* odd = new_table->get_bucket(odd_index)->first_ptr();
   while (aux != nullptr) {
     bool dead_hash = false;
     size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
@@ -742,11 +749,11 @@ inline void ConcurrentHashTable<CONFIG, MT>::
     b_old_even->lock();
     b_old_odd->lock();
 
-    _new_table->get_buckets()[bucket_it] = *b_old_even;
+    _new_table->get_buckets()[bucket_it].assign(*b_old_even);
 
     // Put chains together.
     _new_table->get_bucket(bucket_it)->
-      release_assign_last_node_next(*(b_old_odd->first_ptr()));
+      release_assign_last_node_next(b_old_odd->first_ptr()->load_relaxed());
 
     b_old_even->redirect();
     b_old_odd->redirect();
@@ -981,7 +988,7 @@ inline size_t ConcurrentHashTable<CONFIG, MT>::
     size_t num_del, Node** ndel, GrowableArrayCHeap<Node*, MT>& extra)
 {
   size_t dels = 0;
-  Node* const volatile * rem_n_prev = bucket->first_ptr();
+  const Atomic<Node*>* rem_n_prev = bucket->first_ptr();
   Node* rem_n = bucket->first();
   while (rem_n != nullptr) {
     if (eval_f(rem_n->value())) {
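
Note: the new code above assumes a templated Atomic<T> wrapper exposing load_relaxed/store_relaxed, load_acquire, release_store, and a cmpxchg-style compare_exchange that returns the previously observed value. The actual HotSpot declaration of Atomic<T> is not part of this patch; a minimal stand-in with the same surface, written against std::atomic purely for illustration, could look like this:

    // Illustrative sketch only: mirrors the member names used by the patch,
    // not the actual HotSpot Atomic<T> implementation.
    #include <atomic>

    template <typename T>
    class Atomic {
      std::atomic<T> _value;
     public:
      Atomic(T v = T()) : _value(v) {}
      T load_relaxed() const  { return _value.load(std::memory_order_relaxed); }
      void store_relaxed(T v) { _value.store(v, std::memory_order_relaxed); }
      T load_acquire() const  { return _value.load(std::memory_order_acquire); }
      void release_store(T v) { _value.store(v, std::memory_order_release); }
      // Returns the value observed before the exchange, like AtomicAccess::cmpxchg.
      T compare_exchange(T expected, T desired) {
        _value.compare_exchange_strong(expected, desired);
        return expected;  // on failure, updated to the observed value
      }
    };

A wrapper built this way is not copyable (std::atomic deletes its copy operations), which lines up with the patch marking Bucket as NONCOPYABLE and replacing the bucket copy-assignments in the grow/shrink paths with the explicit Bucket::assign(). Likewise, first_ptr()/next_ptr() now hand out const Atomic<Node*>*, so release_assign_node_ptr() const_casts before calling release_store, just as the old code cast away the const/volatile qualifiers.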