atomic64: Use arch_spin_locks instead of raw_spin_locks
raw_spin_locks can be traced by lockdep or by tracing itself. Atomic64
operations can be used in the tracing infrastructure. When an
architecture does not have true atomic64 operations, it can use the
generic version that disables interrupts and uses spin_locks.
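
To illustrate that fallback, here is a minimal userspace sketch of the
scheme lib/atomic64.c uses (illustration only, not the kernel code: the
atomic64_emu_t type, the atomic_flag "lock", NR_LOCKS and the address
hash below are simplified stand-ins, and the real kernel code also
disables interrupts around the lock):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NR_LOCKS 16

typedef struct { int64_t counter; } atomic64_emu_t;

/* Stand-in for the per-cacheline array of locks in lib/atomic64.c. */
static atomic_flag locks[NR_LOCKS] = {
	[0 ... (NR_LOCKS - 1)] = ATOMIC_FLAG_INIT,	/* GNU range initializer */
};

static atomic_flag *lock_addr(const atomic64_emu_t *v)
{
	uintptr_t addr = (uintptr_t)v;

	/* Hash the variable's address to pick one of the shared locks. */
	return &locks[(addr >> 6) % NR_LOCKS];
}

static int64_t atomic64_emu_add_return(int64_t a, atomic64_emu_t *v)
{
	atomic_flag *lock = lock_addr(v);
	int64_t val;

	/* Spin on the bare flag: the userspace stand-in for arch_spin_lock(). */
	while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
		;
	val = (v->counter += a);	/* plain 64-bit RMW under the lock */
	atomic_flag_clear_explicit(lock, memory_order_release);
	return val;
}

int main(void)
{
	atomic64_emu_t v = { .counter = 40 };

	printf("%lld\n", (long long)atomic64_emu_add_return(2, &v));	/* 42 */
	return 0;
}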

The tracing ring buffer code uses atomic64 operations for its
timekeeping. But because some architectures fall back to the generic
operations, the locking inside those atomic operations can recurse back
into the tracing code and cause an infinite recursion.

As the atomic64 implementation is architecture specific, it should not
use raw_spin_locks() but arch_spin_locks instead. That is exactly the
purpose of arch_spin_locks: to be used in architecture specific
implementations of generic infrastructure such as the atomic64
operations.

Note, by switching from raw_spin_locks to arch_spin_locks, the locks
taken to emulate the atomic64 operations will not have lockdep, mmio, or
any other kind of checks done on them. They will not even disable
preemption, although the code does disable interrupts, which prevents
the tasks holding the locks from being preempted. As the locks are only
held for very short periods of time, and only to emulate the atomic64
operations, not having them instrumented should not be an issue.
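
For clarity, this is the shape every generic_atomic64_*() helper takes
after the change (a sketch with made-up names; demo_lock, demo_counter
and demo_add_return are not part of the patch):

#include <linux/irqflags.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static s64 demo_counter;

static s64 demo_add_return(s64 a)
{
	unsigned long flags;
	s64 val;

	local_irq_save(flags);		/* interrupts off: holder cannot be preempted */
	arch_spin_lock(&demo_lock);	/* bare arch lock: no lockdep/mmio/tracing hooks */
	val = (demo_counter += a);
	arch_spin_unlock(&demo_lock);
	local_irq_restore(flags);
	return val;
}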

Cc: [email protected]
Cc: Mark Rutland <[email protected]>
Cc: Mathieu Desnoyers <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Andreas Larsson <[email protected]>
Link: https://lore.kernel.org/[email protected]
Fixes: c84897c ("ring-buffer: Remove 32bit timestamp logic")
Closes: https://lore.kernel.org/all/[email protected]/
Reported-by: Ludwig Rydberg <[email protected]>
Reviewed-by: Masami Hiramatsu (Google) <[email protected]>
Signed-off-by: Steven Rostedt (Google) <[email protected]>
rostedt committed Jan 22, 2025
1 parent cd2375a commit 6c8ad3a
Showing 1 changed file with 48 additions and 30 deletions.
diff --git a/lib/atomic64.c b/lib/atomic64.c
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -25,15 +25,15 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
 	[0 ... (NR_LOCKS - 1)] = {
-		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+		.lock = __ARCH_SPIN_LOCK_UNLOCKED,
 	},
 };
 
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
 {
 	unsigned long addr = (unsigned long) v;
 
@@ -45,49 +45,57 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 s64 generic_atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	arch_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	local_irq_save(flags);
+	arch_spin_lock(lock);
 	val = v->counter;
-	raw_spin_unlock_irqrestore(lock, flags);
+	arch_spin_unlock(lock);
+	local_irq_restore(flags);
 	return val;
 }
 EXPORT_SYMBOL(generic_atomic64_read);
 
 void generic_atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	arch_spinlock_t *lock = lock_addr(v);
 
-	raw_spin_lock_irqsave(lock, flags);
+	local_irq_save(flags);
+	arch_spin_lock(lock);
 	v->counter = i;
-	raw_spin_unlock_irqrestore(lock, flags);
+	arch_spin_unlock(lock);
+	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(generic_atomic64_set);
 
 #define ATOMIC64_OP(op, c_op)						\
 void generic_atomic64_##op(s64 a, atomic64_t *v)			\
 {									\
 	unsigned long flags;						\
-	raw_spinlock_t *lock = lock_addr(v);				\
+	arch_spinlock_t *lock = lock_addr(v);				\
 									\
-	raw_spin_lock_irqsave(lock, flags);				\
+	local_irq_save(flags);						\
+	arch_spin_lock(lock);						\
 	v->counter c_op a;						\
-	raw_spin_unlock_irqrestore(lock, flags);			\
+	arch_spin_unlock(lock);						\
+	local_irq_restore(flags);					\
 }									\
 EXPORT_SYMBOL(generic_atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(op, c_op)					\
 s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
 {									\
 	unsigned long flags;						\
-	raw_spinlock_t *lock = lock_addr(v);				\
+	arch_spinlock_t *lock = lock_addr(v);				\
 	s64 val;							\
 									\
-	raw_spin_lock_irqsave(lock, flags);				\
+	local_irq_save(flags);						\
+	arch_spin_lock(lock);						\
 	val = (v->counter c_op a);					\
-	raw_spin_unlock_irqrestore(lock, flags);			\
+	arch_spin_unlock(lock);						\
+	local_irq_restore(flags);					\
 	return val;							\
 }									\
 EXPORT_SYMBOL(generic_atomic64_##op##_return);
@@ -96,13 +104,15 @@ EXPORT_SYMBOL(generic_atomic64_##op##_return);
 s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
 {									\
 	unsigned long flags;						\
-	raw_spinlock_t *lock = lock_addr(v);				\
+	arch_spinlock_t *lock = lock_addr(v);				\
 	s64 val;							\
 									\
-	raw_spin_lock_irqsave(lock, flags);				\
+	local_irq_save(flags);						\
+	arch_spin_lock(lock);						\
 	val = v->counter;						\
 	v->counter c_op a;						\
-	raw_spin_unlock_irqrestore(lock, flags);			\
+	arch_spin_unlock(lock);						\
+	local_irq_restore(flags);					\
 	return val;							\
 }									\
 EXPORT_SYMBOL(generic_atomic64_fetch_##op);
@@ -131,58 +141,66 @@ ATOMIC64_OPS(xor, ^=)
 s64 generic_atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	arch_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	local_irq_save(flags);
+	arch_spin_lock(lock);
 	val = v->counter - 1;
 	if (val >= 0)
 		v->counter = val;
-	raw_spin_unlock_irqrestore(lock, flags);
+	arch_spin_unlock(lock);
+	local_irq_restore(flags);
 	return val;
 }
 EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
 
 s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	arch_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	local_irq_save(flags);
+	arch_spin_lock(lock);
 	val = v->counter;
 	if (val == o)
 		v->counter = n;
-	raw_spin_unlock_irqrestore(lock, flags);
+	arch_spin_unlock(lock);
+	local_irq_restore(flags);
 	return val;
 }
 EXPORT_SYMBOL(generic_atomic64_cmpxchg);
 
 s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	arch_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	local_irq_save(flags);
+	arch_spin_lock(lock);
 	val = v->counter;
 	v->counter = new;
-	raw_spin_unlock_irqrestore(lock, flags);
+	arch_spin_unlock(lock);
+	local_irq_restore(flags);
 	return val;
 }
 EXPORT_SYMBOL(generic_atomic64_xchg);
 
 s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	arch_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
-	raw_spin_lock_irqsave(lock, flags);
+	local_irq_save(flags);
+	arch_spin_lock(lock);
 	val = v->counter;
 	if (val != u)
 		v->counter += a;
-	raw_spin_unlock_irqrestore(lock, flags);
+	arch_spin_unlock(lock);
+	local_irq_restore(flags);
 
 	return val;
 }
