local_lock: Move this_cpu_ptr() notation from internal to main header
local_lock.h is the main header for the local_lock_t type and provides
wrappers around the internal functions prefixed with __ in
local_lock_internal.h.

Move the this_cpu_ptr() dereference of the variable from the internal to
the main header. Since it is all implemented as macros, this_cpu_ptr()
still happens within the preempt/IRQ disabled section.

This frees the internal (__) implementation to be used on local_lock_t
types which are local variables and must not be accessed via
this_cpu_ptr().

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Waiman Long <longman@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/all/20250630075138.3448715-2-bigeasy@linutronix.de
commit 7ff495e26a
parent 19272b37aa
committed by Thomas Gleixner
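
For illustration only: a standalone userspace sketch of the layering the commit describes. The my_*/fake_* names and the simplified lock type are invented for this example; the point is that the public wrapper resolves the per-CPU address while the __ internal helper only ever sees a plain pointer, so the internal helper can also be applied to an on-stack variable.

/*
 * Hypothetical sketch, not kernel code: the public wrapper does the
 * "per-CPU" dereference, the internal helper takes a plain pointer.
 */
#include <assert.h>
#include <stdio.h>

typedef struct { int acquired; } my_local_lock_t;

/* Stand-in for this_cpu_ptr(): here there is only one "CPU". */
static my_local_lock_t fake_percpu_lock;
#define fake_this_cpu_ptr(p)	(p)

/* Internal helpers take an already-resolved pointer ... */
#define __my_local_lock(l)	do { assert(!(l)->acquired); (l)->acquired = 1; } while (0)
#define __my_local_unlock(l)	do { assert((l)->acquired);  (l)->acquired = 0; } while (0)

/* ... and the public wrappers do the per-CPU dereference. */
#define my_local_lock(l)	__my_local_lock(fake_this_cpu_ptr(l))
#define my_local_unlock(l)	__my_local_unlock(fake_this_cpu_ptr(l))

int main(void)
{
	/* Per-CPU style usage through the wrappers. */
	my_local_lock(&fake_percpu_lock);
	my_local_unlock(&fake_percpu_lock);

	/* The __ helpers can now also operate on an on-stack lock. */
	my_local_lock_t on_stack = { 0 };
	__my_local_lock(&on_stack);
	__my_local_unlock(&on_stack);

	puts("ok");
	return 0;
}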
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -13,13 +13,13 @@
  * local_lock - Acquire a per CPU local lock
  * @lock:	The lock variable
  */
-#define local_lock(lock)		__local_lock(lock)
+#define local_lock(lock)		__local_lock(this_cpu_ptr(lock))
 
 /**
  * local_lock_irq - Acquire a per CPU local lock and disable interrupts
  * @lock:	The lock variable
  */
-#define local_lock_irq(lock)		__local_lock_irq(lock)
+#define local_lock_irq(lock)		__local_lock_irq(this_cpu_ptr(lock))
 
 /**
  * local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -28,19 +28,19 @@
  * @flags:	Storage for interrupt flags
  */
 #define local_lock_irqsave(lock, flags)				\
-	__local_lock_irqsave(lock, flags)
+	__local_lock_irqsave(this_cpu_ptr(lock), flags)
 
 /**
  * local_unlock - Release a per CPU local lock
  * @lock:	The lock variable
  */
-#define local_unlock(lock)		__local_unlock(lock)
+#define local_unlock(lock)		__local_unlock(this_cpu_ptr(lock))
 
 /**
  * local_unlock_irq - Release a per CPU local lock and enable interrupts
  * @lock:	The lock variable
  */
-#define local_unlock_irq(lock)		__local_unlock_irq(lock)
+#define local_unlock_irq(lock)		__local_unlock_irq(this_cpu_ptr(lock))
 
 /**
  * local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -49,7 +49,7 @@
  * @flags:	Interrupt flags to restore
  */
 #define local_unlock_irqrestore(lock, flags)			\
-	__local_unlock_irqrestore(lock, flags)
+	__local_unlock_irqrestore(this_cpu_ptr(lock), flags)
 
 /**
  * local_lock_init - Runtime initialize a lock instance
@@ -64,7 +64,7 @@
  * locking constrains it will _always_ fail to acquire the lock in NMI or
  * HARDIRQ context on PREEMPT_RT.
  */
-#define local_trylock(lock)		__local_trylock(lock)
+#define local_trylock(lock)		__local_trylock(this_cpu_ptr(lock))
 
 /**
  * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
@@ -77,7 +77,7 @@
  * HARDIRQ context on PREEMPT_RT.
  */
 #define local_trylock_irqsave(lock, flags)			\
-	__local_trylock_irqsave(lock, flags)
+	__local_trylock_irqsave(this_cpu_ptr(lock), flags)
 
 DEFINE_GUARD(local_lock, local_lock_t __percpu*,
 	     local_lock(_T),
@@ -91,10 +91,10 @@ DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
 		    unsigned long flags)
 
 #define local_lock_nested_bh(_lock)				\
-	__local_lock_nested_bh(_lock)
+	__local_lock_nested_bh(this_cpu_ptr(_lock))
 
 #define local_unlock_nested_bh(_lock)				\
-	__local_unlock_nested_bh(_lock)
+	__local_unlock_nested_bh(this_cpu_ptr(_lock))
 
 DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
 	     local_lock_nested_bh(_T),
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -99,14 +99,14 @@ do { \
 		local_trylock_t *tl;				\
 		local_lock_t *l;				\
 								\
-		l = (local_lock_t *)this_cpu_ptr(lock);		\
+		l = (local_lock_t *)(lock);			\
 		tl = (local_trylock_t *)l;			\
 		_Generic((lock),				\
-			__percpu local_trylock_t *: ({		\
+			local_trylock_t *: ({			\
 				lockdep_assert(tl->acquired == 0); \
 				WRITE_ONCE(tl->acquired, 1);	\
 			}),					\
-			__percpu local_lock_t *: (void)0);	\
+			local_lock_t *: (void)0);		\
 		local_lock_acquire(l);				\
 	} while (0)
 
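Worth noting in the hunk above: because the internal helper now receives an already-dereferenced pointer, the _Generic() selectors change from the __percpu-qualified pointer types to plain local_trylock_t * / local_lock_t *. A standalone sketch of this kind of compile-time dispatch on pointer type, using invented demo_* types rather than the kernel ones:

/* Hypothetical example of _Generic dispatch by pointer type (C11). */
#include <stdio.h>

typedef struct { int dummy;    } demo_lock_t;
typedef struct { int acquired; } demo_trylock_t;

/* Selects a different expression depending on the pointer type passed in. */
#define demo_is_trylock(p)		\
	_Generic((p),			\
		 demo_trylock_t *: 1,	\
		 demo_lock_t *: 0)

int main(void)
{
	demo_lock_t l;
	demo_trylock_t tl;

	printf("%d %d\n", demo_is_trylock(&l), demo_is_trylock(&tl));	/* prints: 0 1 */
	return 0;
}
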
@@ -133,7 +133,7 @@ do { \
 		local_trylock_t *tl;				\
 								\
 		preempt_disable();				\
-		tl = this_cpu_ptr(lock);			\
+		tl = (lock);					\
 		if (READ_ONCE(tl->acquired)) {			\
 			preempt_enable();			\
 			tl = NULL;				\
@@ -150,7 +150,7 @@ do { \
 		local_trylock_t *tl;				\
 								\
 		local_irq_save(flags);				\
-		tl = this_cpu_ptr(lock);			\
+		tl = (lock);					\
 		if (READ_ONCE(tl->acquired)) {			\
 			local_irq_restore(flags);		\
 			tl = NULL;				\
@@ -167,15 +167,15 @@ do { \
 		local_trylock_t *tl;				\
 		local_lock_t *l;				\
 								\
-		l = (local_lock_t *)this_cpu_ptr(lock);		\
+		l = (local_lock_t *)(lock);			\
 		tl = (local_trylock_t *)l;			\
 		local_lock_release(l);				\
 		_Generic((lock),				\
-			__percpu local_trylock_t *: ({		\
+			local_trylock_t *: ({			\
 				lockdep_assert(tl->acquired == 1); \
 				WRITE_ONCE(tl->acquired, 0);	\
 			}),					\
-			__percpu local_lock_t *: (void)0);	\
+			local_lock_t *: (void)0);		\
 	} while (0)
 
 #define __local_unlock(lock)					\
@@ -199,11 +199,11 @@ do { \
 #define __local_lock_nested_bh(lock)				\
 	do {							\
 		lockdep_assert_in_softirq();			\
-		local_lock_acquire(this_cpu_ptr(lock));		\
+		local_lock_acquire((lock));			\
 	} while (0)
 
 #define __local_unlock_nested_bh(lock)				\
-	local_lock_release(this_cpu_ptr(lock))
+	local_lock_release((lock))
 
 #else /* !CONFIG_PREEMPT_RT */
 
@@ -227,7 +227,7 @@ typedef spinlock_t local_trylock_t;
 #define __local_lock(__lock)					\
 	do {							\
 		migrate_disable();				\
-		spin_lock(this_cpu_ptr((__lock)));		\
+		spin_lock((__lock));				\
 	} while (0)
 
 #define __local_lock_irq(lock)			__local_lock(lock)
@@ -241,7 +241,7 @@ typedef spinlock_t local_trylock_t;
 
 #define __local_unlock(__lock)					\
 	do {							\
-		spin_unlock(this_cpu_ptr((__lock)));		\
+		spin_unlock((__lock));				\
 		migrate_enable();				\
 	} while (0)
 
@@ -252,12 +252,12 @@ typedef spinlock_t local_trylock_t;
 #define __local_lock_nested_bh(lock)				\
 	do {							\
 		lockdep_assert_in_softirq_func();		\
-		spin_lock(this_cpu_ptr(lock));			\
+		spin_lock((lock));				\
 	} while (0)
 
 #define __local_unlock_nested_bh(lock)				\
 	do {							\
-		spin_unlock(this_cpu_ptr((lock)));		\
+		spin_unlock((lock));				\
 	} while (0)
 
 #define __local_trylock(lock)					\
@@ -268,7 +268,7 @@ do { \
 			__locked = 0;				\
 		} else {					\
 			migrate_disable();			\
-			__locked = spin_trylock(this_cpu_ptr((lock)));	\
+			__locked = spin_trylock((lock));	\
 			if (!__locked)				\
 				migrate_enable();		\
 		}						\
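
The commit message's point that this_cpu_ptr() still runs inside the preempt/IRQ disabled section, even though it is now written in the outer wrapper, follows from macro expansion order: the argument text is only evaluated where the inner macro body uses it. A standalone sketch of that ordering, with invented trace()/demo_* helpers standing in for the real primitives:

/* Hypothetical demo: the wrapper's argument is evaluated inside the inner body. */
#include <stdio.h>

static int step;
static int lock_storage;

#define trace(what)		printf("%d: %s\n", ++step, what)
#define fake_preempt_disable()	trace("preempt_disable")
#define fake_this_cpu_ptr(p)	(trace("this_cpu_ptr"), (p))

/* Inner macro: its argument is only evaluated after "preemption" is disabled. */
#define __demo_lock(l)			\
do {					\
	fake_preempt_disable();		\
	*(l) = 1;	/* acquire */	\
} while (0)

/* Outer wrapper, shaped like the post-patch local_lock(). */
#define demo_lock(l)	__demo_lock(fake_this_cpu_ptr(l))

int main(void)
{
	demo_lock(&lock_storage);
	/* Output: "1: preempt_disable" then "2: this_cpu_ptr". */
	return 0;
}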