Replace clear_bit_and_unlock_is_negative_byte() with xor_unlock_is_negative_byte().  We have a few places that like to lock a folio, set a flag and unlock it again.  Allow for the possibility of combining the latter two operations for efficiency.  We are guaranteed that the caller holds the lock, so it is safe to unlock it with the xor.  The caller must guarantee that nobody else will set the flag without holding the lock; it is not safe to do this with the PG_dirty flag, for example.

Link: https://lkml.kernel.org/r/20231004165317.1061855-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
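To illustrate the calling pattern this enables, here is a minimal sketch (LOCK_BIT, UPTODATE_BIT and wake_up_waiters() are invented stand-ins for PG_locked, PG_uptodate and the folio wake-up path; they are not part of this patch). The caller already holds the bit lock, knows the flag is currently clear, and uses one atomic xor to set the flag and drop the lock; the return value reports whether bit 7 of the word (a "waiters"-style flag) was set:

/*
 * Hedged sketch, not kernel code: LOCK_BIT, UPTODATE_BIT and
 * wake_up_waiters() are hypothetical.  UPTODATE_BIT must be known clear
 * here, because the xor toggles it, and nobody else may set it without
 * holding the lock bit.
 */
static void unlock_and_set_flag(unsigned long *word)
{
	unsigned long mask = BIT(LOCK_BIT) | BIT(UPTODATE_BIT);

	/* Sets UPTODATE_BIT and clears LOCK_BIT in one release operation. */
	if (xor_unlock_is_negative_byte(mask, word))
		wake_up_waiters(word);	/* bit 7 of the word was set */
}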
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/**
 * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics if
 * the returned value is 0.
 * It can be used to implement bit locks.
 */
static __always_inline int
arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);

	p += BIT_WORD(nr);
	if (READ_ONCE(*p) & mask)
		return 1;

	old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
	return !!(old & mask);
}

/**
 * arch_clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static __always_inline void
arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
	p += BIT_WORD(nr);
	raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}
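As the kernel-doc above says, these two primitives are enough to implement a bit lock. A minimal sketch, assuming kernel context and an invented MY_LOCK_BIT in a caller-owned flags word (real code would normally go through the instrumented test_and_set_bit_lock()/clear_bit_unlock() wrappers or <linux/bit_spinlock.h> rather than the arch_ helpers directly):

/* Hypothetical bit lock; MY_LOCK_BIT and "flags" are placeholders. */
static void my_bit_lock(volatile unsigned long *flags)
{
	/* Acquire semantics apply on the iteration that sees the bit clear. */
	while (arch_test_and_set_bit_lock(MY_LOCK_BIT, flags))
		cpu_relax();
}

static void my_bit_unlock(volatile unsigned long *flags)
{
	/* Release semantics; pairs with the acquire in my_bit_lock(). */
	arch_clear_bit_unlock(MY_LOCK_BIT, flags);
}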

/**
 * arch___clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * A weaker form of clear_bit_unlock() as used by __bit_spin_unlock(). If all
 * the bits in the word are protected by this lock some archs can use weaker
 * ops to safely unlock.
 *
 * See for example x86's implementation.
 */
static inline void
arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
	unsigned long old;

	p += BIT_WORD(nr);
	old = READ_ONCE(*p);
	old &= ~BIT_MASK(nr);
	raw_atomic_long_set_release((atomic_long_t *)p, old);
}
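A hedged illustration of the condition that comment describes (the struct and names below are invented): if bit 0 of a word is the lock and every other bit of that word is only written while the lock is held, then no concurrent update can change the word's value between the READ_ONCE() and the release store (contending lockers only try to set a bit that is already set), so the non-atomic form is safe:

/* Invented example: bit 0 is the lock, bits 1.. are lock-protected data. */
struct example_bucket {
	unsigned long state;
};

static void example_bucket_unlock(struct example_bucket *b)
{
	/* Safe only because no other bit of ->state changes while locked. */
	arch___clear_bit_unlock(0, &b->state);
}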

#ifndef arch_xor_unlock_is_negative_byte
static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
	long old;

	old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p);
	return !!(old & BIT(7));
}
#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
#endif

#include <asm-generic/bitops/instrumented-lock.h>

#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */