/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/**
 * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @p: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics if
 * the returned value is 0.
 * It can be used to implement bit locks.
 */
static __always_inline int
arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);

	p += BIT_WORD(nr);
	if (READ_ONCE(*p) & mask)
		return 1;

	old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
	return !!(old & mask);
}

/**
 * arch_clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @p: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static __always_inline void
arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
	p += BIT_WORD(nr);
	raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}

/**
 * arch___clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @p: the address to start counting from
 *
 * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
 * the bits in the word are protected by this lock some archs can use weaker
 * ops to safely unlock.
 *
 * See for example x86's implementation.
 */
static inline void
arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
	unsigned long old;

	p += BIT_WORD(nr);
	old = READ_ONCE(*p);
	old &= ~BIT_MASK(nr);
	raw_atomic_long_set_release((atomic_long_t *)p, old);
}

/**
 * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if
 * the bottom byte is negative, for unlock.
 * @nr: the bit to clear
 * @p: the address to start counting from
 *
 * This is a bit of a one-trick-pony for the filemap code, which clears
 * PG_locked and tests PG_waiters.
 */
#ifndef arch_clear_bit_unlock_is_negative_byte
static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
							   volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);

	p += BIT_WORD(nr);
	old = raw_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
	return !!(old & BIT(7));
}
#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
#endif

#include <asm-generic/bitops/instrumented-lock.h>

#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
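
The kernel-doc above says these primitives "can be used to implement bit locks": acquiring a lock is a successful test_and_set_bit_lock() (return value 0, acquire ordering), and releasing it is clear_bit_unlock() (release ordering). Below is a minimal sketch of that pattern, using the instrumented wrappers pulled in by <asm-generic/bitops/instrumented-lock.h>. The helper names my_bit_lock()/my_bit_unlock() and the bare busy-wait policy are illustrative only; the kernel's real, more careful implementation of this idea lives in <linux/bit_spinlock.h>.

/*
 * Illustrative sketch, not part of this header: a trivial bit spinlock
 * built on the lock/unlock bitops declared above.
 */
#include <linux/bitops.h>
#include <asm/processor.h>	/* cpu_relax() */

static inline void my_bit_lock(int nr, unsigned long *word)
{
	/*
	 * test_and_set_bit_lock() returns 0 when we observed the bit clear
	 * and set it; in that case it provides acquire semantics, so the
	 * critical section cannot be reordered before the lock.
	 */
	while (test_and_set_bit_lock(nr, word))
		cpu_relax();	/* spin until the current holder clears the bit */
}

static inline void my_bit_unlock(int nr, unsigned long *word)
{
	/*
	 * clear_bit_unlock() has release semantics: all stores made inside
	 * the critical section are visible before the bit is seen clear.
	 */
	clear_bit_unlock(nr, word);
}

A caller would keep the lock bit inside some unsigned long it owns (for example a flags word in its own structure) and bracket the protected region with my_bit_lock()/my_bit_unlock() on the same bit number.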