LCOV - code coverage report
Current view: top level - include/asm-generic/bitops - lock.h (source / functions)
Test: fstests of 6.5.0-rc4-xfsa @ Mon Jul 31 20:08:27 PDT 2023
Date: 2023-07-31 20:08:27

             Hit    Total    Coverage
Lines:        19       19     100.0 %
Functions:     1        1     100.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _ASM_GENERIC_BITOPS_LOCK_H_
       3             : #define _ASM_GENERIC_BITOPS_LOCK_H_
       4             : 
       5             : #include <linux/atomic.h>
       6             : #include <linux/compiler.h>
       7             : #include <asm/barrier.h>
       8             : 
       9             : /**
      10             :  * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
      11             :  * @nr: Bit to set
       12             :  * @p: Address to count from
      13             :  *
      14             :  * This operation is atomic and provides acquire barrier semantics if
      15             :  * the returned value is 0.
      16             :  * It can be used to implement bit locks.
      17             :  */
      18             : static __always_inline int
      19             : arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
      20             : {
      21  4696472078 :         long old;
      22  4696472078 :         unsigned long mask = BIT_MASK(nr);
      23             : 
      24  4696472078 :         p += BIT_WORD(nr);
      25  4696472078 :         if (READ_ONCE(*p) & mask)
      26             :                 return 1;
      27             : 
      28  4692145796 :         old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
      29  4692826227 :         return !!(old & mask);
      30             : }
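
/*
 * Illustrative sketch, not part of this file: the acquire side of a
 * minimal bit spinlock built on the public test_and_set_bit_lock()
 * wrapper (defined via <asm-generic/bitops/instrumented-lock.h>,
 * included at the bottom of this header).  EXAMPLE_LOCK_BIT and
 * example_bit_lock() are made-up names.  Note how the READ_ONCE()
 * fast path above lets contended spinners re-read the word instead
 * of hammering the cache line with atomic RMWs.
 */
#define EXAMPLE_LOCK_BIT        0

static inline void example_bit_lock(volatile unsigned long *word)
{
        while (test_and_set_bit_lock(EXAMPLE_LOCK_BIT, word))
                cpu_relax();    /* lock held elsewhere: spin politely */
        /* acquire semantics: critical-section accesses stay below here */
}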
      31             : 
      32             : 
      33             : /**
      34             :  * arch_clear_bit_unlock - Clear a bit in memory, for unlock
       35             :  * @nr: the bit to clear
       36             :  * @p: the address to start counting from
      37             :  *
      38             :  * This operation is atomic and provides release barrier semantics.
      39             :  */
      40             : static __always_inline void
      41             : arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
      42             : {
      43    12059500 :         p += BIT_WORD(nr);
      44    12059500 :         raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
      45             : }
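
/*
 * Illustrative counterpart to the sketch above, not part of this file:
 * releasing the same hypothetical bit spinlock with the public
 * clear_bit_unlock() wrapper.  The release barrier orders every store
 * made in the critical section before the bit is observed clear.
 */
static inline void example_bit_unlock(volatile unsigned long *word)
{
        clear_bit_unlock(EXAMPLE_LOCK_BIT, word);
}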
      46             : 
      47             : /**
      48             :  * arch___clear_bit_unlock - Clear a bit in memory, for unlock
       49             :  * @nr: the bit to clear
       50             :  * @p: the address to start counting from
      51             :  *
       52             :  * A weaker form of clear_bit_unlock() as used by __bit_spin_unlock(). If all
       53             :  * the bits in the word are protected by this lock, some archs can use weaker
      54             :  * ops to safely unlock.
      55             :  *
      56             :  * See for example x86's implementation.
      57             :  */
      58             : static inline void
      59             : arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
      60             : {
      61  2281102274 :         unsigned long old;
      62             : 
      63  2281102274 :         p += BIT_WORD(nr);
      64  2281102274 :         old = READ_ONCE(*p);
      65  2281102274 :         old &= ~BIT_MASK(nr);
      66  2281102274 :         raw_atomic_long_set_release((atomic_long_t *)p, old);
      67             : }
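
/*
 * Illustrative sketch, not part of this file, of when the weaker
 * __clear_bit_unlock() form is safe: every bit in the word is written
 * only while the lock bit is held, so the plain READ_ONCE()/modify pair
 * above cannot race with another writer and only the final store needs
 * release semantics.  EXAMPLE_STATE_BIT and example_unlock_owned_word()
 * are made-up names.
 */
#define EXAMPLE_STATE_BIT       1

static inline void example_unlock_owned_word(volatile unsigned long *word)
{
        __set_bit(EXAMPLE_STATE_BIT, word);         /* non-atomic: safe, lock held */
        __clear_bit_unlock(EXAMPLE_LOCK_BIT, word); /* release store publishes both */
}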
      68             : 
      69             : /**
      70             :  * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
      71             :  *                                          byte is negative, for unlock.
      72             :  * @nr: the bit to clear
       73             :  * @p: the address to start counting from
      74             :  *
      75             :  * This is a bit of a one-trick-pony for the filemap code, which clears
       76             :  * PG_locked and tests PG_waiters.
      77             :  */
      78             : #ifndef arch_clear_bit_unlock_is_negative_byte
      79 30047264490 : static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
      80             :                                                           volatile unsigned long *p)
      81             : {
      82 30047264490 :         long old;
      83 30047264490 :         unsigned long mask = BIT_MASK(nr);
      84             : 
      85 30047264490 :         p += BIT_WORD(nr);
      86 30047264490 :         old = raw_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
      87 30003867101 :         return !!(old & BIT(7));
      88             : }
      89             : #define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
      90             : #endif
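
/*
 * Illustrative sketch, not part of this file, of the filemap pattern
 * the comment above refers to: the page unlock path clears PG_locked
 * (bit 0) and, from the same atomic op, learns whether a waiter set
 * the sign bit of the bottom byte (the return value tests BIT(7)).
 * Simplified; the real code lives in mm/filemap.c, and
 * example_wake_waiters() is a made-up stand-in for the wakeup path.
 */
void example_wake_waiters(volatile unsigned long *flags);  /* hypothetical */

static inline void example_unlock_flags_word(volatile unsigned long *flags)
{
        if (clear_bit_unlock_is_negative_byte(0 /* PG_locked */, flags))
                example_wake_waiters(flags);
}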
      91             : 
      92             : #include <asm-generic/bitops/instrumented-lock.h>
      93             : 
      94             : #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */

Generated by: LCOV version 1.14