/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#include <linux/stringify.h>

#ifndef CONFIG_CC_HAS_K_CONSTRAINT
#define K
#endif
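
/*
 * Note on the empty #define above: the macros below build their input
 * constraint as __stringify(constraint) "r".  When the toolchain cannot be
 * trusted with the 'K' constraint (CONFIG_CC_HAS_K_CONSTRAINT unset), K is
 * defined away so that callers passing K degrade to a plain "r" register
 * constraint instead of "Kr".
 */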

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, asm_op, constraint)				\
static __always_inline void						\
__ll_sc_atomic_##op(int i, atomic_t *v)					\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "\n"				\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxr	%w0, %2\n"					\
	"	" #asm_op "	%w0, %w0, %w3\n"			\
	"	stxr	%w1, %w0, %2\n"					\
	"	cbnz	%w1, 1b\n"					\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i));				\
}
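
/*
 * For illustration only (not part of the upstream header): with the
 * instantiation ATOMIC_OPS(add, add, I) below, ATOMIC_OP expands to roughly
 * the following function.  The loop retries the load/modify/store sequence
 * whenever the store-exclusive reports failure, i.e. whenever the exclusive
 * monitor was cleared by a competing access.
 *
 *	static __always_inline void __ll_sc_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		asm volatile("// atomic_add\n"
 *		"	prfm	pstl1strm, %2\n"	// prefetch for store
 *		"1:	ldxr	%w0, %2\n"		// load-exclusive counter
 *		"	add	%w0, %w0, %w3\n"	// result = result + i
 *		"	stxr	%w1, %w0, %2\n"		// try store, tmp = status
 *		"	cbnz	%w1, 1b\n"		// non-zero status: retry
 *		: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 *		: "Ir" (i));
 *	}
 */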

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static __always_inline int						\
__ll_sc_atomic_##op##_return##name(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "_return" #name "\n"		\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr	%w0, %2\n"				\
	"	" #asm_op "	%w0, %w0, %w3\n"			\
	"	st" #rel "xr	%w1, %w0, %2\n"				\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb							\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}
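
/*
 * Illustrative note (not from the upstream header): the name/mb/acq/rel/cl
 * parameters select the memory-ordering flavour.  acq = "a" turns the load
 * into ldaxr (acquire), rel = "l" turns the store into stlxr (release),
 * mb = "dmb ish" appends a full barrier for the fully-ordered variant, and
 * cl names the clobbers.  The _acquire instantiation below, for example,
 * produces a loop of the form:
 *
 *	1:	ldaxr	%w0, %2		// load-acquire exclusive
 *		add	%w0, %w0, %w3
 *		stxr	%w1, %w0, %2	// plain store-exclusive
 *		cbnz	%w1, 1b
 */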

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
static __always_inline int						\
__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int val, result;						\
									\
	asm volatile("// atomic_fetch_" #op #name "\n"			\
	"	prfm	pstl1strm, %3\n"				\
	"1:	ld" #acq "xr	%w0, %3\n"				\
	"	" #asm_op "	%w1, %w0, %w4\n"			\
	"	st" #rel "xr	%w2, %w1, %3\n"				\
	"	cbnz	%w2, 1b\n"					\
	"	" #mb							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}
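
/*
 * Descriptive note (added for clarity, not from the upstream header):
 * unlike ATOMIC_OP_RETURN, which returns the freshly computed value,
 * ATOMIC_FETCH_OP returns the value observed *before* the operation.  The
 * load-exclusive targets "result" (%w0), the operation writes its output to
 * the separate "val" register (%w1), and "result" is what the caller gets
 * back.
 */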

#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(add, add, I)
ATOMIC_OPS(sub, sub, J)
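
/*
 * Illustrative note (not from the upstream header): each ATOMIC_OPS(op, ...)
 * line above emits nine functions, e.g. for "add":
 *
 *	__ll_sc_atomic_add
 *	__ll_sc_atomic_add_return{,_relaxed,_acquire,_release}
 *	__ll_sc_atomic_fetch_add{,_relaxed,_acquire,_release}
 *
 * The 'I' constraint accepts immediates encodable in an ADD instruction and
 * 'J' those encodable in a SUB, letting the compiler fold small constants
 * straight into the instruction instead of burning a register.
 */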

#undef ATOMIC_OPS
#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)
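
/*
 * Descriptive note (added for clarity): the bitwise operations below are
 * generated without ATOMIC_OP_RETURN because the generic atomic API offers
 * atomic_fetch_and() and friends but no atomic_and_return() counterparts.
 */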

ATOMIC_OPS(and, and, K)
ATOMIC_OPS(or, orr, K)
ATOMIC_OPS(xor, eor, K)
/*
 * GAS converts the mysterious and undocumented BIC (immediate) alias to
 * an AND (immediate) instruction with the immediate inverted. We don't
 * have a constraint for this, so fall back to register.
 */
ATOMIC_OPS(andnot, bic, )

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op, constraint)				\
static __always_inline void						\
__ll_sc_atomic64_##op(s64 i, atomic64_t *v)				\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxr	%0, %2\n"					\
	"	" #asm_op "	%0, %0, %3\n"				\
	"	stxr	%w1, %0, %2\n"					\
	"	cbnz	%w1, 1b"					\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i));				\
}
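
/*
 * Descriptive note (added for clarity): the 64-bit variants operate on the
 * full X registers for the data (no %w modifier on %0/%3), while the
 * store-exclusive status in %w1 remains a 32-bit W register, as stxr
 * requires.
 */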

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static __always_inline long						\
__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return" #name "\n"		\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr	%0, %2\n"				\
	"	" #asm_op "	%0, %0, %3\n"				\
	"	st" #rel "xr	%w1, %0, %2\n"				\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb							\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
static __always_inline long						\
__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_fetch_" #op #name "\n"		\
	"	prfm	pstl1strm, %3\n"				\
	"1:	ld" #acq "xr	%0, %3\n"				\
	"	" #asm_op "	%1, %0, %4\n"				\
	"	st" #rel "xr	%w2, %1, %3\n"				\
	"	cbnz	%w2, 1b\n"					\
	"	" #mb							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}

#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add, I)
ATOMIC64_OPS(sub, sub, J)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and, L)
ATOMIC64_OPS(or, orr, L)
ATOMIC64_OPS(xor, eor, L)
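
/*
 * Descriptive note (added for clarity): the 32-bit bitwise ops above use
 * 'K', which constrains the immediate to a bitmask encodable by a 32-bit
 * logical instruction; the 64-bit variants here use 'L', its 64-bit
 * logical-immediate counterpart.
 */
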
/*
 * GAS converts the mysterious and undocumented BIC (immediate) alias to
 * an AND (immediate) instruction with the immediate inverted. We don't
 * have a constraint for this, so fall back to register.
 */
ATOMIC64_OPS(andnot, bic, )

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __always_inline s64
__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	subs	%0, %0, #1\n"
	"	b.lt	2f\n"
	"	stlxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	"	dmb	ish\n"
	"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
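
/*
 * Descriptive note (added for clarity): the function above only stores the
 * decremented value when it stays non-negative; a negative "subs" result
 * branches past the store-exclusive, leaving the counter untouched.  Either
 * way the caller receives the decremented value, so a hypothetical caller
 * (sketch only) would test it like:
 *
 *	if (__ll_sc_atomic64_dec_if_positive(&v) < 0)
 *		// counter was already <= 0; nothing was written back
 */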

#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint)	\
static __always_inline u##sz						\
__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr,			\
					 unsigned long old,		\
					 u##sz new)			\
{									\
	unsigned long tmp;						\
	u##sz oldval;							\
									\
	/*								\
	 * Sub-word sizes require explicit casting so that the compare	\
	 * part of the cmpxchg doesn't end up interpreting non-zero	\
	 * upper bits of the register containing "old".			\
	 */								\
	if (sz < 32)							\
		old = (u##sz)old;					\
									\
	asm volatile(							\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n"		\
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
	"	cbnz	%" #w "[tmp], 2f\n"				\
	"	st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n"	\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
	  [v] "+Q" (*(u##sz *)ptr)					\
	: [old] __stringify(constraint) "r" (old), [new] "r" (new)	\
	: cl);								\
									\
	return oldval;							\
}
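
/*
 * Descriptive note (added for clarity): the comparison is done with eor
 * plus cbnz rather than cmp, since XOR of the loaded value and "old" is
 * zero exactly when they are equal; a mismatch branches out before the
 * store-exclusive.  The function always returns the value it observed, so
 * a hypothetical caller detects success by checking that the returned
 * value equals the "old" it passed in.
 */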

/*
 * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
 * handle the 'K' constraint for the value 4294967295 - thus we use no
 * constraint for 32 bit operations.
 */
__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         , K)
__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         , K)
__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         , K)
__CMPXCHG_CASE( ,  ,     , 64,        ,  ,  ,         , L)
__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory", K)
__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory", K)
__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory", K)
__CMPXCHG_CASE( ,  , acq_, 64,        , a,  , "memory", L)
__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory", K)
__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory", K)
__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory", K)
__CMPXCHG_CASE( ,  , rel_, 64,        ,  , l, "memory", L)
__CMPXCHG_CASE(w, b,  mb_,  8, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE(w, h,  mb_, 16, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE(w,  ,  mb_, 32, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE( ,  ,  mb_, 64, dmb ish,  , l, "memory", L)

#undef __CMPXCHG_CASE

union __u128_halves {
	u128 full;
	struct {
		u64 low, high;
	};
};
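
/*
 * Descriptive note (added for clarity): the anonymous-struct union lets
 * __cmpxchg128 move a u128 in and out of the two 64-bit registers that
 * ldxp/stxp operate on, without explicit shifting and masking.
 */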

#define __CMPXCHG128(name, mb, rel, cl...)				\
static __always_inline u128						\
__ll_sc__cmpxchg128##name(volatile u128 *ptr, u128 old, u128 new)	\
{									\
	union __u128_halves r, o = { .full = (old) },			\
			       n = { .full = (new) };			\
	unsigned int tmp;						\
									\
	asm volatile("// __cmpxchg128" #name "\n"			\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ldxp	%[rl], %[rh], %[v]\n"				\
	"	cmp	%[rl], %[ol]\n"					\
	"	ccmp	%[rh], %[oh], 0, eq\n"				\
	"	b.ne	2f\n"						\
	"	st" #rel "xp	%w[tmp], %[nl], %[nh], %[v]\n"		\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: [v] "+Q" (*(u128 *)ptr),					\
	  [rl] "=&r" (r.low), [rh] "=&r" (r.high),			\
	  [tmp] "=&r" (tmp)						\
	: [ol] "r" (o.low), [oh] "r" (o.high),				\
	  [nl] "r" (n.low), [nh] "r" (n.high)				\
	: "cc", ##cl);							\
									\
	return r.full;							\
}
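
/*
 * Descriptive note (added for clarity): the 128-bit compare is built from
 * cmp on the low halves followed by ccmp on the high halves.  ccmp only
 * performs its comparison when the first one set EQ; otherwise it writes
 * the immediate 0 to the flags, which encodes "not equal".  Either way,
 * b.ne is taken if any half differs and the store-exclusive pair is
 * skipped.
 */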

__CMPXCHG128(   ,        ,  )
__CMPXCHG128(_mb, dmb ish, l, "memory")

#undef __CMPXCHG128

#undef K

#endif	/* __ASM_ATOMIC_LL_SC_H */
