LCOV - code coverage report
Current view:  top level - arch/arm64/include/asm - preempt.h (source / functions)
Test:          fstests of 6.5.0-rc4-xfsa @ Mon Jul 31 20:08:27 PDT 2023
Date:          2023-07-31 20:08:27

                      Hit     Total   Coverage
Lines:                 11        11    100.0 %
Functions:              1         1    100.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef __ASM_PREEMPT_H
       3             : #define __ASM_PREEMPT_H
       4             : 
       5             : #include <linux/jump_label.h>
       6             : #include <linux/thread_info.h>
       7             : 
       8             : #define PREEMPT_NEED_RESCHED    BIT(32)
       9             : #define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
      10             : 
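Defining PREEMPT_NEED_RESCHED as BIT(32) only makes sense together with the thread_info layout: arm64 keeps a 64-bit preempt_count whose low 32 bits are the preemption count proper and whose upper 32 bits hold an inverted need-resched flag, so both halves can be read together in a single load. A minimal sketch of that union, modelled on the little-endian layout in arch/arm64/include/asm/thread_info.h (the real header swaps the two halves on big-endian):

        union {
                u64     preempt_count;  /* both fields read as one word */
                struct {
                        u32     count;          /* classic preemption count */
                        u32     need_resched;   /* 0 => reschedule pending  */
                } preempt;
        };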
      11             : static inline int preempt_count(void)
      12             : {
      13   981822189 :         return READ_ONCE(current_thread_info()->preempt.count);
      14             : }
      15             : 
      16             : static inline void preempt_count_set(u64 pc)
      17             : {
      18             :         /* Preserve existing value of PREEMPT_NEED_RESCHED */
      19             :         WRITE_ONCE(current_thread_info()->preempt.count, pc);
      20             : }
      21             : 
      22             : #define init_task_preempt_count(p) do { \
      23             :         task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
      24             : } while (0)
      25             : 
      26             : #define init_idle_preempt_count(p, cpu) do { \
      27             :         task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
      28             : } while (0)
      29             : 
      30             : static inline void set_preempt_need_resched(void)
      31             : {
      32             :         current_thread_info()->preempt.need_resched = 0;
      33             : }
      34             : 
      35             : static inline void clear_preempt_need_resched(void)
      36             : {
      37             :         current_thread_info()->preempt.need_resched = 1;
      38             : }
      39             : 
      40             : static inline bool test_preempt_need_resched(void)
      41             : {
      42             :         return !current_thread_info()->preempt.need_resched;
      43             : }
      44             : 
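Note the inverted polarity of the three helpers above: need_resched is written to 0 to request a reschedule and to 1 to clear the request. Together with the layout sketched earlier, this makes the full 64-bit preempt_count read as zero exactly when the count is zero and a reschedule is pending. A small hypothetical userspace model of that invariant (little-endian assumed; union pc is a stand-in, not a kernel type):

        #include <assert.h>
        #include <stdint.h>

        union pc {
                uint64_t preempt_count;
                struct { uint32_t count, need_resched; } preempt;
        };

        int main(void)
        {
                union pc pc = { .preempt = { .count = 0, .need_resched = 1 } };
                /* count is 0 but no reschedule pending: nonzero as a whole */
                assert(pc.preempt_count == (1ULL << 32));

                pc.preempt.need_resched = 0;    /* set_preempt_need_resched() */
                /* count 0 and reschedule pending: the whole word reads 0 */
                assert(pc.preempt_count == 0);
                return 0;
        }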
      45             : static inline void __preempt_count_add(int val)
      46             : {
      47 32923736413 :         u32 pc = READ_ONCE(current_thread_info()->preempt.count);
      48 32923736413 :         pc += val;
      49 32923736413 :         WRITE_ONCE(current_thread_info()->preempt.count, pc);
      50             : }
      51             : 
      52             : static inline void __preempt_count_sub(int val)
      53             : {
      54             :         u32 pc = READ_ONCE(current_thread_info()->preempt.count);
      55             :         pc -= val;
      56             :         WRITE_ONCE(current_thread_info()->preempt.count, pc);
      57             : }
      58             : 
      59 32868740733 : static inline bool __preempt_count_dec_and_test(void)
      60             : {
      61 32868740733 :         struct thread_info *ti = current_thread_info();
      62 32868740733 :         u64 pc = READ_ONCE(ti->preempt_count);
      63             : 
      64             :         /* Update only the count field, leaving need_resched unchanged */
      65 32868740733 :         WRITE_ONCE(ti->preempt.count, --pc);
      66             : 
      67             :         /*
      68             :          * If we wrote back all zeroes, then we're preemptible and in
      69             :          * need of a reschedule. Otherwise, we need to reload the
      70             :          * preempt_count in case the need_resched flag was cleared by an
      71             :          * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
      72             :          * pair.
      73             :          */
      74 32868740733 :         return !pc || !READ_ONCE(ti->preempt_count);
      75             : }
      76             : 
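The comment above deserves unpacking. The WRITE_ONCE stores only the low 32-bit count, so a need_resched update made by an interrupt in that window is never overwritten; what can go stale is the local copy pc, which may still carry the old flag value. The re-read on the return line catches exactly that case. A hypothetical single-threaded model of the interleaving, reusing the union pc stand-in from the previous sketch:

        union pc ti = { .preempt = { .count = 1, .need_resched = 1 } };

        uint64_t pc = ti.preempt_count;     /* READ_ONCE: BIT(32) | 1       */
        ti.preempt.need_resched = 0;        /* "interrupt" requests resched */
        ti.preempt.count = (uint32_t)--pc;  /* WRITE_ONCE: low half only    */

        /* The interrupt's flag write survived the partial store...         */
        assert(ti.preempt.need_resched == 0);
        /* ...but the local pc is stale; only the re-read sees the truth.   */
        assert(pc != 0 && ti.preempt_count == 0);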
      77             : static inline bool should_resched(int preempt_offset)
      78             : {
      79 14959519511 :         u64 pc = READ_ONCE(current_thread_info()->preempt_count);
      80 14959519511 :         return pc == preempt_offset;
      81             : }
      82             : 
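The single equality in should_resched() folds two conditions into one comparison: for the 64-bit value to equal a small non-negative offset, the upper word must be zero, which under the inverted encoding means a reschedule is already pending. Continuing the same hypothetical model:

        union pc ti = { .preempt = { .count = 1, .need_resched = 1 } };
        assert(ti.preempt_count != 1);      /* count matches, no resched: false */

        ti.preempt.need_resched = 0;        /* reschedule becomes pending       */
        assert(ti.preempt_count == 1);      /* now should_resched(1) holds      */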
      83             : #ifdef CONFIG_PREEMPTION
      84             : 
      85             : void preempt_schedule(void);
      86             : void preempt_schedule_notrace(void);
      87             : 
      88             : #ifdef CONFIG_PREEMPT_DYNAMIC
      89             : 
      90             : DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
      91             : void dynamic_preempt_schedule(void);
      92             : #define __preempt_schedule()            dynamic_preempt_schedule()
      93             : void dynamic_preempt_schedule_notrace(void);
      94             : #define __preempt_schedule_notrace()    dynamic_preempt_schedule_notrace()
      95             : 
      96             : #else /* CONFIG_PREEMPT_DYNAMIC */
      97             : 
      98             : #define __preempt_schedule()            preempt_schedule()
      99             : #define __preempt_schedule_notrace()    preempt_schedule_notrace()
     100             : 
     101             : #endif /* CONFIG_PREEMPT_DYNAMIC */
     102             : #endif /* CONFIG_PREEMPTION */
     103             : 
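The DECLARE_STATIC_KEY_TRUE above pairs with definitions in kernel/sched/core.c: on architectures such as arm64 that select HAVE_PREEMPT_DYNAMIC_KEY, the dynamic wrappers gate the real function behind a static branch that the preempt= boot parameter can flip at runtime. A from-memory sketch of the shape of that code, not a verbatim copy:

        static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);

        void __sched notrace dynamic_preempt_schedule(void)
        {
                if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
                        return;
                preempt_schedule();
        }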
     104             : #endif /* __ASM_PREEMPT_H */
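For context on how these primitives are consumed: the generic include/linux/preempt.h builds preempt_enable() on top of preempt_count_dec_and_test() and __preempt_schedule(); under CONFIG_PREEMPTION it reads approximately:

        #define preempt_enable() \
        do { \
                barrier(); \
                if (unlikely(preempt_count_dec_and_test())) \
                        __preempt_schedule(); \
        } while (0)

This is also why the hot counters in the listing cluster on __preempt_count_add() and __preempt_count_dec_and_test(): every preempt_disable()/preempt_enable() pair passes through them.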

Generated by: LCOV version 1.14