LCOV - code coverage report
Current view: top level - include/linux/sched - clock.h (source / functions) Hit Total Coverage
Test: fstests of 6.5.0-rc4-xfsa @ Mon Jul 31 20:08:27 PDT 2023 Lines: 0 1 0.0 %
Date: 2023-07-31 20:08:27 Functions: 0 0 -

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _LINUX_SCHED_CLOCK_H
       3             : #define _LINUX_SCHED_CLOCK_H
       4             : 
       5             : #include <linux/smp.h>
       6             : 
       7             : /*
       8             :  * Do not use outside of architecture code which knows its limitations.
       9             :  *
      10             :  * sched_clock() has no promise of monotonicity or bounded drift between
       11             :  * CPUs, and using it (which you should not) requires disabling IRQs.
      12             :  *
      13             :  * Please use one of the three interfaces below.
      14             :  */
      15             : extern u64 sched_clock(void);
      16             : 
      17             : #if defined(CONFIG_ARCH_WANTS_NO_INSTR) || defined(CONFIG_GENERIC_SCHED_CLOCK)
      18             : extern u64 sched_clock_noinstr(void);
      19             : #else
                      : /*
                      :  * Fallback when the architecture provides no dedicated
                      :  * noinstr-safe variant: just call plain sched_clock().
                      :  */
       20             : static __always_inline u64 sched_clock_noinstr(void)
       21             : {
       22             :         return sched_clock();
       23             : }
      24             : #endif
      25             : 
      26             : /*
      27             :  * See the comment in kernel/sched/clock.c
      28             :  */
      29             : extern u64 running_clock(void);
      30             : extern u64 sched_clock_cpu(int cpu);
      31             : 
      32             : 
      33             : extern void sched_clock_init(void);
      34             : 
      35             : #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
                      : /*
                      :  * No-op stub: without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK there
                      :  * is no per-tick clock state to maintain.
                      :  */
       36             : static inline void sched_clock_tick(void)
       37             : {
       38             : }
      39             : 
                      : /*
                      :  * No-op stub: there is no stability state to clear when
                      :  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is not set.
                      :  */
       40             : static inline void clear_sched_clock_stable(void)
       41             : {
       42             : }
      43             : 
                      : /*
                      :  * No-op stub: idle sleep events need no clock bookkeeping
                      :  * when CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is not set.
                      :  */
       44             : static inline void sched_clock_idle_sleep_event(void)
       45             : {
       46             : }
      47             : 
                      : /*
                      :  * No-op stub: idle wakeup events need no clock bookkeeping
                      :  * when CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is not set.
                      :  */
       48             : static inline void sched_clock_idle_wakeup_event(void)
       49             : {
       50             : }
      51             : 
                      : /*
                      :  * Without unstable-clock support every CPU shares the same
                      :  * clock, so @cpu is ignored and plain sched_clock() is used.
                      :  */
       52             : static inline u64 cpu_clock(int cpu)
       53             : {
       54             :         return sched_clock();
       55             : }
      56             : 
                      : /* Noinstr-safe local clock: delegate to sched_clock_noinstr(). */
       57             : static __always_inline u64 local_clock_noinstr(void)
       58             : {
       59             :         return sched_clock_noinstr();
       60             : }
      61             : 
                      : /*
                      :  * Without unstable-clock support the local clock is simply
                      :  * sched_clock(); no per-CPU offset is applied.
                      :  */
       62             : static __always_inline u64 local_clock(void)
       63             : {
       64           0 :         return sched_clock();
       65             : }
      66             : #else
      67             : extern int sched_clock_stable(void);
      68             : extern void clear_sched_clock_stable(void);
      69             : 
      70             : /*
      71             :  * When sched_clock_stable(), __sched_clock_offset provides the offset
      72             :  * between local_clock() and sched_clock().
      73             :  */
      74             : extern u64 __sched_clock_offset;
      75             : 
      76             : extern void sched_clock_tick(void);
      77             : extern void sched_clock_tick_stable(void);
      78             : extern void sched_clock_idle_sleep_event(void);
      79             : extern void sched_clock_idle_wakeup_event(void);
      80             : 
      81             : /*
      82             :  * As outlined in clock.c, provides a fast, high resolution, nanosecond
      83             :  * time source that is monotonic per cpu argument and has bounded drift
      84             :  * between cpus.
      85             :  *
      86             :  * ######################### BIG FAT WARNING ##########################
      87             :  * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
      88             :  * # go backwards !!                                                  #
      89             :  * ####################################################################
      90             :  */
                      : /*
                      :  * Unstable-clock case: delegate to sched_clock_cpu() for a
                      :  * clock that is monotonic per @cpu (see the warning above —
                      :  * values from different CPUs must not be compared directly).
                      :  */
       91             : static inline u64 cpu_clock(int cpu)
       92             : {
       93             :         return sched_clock_cpu(cpu);
       94             : }
      95             : 
      96             : extern u64 local_clock_noinstr(void);
      97             : extern u64 local_clock(void);
      98             : 
      99             : #endif
     100             : 
     101             : #ifdef CONFIG_IRQ_TIME_ACCOUNTING
     102             : /*
     103             :  * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
     104             :  * The reason for this explicit opt-in is not to have perf penalty with
     105             :  * slow sched_clocks.
     106             :  */
     107             : extern void enable_sched_clock_irqtime(void);
     108             : extern void disable_sched_clock_irqtime(void);
     109             : #else
     110             : static inline void enable_sched_clock_irqtime(void) {}
     111             : static inline void disable_sched_clock_irqtime(void) {}
     112             : #endif
     113             : 
     114             : #endif /* _LINUX_SCHED_CLOCK_H */

Generated by: LCOV version 1.14