LCOV - code coverage report
Current view: top level - include/linux - perf_event.h (source / functions)
Test: fstests of 6.5.0-rc4-xfsx @ Mon Jul 31 20:08:34 PDT 2023
Date: 2023-07-31 20:08:34
Coverage:         Hit   Total   Coverage
    Lines:          0       1      0.0 %
    Functions:      0       0          -

          Line data    Source code
       1             : /*
       2             :  * Performance events:
       3             :  *
       4             :  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
       5             :  *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
       6             :  *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
       7             :  *
       8             :  * Data type definitions, declarations, prototypes.
       9             :  *
      10             :  *    Started by: Thomas Gleixner and Ingo Molnar
      11             :  *
       12             :  * For licensing details see kernel-base/COPYING
      13             :  */
      14             : #ifndef _LINUX_PERF_EVENT_H
      15             : #define _LINUX_PERF_EVENT_H
      16             : 
      17             : #include <uapi/linux/perf_event.h>
      18             : #include <uapi/linux/bpf_perf_event.h>
      19             : 
      20             : /*
      21             :  * Kernel-internal data types and definitions:
      22             :  */
      23             : 
      24             : #ifdef CONFIG_PERF_EVENTS
      25             : # include <asm/perf_event.h>
      26             : # include <asm/local64.h>
      27             : #endif
      28             : 
      29             : #define PERF_GUEST_ACTIVE       0x01
       30             : #define PERF_GUEST_USER         0x02
      31             : 
      32             : struct perf_guest_info_callbacks {
      33             :         unsigned int                    (*state)(void);
      34             :         unsigned long                   (*get_ip)(void);
      35             :         unsigned int                    (*handle_intel_pt_intr)(void);
      36             : };
      37             : 
      38             : #ifdef CONFIG_HAVE_HW_BREAKPOINT
      39             : #include <linux/rhashtable-types.h>
      40             : #include <asm/hw_breakpoint.h>
      41             : #endif
      42             : 
      43             : #include <linux/list.h>
      44             : #include <linux/mutex.h>
      45             : #include <linux/rculist.h>
      46             : #include <linux/rcupdate.h>
      47             : #include <linux/spinlock.h>
      48             : #include <linux/hrtimer.h>
      49             : #include <linux/fs.h>
      50             : #include <linux/pid_namespace.h>
      51             : #include <linux/workqueue.h>
      52             : #include <linux/ftrace.h>
      53             : #include <linux/cpu.h>
      54             : #include <linux/irq_work.h>
      55             : #include <linux/static_key.h>
      56             : #include <linux/jump_label_ratelimit.h>
      57             : #include <linux/atomic.h>
      58             : #include <linux/sysfs.h>
      59             : #include <linux/perf_regs.h>
      60             : #include <linux/cgroup.h>
      61             : #include <linux/refcount.h>
      62             : #include <linux/security.h>
      63             : #include <linux/static_call.h>
      64             : #include <linux/lockdep.h>
      65             : #include <asm/local.h>
      66             : 
      67             : struct perf_callchain_entry {
      68             :         __u64                           nr;
      69             :         __u64                           ip[]; /* /proc/sys/kernel/perf_event_max_stack */
      70             : };
      71             : 
      72             : struct perf_callchain_entry_ctx {
      73             :         struct perf_callchain_entry *entry;
      74             :         u32                         max_stack;
      75             :         u32                         nr;
      76             :         short                       contexts;
      77             :         bool                        contexts_maxed;
      78             : };
      79             : 
      80             : typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
      81             :                                      unsigned long off, unsigned long len);
      82             : 
      83             : struct perf_raw_frag {
      84             :         union {
      85             :                 struct perf_raw_frag    *next;
      86             :                 unsigned long           pad;
      87             :         };
      88             :         perf_copy_f                     copy;
      89             :         void                            *data;
      90             :         u32                             size;
      91             : } __packed;
      92             : 
      93             : struct perf_raw_record {
      94             :         struct perf_raw_frag            frag;
      95             :         u32                             size;
      96             : };
      97             : 
      98             : static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
      99             : {
     100             :         return frag->pad < sizeof(u64);
     101             : }
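/*
 * Illustrative sketch (not part of this header): walking a raw-record
 * fragment chain.  perf_raw_frag_last() relies on the pad/next union: the
 * last fragment holds a small value (e.g. NULL) where an earlier fragment
 * holds a valid next pointer.  consume_bytes() is a hypothetical consumer;
 * real users also honour frag->copy when it is set.
 */
static void walk_raw_record(struct perf_raw_record *raw)
{
	struct perf_raw_frag *frag = &raw->frag;

	do {
		consume_bytes(frag->data, frag->size);
		if (perf_raw_frag_last(frag))
			break;
		frag = frag->next;
	} while (1);
}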
     102             : 
     103             : /*
     104             :  * branch stack layout:
     105             :  *  nr: number of taken branches stored in entries[]
     106             :  *  hw_idx: The low level index of raw branch records
     107             :  *          for the most recent branch.
     108             :  *          -1ULL means invalid/unknown.
     109             :  *
      110             :  * Note that nr can vary from sample to sample.
      111             :  * Branches (to, from) are stored from most recent
      112             :  * to least recent, i.e., entries[0] contains the most
      113             :  * recent branch.
     114             :  * The entries[] is an abstraction of raw branch records,
     115             :  * which may not be stored in age order in HW, e.g. Intel LBR.
     116             :  * The hw_idx is to expose the low level index of raw
     117             :  * branch record for the most recent branch aka entries[0].
     118             :  * The hw_idx index is between -1 (unknown) and max depth,
     119             :  * which can be retrieved in /sys/devices/cpu/caps/branches.
     120             :  * For the architectures whose raw branch records are
     121             :  * already stored in age order, the hw_idx should be 0.
     122             :  */
     123             : struct perf_branch_stack {
     124             :         __u64                           nr;
     125             :         __u64                           hw_idx;
     126             :         struct perf_branch_entry        entries[];
     127             : };
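/*
 * Illustrative sketch (not part of this header): entries[] holds the most
 * recent taken branch first, so consumers walk it front to back.  The
 * from/to fields come from struct perf_branch_entry in
 * <uapi/linux/perf_event.h>, which is included above.
 */
static void walk_branch_stack(const struct perf_branch_stack *bs)
{
	u64 i;

	for (i = 0; i < bs->nr; i++) {
		const struct perf_branch_entry *br = &bs->entries[i];

		/* i == 0 is the most recent branch, see the comment above */
		pr_debug("branch %llu: %llx -> %llx\n", i, br->from, br->to);
	}
}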
     128             : 
     129             : struct task_struct;
     130             : 
     131             : /*
     132             :  * extra PMU register associated with an event
     133             :  */
     134             : struct hw_perf_event_extra {
     135             :         u64             config; /* register value */
     136             :         unsigned int    reg;    /* register address or index */
     137             :         int             alloc;  /* extra register already allocated */
     138             :         int             idx;    /* index in shared_regs->regs[] */
     139             : };
     140             : 
     141             : /**
     142             :  * hw_perf_event::flag values
     143             :  *
     144             :  * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
     145             :  * usage.
     146             :  */
     147             : #define PERF_EVENT_FLAG_ARCH                    0x000fffff
     148             : #define PERF_EVENT_FLAG_USER_READ_CNT           0x80000000
     149             : 
     150             : static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
     151             : 
     152             : /**
     153             :  * struct hw_perf_event - performance event hardware details:
     154             :  */
     155             : struct hw_perf_event {
     156             : #ifdef CONFIG_PERF_EVENTS
     157             :         union {
     158             :                 struct { /* hardware */
     159             :                         u64             config;
     160             :                         u64             last_tag;
     161             :                         unsigned long   config_base;
     162             :                         unsigned long   event_base;
     163             :                         int             event_base_rdpmc;
     164             :                         int             idx;
     165             :                         int             last_cpu;
     166             :                         int             flags;
     167             : 
     168             :                         struct hw_perf_event_extra extra_reg;
     169             :                         struct hw_perf_event_extra branch_reg;
     170             :                 };
     171             :                 struct { /* software */
     172             :                         struct hrtimer  hrtimer;
     173             :                 };
     174             :                 struct { /* tracepoint */
     175             :                         /* for tp_event->class */
     176             :                         struct list_head        tp_list;
     177             :                 };
     178             :                 struct { /* amd_power */
     179             :                         u64     pwr_acc;
     180             :                         u64     ptsc;
     181             :                 };
     182             : #ifdef CONFIG_HAVE_HW_BREAKPOINT
     183             :                 struct { /* breakpoint */
     184             :                         /*
     185             :                          * Crufty hack to avoid the chicken and egg
     186             :                          * problem hw_breakpoint has with context
      187             :                          * creation and event initialization.
     188             :                          */
     189             :                         struct arch_hw_breakpoint       info;
     190             :                         struct rhlist_head              bp_list;
     191             :                 };
     192             : #endif
     193             :                 struct { /* amd_iommu */
     194             :                         u8      iommu_bank;
     195             :                         u8      iommu_cntr;
     196             :                         u16     padding;
     197             :                         u64     conf;
     198             :                         u64     conf1;
     199             :                 };
     200             :         };
     201             :         /*
     202             :          * If the event is a per task event, this will point to the task in
     203             :          * question. See the comment in perf_event_alloc().
     204             :          */
     205             :         struct task_struct              *target;
     206             : 
     207             :         /*
     208             :          * PMU would store hardware filter configuration
     209             :          * here.
     210             :          */
     211             :         void                            *addr_filters;
     212             : 
     213             :         /* Last sync'ed generation of filters */
     214             :         unsigned long                   addr_filters_gen;
     215             : 
     216             : /*
     217             :  * hw_perf_event::state flags; used to track the PERF_EF_* state.
     218             :  */
     219             : #define PERF_HES_STOPPED        0x01 /* the counter is stopped */
     220             : #define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
     221             : #define PERF_HES_ARCH           0x04
     222             : 
     223             :         int                             state;
     224             : 
     225             :         /*
     226             :          * The last observed hardware counter value, updated with a
     227             :          * local64_cmpxchg() such that pmu::read() can be called nested.
     228             :          */
     229             :         local64_t                       prev_count;
     230             : 
     231             :         /*
     232             :          * The period to start the next sample with.
     233             :          */
     234             :         u64                             sample_period;
     235             : 
     236             :         union {
     237             :                 struct { /* Sampling */
     238             :                         /*
     239             :                          * The period we started this sample with.
     240             :                          */
     241             :                         u64                             last_period;
     242             : 
     243             :                         /*
     244             :                          * However much is left of the current period;
      245             :                          * note that this is a full 64-bit value and
     246             :                          * allows for generation of periods longer
     247             :                          * than hardware might allow.
     248             :                          */
     249             :                         local64_t                       period_left;
     250             :                 };
     251             :                 struct { /* Topdown events counting for context switch */
     252             :                         u64                             saved_metric;
     253             :                         u64                             saved_slots;
     254             :                 };
     255             :         };
     256             : 
     257             :         /*
     258             :          * State for throttling the event, see __perf_event_overflow() and
     259             :          * perf_adjust_freq_unthr_context().
     260             :          */
     261             :         u64                             interrupts_seq;
     262             :         u64                             interrupts;
     263             : 
     264             :         /*
     265             :          * State for freq target events, see __perf_event_overflow() and
     266             :          * perf_adjust_freq_unthr_context().
     267             :          */
     268             :         u64                             freq_time_stamp;
     269             :         u64                             freq_count_stamp;
     270             : #endif
     271             : };
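/*
 * Illustrative sketch (not part of this header): the prev_count /
 * period_left update pattern described above, modelled on how architecture
 * PMU drivers typically implement pmu::read().  read_hw_counter() is a
 * hypothetical raw counter read; counter-width sign extension is elided.
 */
static void example_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, now;
	s64 delta;

	do {
		prev = local64_read(&hwc->prev_count);
		now  = read_hw_counter(hwc->idx);
		/* the cmpxchg loop makes nested pmu::read() calls safe */
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	delta = now - prev;
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}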
     272             : 
     273             : struct perf_event;
     274             : struct perf_event_pmu_context;
     275             : 
     276             : /*
     277             :  * Common implementation detail of pmu::{start,commit,cancel}_txn
     278             :  */
     279             : #define PERF_PMU_TXN_ADD  0x1           /* txn to add/schedule event on PMU */
     280             : #define PERF_PMU_TXN_READ 0x2           /* txn to read event group from PMU */
     281             : 
     282             : /**
     283             :  * pmu::capabilities flags
     284             :  */
     285             : #define PERF_PMU_CAP_NO_INTERRUPT               0x0001
     286             : #define PERF_PMU_CAP_NO_NMI                     0x0002
     287             : #define PERF_PMU_CAP_AUX_NO_SG                  0x0004
     288             : #define PERF_PMU_CAP_EXTENDED_REGS              0x0008
     289             : #define PERF_PMU_CAP_EXCLUSIVE                  0x0010
     290             : #define PERF_PMU_CAP_ITRACE                     0x0020
     291             : #define PERF_PMU_CAP_HETEROGENEOUS_CPUS         0x0040
     292             : #define PERF_PMU_CAP_NO_EXCLUDE                 0x0080
     293             : #define PERF_PMU_CAP_AUX_OUTPUT                 0x0100
     294             : #define PERF_PMU_CAP_EXTENDED_HW_TYPE           0x0200
     295             : 
     296             : struct perf_output_handle;
     297             : 
     298             : #define PMU_NULL_DEV    ((void *)(~0UL))
     299             : 
     300             : /**
     301             :  * struct pmu - generic performance monitoring unit
     302             :  */
     303             : struct pmu {
     304             :         struct list_head                entry;
     305             : 
     306             :         struct module                   *module;
     307             :         struct device                   *dev;
     308             :         struct device                   *parent;
     309             :         const struct attribute_group    **attr_groups;
     310             :         const struct attribute_group    **attr_update;
     311             :         const char                      *name;
     312             :         int                             type;
     313             : 
     314             :         /*
     315             :          * various common per-pmu feature flags
     316             :          */
     317             :         int                             capabilities;
     318             : 
     319             :         int __percpu                    *pmu_disable_count;
     320             :         struct perf_cpu_pmu_context __percpu *cpu_pmu_context;
     321             :         atomic_t                        exclusive_cnt; /* < 0: cpu; > 0: tsk */
     322             :         int                             task_ctx_nr;
     323             :         int                             hrtimer_interval_ms;
     324             : 
     325             :         /* number of address filters this PMU can do */
     326             :         unsigned int                    nr_addr_filters;
     327             : 
     328             :         /*
     329             :          * Fully disable/enable this PMU, can be used to protect from the PMI
     330             :          * as well as for lazy/batch writing of the MSRs.
     331             :          */
     332             :         void (*pmu_enable)              (struct pmu *pmu); /* optional */
     333             :         void (*pmu_disable)             (struct pmu *pmu); /* optional */
     334             : 
     335             :         /*
     336             :          * Try and initialize the event for this PMU.
     337             :          *
     338             :          * Returns:
     339             :          *  -ENOENT     -- @event is not for this PMU
     340             :          *
     341             :          *  -ENODEV     -- @event is for this PMU but PMU not present
     342             :          *  -EBUSY      -- @event is for this PMU but PMU temporarily unavailable
     343             :          *  -EINVAL     -- @event is for this PMU but @event is not valid
     344             :          *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
     345             :          *  -EACCES     -- @event is for this PMU, @event is valid, but no privileges
     346             :          *
     347             :          *  0           -- @event is for this PMU and valid
     348             :          *
     349             :          * Other error return values are allowed.
     350             :          */
     351             :         int (*event_init)               (struct perf_event *event);
     352             : 
     353             :         /*
     354             :          * Notification that the event was mapped or unmapped.  Called
     355             :          * in the context of the mapping task.
     356             :          */
     357             :         void (*event_mapped)            (struct perf_event *event, struct mm_struct *mm); /* optional */
     358             :         void (*event_unmapped)          (struct perf_event *event, struct mm_struct *mm); /* optional */
     359             : 
     360             :         /*
     361             :          * Flags for ->add()/->del()/ ->start()/->stop(). There are
     362             :          * matching hw_perf_event::state flags.
     363             :          */
     364             : #define PERF_EF_START   0x01            /* start the counter when adding    */
     365             : #define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
     366             : #define PERF_EF_UPDATE  0x04            /* update the counter when stopping */
     367             : 
     368             :         /*
     369             :          * Adds/Removes a counter to/from the PMU, can be done inside a
     370             :          * transaction, see the ->*_txn() methods.
     371             :          *
     372             :          * The add/del callbacks will reserve all hardware resources required
      373             :          * to service the event; this includes any counter constraint
      374             :          * scheduling, etc.
     375             :          *
     376             :          * Called with IRQs disabled and the PMU disabled on the CPU the event
     377             :          * is on.
     378             :          *
     379             :          * ->add() called without PERF_EF_START should result in the same state
     380             :          *  as ->add() followed by ->stop().
     381             :          *
     382             :          * ->del() must always PERF_EF_UPDATE stop an event. If it calls
     383             :          *  ->stop() that must deal with already being stopped without
     384             :          *  PERF_EF_UPDATE.
     385             :          */
     386             :         int  (*add)                     (struct perf_event *event, int flags);
     387             :         void (*del)                     (struct perf_event *event, int flags);
     388             : 
     389             :         /*
     390             :          * Starts/Stops a counter present on the PMU.
     391             :          *
     392             :          * The PMI handler should stop the counter when perf_event_overflow()
     393             :          * returns !0. ->start() will be used to continue.
     394             :          *
     395             :          * Also used to change the sample period.
     396             :          *
     397             :          * Called with IRQs disabled and the PMU disabled on the CPU the event
      398             :          * is on -- will be called from NMI context when the PMU generates
     399             :          * NMIs.
     400             :          *
     401             :          * ->stop() with PERF_EF_UPDATE will read the counter and update
     402             :          *  period/count values like ->read() would.
     403             :          *
     404             :          * ->start() with PERF_EF_RELOAD will reprogram the counter
     405             :          *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
     406             :          */
     407             :         void (*start)                   (struct perf_event *event, int flags);
     408             :         void (*stop)                    (struct perf_event *event, int flags);
     409             : 
     410             :         /*
     411             :          * Updates the counter value of the event.
     412             :          *
     413             :          * For sampling capable PMUs this will also update the software period
     414             :          * hw_perf_event::period_left field.
     415             :          */
     416             :         void (*read)                    (struct perf_event *event);
     417             : 
     418             :         /*
      419             :          * Group event scheduling is treated as a transaction: add the
      420             :          * group's events as a whole and perform one schedulability test.
      421             :          * If the test fails, roll back the whole group.
     422             :          *
     423             :          * Start the transaction, after this ->add() doesn't need to
     424             :          * do schedulability tests.
     425             :          *
     426             :          * Optional.
     427             :          */
     428             :         void (*start_txn)               (struct pmu *pmu, unsigned int txn_flags);
     429             :         /*
     430             :          * If ->start_txn() disabled the ->add() schedulability test
     431             :          * then ->commit_txn() is required to perform one. On success
     432             :          * the transaction is closed. On error the transaction is kept
     433             :          * open until ->cancel_txn() is called.
     434             :          *
     435             :          * Optional.
     436             :          */
     437             :         int  (*commit_txn)              (struct pmu *pmu);
     438             :         /*
     439             :          * Will cancel the transaction, assumes ->del() is called
     440             :          * for each successful ->add() during the transaction.
     441             :          *
     442             :          * Optional.
     443             :          */
     444             :         void (*cancel_txn)              (struct pmu *pmu);
     445             : 
     446             :         /*
      447             :          * Will return the value for perf_event_mmap_page::index for this event;
      448             :          * if no implementation is provided it will default to: event->hw.idx + 1.
      449             :          */
      450             :         int (*event_idx)                (struct perf_event *event); /* optional */
     451             : 
     452             :         /*
     453             :          * context-switches callback
     454             :          */
     455             :         void (*sched_task)              (struct perf_event_pmu_context *pmu_ctx,
     456             :                                         bool sched_in);
     457             : 
     458             :         /*
     459             :          * Kmem cache of PMU specific data
     460             :          */
     461             :         struct kmem_cache               *task_ctx_cache;
     462             : 
     463             :         /*
     464             :          * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
     465             :          * can be synchronized using this function. See Intel LBR callstack support
     466             :          * implementation and Perf core context switch handling callbacks for usage
     467             :          * examples.
     468             :          */
     469             :         void (*swap_task_ctx)           (struct perf_event_pmu_context *prev_epc,
     470             :                                          struct perf_event_pmu_context *next_epc);
     471             :                                         /* optional */
     472             : 
     473             :         /*
     474             :          * Set up pmu-private data structures for an AUX area
     475             :          */
     476             :         void *(*setup_aux)              (struct perf_event *event, void **pages,
     477             :                                          int nr_pages, bool overwrite);
     478             :                                         /* optional */
     479             : 
     480             :         /*
     481             :          * Free pmu-private AUX data structures
     482             :          */
     483             :         void (*free_aux)                (void *aux); /* optional */
     484             : 
     485             :         /*
     486             :          * Take a snapshot of the AUX buffer without touching the event
     487             :          * state, so that preempting ->start()/->stop() callbacks does
     488             :          * not interfere with their logic. Called in PMI context.
     489             :          *
     490             :          * Returns the size of AUX data copied to the output handle.
     491             :          *
     492             :          * Optional.
     493             :          */
     494             :         long (*snapshot_aux)            (struct perf_event *event,
     495             :                                          struct perf_output_handle *handle,
     496             :                                          unsigned long size);
     497             : 
     498             :         /*
     499             :          * Validate address range filters: make sure the HW supports the
     500             :          * requested configuration and number of filters; return 0 if the
     501             :          * supplied filters are valid, -errno otherwise.
     502             :          *
     503             :          * Runs in the context of the ioctl()ing process and is not serialized
     504             :          * with the rest of the PMU callbacks.
     505             :          */
     506             :         int (*addr_filters_validate)    (struct list_head *filters);
     507             :                                         /* optional */
     508             : 
     509             :         /*
     510             :          * Synchronize address range filter configuration:
     511             :          * translate hw-agnostic filters into hardware configuration in
     512             :          * event::hw::addr_filters.
     513             :          *
     514             :          * Runs as a part of filter sync sequence that is done in ->start()
     515             :          * callback by calling perf_event_addr_filters_sync().
     516             :          *
     517             :          * May (and should) traverse event::addr_filters::list, for which its
     518             :          * caller provides necessary serialization.
     519             :          */
     520             :         void (*addr_filters_sync)       (struct perf_event *event);
     521             :                                         /* optional */
     522             : 
     523             :         /*
     524             :          * Check if event can be used for aux_output purposes for
     525             :          * events of this PMU.
     526             :          *
     527             :          * Runs from perf_event_open(). Should return 0 for "no match"
     528             :          * or non-zero for "match".
     529             :          */
     530             :         int (*aux_output_match)         (struct perf_event *event);
     531             :                                         /* optional */
     532             : 
     533             :         /*
     534             :          * Skip programming this PMU on the given CPU. Typically needed for
     535             :          * big.LITTLE things.
     536             :          */
     537             :         bool (*filter)                  (struct pmu *pmu, int cpu); /* optional */
     538             : 
     539             :         /*
     540             :          * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
     541             :          */
     542             :         int (*check_period)             (struct perf_event *event, u64 value); /* optional */
     543             : };
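/*
 * Illustrative sketch (not part of this header): the skeleton of a driver
 * implementing the callbacks documented above.  The my_*() bodies are
 * hypothetical stubs; real drivers program hardware, honour the PERF_EF_*
 * flags and PERF_HES_* state bits, and handle scheduling constraints.
 */
static void my_start(struct perf_event *event, int flags)
{
	/* PERF_EF_RELOAD: reprogram the counter from hw.period_left */
	event->hw.state = 0;
}

static void my_stop(struct perf_event *event, int flags)
{
	event->hw.state |= PERF_HES_STOPPED;
	if (flags & PERF_EF_UPDATE)
		event->hw.state |= PERF_HES_UPTODATE;	/* fold the final count */
}

static void my_read(struct perf_event *event)
{
	/* update event->count (and period_left for sampling PMUs) */
}

static int my_event_init(struct perf_event *event)
{
	/* -ENOENT tells the core to try the next PMU */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;
	return 0;
}

static int my_add(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		my_start(event, PERF_EF_RELOAD);
	return 0;
}

static void my_del(struct perf_event *event, int flags)
{
	my_stop(event, PERF_EF_UPDATE);		/* ->del() always stops with UPDATE */
}

static struct pmu my_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,	/* example: no sampling */
	.event_init	= my_event_init,
	.add		= my_add,
	.del		= my_del,
	.start		= my_start,
	.stop		= my_stop,
	.read		= my_read,
};

static int my_pmu_init(void)
{
	/* -1: let the core assign a dynamic PMU type */
	return perf_pmu_register(&my_pmu, "my_pmu", -1);
}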
     544             : 
     545             : enum perf_addr_filter_action_t {
     546             :         PERF_ADDR_FILTER_ACTION_STOP = 0,
     547             :         PERF_ADDR_FILTER_ACTION_START,
     548             :         PERF_ADDR_FILTER_ACTION_FILTER,
     549             : };
     550             : 
     551             : /**
     552             :  * struct perf_addr_filter - address range filter definition
     553             :  * @entry:      event's filter list linkage
     554             :  * @path:       object file's path for file-based filters
     555             :  * @offset:     filter range offset
     556             :  * @size:       filter range size (size==0 means single address trigger)
     557             :  * @action:     filter/start/stop
     558             :  *
     559             :  * This is a hardware-agnostic filter configuration as specified by the user.
     560             :  */
     561             : struct perf_addr_filter {
     562             :         struct list_head        entry;
     563             :         struct path             path;
     564             :         unsigned long           offset;
     565             :         unsigned long           size;
     566             :         enum perf_addr_filter_action_t  action;
     567             : };
     568             : 
     569             : /**
     570             :  * struct perf_addr_filters_head - container for address range filters
     571             :  * @list:       list of filters for this event
     572             :  * @lock:       spinlock that serializes accesses to the @list and event's
     573             :  *              (and its children's) filter generations.
     574             :  * @nr_file_filters:    number of file-based filters
     575             :  *
     576             :  * A child event will use parent's @list (and therefore @lock), so they are
     577             :  * bundled together; see perf_event_addr_filters().
     578             :  */
     579             : struct perf_addr_filters_head {
     580             :         struct list_head        list;
     581             :         raw_spinlock_t          lock;
     582             :         unsigned int            nr_file_filters;
     583             : };
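/*
 * Illustrative sketch (not part of this header): an addr_filters_sync()-style
 * walk over the filter list, taking @lock as described above.
 * perf_event_addr_filters() is the helper, defined further down in this
 * header, that selects the parent's list for child events.
 */
static void example_sync_filters(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
	struct perf_addr_filter *filter;
	unsigned long flags;

	raw_spin_lock_irqsave(&ifh->lock, flags);
	list_for_each_entry(filter, &ifh->list, entry) {
		/* translate filter->offset / filter->size into HW configuration */
	}
	raw_spin_unlock_irqrestore(&ifh->lock, flags);
}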
     584             : 
     585             : struct perf_addr_filter_range {
     586             :         unsigned long           start;
     587             :         unsigned long           size;
     588             : };
     589             : 
     590             : /**
     591             :  * enum perf_event_state - the states of an event:
     592             :  */
     593             : enum perf_event_state {
     594             :         PERF_EVENT_STATE_DEAD           = -4,
     595             :         PERF_EVENT_STATE_EXIT           = -3,
     596             :         PERF_EVENT_STATE_ERROR          = -2,
     597             :         PERF_EVENT_STATE_OFF            = -1,
     598             :         PERF_EVENT_STATE_INACTIVE       =  0,
     599             :         PERF_EVENT_STATE_ACTIVE         =  1,
     600             : };
     601             : 
     602             : struct file;
     603             : struct perf_sample_data;
     604             : 
     605             : typedef void (*perf_overflow_handler_t)(struct perf_event *,
     606             :                                         struct perf_sample_data *,
     607             :                                         struct pt_regs *regs);
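/*
 * Illustrative sketch (not part of this header): an in-kernel user supplying
 * an overflow handler of this type through
 * perf_event_create_kernel_counter(), which is declared further down in this
 * header.  The attr values are arbitrary example choices.
 */
static void my_overflow(struct perf_event *event,
			struct perf_sample_data *data,
			struct pt_regs *regs)
{
	/* runs from the PMU interrupt once sample_period events have elapsed */
	pr_debug("event %llu overflowed\n", event->id);
}

static struct perf_event *create_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 1000000,
	};

	/* NULL task: a CPU-bound counter; the last argument is handler context */
	return perf_event_create_kernel_counter(&attr, cpu, NULL,
						my_overflow, NULL);
}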
     608             : 
     609             : /*
     610             :  * Event capabilities. For event_caps and groups caps.
     611             :  *
     612             :  * PERF_EV_CAP_SOFTWARE: Is a software event.
     613             :  * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
     614             :  * from any CPU in the package where it is active.
     615             :  * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
     616             :  * cannot be a group leader. If an event with this flag is detached from the
     617             :  * group it is scheduled out and moved into an unrecoverable ERROR state.
     618             :  */
     619             : #define PERF_EV_CAP_SOFTWARE            BIT(0)
     620             : #define PERF_EV_CAP_READ_ACTIVE_PKG     BIT(1)
     621             : #define PERF_EV_CAP_SIBLING             BIT(2)
     622             : 
     623             : #define SWEVENT_HLIST_BITS              8
     624             : #define SWEVENT_HLIST_SIZE              (1 << SWEVENT_HLIST_BITS)
     625             : 
     626             : struct swevent_hlist {
     627             :         struct hlist_head               heads[SWEVENT_HLIST_SIZE];
     628             :         struct rcu_head                 rcu_head;
     629             : };
     630             : 
     631             : #define PERF_ATTACH_CONTEXT     0x01
     632             : #define PERF_ATTACH_GROUP       0x02
     633             : #define PERF_ATTACH_TASK        0x04
     634             : #define PERF_ATTACH_TASK_DATA   0x08
     635             : #define PERF_ATTACH_ITRACE      0x10
     636             : #define PERF_ATTACH_SCHED_CB    0x20
     637             : #define PERF_ATTACH_CHILD       0x40
     638             : 
     639             : struct bpf_prog;
     640             : struct perf_cgroup;
     641             : struct perf_buffer;
     642             : 
     643             : struct pmu_event_list {
     644             :         raw_spinlock_t          lock;
     645             :         struct list_head        list;
     646             : };
     647             : 
     648             : /*
      649             :  * event->sibling_list is modified while holding both ctx->lock and ctx->mutex;
      650             :  * as such, iteration must hold either lock. However, since ctx->lock is an IRQ
     651             :  * safe lock, and is only held by the CPU doing the modification, having IRQs
     652             :  * disabled is sufficient since it will hold-off the IPIs.
     653             :  */
     654             : #ifdef CONFIG_PROVE_LOCKING
     655             : #define lockdep_assert_event_ctx(event)                         \
     656             :         WARN_ON_ONCE(__lockdep_enabled &&                       \
     657             :                      (this_cpu_read(hardirqs_enabled) &&        \
     658             :                       lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
     659             : #else
     660             : #define lockdep_assert_event_ctx(event)
     661             : #endif
     662             : 
     663             : #define for_each_sibling_event(sibling, event)                  \
     664             :         lockdep_assert_event_ctx(event);                        \
     665             :         if ((event)->group_leader == (event))                        \
     666             :                 list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
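/*
 * Illustrative sketch (not part of this header): for_each_sibling_event()
 * only iterates when @event is the group leader, and the lockdep assertion
 * above expects ctx->mutex (or IRQs disabled with ctx->lock) to be held.
 * This mirrors how group_caps is the AND of all members' event_caps.
 */
static int example_group_caps(struct perf_event *leader)
{
	struct perf_event *sibling;
	int caps = leader->event_caps;

	for_each_sibling_event(sibling, leader)
		caps &= sibling->event_caps;

	return caps;
}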
     667             : 
     668             : /**
     669             :  * struct perf_event - performance event kernel representation:
     670             :  */
     671             : struct perf_event {
     672             : #ifdef CONFIG_PERF_EVENTS
     673             :         /*
     674             :          * entry onto perf_event_context::event_list;
     675             :          *   modifications require ctx->lock
     676             :          *   RCU safe iterations.
     677             :          */
     678             :         struct list_head                event_entry;
     679             : 
     680             :         /*
     681             :          * Locked for modification by both ctx->mutex and ctx->lock; holding
      682             :          * either suffices for read.
     683             :          */
     684             :         struct list_head                sibling_list;
     685             :         struct list_head                active_list;
     686             :         /*
     687             :          * Node on the pinned or flexible tree located at the event context;
     688             :          */
     689             :         struct rb_node                  group_node;
     690             :         u64                             group_index;
     691             :         /*
     692             :          * We need storage to track the entries in perf_pmu_migrate_context; we
     693             :          * cannot use the event_entry because of RCU and we want to keep the
      694             :          * group intact, which keeps us from using the other two entries.
     695             :          */
     696             :         struct list_head                migrate_entry;
     697             : 
     698             :         struct hlist_node               hlist_entry;
     699             :         struct list_head                active_entry;
     700             :         int                             nr_siblings;
     701             : 
     702             :         /* Not serialized. Only written during event initialization. */
     703             :         int                             event_caps;
     704             :         /* The cumulative AND of all event_caps for events in this group. */
     705             :         int                             group_caps;
     706             : 
     707             :         struct perf_event               *group_leader;
     708             :         /*
      709             :          * event->pmu always points to the pmu this event belongs to, whereas
      710             :          * event->pmu_ctx->pmu may point to a different pmu when a group of
      711             :          * events from different pmus is created.
     712             :          */
     713             :         struct pmu                      *pmu;
     714             :         void                            *pmu_private;
     715             : 
     716             :         enum perf_event_state           state;
     717             :         unsigned int                    attach_state;
     718             :         local64_t                       count;
     719             :         atomic64_t                      child_count;
     720             : 
     721             :         /*
     722             :          * These are the total time in nanoseconds that the event
     723             :          * has been enabled (i.e. eligible to run, and the task has
     724             :          * been scheduled in, if this is a per-task event)
     725             :          * and running (scheduled onto the CPU), respectively.
     726             :          */
     727             :         u64                             total_time_enabled;
     728             :         u64                             total_time_running;
     729             :         u64                             tstamp;
     730             : 
     731             :         struct perf_event_attr          attr;
     732             :         u16                             header_size;
     733             :         u16                             id_header_size;
     734             :         u16                             read_size;
     735             :         struct hw_perf_event            hw;
     736             : 
     737             :         struct perf_event_context       *ctx;
     738             :         /*
      739             :          * event->pmu_ctx points to the perf_event_pmu_context to which the event
      740             :          * is added. For a software event that is part of a group which also
      741             :          * contains non-software events, this pmu_ctx can belong to a different pmu.
     742             :          */
     743             :         struct perf_event_pmu_context   *pmu_ctx;
     744             :         atomic_long_t                   refcount;
     745             : 
     746             :         /*
     747             :          * These accumulate total time (in nanoseconds) that children
     748             :          * events have been enabled and running, respectively.
     749             :          */
     750             :         atomic64_t                      child_total_time_enabled;
     751             :         atomic64_t                      child_total_time_running;
     752             : 
     753             :         /*
     754             :          * Protect attach/detach and child_list:
     755             :          */
     756             :         struct mutex                    child_mutex;
     757             :         struct list_head                child_list;
     758             :         struct perf_event               *parent;
     759             : 
     760             :         int                             oncpu;
     761             :         int                             cpu;
     762             : 
     763             :         struct list_head                owner_entry;
     764             :         struct task_struct              *owner;
     765             : 
     766             :         /* mmap bits */
     767             :         struct mutex                    mmap_mutex;
     768             :         atomic_t                        mmap_count;
     769             : 
     770             :         struct perf_buffer              *rb;
     771             :         struct list_head                rb_entry;
     772             :         unsigned long                   rcu_batches;
     773             :         int                             rcu_pending;
     774             : 
     775             :         /* poll related */
     776             :         wait_queue_head_t               waitq;
     777             :         struct fasync_struct            *fasync;
     778             : 
     779             :         /* delayed work for NMIs and such */
     780             :         unsigned int                    pending_wakeup;
     781             :         unsigned int                    pending_kill;
     782             :         unsigned int                    pending_disable;
     783             :         unsigned int                    pending_sigtrap;
     784             :         unsigned long                   pending_addr;   /* SIGTRAP */
     785             :         struct irq_work                 pending_irq;
     786             :         struct callback_head            pending_task;
     787             :         unsigned int                    pending_work;
     788             : 
     789             :         atomic_t                        event_limit;
     790             : 
     791             :         /* address range filters */
     792             :         struct perf_addr_filters_head   addr_filters;
      793             :         /* vma address array for file-based filters */
     794             :         struct perf_addr_filter_range   *addr_filter_ranges;
     795             :         unsigned long                   addr_filters_gen;
     796             : 
     797             :         /* for aux_output events */
     798             :         struct perf_event               *aux_event;
     799             : 
     800             :         void (*destroy)(struct perf_event *);
     801             :         struct rcu_head                 rcu_head;
     802             : 
     803             :         struct pid_namespace            *ns;
     804             :         u64                             id;
     805             : 
     806             :         atomic64_t                      lost_samples;
     807             : 
     808             :         u64                             (*clock)(void);
     809             :         perf_overflow_handler_t         overflow_handler;
     810             :         void                            *overflow_handler_context;
     811             : #ifdef CONFIG_BPF_SYSCALL
     812             :         perf_overflow_handler_t         orig_overflow_handler;
     813             :         struct bpf_prog                 *prog;
     814             :         u64                             bpf_cookie;
     815             : #endif
     816             : 
     817             : #ifdef CONFIG_EVENT_TRACING
     818             :         struct trace_event_call         *tp_event;
     819             :         struct event_filter             *filter;
     820             : #ifdef CONFIG_FUNCTION_TRACER
     821             :         struct ftrace_ops               ftrace_ops;
     822             : #endif
     823             : #endif
     824             : 
     825             : #ifdef CONFIG_CGROUP_PERF
      826             :         struct perf_cgroup              *cgrp; /* cgroup the event is attached to */
     827             : #endif
     828             : 
     829             : #ifdef CONFIG_SECURITY
     830             :         void *security;
     831             : #endif
     832             :         struct list_head                sb_list;
     833             : 
     834             :         /*
      835             :          * Certain events get forwarded to another pmu internally by over-
      836             :          * writing the kernel copy of event->attr.type without the user being
      837             :          * aware of it. event->orig_type contains the original 'type' requested
      838             :          * by the user.
     839             :          */
     840             :         __u32                           orig_type;
     841             : #endif /* CONFIG_PERF_EVENTS */
     842             : };
     843             : 
     844             : /*
     845             :  *           ,-----------------------[1:n]----------------------.
     846             :  *           V                                                  V
     847             :  * perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event
     848             :  *           ^                      ^     |                     |
     849             :  *           `--------[1:n]---------'     `-[n:1]-> pmu <-[1:n]-'
     850             :  *
     851             :  *
     852             :  * struct perf_event_pmu_context  lifetime is refcount based and RCU freed
     853             :  * (similar to perf_event_context). Locking is as if it were a member of
     854             :  * perf_event_context; specifically:
     855             :  *
     856             :  *   modification, both: ctx->mutex && ctx->lock
     857             :  *   reading, either:    ctx->mutex || ctx->lock
     858             :  *
     859             :  * There is one exception to this; namely put_pmu_ctx() isn't always called
     860             :  * with ctx->mutex held; this means that as long as we can guarantee the epc
     861             :  * has events the above rules hold.
     862             :  *
      863             :  * Specifically, sys_perf_event_open()'s group_leader case depends on
      864             :  * ctx->mutex pinning the configuration. Since we hold a reference on
      865             :  * group_leader (through the filedesc) it can't go away; therefore its
      866             :  * associated pmu_ctx must exist and cannot change due to ctx->mutex.
     867             :  */
     868             : struct perf_event_pmu_context {
     869             :         struct pmu                      *pmu;
     870             :         struct perf_event_context       *ctx;
     871             : 
     872             :         struct list_head                pmu_ctx_entry;
     873             : 
     874             :         struct list_head                pinned_active;
     875             :         struct list_head                flexible_active;
     876             : 
     877             :         /* Used to avoid freeing per-cpu perf_event_pmu_context */
     878             :         unsigned int                    embedded : 1;
     879             : 
     880             :         unsigned int                    nr_events;
     881             : 
     882             :         atomic_t                        refcount; /* event <-> epc */
     883             :         struct rcu_head                 rcu_head;
     884             : 
     885             :         void                            *task_ctx_data; /* pmu specific data */
     886             :         /*
      887             :          * Set when one or more (plausibly active) events can't be scheduled
      888             :          * due to pmu overcommit or pmu constraints. Events that need not be
      889             :          * active due to scheduling constraints, such as cgroups, are tolerated
      890             :          * and do not set this.
     891             :          */
     892             :         int                             rotate_necessary;
     893             : };
     894             : 
     895             : struct perf_event_groups {
     896             :         struct rb_root  tree;
     897             :         u64             index;
     898             : };
     899             : 
     900             : 
     901             : /**
     902             :  * struct perf_event_context - event context structure
     903             :  *
     904             :  * Used as a container for task events and CPU events as well:
     905             :  */
     906             : struct perf_event_context {
     907             :         /*
     908             :          * Protect the states of the events in the list,
     909             :          * nr_active, and the list:
     910             :          */
     911             :         raw_spinlock_t                  lock;
     912             :         /*
     913             :          * Protect the list of events.  Locking either mutex or lock
     914             :          * is sufficient to ensure the list doesn't change; to change
     915             :          * the list you need to lock both the mutex and the spinlock.
     916             :          */
     917             :         struct mutex                    mutex;
     918             : 
     919             :         struct list_head                pmu_ctx_list;
     920             :         struct perf_event_groups        pinned_groups;
     921             :         struct perf_event_groups        flexible_groups;
     922             :         struct list_head                event_list;
     923             : 
     924             :         int                             nr_events;
     925             :         int                             nr_user;
     926             :         int                             is_active;
     927             : 
     928             :         int                             nr_task_data;
     929             :         int                             nr_stat;
     930             :         int                             nr_freq;
     931             :         int                             rotate_disable;
     932             : 
     933             :         refcount_t                      refcount; /* event <-> ctx */
     934             :         struct task_struct              *task;
     935             : 
     936             :         /*
     937             :          * Context clock, runs when context enabled.
     938             :          */
     939             :         u64                             time;
     940             :         u64                             timestamp;
     941             :         u64                             timeoffset;
     942             : 
     943             :         /*
     944             :          * These fields let us detect when two contexts have both
     945             :          * been cloned (inherited) from a common ancestor.
     946             :          */
     947             :         struct perf_event_context       *parent_ctx;
     948             :         u64                             parent_gen;
     949             :         u64                             generation;
     950             :         int                             pin_count;
     951             : #ifdef CONFIG_CGROUP_PERF
     952             :         int                             nr_cgroups;      /* cgroup evts */
     953             : #endif
     954             :         struct rcu_head                 rcu_head;
     955             : 
     956             :         /*
     957             :          * Sum (event->pending_sigtrap + event->pending_work)
     958             :          *
      959             :          * The SIGTRAP is targeted at ctx->task; as such, ctx->task must not
      960             :          * be changed until the signal is delivered.
     961             :          */
     962             :         local_t                         nr_pending;
     963             : };
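
/*
 * Editor's sketch (not part of the original header): the locking rule from
 * the comments inside struct perf_event_context, written out. The helper
 * names are hypothetical; only the rule itself (readers may hold either
 * ctx->mutex or ctx->lock, writers must hold both) comes from the header.
 */
static inline void example_ctx_walk(struct perf_event_context *ctx)
{
	struct perf_event *event;

	raw_spin_lock_irq(&ctx->lock);		/* either lock pins the list */
	list_for_each_entry(event, &ctx->event_list, event_entry)
		;				/* read-only inspection of @event */
	raw_spin_unlock_irq(&ctx->lock);
}

static inline void example_ctx_add(struct perf_event_context *ctx,
				   struct perf_event *event)
{
	mutex_lock(&ctx->mutex);		/* writers take the mutex ... */
	raw_spin_lock_irq(&ctx->lock);		/* ... and the spinlock */
	list_add_tail(&event->event_entry, &ctx->event_list);
	raw_spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);
}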
     964             : 
     965             : /*
     966             :  * Number of contexts where an event can trigger:
     967             :  *      task, softirq, hardirq, nmi.
     968             :  */
     969             : #define PERF_NR_CONTEXTS        4
     970             : 
     971             : struct perf_cpu_pmu_context {
     972             :         struct perf_event_pmu_context   epc;
     973             :         struct perf_event_pmu_context   *task_epc;
     974             : 
     975             :         struct list_head                sched_cb_entry;
     976             :         int                             sched_cb_usage;
     977             : 
     978             :         int                             active_oncpu;
     979             :         int                             exclusive;
     980             : 
     981             :         raw_spinlock_t                  hrtimer_lock;
     982             :         struct hrtimer                  hrtimer;
     983             :         ktime_t                         hrtimer_interval;
     984             :         unsigned int                    hrtimer_active;
     985             : };
     986             : 
     987             : /**
      988             :  * struct perf_cpu_context - per CPU event context structure
     989             :  */
     990             : struct perf_cpu_context {
     991             :         struct perf_event_context       ctx;
     992             :         struct perf_event_context       *task_ctx;
     993             :         int                             online;
     994             : 
     995             : #ifdef CONFIG_CGROUP_PERF
     996             :         struct perf_cgroup              *cgrp;
     997             : #endif
     998             : 
     999             :         /*
    1000             :          * Per-CPU storage for iterators used in visit_groups_merge. The default
    1001             :          * storage is of size 2 to hold the CPU and any CPU event iterators.
    1002             :          */
    1003             :         int                             heap_size;
    1004             :         struct perf_event               **heap;
    1005             :         struct perf_event               *heap_default[2];
    1006             : };
    1007             : 
    1008             : struct perf_output_handle {
    1009             :         struct perf_event               *event;
    1010             :         struct perf_buffer              *rb;
    1011             :         unsigned long                   wakeup;
    1012             :         unsigned long                   size;
    1013             :         u64                             aux_flags;
    1014             :         union {
    1015             :                 void                    *addr;
    1016             :                 unsigned long           head;
    1017             :         };
    1018             :         int                             page;
    1019             : };
    1020             : 
    1021             : struct bpf_perf_event_data_kern {
    1022             :         bpf_user_pt_regs_t *regs;
    1023             :         struct perf_sample_data *data;
    1024             :         struct perf_event *event;
    1025             : };
    1026             : 
    1027             : #ifdef CONFIG_CGROUP_PERF
    1028             : 
    1029             : /*
    1030             :  * perf_cgroup_info keeps track of time_enabled for a cgroup.
    1031             :  * This is a per-cpu dynamically allocated data structure.
    1032             :  */
    1033             : struct perf_cgroup_info {
    1034             :         u64                             time;
    1035             :         u64                             timestamp;
    1036             :         u64                             timeoffset;
    1037             :         int                             active;
    1038             : };
    1039             : 
    1040             : struct perf_cgroup {
    1041             :         struct cgroup_subsys_state      css;
    1042             :         struct perf_cgroup_info __percpu *info;
    1043             : };
    1044             : 
    1045             : /*
    1046             :  * Must ensure cgroup is pinned (css_get) before calling
    1047             :  * this function. In other words, we cannot call this function
    1048             :  * if there is no cgroup event for the current CPU context.
    1049             :  */
    1050             : static inline struct perf_cgroup *
    1051             : perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
    1052             : {
    1053             :         return container_of(task_css_check(task, perf_event_cgrp_id,
    1054             :                                            ctx ? lockdep_is_held(&ctx->lock)
    1055             :                                                : true),
    1056             :                             struct perf_cgroup, css);
    1057             : }
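
/*
 * Editor's sketch (not from the original header): honouring the pinning
 * requirement above. Holding rcu_read_lock() (or ctx->lock, or a css
 * reference taken with css_get()) keeps the cgroup alive across the lookup;
 * the helper name is hypothetical.
 */
static inline u64 example_task_perf_cgroup_id(struct task_struct *task)
{
	struct perf_cgroup *cgrp;
	u64 id;

	rcu_read_lock();
	cgrp = perf_cgroup_from_task(task, NULL);
	id = cgroup_id(cgrp->css.cgroup);	/* use only while still protected */
	rcu_read_unlock();

	return id;
}
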
    1058             : #endif /* CONFIG_CGROUP_PERF */
    1059             : 
    1060             : #ifdef CONFIG_PERF_EVENTS
    1061             : 
    1062             : extern struct perf_event_context *perf_cpu_task_ctx(void);
    1063             : 
    1064             : extern void *perf_aux_output_begin(struct perf_output_handle *handle,
    1065             :                                    struct perf_event *event);
    1066             : extern void perf_aux_output_end(struct perf_output_handle *handle,
    1067             :                                 unsigned long size);
    1068             : extern int perf_aux_output_skip(struct perf_output_handle *handle,
    1069             :                                 unsigned long size);
    1070             : extern void *perf_get_aux(struct perf_output_handle *handle);
    1071             : extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
    1072             : extern void perf_event_itrace_started(struct perf_event *event);
    1073             : 
    1074             : extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
    1075             : extern void perf_pmu_unregister(struct pmu *pmu);
    1076             : 
    1077             : extern void __perf_event_task_sched_in(struct task_struct *prev,
    1078             :                                        struct task_struct *task);
    1079             : extern void __perf_event_task_sched_out(struct task_struct *prev,
    1080             :                                         struct task_struct *next);
    1081             : extern int perf_event_init_task(struct task_struct *child, u64 clone_flags);
    1082             : extern void perf_event_exit_task(struct task_struct *child);
    1083             : extern void perf_event_free_task(struct task_struct *task);
    1084             : extern void perf_event_delayed_put(struct task_struct *task);
    1085             : extern struct file *perf_event_get(unsigned int fd);
    1086             : extern const struct perf_event *perf_get_event(struct file *file);
    1087             : extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
    1088             : extern void perf_event_print_debug(void);
    1089             : extern void perf_pmu_disable(struct pmu *pmu);
    1090             : extern void perf_pmu_enable(struct pmu *pmu);
    1091             : extern void perf_sched_cb_dec(struct pmu *pmu);
    1092             : extern void perf_sched_cb_inc(struct pmu *pmu);
    1093             : extern int perf_event_task_disable(void);
    1094             : extern int perf_event_task_enable(void);
    1095             : 
    1096             : extern void perf_pmu_resched(struct pmu *pmu);
    1097             : 
    1098             : extern int perf_event_refresh(struct perf_event *event, int refresh);
    1099             : extern void perf_event_update_userpage(struct perf_event *event);
    1100             : extern int perf_event_release_kernel(struct perf_event *event);
    1101             : extern struct perf_event *
    1102             : perf_event_create_kernel_counter(struct perf_event_attr *attr,
    1103             :                                 int cpu,
    1104             :                                 struct task_struct *task,
    1105             :                                 perf_overflow_handler_t callback,
    1106             :                                 void *context);
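
/*
 * Editor's sketch (not from the original header): a typical in-kernel user
 * of perf_event_create_kernel_counter() above, in the style of the
 * hard-lockup watchdog. The attribute values and the helper name are
 * illustrative only. On failure the core returns an ERR_PTR(), so callers
 * check the result with IS_ERR().
 */
static inline struct perf_event *example_create_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(struct perf_event_attr),
		.pinned		= 1,
		.disabled	= 1,
	};

	/* CPU-bound counter: no task, no overflow callback, no context. */
	return perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
}
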
    1107             : extern void perf_pmu_migrate_context(struct pmu *pmu,
    1108             :                                 int src_cpu, int dst_cpu);
    1109             : int perf_event_read_local(struct perf_event *event, u64 *value,
    1110             :                           u64 *enabled, u64 *running);
    1111             : extern u64 perf_event_read_value(struct perf_event *event,
    1112             :                                  u64 *enabled, u64 *running);
    1113             : 
    1114             : extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
    1115             : 
    1116             : static inline bool branch_sample_no_flags(const struct perf_event *event)
    1117             : {
    1118             :         return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
    1119             : }
    1120             : 
    1121             : static inline bool branch_sample_no_cycles(const struct perf_event *event)
    1122             : {
    1123             :         return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
    1124             : }
    1125             : 
    1126             : static inline bool branch_sample_type(const struct perf_event *event)
    1127             : {
    1128             :         return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
    1129             : }
    1130             : 
    1131             : static inline bool branch_sample_hw_index(const struct perf_event *event)
    1132             : {
    1133             :         return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
    1134             : }
    1135             : 
    1136             : static inline bool branch_sample_priv(const struct perf_event *event)
    1137             : {
    1138             :         return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
    1139             : }
    1140             : 
    1141             : 
    1142             : struct perf_sample_data {
    1143             :         /*
    1144             :          * Fields set by perf_sample_data_init() unconditionally,
     1145             :          * grouped so as to minimize the cachelines touched.
    1146             :          */
    1147             :         u64                             sample_flags;
    1148             :         u64                             period;
    1149             :         u64                             dyn_size;
    1150             : 
    1151             :         /*
    1152             :          * Fields commonly set by __perf_event_header__init_id(),
     1153             :          * grouped so as to minimize the cachelines touched.
    1154             :          */
    1155             :         u64                             type;
    1156             :         struct {
    1157             :                 u32     pid;
    1158             :                 u32     tid;
    1159             :         }                               tid_entry;
    1160             :         u64                             time;
    1161             :         u64                             id;
    1162             :         struct {
    1163             :                 u32     cpu;
    1164             :                 u32     reserved;
    1165             :         }                               cpu_entry;
    1166             : 
    1167             :         /*
    1168             :          * The other fields, optionally {set,used} by
    1169             :          * perf_{prepare,output}_sample().
    1170             :          */
    1171             :         u64                             ip;
    1172             :         struct perf_callchain_entry     *callchain;
    1173             :         struct perf_raw_record          *raw;
    1174             :         struct perf_branch_stack        *br_stack;
    1175             :         union perf_sample_weight        weight;
    1176             :         union  perf_mem_data_src        data_src;
    1177             :         u64                             txn;
    1178             : 
    1179             :         struct perf_regs                regs_user;
    1180             :         struct perf_regs                regs_intr;
    1181             :         u64                             stack_user_size;
    1182             : 
    1183             :         u64                             stream_id;
    1184             :         u64                             cgroup;
    1185             :         u64                             addr;
    1186             :         u64                             phys_addr;
    1187             :         u64                             data_page_size;
    1188             :         u64                             code_page_size;
    1189             :         u64                             aux_size;
    1190             : } ____cacheline_aligned;
    1191             : 
    1192             : /* default value for data source */
    1193             : #define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
    1194             :                     PERF_MEM_S(LVL, NA)   |\
    1195             :                     PERF_MEM_S(SNOOP, NA) |\
    1196             :                     PERF_MEM_S(LOCK, NA)  |\
    1197             :                     PERF_MEM_S(TLB, NA))
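
/*
 * Editor's sketch (not from the original header): a PMU driver that cannot
 * decode a memory access typically falls back to the "not available"
 * encoding above. The helper name and the LOAD case are illustrative.
 */
static inline u64 example_mem_data_src(bool decoded_as_load)
{
	union perf_mem_data_src dse;

	dse.val = decoded_as_load ? PERF_MEM_S(OP, LOAD) : PERF_MEM_NA;
	return dse.val;
}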
    1198             : 
    1199             : static inline void perf_sample_data_init(struct perf_sample_data *data,
    1200             :                                          u64 addr, u64 period)
    1201             : {
    1202             :         /* remaining struct members initialized in perf_prepare_sample() */
    1203             :         data->sample_flags = PERF_SAMPLE_PERIOD;
    1204             :         data->period = period;
    1205             :         data->dyn_size = 0;
    1206             : 
    1207             :         if (addr) {
    1208             :                 data->addr = addr;
    1209             :                 data->sample_flags |= PERF_SAMPLE_ADDR;
    1210             :         }
    1211             : }
    1212             : 
    1213             : static inline void perf_sample_save_callchain(struct perf_sample_data *data,
    1214             :                                               struct perf_event *event,
    1215             :                                               struct pt_regs *regs)
    1216             : {
    1217             :         int size = 1;
    1218             : 
    1219             :         data->callchain = perf_callchain(event, regs);
    1220             :         size += data->callchain->nr;
    1221             : 
    1222             :         data->dyn_size += size * sizeof(u64);
    1223             :         data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
    1224             : }
    1225             : 
    1226             : static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
    1227             :                                              struct perf_raw_record *raw)
    1228             : {
    1229             :         struct perf_raw_frag *frag = &raw->frag;
    1230             :         u32 sum = 0;
    1231             :         int size;
    1232             : 
    1233             :         do {
    1234             :                 sum += frag->size;
    1235             :                 if (perf_raw_frag_last(frag))
    1236             :                         break;
    1237             :                 frag = frag->next;
    1238             :         } while (1);
    1239             : 
    1240             :         size = round_up(sum + sizeof(u32), sizeof(u64));
    1241             :         raw->size = size - sizeof(u32);
    1242             :         frag->pad = raw->size - sum;
    1243             : 
    1244             :         data->raw = raw;
    1245             :         data->dyn_size += size;
    1246             :         data->sample_flags |= PERF_SAMPLE_RAW;
    1247             : }
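
/*
 * Editor's sketch (not from the original header): attaching raw data to a
 * sample with the helper above, the way tracepoint/BPF output paths build a
 * single-fragment perf_raw_record. Names other than the perf types are
 * illustrative.
 */
static inline void example_save_raw(struct perf_sample_data *data,
				    void *buf, u32 len)
{
	struct perf_raw_record raw = {
		.frag = {
			.size = len,
			.data = buf,
		},
	};

	/* A single fragment: frag.next is NULL, so it is also the last one. */
	perf_sample_save_raw_data(data, &raw);
}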
    1248             : 
    1249             : static inline void perf_sample_save_brstack(struct perf_sample_data *data,
    1250             :                                             struct perf_event *event,
    1251             :                                             struct perf_branch_stack *brs)
    1252             : {
    1253             :         int size = sizeof(u64); /* nr */
    1254             : 
    1255             :         if (branch_sample_hw_index(event))
    1256             :                 size += sizeof(u64);
    1257             :         size += brs->nr * sizeof(struct perf_branch_entry);
    1258             : 
    1259             :         data->br_stack = brs;
    1260             :         data->dyn_size += size;
    1261             :         data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
    1262             : }
    1263             : 
    1264             : static inline u32 perf_sample_data_size(struct perf_sample_data *data,
    1265             :                                         struct perf_event *event)
    1266             : {
    1267             :         u32 size = sizeof(struct perf_event_header);
    1268             : 
    1269             :         size += event->header_size + event->id_header_size;
    1270             :         size += data->dyn_size;
    1271             : 
    1272             :         return size;
    1273             : }
    1274             : 
    1275             : /*
    1276             :  * Clear all bitfields in the perf_branch_entry.
    1277             :  * The to and from fields are not cleared because they are
     1278             :  * systematically modified by the caller.
    1279             :  */
    1280             : static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br)
    1281             : {
    1282             :         br->mispred = 0;
    1283             :         br->predicted = 0;
    1284             :         br->in_tx = 0;
    1285             :         br->abort = 0;
    1286             :         br->cycles = 0;
    1287             :         br->type = 0;
    1288             :         br->spec = PERF_BR_SPEC_NA;
    1289             :         br->reserved = 0;
    1290             : }
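
/*
 * Editor's sketch (not from the original header): how a PMU driver would
 * typically use the helper above while decoding one hardware branch record;
 * the helper name is illustrative.
 */
static inline void example_fill_branch_entry(struct perf_branch_entry *br,
					     u64 from, u64 to, bool mispred)
{
	perf_clear_branch_entry_bitfields(br);	/* zero every flag bitfield */
	br->from	= from;			/* from/to are always set by the caller */
	br->to		= to;
	br->mispred	= mispred;
	br->predicted	= !mispred;
}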
    1291             : 
    1292             : extern void perf_output_sample(struct perf_output_handle *handle,
    1293             :                                struct perf_event_header *header,
    1294             :                                struct perf_sample_data *data,
    1295             :                                struct perf_event *event);
    1296             : extern void perf_prepare_sample(struct perf_sample_data *data,
    1297             :                                 struct perf_event *event,
    1298             :                                 struct pt_regs *regs);
    1299             : extern void perf_prepare_header(struct perf_event_header *header,
    1300             :                                 struct perf_sample_data *data,
    1301             :                                 struct perf_event *event,
    1302             :                                 struct pt_regs *regs);
    1303             : 
    1304             : extern int perf_event_overflow(struct perf_event *event,
    1305             :                                  struct perf_sample_data *data,
    1306             :                                  struct pt_regs *regs);
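
/*
 * Editor's sketch (not from the original header): the usual overflow path in
 * a PMU interrupt handler, tying perf_sample_data_init() (above) to
 * perf_event_overflow(). The helper name is illustrative; a non-zero return
 * from perf_event_overflow() means the event got throttled.
 */
static inline void example_pmu_handle_overflow(struct perf_event *event,
					       struct pt_regs *regs)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	if (perf_event_overflow(event, &data, regs))
		event->pmu->stop(event, 0);	/* throttled: stop counting */
}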
    1307             : 
    1308             : extern void perf_event_output_forward(struct perf_event *event,
    1309             :                                      struct perf_sample_data *data,
    1310             :                                      struct pt_regs *regs);
    1311             : extern void perf_event_output_backward(struct perf_event *event,
    1312             :                                        struct perf_sample_data *data,
    1313             :                                        struct pt_regs *regs);
    1314             : extern int perf_event_output(struct perf_event *event,
    1315             :                              struct perf_sample_data *data,
    1316             :                              struct pt_regs *regs);
    1317             : 
    1318             : static inline bool
    1319             : is_default_overflow_handler(struct perf_event *event)
    1320             : {
    1321             :         if (likely(event->overflow_handler == perf_event_output_forward))
    1322             :                 return true;
    1323             :         if (unlikely(event->overflow_handler == perf_event_output_backward))
    1324             :                 return true;
    1325             :         return false;
    1326             : }
    1327             : 
    1328             : extern void
    1329             : perf_event_header__init_id(struct perf_event_header *header,
    1330             :                            struct perf_sample_data *data,
    1331             :                            struct perf_event *event);
    1332             : extern void
    1333             : perf_event__output_id_sample(struct perf_event *event,
    1334             :                              struct perf_output_handle *handle,
    1335             :                              struct perf_sample_data *sample);
    1336             : 
    1337             : extern void
    1338             : perf_log_lost_samples(struct perf_event *event, u64 lost);
    1339             : 
    1340             : static inline bool event_has_any_exclude_flag(struct perf_event *event)
    1341             : {
    1342             :         struct perf_event_attr *attr = &event->attr;
    1343             : 
    1344             :         return attr->exclude_idle || attr->exclude_user ||
    1345             :                attr->exclude_kernel || attr->exclude_hv ||
    1346             :                attr->exclude_guest || attr->exclude_host;
    1347             : }
    1348             : 
    1349             : static inline bool is_sampling_event(struct perf_event *event)
    1350             : {
    1351             :         return event->attr.sample_period != 0;
    1352             : }
    1353             : 
    1354             : /*
    1355             :  * Return 1 for a software event, 0 for a hardware event
    1356             :  */
    1357             : static inline int is_software_event(struct perf_event *event)
    1358             : {
    1359             :         return event->event_caps & PERF_EV_CAP_SOFTWARE;
    1360             : }
    1361             : 
    1362             : /*
     1363             :  * Return 1 for an event in sw context, 0 for an event in hw context
    1364             :  */
    1365             : static inline int in_software_context(struct perf_event *event)
    1366             : {
    1367             :         return event->pmu_ctx->pmu->task_ctx_nr == perf_sw_context;
    1368             : }
    1369             : 
    1370             : static inline int is_exclusive_pmu(struct pmu *pmu)
    1371             : {
    1372             :         return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
    1373             : }
    1374             : 
    1375             : extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
    1376             : 
    1377             : extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
    1378             : extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
    1379             : 
    1380             : #ifndef perf_arch_fetch_caller_regs
    1381             : static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
    1382             : #endif
    1383             : 
    1384             : /*
    1385             :  * When generating a perf sample in-line, instead of from an interrupt /
    1386             :  * exception, we lack a pt_regs. This is typically used from software events
    1387             :  * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
    1388             :  *
    1389             :  * We typically don't need a full set, but (for x86) do require:
    1390             :  * - ip for PERF_SAMPLE_IP
    1391             :  * - cs for user_mode() tests
    1392             :  * - sp for PERF_SAMPLE_CALLCHAIN
    1393             :  * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
    1394             :  *
    1395             :  * NOTE: assumes @regs is otherwise already 0 filled; this is important for
    1396             :  * things like PERF_SAMPLE_REGS_INTR.
    1397             :  */
    1398             : static inline void perf_fetch_caller_regs(struct pt_regs *regs)
    1399             : {
    1400           0 :         perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
    1401             : }
    1402             : 
    1403             : static __always_inline void
    1404             : perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
    1405             : {
    1406             :         if (static_key_false(&perf_swevent_enabled[event_id]))
    1407             :                 __perf_sw_event(event_id, nr, regs, addr);
    1408             : }
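
/*
 * Editor's sketch (not from the original header): emitting a software event
 * from a spot that has no pt_regs of its own, per the NOTE above about
 * passing zero-filled regs. Real callers use per-CPU scratch regs (see
 * __perf_sw_event_sched() below) rather than an on-stack pt_regs; the event
 * choice and helper name here are illustrative.
 */
static __always_inline void example_emit_sw_event(u64 count)
{
	struct pt_regs regs = { };		/* must start out zero-filled */

	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_DUMMY])) {
		perf_fetch_caller_regs(&regs);
		___perf_sw_event(PERF_COUNT_SW_DUMMY, count, &regs, 0);
	}
}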
    1409             : 
    1410             : DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
    1411             : 
    1412             : /*
     1413             :  * 'Special' version for the scheduler; it hard-assumes no recursion,
     1414             :  * which is guaranteed because we never actually schedule inside other
     1415             :  * swevents (those disable preemption).
    1416             :  */
    1417             : static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
    1418             : {
    1419             :         struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
    1420             : 
    1421             :         perf_fetch_caller_regs(regs);
    1422             :         ___perf_sw_event(event_id, nr, regs, addr);
    1423             : }
    1424             : 
    1425             : extern struct static_key_false perf_sched_events;
    1426             : 
    1427             : static __always_inline bool __perf_sw_enabled(int swevt)
    1428             : {
    1429             :         return static_key_false(&perf_swevent_enabled[swevt]);
    1430             : }
    1431             : 
    1432             : static inline void perf_event_task_migrate(struct task_struct *task)
    1433             : {
    1434             :         if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS))
    1435             :                 task->sched_migrated = 1;
    1436             : }
    1437             : 
    1438             : static inline void perf_event_task_sched_in(struct task_struct *prev,
    1439             :                                             struct task_struct *task)
    1440             : {
    1441             :         if (static_branch_unlikely(&perf_sched_events))
    1442             :                 __perf_event_task_sched_in(prev, task);
    1443             : 
    1444             :         if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) &&
    1445             :             task->sched_migrated) {
    1446             :                 __perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
    1447             :                 task->sched_migrated = 0;
    1448             :         }
    1449             : }
    1450             : 
    1451             : static inline void perf_event_task_sched_out(struct task_struct *prev,
    1452             :                                              struct task_struct *next)
    1453             : {
    1454             :         if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES))
    1455             :                 __perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
    1456             : 
    1457             : #ifdef CONFIG_CGROUP_PERF
    1458             :         if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) &&
    1459             :             perf_cgroup_from_task(prev, NULL) !=
    1460             :             perf_cgroup_from_task(next, NULL))
    1461             :                 __perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0);
    1462             : #endif
    1463             : 
    1464             :         if (static_branch_unlikely(&perf_sched_events))
    1465             :                 __perf_event_task_sched_out(prev, next);
    1466             : }
    1467             : 
    1468             : extern void perf_event_mmap(struct vm_area_struct *vma);
    1469             : 
    1470             : extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
    1471             :                                bool unregister, const char *sym);
    1472             : extern void perf_event_bpf_event(struct bpf_prog *prog,
    1473             :                                  enum perf_bpf_event_type type,
    1474             :                                  u16 flags);
    1475             : 
    1476             : #ifdef CONFIG_GUEST_PERF_EVENTS
    1477             : extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
    1478             : 
    1479             : DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
    1480             : DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
    1481             : DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
    1482             : 
    1483             : static inline unsigned int perf_guest_state(void)
    1484             : {
    1485             :         return static_call(__perf_guest_state)();
    1486             : }
    1487             : static inline unsigned long perf_guest_get_ip(void)
    1488             : {
    1489             :         return static_call(__perf_guest_get_ip)();
    1490             : }
    1491             : static inline unsigned int perf_guest_handle_intel_pt_intr(void)
    1492             : {
    1493             :         return static_call(__perf_guest_handle_intel_pt_intr)();
    1494             : }
    1495             : extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
    1496             : extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
    1497             : #else
    1498             : static inline unsigned int perf_guest_state(void)                { return 0; }
    1499             : static inline unsigned long perf_guest_get_ip(void)              { return 0; }
    1500             : static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
    1501             : #endif /* CONFIG_GUEST_PERF_EVENTS */
    1502             : 
    1503             : extern void perf_event_exec(void);
    1504             : extern void perf_event_comm(struct task_struct *tsk, bool exec);
    1505             : extern void perf_event_namespaces(struct task_struct *tsk);
    1506             : extern void perf_event_fork(struct task_struct *tsk);
    1507             : extern void perf_event_text_poke(const void *addr,
    1508             :                                  const void *old_bytes, size_t old_len,
    1509             :                                  const void *new_bytes, size_t new_len);
    1510             : 
    1511             : /* Callchains */
    1512             : DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
    1513             : 
    1514             : extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
    1515             : extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
    1516             : extern struct perf_callchain_entry *
    1517             : get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
    1518             :                    u32 max_stack, bool crosstask, bool add_mark);
    1519             : extern int get_callchain_buffers(int max_stack);
    1520             : extern void put_callchain_buffers(void);
    1521             : extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
    1522             : extern void put_callchain_entry(int rctx);
    1523             : 
    1524             : extern int sysctl_perf_event_max_stack;
    1525             : extern int sysctl_perf_event_max_contexts_per_stack;
    1526             : 
    1527             : static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
    1528             : {
    1529             :         if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
    1530             :                 struct perf_callchain_entry *entry = ctx->entry;
    1531             :                 entry->ip[entry->nr++] = ip;
    1532             :                 ++ctx->contexts;
    1533             :                 return 0;
    1534             :         } else {
    1535             :                 ctx->contexts_maxed = true;
    1536             :                 return -1; /* no more room, stop walking the stack */
    1537             :         }
    1538             : }
    1539             : 
    1540             : static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
    1541             : {
    1542             :         if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
    1543             :                 struct perf_callchain_entry *entry = ctx->entry;
    1544             :                 entry->ip[entry->nr++] = ip;
    1545             :                 ++ctx->nr;
    1546             :                 return 0;
    1547             :         } else {
    1548             :                 return -1; /* no more room, stop walking the stack */
    1549             :         }
    1550             : }
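
/*
 * Editor's sketch (not from the original header): the shape of an
 * architecture's kernel-side callchain walker built on the helper above.
 * The unwind loop is elided; only the store/stop contract is shown.
 */
static inline void example_callchain_walk(struct perf_callchain_entry_ctx *entry,
					  struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);

	/* perf_callchain_store() returns -1 once the entry is full. */
	if (perf_callchain_store(entry, addr))
		return;

	/* ... unwind caller frames, storing each return address the same way ... */
}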
    1551             : 
    1552             : extern int sysctl_perf_event_paranoid;
    1553             : extern int sysctl_perf_event_mlock;
    1554             : extern int sysctl_perf_event_sample_rate;
    1555             : extern int sysctl_perf_cpu_time_max_percent;
    1556             : 
    1557             : extern void perf_sample_event_took(u64 sample_len_ns);
    1558             : 
    1559             : int perf_proc_update_handler(struct ctl_table *table, int write,
    1560             :                 void *buffer, size_t *lenp, loff_t *ppos);
    1561             : int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
    1562             :                 void *buffer, size_t *lenp, loff_t *ppos);
    1563             : int perf_event_max_stack_handler(struct ctl_table *table, int write,
    1564             :                 void *buffer, size_t *lenp, loff_t *ppos);
    1565             : 
    1566             : /* Access to perf_event_open(2) syscall. */
    1567             : #define PERF_SECURITY_OPEN              0
    1568             : 
    1569             : /* Finer grained perf_event_open(2) access control. */
    1570             : #define PERF_SECURITY_CPU               1
    1571             : #define PERF_SECURITY_KERNEL            2
    1572             : #define PERF_SECURITY_TRACEPOINT        3
    1573             : 
    1574             : static inline int perf_is_paranoid(void)
    1575             : {
    1576             :         return sysctl_perf_event_paranoid > -1;
    1577             : }
    1578             : 
    1579             : static inline int perf_allow_kernel(struct perf_event_attr *attr)
    1580             : {
    1581             :         if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
    1582             :                 return -EACCES;
    1583             : 
    1584             :         return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
    1585             : }
    1586             : 
    1587             : static inline int perf_allow_cpu(struct perf_event_attr *attr)
    1588             : {
    1589             :         if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
    1590             :                 return -EACCES;
    1591             : 
    1592             :         return security_perf_event_open(attr, PERF_SECURITY_CPU);
    1593             : }
    1594             : 
    1595             : static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
    1596             : {
    1597             :         if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
    1598             :                 return -EPERM;
    1599             : 
    1600             :         return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
    1601             : }
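
/*
 * Editor's sketch (not from the original header): how an event-creation path
 * typically gates a kernel-sampling request on the checks above; the helper
 * name is illustrative.
 */
static inline int example_check_kernel_access(struct perf_event_attr *attr)
{
	/* Requesting kernel samples needs paranoia/LSM approval. */
	if (!attr->exclude_kernel)
		return perf_allow_kernel(attr);

	return 0;
}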
    1602             : 
    1603             : extern void perf_event_init(void);
    1604             : extern void perf_tp_event(u16 event_type, u64 count, void *record,
    1605             :                           int entry_size, struct pt_regs *regs,
    1606             :                           struct hlist_head *head, int rctx,
    1607             :                           struct task_struct *task);
    1608             : extern void perf_bp_event(struct perf_event *event, void *data);
    1609             : 
    1610             : #ifndef perf_misc_flags
    1611             : # define perf_misc_flags(regs) \
    1612             :                 (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
    1613             : # define perf_instruction_pointer(regs) instruction_pointer(regs)
    1614             : #endif
    1615             : #ifndef perf_arch_bpf_user_pt_regs
    1616             : # define perf_arch_bpf_user_pt_regs(regs) regs
    1617             : #endif
    1618             : 
    1619             : static inline bool has_branch_stack(struct perf_event *event)
    1620             : {
    1621             :         return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
    1622             : }
    1623             : 
    1624             : static inline bool needs_branch_stack(struct perf_event *event)
    1625             : {
    1626             :         return event->attr.branch_sample_type != 0;
    1627             : }
    1628             : 
    1629             : static inline bool has_aux(struct perf_event *event)
    1630             : {
    1631             :         return event->pmu->setup_aux;
    1632             : }
    1633             : 
    1634             : static inline bool is_write_backward(struct perf_event *event)
    1635             : {
    1636             :         return !!event->attr.write_backward;
    1637             : }
    1638             : 
    1639             : static inline bool has_addr_filter(struct perf_event *event)
    1640             : {
    1641             :         return event->pmu->nr_addr_filters;
    1642             : }
    1643             : 
    1644             : /*
     1645             :  * An inherited event uses its parent's filters
    1646             :  */
    1647             : static inline struct perf_addr_filters_head *
    1648             : perf_event_addr_filters(struct perf_event *event)
    1649             : {
    1650             :         struct perf_addr_filters_head *ifh = &event->addr_filters;
    1651             : 
    1652             :         if (event->parent)
    1653             :                 ifh = &event->parent->addr_filters;
    1654             : 
    1655             :         return ifh;
    1656             : }
    1657             : 
    1658             : extern void perf_event_addr_filters_sync(struct perf_event *event);
    1659             : extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);
    1660             : 
    1661             : extern int perf_output_begin(struct perf_output_handle *handle,
    1662             :                              struct perf_sample_data *data,
    1663             :                              struct perf_event *event, unsigned int size);
    1664             : extern int perf_output_begin_forward(struct perf_output_handle *handle,
    1665             :                                      struct perf_sample_data *data,
    1666             :                                      struct perf_event *event,
    1667             :                                      unsigned int size);
    1668             : extern int perf_output_begin_backward(struct perf_output_handle *handle,
    1669             :                                       struct perf_sample_data *data,
    1670             :                                       struct perf_event *event,
    1671             :                                       unsigned int size);
    1672             : 
    1673             : extern void perf_output_end(struct perf_output_handle *handle);
    1674             : extern unsigned int perf_output_copy(struct perf_output_handle *handle,
    1675             :                              const void *buf, unsigned int len);
    1676             : extern unsigned int perf_output_skip(struct perf_output_handle *handle,
    1677             :                                      unsigned int len);
    1678             : extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
    1679             :                                  struct perf_output_handle *handle,
    1680             :                                  unsigned long from, unsigned long to);
    1681             : extern int perf_swevent_get_recursion_context(void);
    1682             : extern void perf_swevent_put_recursion_context(int rctx);
    1683             : extern u64 perf_swevent_set_period(struct perf_event *event);
    1684             : extern void perf_event_enable(struct perf_event *event);
    1685             : extern void perf_event_disable(struct perf_event *event);
    1686             : extern void perf_event_disable_local(struct perf_event *event);
    1687             : extern void perf_event_disable_inatomic(struct perf_event *event);
    1688             : extern void perf_event_task_tick(void);
    1689             : extern int perf_event_account_interrupt(struct perf_event *event);
    1690             : extern int perf_event_period(struct perf_event *event, u64 value);
    1691             : extern u64 perf_event_pause(struct perf_event *event, bool reset);
    1692             : #else /* !CONFIG_PERF_EVENTS: */
    1693             : static inline void *
    1694             : perf_aux_output_begin(struct perf_output_handle *handle,
    1695             :                       struct perf_event *event)                         { return NULL; }
    1696             : static inline void
    1697             : perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
    1698             :                                                                         { }
    1699             : static inline int
    1700             : perf_aux_output_skip(struct perf_output_handle *handle,
    1701             :                      unsigned long size)                                { return -EINVAL; }
    1702             : static inline void *
    1703             : perf_get_aux(struct perf_output_handle *handle)                         { return NULL; }
    1704             : static inline void
    1705             : perf_event_task_migrate(struct task_struct *task)                       { }
    1706             : static inline void
    1707             : perf_event_task_sched_in(struct task_struct *prev,
    1708             :                          struct task_struct *task)                      { }
    1709             : static inline void
    1710             : perf_event_task_sched_out(struct task_struct *prev,
    1711             :                           struct task_struct *next)                     { }
    1712             : static inline int perf_event_init_task(struct task_struct *child,
    1713             :                                        u64 clone_flags)                 { return 0; }
    1714             : static inline void perf_event_exit_task(struct task_struct *child)      { }
    1715             : static inline void perf_event_free_task(struct task_struct *task)       { }
    1716             : static inline void perf_event_delayed_put(struct task_struct *task)     { }
    1717             : static inline struct file *perf_event_get(unsigned int fd)      { return ERR_PTR(-EINVAL); }
    1718             : static inline const struct perf_event *perf_get_event(struct file *file)
    1719             : {
    1720             :         return ERR_PTR(-EINVAL);
    1721             : }
    1722             : static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
    1723             : {
    1724             :         return ERR_PTR(-EINVAL);
    1725             : }
    1726             : static inline int perf_event_read_local(struct perf_event *event, u64 *value,
    1727             :                                         u64 *enabled, u64 *running)
    1728             : {
    1729             :         return -EINVAL;
    1730             : }
    1731             : static inline void perf_event_print_debug(void)                         { }
    1732             : static inline int perf_event_task_disable(void)                         { return -EINVAL; }
    1733             : static inline int perf_event_task_enable(void)                          { return -EINVAL; }
    1734             : static inline int perf_event_refresh(struct perf_event *event, int refresh)
    1735             : {
    1736             :         return -EINVAL;
    1737             : }
    1738             : 
    1739             : static inline void
    1740             : perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)     { }
    1741             : static inline void
    1742             : perf_bp_event(struct perf_event *event, void *data)                     { }
    1743             : 
    1744             : static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
    1745             : 
    1746             : typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
    1747             : static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
    1748             :                                       bool unregister, const char *sym) { }
    1749             : static inline void perf_event_bpf_event(struct bpf_prog *prog,
    1750             :                                         enum perf_bpf_event_type type,
    1751             :                                         u16 flags)                      { }
    1752             : static inline void perf_event_exec(void)                                { }
    1753             : static inline void perf_event_comm(struct task_struct *tsk, bool exec)  { }
    1754             : static inline void perf_event_namespaces(struct task_struct *tsk)       { }
    1755             : static inline void perf_event_fork(struct task_struct *tsk)             { }
    1756             : static inline void perf_event_text_poke(const void *addr,
    1757             :                                         const void *old_bytes,
    1758             :                                         size_t old_len,
    1759             :                                         const void *new_bytes,
    1760             :                                         size_t new_len)                 { }
    1761             : static inline void perf_event_init(void)                                { }
    1762             : static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
    1763             : static inline void perf_swevent_put_recursion_context(int rctx)         { }
    1764             : static inline u64 perf_swevent_set_period(struct perf_event *event)     { return 0; }
    1765             : static inline void perf_event_enable(struct perf_event *event)          { }
    1766             : static inline void perf_event_disable(struct perf_event *event)         { }
    1767             : static inline int __perf_event_disable(void *info)                      { return -1; }
    1768             : static inline void perf_event_task_tick(void)                           { }
    1769             : static inline int perf_event_release_kernel(struct perf_event *event)   { return 0; }
    1770             : static inline int perf_event_period(struct perf_event *event, u64 value)
    1771             : {
    1772             :         return -EINVAL;
    1773             : }
    1774             : static inline u64 perf_event_pause(struct perf_event *event, bool reset)
    1775             : {
    1776             :         return 0;
    1777             : }
    1778             : #endif
    1779             : 
    1780             : #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
    1781             : extern void perf_restore_debug_store(void);
    1782             : #else
    1783             : static inline void perf_restore_debug_store(void)                       { }
    1784             : #endif
    1785             : 
    1786             : #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
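
/*
 * Editor's sketch (not from the original header): the canonical record-emit
 * sequence built from perf_output_begin()/perf_output_put()/perf_output_end()
 * and the id-sample helpers declared above. The record type and payload are
 * purely illustrative, not a real PERF_RECORD_* layout.
 */
static inline void example_emit_record(struct perf_event *event, u64 payload)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_event_header header = {
		.type = PERF_RECORD_SAMPLE,	/* illustrative only */
		.misc = 0,
		.size = sizeof(header) + sizeof(payload),
	};

	perf_event_header__init_id(&header, &sample, event);

	if (perf_output_begin(&handle, &sample, event, header.size))
		return;				/* ring buffer full or inactive */

	perf_output_put(&handle, header);
	perf_output_put(&handle, payload);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
}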
    1787             : 
    1788             : struct perf_pmu_events_attr {
    1789             :         struct device_attribute attr;
    1790             :         u64 id;
    1791             :         const char *event_str;
    1792             : };
    1793             : 
    1794             : struct perf_pmu_events_ht_attr {
    1795             :         struct device_attribute                 attr;
    1796             :         u64                                     id;
    1797             :         const char                              *event_str_ht;
    1798             :         const char                              *event_str_noht;
    1799             : };
    1800             : 
    1801             : struct perf_pmu_events_hybrid_attr {
    1802             :         struct device_attribute                 attr;
    1803             :         u64                                     id;
    1804             :         const char                              *event_str;
    1805             :         u64                                     pmu_type;
    1806             : };
    1807             : 
    1808             : struct perf_pmu_format_hybrid_attr {
    1809             :         struct device_attribute                 attr;
    1810             :         u64                                     pmu_type;
    1811             : };
    1812             : 
    1813             : ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
    1814             :                               char *page);
    1815             : 
    1816             : #define PMU_EVENT_ATTR(_name, _var, _id, _show)                         \
    1817             : static struct perf_pmu_events_attr _var = {                             \
    1818             :         .attr = __ATTR(_name, 0444, _show, NULL),                       \
    1819             :         .id   =  _id,                                                   \
    1820             : };
    1821             : 
    1822             : #define PMU_EVENT_ATTR_STRING(_name, _var, _str)                            \
    1823             : static struct perf_pmu_events_attr _var = {                                 \
    1824             :         .attr           = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
    1825             :         .id             = 0,                                                \
    1826             :         .event_str      = _str,                                             \
    1827             : };
    1828             : 
    1829             : #define PMU_EVENT_ATTR_ID(_name, _show, _id)                            \
    1830             :         (&((struct perf_pmu_events_attr[]) {                                \
    1831             :                 { .attr = __ATTR(_name, 0444, _show, NULL),             \
    1832             :                   .id = _id, }                                          \
    1833             :         })[0].attr.attr)
    1834             : 
    1835             : #define PMU_FORMAT_ATTR_SHOW(_name, _format)                            \
    1836             : static ssize_t                                                          \
    1837             : _name##_show(struct device *dev,                                        \
    1838             :                                struct device_attribute *attr,           \
    1839             :                                char *page)                              \
    1840             : {                                                                       \
    1841             :         BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                  \
    1842             :         return sprintf(page, _format "\n");                           \
    1843             : }                                                                       \
    1844             : 
    1845             : #define PMU_FORMAT_ATTR(_name, _format)                                 \
    1846             :         PMU_FORMAT_ATTR_SHOW(_name, _format)                            \
    1847             :                                                                         \
    1848             : static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
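
/*
 * Editor's sketch (not from the original header): typical PMU driver usage
 * of the attribute macros above. The "example" names, the format string and
 * the event encoding are all made up.
 */
PMU_FORMAT_ATTR(example_event, "config:0-7");
PMU_EVENT_ATTR_STRING(example_cycles, example_attr_cycles, "example_event=0x3c");

static struct attribute *example_pmu_format_attrs[] __maybe_unused = {
	&format_attr_example_event.attr,
	NULL,
};

static struct attribute *example_pmu_event_attrs[] __maybe_unused = {
	&example_attr_cycles.attr.attr,		/* device_attribute -> attribute */
	NULL,
};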
    1849             : 
    1850             : /* Performance counter hotplug functions */
    1851             : #ifdef CONFIG_PERF_EVENTS
    1852             : int perf_event_init_cpu(unsigned int cpu);
    1853             : int perf_event_exit_cpu(unsigned int cpu);
    1854             : #else
    1855             : #define perf_event_init_cpu     NULL
    1856             : #define perf_event_exit_cpu     NULL
    1857             : #endif
    1858             : 
    1859             : extern void arch_perf_update_userpage(struct perf_event *event,
    1860             :                                       struct perf_event_mmap_page *userpg,
    1861             :                                       u64 now);
    1862             : 
    1863             : #ifdef CONFIG_MMU
    1864             : extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
    1865             : #endif
    1866             : 
    1867             : /*
    1868             :  * Snapshot branch stack on software events.
    1869             :  *
    1870             :  * Branch stack can be very useful in understanding software events. For
    1871             :  * example, when a long function, e.g. sys_perf_event_open, returns an
    1872             :  * errno, it is not obvious why the function failed. Branch stack could
     1873             :  * provide very helpful information in this type of scenario.
    1874             :  *
     1875             :  * On a software event, it is necessary to stop the hardware branch recorder
     1876             :  * quickly. Otherwise, the hardware register/buffer will be flushed with
     1877             :  * entries of the triggering event. Therefore, a static call is used to
     1878             :  * stop the hardware recorder.
    1879             :  */
    1880             : 
    1881             : /*
     1882             :  * @cnt is the number of entries allocated in @entries.
     1883             :  * Returns the number of entries copied into @entries.
    1884             :  */
    1885             : typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries,
    1886             :                                            unsigned int cnt);
    1887             : DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
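
/*
 * Editor's sketch (not from the original header): invoking the snapshot
 * through the static call declared above, the way the BPF helper side does.
 * A real caller must know that a PMU has registered a snapshot function;
 * that check is omitted here and the helper name is illustrative.
 */
static inline int example_snapshot_branch_stack(struct perf_branch_entry *entries,
						unsigned int cnt)
{
	/* Returns how many entries were written into @entries. */
	return static_call(perf_snapshot_branch_stack)(entries, cnt);
}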
    1888             : 
    1889             : #ifndef PERF_NEEDS_LOPWR_CB
    1890             : static inline void perf_lopwr_cb(bool mode)
    1891             : {
    1892             : }
    1893             : #endif
    1894             : 
    1895             : #endif /* _LINUX_PERF_EVENT_H */

Generated by: LCOV version 1.14