Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 :
3 : #ifndef _LINUX_TRACE_EVENT_H
4 : #define _LINUX_TRACE_EVENT_H
5 :
6 : #include <linux/ring_buffer.h>
7 : #include <linux/trace_seq.h>
8 : #include <linux/percpu.h>
9 : #include <linux/hardirq.h>
10 : #include <linux/perf_event.h>
11 : #include <linux/tracepoint.h>
12 :
13 : struct trace_array;
14 : struct array_buffer;
15 : struct tracer;
16 : struct dentry;
17 : struct bpf_prog;
18 : union bpf_attr;
19 :
20 : const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
21 : unsigned long flags,
22 : const struct trace_print_flags *flag_array);
23 :
24 : const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
25 : const struct trace_print_flags *symbol_array);
26 :
27 : #if BITS_PER_LONG == 32
28 : const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
29 : unsigned long long flags,
30 : const struct trace_print_flags_u64 *flag_array);
31 :
32 : const char *trace_print_symbols_seq_u64(struct trace_seq *p,
33 : unsigned long long val,
34 : const struct trace_print_flags_u64
35 : *symbol_array);
36 : #endif
37 :
38 : const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
39 : unsigned int bitmask_size);
40 :
41 : const char *trace_print_hex_seq(struct trace_seq *p,
42 : const unsigned char *buf, int len,
43 : bool concatenate);
44 :
45 : const char *trace_print_array_seq(struct trace_seq *p,
46 : const void *buf, int count,
47 : size_t el_size);
48 :
49 : const char *
50 : trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
51 : int prefix_type, int rowsize, int groupsize,
52 : const void *buf, size_t len, bool ascii);
53 :
54 : struct trace_iterator;
55 : struct trace_event;
56 :
57 : int trace_raw_output_prep(struct trace_iterator *iter,
58 : struct trace_event *event);
59 : extern __printf(2, 3)
60 : void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
61 :
62 : /*
63 : * The trace entry - the most basic unit of tracing. This is what
64 : * is printed in the end as a single line in the trace output, such as:
65 : *
66 : * bash-15816 [01] 235.197585: idle_cpu <- irq_enter
67 : */
68 : struct trace_entry {
69 : unsigned short type;
70 : unsigned char flags;
71 : unsigned char preempt_count;
72 : int pid;
73 : };
74 :
75 : #define TRACE_EVENT_TYPE_MAX \
76 : ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
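/*
 * Worked example (illustrative, not part of the original header): 'type'
 * above is an unsigned short, so the shift width is 16 and
 * TRACE_EVENT_TYPE_MAX evaluates to (1 << 16) - 1 == 65535, the largest
 * event type ID that fits in a trace_entry. A hypothetical compile-time
 * check of that arithmetic (assumes BUILD_BUG_ON() from
 * <linux/build_bug.h> is available):
 */
static inline void trace_event_type_max_check(void)
{
	BUILD_BUG_ON(TRACE_EVENT_TYPE_MAX != 65535);
}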
77 :
78 : /*
79 : * Trace iterator - used by printout routines that present trace
80 : * results to users; these routines may sleep, etc.:
81 : */
82 : struct trace_iterator {
83 : struct trace_array *tr;
84 : struct tracer *trace;
85 : struct array_buffer *array_buffer;
86 : void *private;
87 : int cpu_file;
88 : struct mutex mutex;
89 : struct ring_buffer_iter **buffer_iter;
90 : unsigned long iter_flags;
91 : void *temp; /* temp holder */
92 : unsigned int temp_size;
93 : char *fmt; /* modified format holder */
94 : unsigned int fmt_size;
95 : long wait_index;
96 :
97 : /* trace_seq for __print_flags() and __print_symbolic() etc. */
98 : struct trace_seq tmp_seq;
99 :
100 : cpumask_var_t started;
101 :
102 : /* true when the currently open file is a snapshot */
103 : bool snapshot;
104 :
105 : /* The below is zeroed out in pipe_read */
106 : struct trace_seq seq;
107 : struct trace_entry *ent;
108 : unsigned long lost_events;
109 : int leftover;
110 : int ent_size;
111 : int cpu;
112 : u64 ts;
113 :
114 : loff_t pos;
115 : long idx;
116 :
117 : /* All new fields here will be zeroed out in pipe_read */
118 : };
119 :
120 : enum trace_iter_flags {
121 : TRACE_FILE_LAT_FMT = 1,
122 : TRACE_FILE_ANNOTATE = 2,
123 : TRACE_FILE_TIME_IN_NS = 4,
124 : };
125 :
126 :
127 : typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
128 : int flags, struct trace_event *event);
129 :
130 : struct trace_event_functions {
131 : trace_print_func trace;
132 : trace_print_func raw;
133 : trace_print_func hex;
134 : trace_print_func binary;
135 : };
136 :
137 : struct trace_event {
138 : struct hlist_node node;
139 : int type;
140 : struct trace_event_functions *funcs;
141 : };
142 :
143 : extern int register_trace_event(struct trace_event *event);
144 : extern int unregister_trace_event(struct trace_event *event);
145 :
146 : /* Return values for print_line callback */
147 : enum print_line_t {
148 : TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */
149 : TRACE_TYPE_HANDLED = 1,
150 : TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */
151 : TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
152 : };
153 :
154 : enum print_line_t trace_handle_return(struct trace_seq *s);
155 :
156 : static inline void tracing_generic_entry_update(struct trace_entry *entry,
157 : unsigned short type,
158 : unsigned int trace_ctx)
159 : {
160 : entry->preempt_count = trace_ctx & 0xff;
161 : entry->pid = current->pid;
162 : entry->type = type;
163 : entry->flags = trace_ctx >> 16;
164 : }
165 :
166 : unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
167 :
168 : enum trace_flag_type {
169 : TRACE_FLAG_IRQS_OFF = 0x01,
170 : TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
171 : TRACE_FLAG_NEED_RESCHED = 0x04,
172 : TRACE_FLAG_HARDIRQ = 0x08,
173 : TRACE_FLAG_SOFTIRQ = 0x10,
174 : TRACE_FLAG_PREEMPT_RESCHED = 0x20,
175 : TRACE_FLAG_NMI = 0x40,
176 : TRACE_FLAG_BH_OFF = 0x80,
177 : };
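/*
 * Illustrative sketch (not part of the original header):
 * tracing_generic_entry_update() above packs the preemption count into
 * bits 0-7 of trace_ctx and the trace_flag_type bits into bits 16-23.
 * A hypothetical helper that tests one flag in a packed trace_ctx word:
 */
static inline bool trace_ctx_test_flag(unsigned int trace_ctx,
				       enum trace_flag_type flag)
{
	return (trace_ctx >> 16) & flag;
}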
178 :
179 : #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
180 : static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
181 : {
182 : unsigned int irq_status = irqs_disabled_flags(irqflags) ?
183 : TRACE_FLAG_IRQS_OFF : 0;
184 : return tracing_gen_ctx_irq_test(irq_status);
185 : }
186 : static inline unsigned int tracing_gen_ctx(void)
187 : {
188 : unsigned long irqflags;
189 :
190 : local_save_flags(irqflags);
191 : return tracing_gen_ctx_flags(irqflags);
192 : }
193 : #else
194 :
195 : static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
196 : {
197 : return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
198 : }
199 : static inline unsigned int tracing_gen_ctx(void)
200 : {
201 : return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
202 : }
203 : #endif
204 :
205 : static inline unsigned int tracing_gen_ctx_dec(void)
206 : {
207 : unsigned int trace_ctx;
208 :
209 : trace_ctx = tracing_gen_ctx();
210 : /*
211 : * Subtract one from the preemption counter if preemption is enabled,
212 : * see trace_event_buffer_reserve() for details.
213 : */
214 : if (IS_ENABLED(CONFIG_PREEMPTION))
215 : trace_ctx--;
216 : return trace_ctx;
217 : }
218 :
219 : struct trace_event_file;
220 :
221 : struct ring_buffer_event *
222 : trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
223 : struct trace_event_file *trace_file,
224 : int type, unsigned long len,
225 : unsigned int trace_ctx);
226 :
227 : #define TRACE_RECORD_CMDLINE BIT(0)
228 : #define TRACE_RECORD_TGID BIT(1)
229 :
230 : void tracing_record_taskinfo(struct task_struct *task, int flags);
231 : void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
232 : struct task_struct *next, int flags);
233 :
234 : void tracing_record_cmdline(struct task_struct *task);
235 : void tracing_record_tgid(struct task_struct *task);
236 :
237 : int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
238 : __printf(3, 4);
239 :
240 : struct event_filter;
241 :
242 : enum trace_reg {
243 : TRACE_REG_REGISTER,
244 : TRACE_REG_UNREGISTER,
245 : #ifdef CONFIG_PERF_EVENTS
246 : TRACE_REG_PERF_REGISTER,
247 : TRACE_REG_PERF_UNREGISTER,
248 : TRACE_REG_PERF_OPEN,
249 : TRACE_REG_PERF_CLOSE,
250 : /*
251 : * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
252 : * custom action was taken and the default action is not to be
253 : * performed.
254 : */
255 : TRACE_REG_PERF_ADD,
256 : TRACE_REG_PERF_DEL,
257 : #endif
258 : };
259 :
260 : struct trace_event_call;
261 :
262 : #define TRACE_FUNCTION_TYPE ((const char *)~0UL)
263 :
264 : struct trace_event_fields {
265 : const char *type;
266 : union {
267 : struct {
268 : const char *name;
269 : const int size;
270 : const int align;
271 : const int is_signed;
272 : const int filter_type;
273 : const int len;
274 : };
275 : int (*define_fields)(struct trace_event_call *);
276 : };
277 : };
278 :
279 : struct trace_event_class {
280 : const char *system;
281 : void *probe;
282 : #ifdef CONFIG_PERF_EVENTS
283 : void *perf_probe;
284 : #endif
285 : int (*reg)(struct trace_event_call *event,
286 : enum trace_reg type, void *data);
287 : struct trace_event_fields *fields_array;
288 : struct list_head *(*get_fields)(struct trace_event_call *);
289 : struct list_head fields;
290 : int (*raw_init)(struct trace_event_call *);
291 : };
292 :
293 : extern int trace_event_reg(struct trace_event_call *event,
294 : enum trace_reg type, void *data);
295 :
296 : struct trace_event_buffer {
297 : struct trace_buffer *buffer;
298 : struct ring_buffer_event *event;
299 : struct trace_event_file *trace_file;
300 : void *entry;
301 : unsigned int trace_ctx;
302 : struct pt_regs *regs;
303 : };
304 :
305 : void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
306 : struct trace_event_file *trace_file,
307 : unsigned long len);
308 :
309 : void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
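/*
 * Usage sketch (illustrative only, modelled on the probe functions that
 * the TRACE_EVENT() macros generate; "my_event_entry" and its "value"
 * field are hypothetical):
 */
struct my_event_entry {
	struct trace_entry	ent;
	int			value;
};

static inline void my_event_probe(struct trace_event_file *trace_file, int value)
{
	struct trace_event_buffer fbuffer;
	struct my_event_entry *entry;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
	if (!entry)
		return;

	entry->value = value;
	trace_event_buffer_commit(&fbuffer);
}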
310 :
311 : enum {
312 : TRACE_EVENT_FL_FILTERED_BIT,
313 : TRACE_EVENT_FL_CAP_ANY_BIT,
314 : TRACE_EVENT_FL_NO_SET_FILTER_BIT,
315 : TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
316 : TRACE_EVENT_FL_TRACEPOINT_BIT,
317 : TRACE_EVENT_FL_DYNAMIC_BIT,
318 : TRACE_EVENT_FL_KPROBE_BIT,
319 : TRACE_EVENT_FL_UPROBE_BIT,
320 : TRACE_EVENT_FL_EPROBE_BIT,
321 : TRACE_EVENT_FL_FPROBE_BIT,
322 : TRACE_EVENT_FL_CUSTOM_BIT,
323 : };
324 :
325 : /*
326 : * Event flags:
327 : * FILTERED - The event has a filter attached
328 : * CAP_ANY - Any user can enable for perf
329 : * NO_SET_FILTER - Set when filter has error and is to be ignored
330 : * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
331 : * TRACEPOINT - Event is a tracepoint
332 : * DYNAMIC - Event is a dynamic event (created at run time)
333 : * KPROBE - Event is a kprobe
334 : * UPROBE - Event is a uprobe
335 : * EPROBE - Event is an event probe
336 : * FPROBE - Event is a function probe
337 : * CUSTOM - Event is a custom event (to be attached to an existing tracepoint)
338 : * This is set while the custom event has not yet been attached
339 : * to a tracepoint, and is cleared once it has been.
340 : */
341 : enum {
342 : TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
343 : TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
344 : TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
345 : TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
346 : TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
347 : TRACE_EVENT_FL_DYNAMIC = (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
348 : TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
349 : TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
350 : TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT),
351 : TRACE_EVENT_FL_FPROBE = (1 << TRACE_EVENT_FL_FPROBE_BIT),
352 : TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT),
353 : };
354 :
355 : #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
356 :
357 : struct trace_event_call {
358 : struct list_head list;
359 : struct trace_event_class *class;
360 : union {
361 : char *name;
362 : /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
363 : struct tracepoint *tp;
364 : };
365 : struct trace_event event;
366 : char *print_fmt;
367 : struct event_filter *filter;
368 : /*
369 : * Static events can disappear with modules,
370 : * whereas dynamic ones need their own ref count.
371 : */
372 : union {
373 : void *module;
374 : atomic_t refcnt;
375 : };
376 : void *data;
377 :
378 : /* See the TRACE_EVENT_FL_* flags above */
379 : int flags; /* static flags of different events */
380 :
381 : #ifdef CONFIG_PERF_EVENTS
382 : int perf_refcount;
383 : struct hlist_head __percpu *perf_events;
384 : struct bpf_prog_array __rcu *prog_array;
385 :
386 : int (*perf_perm)(struct trace_event_call *,
387 : struct perf_event *);
388 : #endif
389 : };
390 :
391 : #ifdef CONFIG_DYNAMIC_EVENTS
392 : bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
393 : void trace_event_dyn_put_ref(struct trace_event_call *call);
394 : bool trace_event_dyn_busy(struct trace_event_call *call);
395 : #else
396 : static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
397 : {
398 : /* Without DYNAMIC_EVENTS configured, nothing should be calling this */
399 : return false;
400 : }
401 : static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
402 : {
403 : }
404 : static inline bool trace_event_dyn_busy(struct trace_event_call *call)
405 : {
406 : /* Nothing should call this without DYNAMIC_EVENTS configured. */
407 : return true;
408 : }
409 : #endif
410 :
411 : static inline bool trace_event_try_get_ref(struct trace_event_call *call)
412 : {
413 : if (call->flags & TRACE_EVENT_FL_DYNAMIC)
414 : return trace_event_dyn_try_get_ref(call);
415 : else
416 : return try_module_get(call->module);
417 : }
418 :
419 : static inline void trace_event_put_ref(struct trace_event_call *call)
420 : {
421 : if (call->flags & TRACE_EVENT_FL_DYNAMIC)
422 : trace_event_dyn_put_ref(call);
423 : else
424 : module_put(call->module);
425 : }
426 :
427 : #ifdef CONFIG_PERF_EVENTS
428 : static inline bool bpf_prog_array_valid(struct trace_event_call *call)
429 : {
430 : /*
431 : * This inline function checks whether call->prog_array
432 : * is valid or not. The function is called in various places,
433 : * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
434 : *
435 : * If this function returns true, and later call->prog_array
436 : * becomes NULL inside the rcu_read_lock/unlock region,
437 : * we bail out then. If this function returns false,
438 : * there is a risk that we might miss a few events if the check
439 : * were delayed until inside the rcu_read_lock/unlock region and
440 : * call->prog_array happened to become non-NULL then.
441 : *
442 : * Here, READ_ONCE() is used instead of rcu_access_pointer().
443 : * rcu_access_pointer() requires the actual definition of
444 : * "struct bpf_prog_array" while READ_ONCE() only needs
445 : * a declaration of the same type.
446 : */
447 0 : return !!READ_ONCE(call->prog_array);
448 : }
449 : #endif
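/*
 * Usage sketch (illustrative): callers such as the perf tracepoint glue
 * test bpf_prog_array_valid() outside of any RCU read section as a cheap
 * filter before doing the real, RCU-protected work in trace_call_bpf()
 * (declared further below):
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, ctx))
 *		return;		(a BPF program asked to drop this event)
 */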
450 :
451 : static inline const char *
452 : trace_event_name(struct trace_event_call *call)
453 : {
454 : if (call->flags & TRACE_EVENT_FL_CUSTOM)
455 : return call->name;
456 : else if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
457 : return call->tp ? call->tp->name : NULL;
458 : else
459 : return call->name;
460 : }
461 :
462 : static inline struct list_head *
463 : trace_get_fields(struct trace_event_call *event_call)
464 : {
465 : if (!event_call->class->get_fields)
466 : return &event_call->class->fields;
467 : return event_call->class->get_fields(event_call);
468 : }
469 :
470 : struct trace_subsystem_dir;
471 :
472 : enum {
473 : EVENT_FILE_FL_ENABLED_BIT,
474 : EVENT_FILE_FL_RECORDED_CMD_BIT,
475 : EVENT_FILE_FL_RECORDED_TGID_BIT,
476 : EVENT_FILE_FL_FILTERED_BIT,
477 : EVENT_FILE_FL_NO_SET_FILTER_BIT,
478 : EVENT_FILE_FL_SOFT_MODE_BIT,
479 : EVENT_FILE_FL_SOFT_DISABLED_BIT,
480 : EVENT_FILE_FL_TRIGGER_MODE_BIT,
481 : EVENT_FILE_FL_TRIGGER_COND_BIT,
482 : EVENT_FILE_FL_PID_FILTER_BIT,
483 : EVENT_FILE_FL_WAS_ENABLED_BIT,
484 : };
485 :
486 : extern struct trace_event_file *trace_get_event_file(const char *instance,
487 : const char *system,
488 : const char *event);
489 : extern void trace_put_event_file(struct trace_event_file *file);
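/*
 * Usage sketch (illustrative; error handling trimmed): looking up an
 * event file in the top-level trace instance (NULL) and releasing it
 * again once done:
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...use file, e.g. with the synth_event_trace*() calls below...
 *	trace_put_event_file(file);
 */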
490 :
491 : #define MAX_DYNEVENT_CMD_LEN (2048)
492 :
493 : enum dynevent_type {
494 : DYNEVENT_TYPE_SYNTH = 1,
495 : DYNEVENT_TYPE_KPROBE,
496 : DYNEVENT_TYPE_NONE,
497 : };
498 :
499 : struct dynevent_cmd;
500 :
501 : typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);
502 :
503 : struct dynevent_cmd {
504 : struct seq_buf seq;
505 : const char *event_name;
506 : unsigned int n_fields;
507 : enum dynevent_type type;
508 : dynevent_create_fn_t run_command;
509 : void *private_data;
510 : };
511 :
512 : extern int dynevent_create(struct dynevent_cmd *cmd);
513 :
514 : extern int synth_event_delete(const char *name);
515 :
516 : extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
517 : char *buf, int maxlen);
518 :
519 : extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
520 : const char *name,
521 : struct module *mod, ...);
522 :
523 : #define synth_event_gen_cmd_start(cmd, name, mod, ...) \
524 : __synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)
525 :
526 : struct synth_field_desc {
527 : const char *type;
528 : const char *name;
529 : };
530 :
531 : extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
532 : const char *name,
533 : struct module *mod,
534 : struct synth_field_desc *fields,
535 : unsigned int n_fields);
536 : extern int synth_event_create(const char *name,
537 : struct synth_field_desc *fields,
538 : unsigned int n_fields, struct module *mod);
539 :
540 : extern int synth_event_add_field(struct dynevent_cmd *cmd,
541 : const char *type,
542 : const char *name);
543 : extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
544 : const char *type_name);
545 : extern int synth_event_add_fields(struct dynevent_cmd *cmd,
546 : struct synth_field_desc *fields,
547 : unsigned int n_fields);
548 :
549 : #define synth_event_gen_cmd_end(cmd) \
550 : dynevent_create(cmd)
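/*
 * Usage sketch (illustrative, following the dynevent_cmd flow above; the
 * event name "mytest" and its fields are hypothetical, and kzalloc() and
 * THIS_MODULE assume the usual <linux/slab.h>/<linux/module.h> includes):
 */
static inline int create_mytest_synth_event(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = synth_event_gen_cmd_start(&cmd, "mytest", THIS_MODULE,
					"pid_t", "pid", "u64", "ts_ns");
	if (!ret)
		ret = synth_event_gen_cmd_end(&cmd);

	kfree(buf);
	return ret;
}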
551 :
552 : struct synth_event;
553 :
554 : struct synth_event_trace_state {
555 : struct trace_event_buffer fbuffer;
556 : struct synth_trace_event *entry;
557 : struct trace_buffer *buffer;
558 : struct synth_event *event;
559 : unsigned int cur_field;
560 : unsigned int n_u64;
561 : bool disabled;
562 : bool add_next;
563 : bool add_name;
564 : };
565 :
566 : extern int synth_event_trace(struct trace_event_file *file,
567 : unsigned int n_vals, ...);
568 : extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
569 : unsigned int n_vals);
570 : extern int synth_event_trace_start(struct trace_event_file *file,
571 : struct synth_event_trace_state *trace_state);
572 : extern int synth_event_add_next_val(u64 val,
573 : struct synth_event_trace_state *trace_state);
574 : extern int synth_event_add_val(const char *field_name, u64 val,
575 : struct synth_event_trace_state *trace_state);
576 : extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
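/*
 * Usage sketch (illustrative): once the synthetic event exists and 'file'
 * has been obtained via trace_get_event_file(), a record can be generated
 * in one call or piecewise (error handling trimmed; values hypothetical):
 *
 *	synth_event_trace(file, 2, (u64)current->pid, (u64)ktime_get_ns());
 *
 * or:
 *
 *	struct synth_event_trace_state st;
 *
 *	synth_event_trace_start(file, &st);
 *	synth_event_add_val("pid", current->pid, &st);
 *	synth_event_add_val("ts_ns", ktime_get_ns(), &st);
 *	synth_event_trace_end(&st);
 */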
577 :
578 : extern int kprobe_event_delete(const char *name);
579 :
580 : extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
581 : char *buf, int maxlen);
582 :
583 : #define kprobe_event_gen_cmd_start(cmd, name, loc, ...) \
584 : __kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)
585 :
586 : #define kretprobe_event_gen_cmd_start(cmd, name, loc, ...) \
587 : __kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)
588 :
589 : extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
590 : bool kretprobe,
591 : const char *name,
592 : const char *loc, ...);
593 :
594 : #define kprobe_event_add_fields(cmd, ...) \
595 : __kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)
596 :
597 : #define kprobe_event_add_field(cmd, field) \
598 : __kprobe_event_add_fields(cmd, field, NULL)
599 :
600 : extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
601 :
602 : #define kprobe_event_gen_cmd_end(cmd) \
603 : dynevent_create(cmd)
604 :
605 : #define kretprobe_event_gen_cmd_end(cmd) \
606 : dynevent_create(cmd)
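/*
 * Usage sketch (illustrative, mirroring the synthetic event example above;
 * the probe name, location and fetch args are hypothetical, and the fetch
 * arg syntax is architecture specific):
 */
static inline int create_myopen_kprobe_event(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = kprobe_event_gen_cmd_start(&cmd, "myopen", "do_sys_open",
					 "dfd=%ax", "filename=%dx");
	if (!ret)
		ret = kprobe_event_gen_cmd_end(&cmd);

	kfree(buf);
	return ret;
}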
607 :
608 : /*
609 : * Event file flags:
610 : * ENABLED - The event is enabled
611 : * RECORDED_CMD - The comms should be recorded at sched_switch
612 : * RECORDED_TGID - The tgids should be recorded at sched_switch
613 : * FILTERED - The event has a filter attached
614 : * NO_SET_FILTER - Set when filter has error and is to be ignored
615 : * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
616 : * SOFT_DISABLED - When set, do not trace the event (even though its
617 : * tracepoint may be enabled)
618 : * TRIGGER_MODE - When set, invoke the triggers associated with the event
619 : * TRIGGER_COND - When set, one or more triggers have an associated filter
620 : * PID_FILTER - When set, the event is filtered based on pid
621 : * WAS_ENABLED - Set when enabled to know to clear trace on module removal
622 : */
623 : enum {
624 : EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
625 : EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
626 : EVENT_FILE_FL_RECORDED_TGID = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
627 : EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT),
628 : EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
629 : EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
630 : EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
631 : EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
632 : EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
633 : EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
634 : EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
635 : };
636 :
637 : struct trace_event_file {
638 : struct list_head list;
639 : struct trace_event_call *event_call;
640 : struct event_filter __rcu *filter;
641 : struct dentry *dir;
642 : struct trace_array *tr;
643 : struct trace_subsystem_dir *system;
644 : struct list_head triggers;
645 :
646 : /*
647 : * 32 bit flags:
648 : * bit 0: enabled
649 : * bit 1: enabled cmd record
650 : * bit 2: enable/disable with the soft disable bit
651 : * bit 3: soft disabled
652 : * bit 4: trigger enabled
653 : *
654 : * Note: The bits must be set atomically to prevent races
655 : * from other writers. Reads of flags do not need to be in
656 : * sync as they occur in critical sections. But the way flags
657 : * is currently used, these changes do not affect the code
658 : * except that when a change is made, it may have a slight
659 : * delay in propagating the changes to other CPUs due to
660 : * caching and such. Which is mostly OK ;-)
661 : */
662 : unsigned long flags;
663 : atomic_t sm_ref; /* soft-mode reference counter */
664 : atomic_t tm_ref; /* trigger-mode reference counter */
665 : };
666 :
667 : #define __TRACE_EVENT_FLAGS(name, value) \
668 : static int __init trace_init_flags_##name(void) \
669 : { \
670 : event_##name.flags |= value; \
671 : return 0; \
672 : } \
673 : early_initcall(trace_init_flags_##name);
674 :
675 : #define __TRACE_EVENT_PERF_PERM(name, expr...) \
676 : static int perf_perm_##name(struct trace_event_call *tp_event, \
677 : struct perf_event *p_event) \
678 : { \
679 : return ({ expr; }); \
680 : } \
681 : static int __init trace_init_perf_perm_##name(void) \
682 : { \
683 : event_##name.perf_perm = &perf_perm_##name; \
684 : return 0; \
685 : } \
686 : early_initcall(trace_init_perf_perm_##name);
687 :
688 : #define PERF_MAX_TRACE_SIZE 8192
689 :
690 : #define MAX_FILTER_STR_VAL 256U /* Should handle KSYM_SYMBOL_LEN */
691 :
692 : enum event_trigger_type {
693 : ETT_NONE = (0),
694 : ETT_TRACE_ONOFF = (1 << 0),
695 : ETT_SNAPSHOT = (1 << 1),
696 : ETT_STACKTRACE = (1 << 2),
697 : ETT_EVENT_ENABLE = (1 << 3),
698 : ETT_EVENT_HIST = (1 << 4),
699 : ETT_HIST_ENABLE = (1 << 5),
700 : ETT_EVENT_EPROBE = (1 << 6),
701 : };
702 :
703 : extern int filter_match_preds(struct event_filter *filter, void *rec);
704 :
705 : extern enum event_trigger_type
706 : event_triggers_call(struct trace_event_file *file,
707 : struct trace_buffer *buffer, void *rec,
708 : struct ring_buffer_event *event);
709 : extern void
710 : event_triggers_post_call(struct trace_event_file *file,
711 : enum event_trigger_type tt);
712 :
713 : bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
714 :
715 : bool __trace_trigger_soft_disabled(struct trace_event_file *file);
716 :
717 : /**
718 : * trace_trigger_soft_disabled - do triggers and test if soft disabled
719 : * @file: The file pointer of the event to test
720 : *
721 : * If any triggers without filters are attached to this event, they
722 : * will be called here. If the event is soft disabled and has no
723 : * triggers that require testing the fields, it will return true,
724 : * otherwise false.
725 : */
726 : static __always_inline bool
727 : trace_trigger_soft_disabled(struct trace_event_file *file)
728 : {
729 19 : unsigned long eflags = file->flags;
730 :
731 19 : if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
732 : EVENT_FILE_FL_SOFT_DISABLED |
733 : EVENT_FILE_FL_PID_FILTER))))
734 : return false;
735 :
736 0 : if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
737 : return false;
738 :
739 0 : return __trace_trigger_soft_disabled(file);
740 : }
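/*
 * Usage sketch (illustrative): a probe function bails out on its fast path
 * before reserving ring buffer space when the event is soft disabled and
 * no trigger needs the record contents:
 *
 *	if (trace_trigger_soft_disabled(trace_file))
 *		return;
 *	...reserve, fill in and commit the event as usual...
 */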
741 :
742 : #ifdef CONFIG_BPF_EVENTS
743 : unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
744 : int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
745 : void perf_event_detach_bpf_prog(struct perf_event *event);
746 : int perf_event_query_prog_array(struct perf_event *event, void __user *info);
747 : int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
748 : int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
749 : struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
750 : void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
751 : int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
752 : u32 *fd_type, const char **buf,
753 : u64 *probe_offset, u64 *probe_addr);
754 : int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
755 : #else
756 : static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
757 : {
758 : return 1;
759 : }
760 :
761 : static inline int
762 : perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
763 : {
764 : return -EOPNOTSUPP;
765 : }
766 :
767 : static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }
768 :
769 : static inline int
770 : perf_event_query_prog_array(struct perf_event *event, void __user *info)
771 : {
772 : return -EOPNOTSUPP;
773 : }
774 : static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
775 : {
776 : return -EOPNOTSUPP;
777 : }
778 : static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
779 : {
780 : return -EOPNOTSUPP;
781 : }
782 : static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
783 : {
784 : return NULL;
785 : }
786 : static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
787 : {
788 : }
789 : static inline int bpf_get_perf_event_info(const struct perf_event *event,
790 : u32 *prog_id, u32 *fd_type,
791 : const char **buf, u64 *probe_offset,
792 : u64 *probe_addr)
793 : {
794 : return -EOPNOTSUPP;
795 : }
796 : static inline int
797 : bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
798 : {
799 : return -EOPNOTSUPP;
800 : }
801 : #endif
802 :
803 : enum {
804 : FILTER_OTHER = 0,
805 : FILTER_STATIC_STRING,
806 : FILTER_DYN_STRING,
807 : FILTER_RDYN_STRING,
808 : FILTER_PTR_STRING,
809 : FILTER_TRACE_FN,
810 : FILTER_COMM,
811 : FILTER_CPU,
812 : FILTER_STACKTRACE,
813 : };
814 :
815 : extern int trace_event_raw_init(struct trace_event_call *call);
816 : extern int trace_define_field(struct trace_event_call *call, const char *type,
817 : const char *name, int offset, int size,
818 : int is_signed, int filter_type);
819 : extern int trace_add_event_call(struct trace_event_call *call);
820 : extern int trace_remove_event_call(struct trace_event_call *call);
821 : extern int trace_event_get_offsets(struct trace_event_call *call);
822 :
823 : int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
824 : int trace_set_clr_event(const char *system, const char *event, int set);
825 : int trace_array_set_clr_event(struct trace_array *tr, const char *system,
826 : const char *event, bool enable);
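/*
 * Usage sketch (illustrative): enable every event in the "sched" subsystem
 * of the top-level instance, then disable a single event again:
 *
 *	trace_set_clr_event("sched", NULL, 1);
 *	trace_set_clr_event("sched", "sched_switch", 0);
 */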
827 : /*
828 : * The double __builtin_constant_p is because gcc will give us an error
829 : * if we try to initialize the static variable from fmt when it is not a
830 : * constant, even with the outer if statement optimized out.
831 : */
832 : #define event_trace_printk(ip, fmt, args...) \
833 : do { \
834 : __trace_printk_check_format(fmt, ##args); \
835 : tracing_record_cmdline(current); \
836 : if (__builtin_constant_p(fmt)) { \
837 : static const char *trace_printk_fmt \
838 : __section("__trace_printk_fmt") = \
839 : __builtin_constant_p(fmt) ? fmt : NULL; \
840 : \
841 : __trace_bprintk(ip, trace_printk_fmt, ##args); \
842 : } else \
843 : __trace_printk(ip, fmt, ##args); \
844 : } while (0)
845 :
846 : #ifdef CONFIG_PERF_EVENTS
847 : struct perf_event;
848 :
849 : DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
850 : DECLARE_PER_CPU(int, bpf_kprobe_override);
851 :
852 : extern int perf_trace_init(struct perf_event *event);
853 : extern void perf_trace_destroy(struct perf_event *event);
854 : extern int perf_trace_add(struct perf_event *event, int flags);
855 : extern void perf_trace_del(struct perf_event *event, int flags);
856 : #ifdef CONFIG_KPROBE_EVENTS
857 : extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe);
858 : extern void perf_kprobe_destroy(struct perf_event *event);
859 : extern int bpf_get_kprobe_info(const struct perf_event *event,
860 : u32 *fd_type, const char **symbol,
861 : u64 *probe_offset, u64 *probe_addr,
862 : bool perf_type_tracepoint);
863 : #endif
864 : #ifdef CONFIG_UPROBE_EVENTS
865 : extern int perf_uprobe_init(struct perf_event *event,
866 : unsigned long ref_ctr_offset, bool is_retprobe);
867 : extern void perf_uprobe_destroy(struct perf_event *event);
868 : extern int bpf_get_uprobe_info(const struct perf_event *event,
869 : u32 *fd_type, const char **filename,
870 : u64 *probe_offset, bool perf_type_tracepoint);
871 : #endif
872 : extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
873 : char *filter_str);
874 : extern void ftrace_profile_free_filter(struct perf_event *event);
875 : void perf_trace_buf_update(void *record, u16 type);
876 : void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
877 :
878 : int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
879 : void perf_event_free_bpf_prog(struct perf_event *event);
880 :
881 : void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
882 : void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
883 : void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
884 : u64 arg3);
885 : void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
886 : u64 arg3, u64 arg4);
887 : void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
888 : u64 arg3, u64 arg4, u64 arg5);
889 : void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
890 : u64 arg3, u64 arg4, u64 arg5, u64 arg6);
891 : void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
892 : u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
893 : void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
894 : u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
895 : u64 arg8);
896 : void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
897 : u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
898 : u64 arg8, u64 arg9);
899 : void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
900 : u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
901 : u64 arg8, u64 arg9, u64 arg10);
902 : void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
903 : u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
904 : u64 arg8, u64 arg9, u64 arg10, u64 arg11);
905 : void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
906 : u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
907 : u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
908 : void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
909 : struct trace_event_call *call, u64 count,
910 : struct pt_regs *regs, struct hlist_head *head,
911 : struct task_struct *task);
912 :
913 : static inline void
914 : perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
915 : u64 count, struct pt_regs *regs, void *head,
916 : struct task_struct *task)
917 : {
918 : perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
919 : }
920 :
921 : #endif
922 :
923 : #define TRACE_EVENT_STR_MAX 512
924 :
925 : /*
926 : * gcc warns that you cannot use a va_list in an inlined
927 : * function, but lets us make it into a macro :-/
928 : */
929 : #define __trace_event_vstr_len(fmt, va) \
930 : ({ \
931 : va_list __ap; \
932 : int __ret; \
933 : \
934 : va_copy(__ap, *(va)); \
935 : __ret = vsnprintf(NULL, 0, fmt, __ap) + 1; \
936 : va_end(__ap); \
937 : \
938 : min(__ret, TRACE_EVENT_STR_MAX); \
939 : })
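/*
 * Usage sketch (illustrative): a varargs caller sizing a string before
 * reserving event storage; note the macro va_copy()s from *(va), so a
 * pointer to the live va_list is passed:
 *
 *	va_list ap;
 *	int len;
 *
 *	va_start(ap, fmt);
 *	len = __trace_event_vstr_len(fmt, &ap);
 *	va_end(ap);
 */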
940 :
941 : #endif /* _LINUX_TRACE_EVENT_H */
942 :
943 : /*
944 : * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection.
945 : * This is due to the way trace custom events work. If a file includes two
946 : * trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS", the first include
947 : * will override the TRACE_CUSTOM_EVENT and break the second include.
948 : */
949 :
950 : #ifndef TRACE_CUSTOM_EVENT
951 :
952 : #define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print)
953 : #define DEFINE_CUSTOM_EVENT(template, name, proto, args)
954 : #define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print)
955 :
956 : #endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */