/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM fs_dax

#if !defined(_TRACE_FS_DAX_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FS_DAX_H

#include <linux/tracepoint.h>

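/*
 * dax_pmd_fault_class: common layout for tracepoints that bracket a DAX PMD
 * (huge page) fault, recording the faulting VMA range, fault flags and the
 * VM_FAULT_* result.
 */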
DECLARE_EVENT_CLASS(dax_pmd_fault_class,
	TP_PROTO(struct inode *inode, struct vm_fault *vmf,
		pgoff_t max_pgoff, int result),
	TP_ARGS(inode, vmf, max_pgoff, result),
	TP_STRUCT__entry(
		__field(unsigned long, ino)
		__field(unsigned long, vm_start)
		__field(unsigned long, vm_end)
		__field(unsigned long, vm_flags)
		__field(unsigned long, address)
		__field(pgoff_t, pgoff)
		__field(pgoff_t, max_pgoff)
		__field(dev_t, dev)
		__field(unsigned int, flags)
		__field(int, result)
	),
	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->vm_start = vmf->vma->vm_start;
		__entry->vm_end = vmf->vma->vm_end;
		__entry->vm_flags = vmf->vma->vm_flags;
		__entry->address = vmf->address;
		__entry->flags = vmf->flags;
		__entry->pgoff = vmf->pgoff;
		__entry->max_pgoff = max_pgoff;
		__entry->result = result;
	),
	TP_printk("dev %d:%d ino %#lx %s %s address %#lx vm_start "
			"%#lx vm_end %#lx pgoff %#lx max_pgoff %#lx %s",
		MAJOR(__entry->dev),
		MINOR(__entry->dev),
		__entry->ino,
		__entry->vm_flags & VM_SHARED ? "shared" : "private",
		__print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
		__entry->address,
		__entry->vm_start,
		__entry->vm_end,
		__entry->pgoff,
		__entry->max_pgoff,
		__print_flags(__entry->result, "|", VM_FAULT_RESULT_TRACE)
	)
)

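/*
 * Events stamped out from dax_pmd_fault_class: dax_pmd_fault fires on entry
 * to the PMD fault path and dax_pmd_fault_done on exit. A sketch of a paired
 * call site (variable names are illustrative, not taken from this header):
 *
 *	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
 *	...
 *	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
 */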
#define DEFINE_PMD_FAULT_EVENT(name) \
DEFINE_EVENT(dax_pmd_fault_class, name, \
	TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
		pgoff_t max_pgoff, int result), \
	TP_ARGS(inode, vmf, max_pgoff, result))

DEFINE_PMD_FAULT_EVENT(dax_pmd_fault);
DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done);

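/*
 * dax_pmd_load_hole_class: tracepoints for servicing a PMD read fault over a
 * hole with the zero page, recording the zero page pointer and the radix
 * tree entry installed in the mapping.
 */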
DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
	TP_PROTO(struct inode *inode, struct vm_fault *vmf,
		struct page *zero_page,
		void *radix_entry),
	TP_ARGS(inode, vmf, zero_page, radix_entry),
	TP_STRUCT__entry(
		__field(unsigned long, ino)
		__field(unsigned long, vm_flags)
		__field(unsigned long, address)
		__field(struct page *, zero_page)
		__field(void *, radix_entry)
		__field(dev_t, dev)
	),
	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->vm_flags = vmf->vma->vm_flags;
		__entry->address = vmf->address;
		__entry->zero_page = zero_page;
		__entry->radix_entry = radix_entry;
	),
	TP_printk("dev %d:%d ino %#lx %s address %#lx zero_page %p "
			"radix_entry %#lx",
		MAJOR(__entry->dev),
		MINOR(__entry->dev),
		__entry->ino,
		__entry->vm_flags & VM_SHARED ? "shared" : "private",
		__entry->address,
		__entry->zero_page,
		(unsigned long)__entry->radix_entry
	)
)

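/*
 * dax_pmd_load_hole fires when the zero page is mapped successfully;
 * dax_pmd_load_hole_fallback fires when the attempt is abandoned and the
 * fault falls back to PTE handling.
 */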
#define DEFINE_PMD_LOAD_HOLE_EVENT(name) \
DEFINE_EVENT(dax_pmd_load_hole_class, name, \
	TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
		struct page *zero_page, void *radix_entry), \
	TP_ARGS(inode, vmf, zero_page, radix_entry))

DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole);
DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback);

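/*
 * dax_pmd_insert_mapping_class: records a PMD-sized mapping being inserted
 * for a fault, logging the mapped length, the target pfn along with its
 * PFN_* flags, and the new radix tree entry.
 */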
DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
	TP_PROTO(struct inode *inode, struct vm_fault *vmf,
		long length, pfn_t pfn, void *radix_entry),
	TP_ARGS(inode, vmf, length, pfn, radix_entry),
	TP_STRUCT__entry(
		__field(unsigned long, ino)
		__field(unsigned long, vm_flags)
		__field(unsigned long, address)
		__field(long, length)
		__field(u64, pfn_val)
		__field(void *, radix_entry)
		__field(dev_t, dev)
		__field(int, write)
	),
	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->vm_flags = vmf->vma->vm_flags;
		__entry->address = vmf->address;
		__entry->write = vmf->flags & FAULT_FLAG_WRITE;
		__entry->length = length;
		__entry->pfn_val = pfn.val;
		__entry->radix_entry = radix_entry;
	),
	TP_printk("dev %d:%d ino %#lx %s %s address %#lx length %#lx "
			"pfn %#llx %s radix_entry %#lx",
		MAJOR(__entry->dev),
		MINOR(__entry->dev),
		__entry->ino,
		__entry->vm_flags & VM_SHARED ? "shared" : "private",
		__entry->write ? "write" : "read",
		__entry->address,
		__entry->length,
		__entry->pfn_val & ~PFN_FLAGS_MASK,
		__print_flags_u64(__entry->pfn_val & PFN_FLAGS_MASK, "|",
			PFN_FLAGS_TRACE),
		(unsigned long)__entry->radix_entry
	)
)

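/* Single event of the class above, emitted from the PMD fault path. */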
#define DEFINE_PMD_INSERT_MAPPING_EVENT(name) \
DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \
	TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
		long length, pfn_t pfn, void *radix_entry), \
	TP_ARGS(inode, vmf, length, pfn, radix_entry))

DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);

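/*
 * dax_pte_fault_class: PTE-sized analogue of dax_pmd_fault_class; records
 * the fault address, page offset, fault flags and VM_FAULT_* result, without
 * the PMD-specific VMA range and max_pgoff fields.
 */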
DECLARE_EVENT_CLASS(dax_pte_fault_class,
	TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result),
	TP_ARGS(inode, vmf, result),
	TP_STRUCT__entry(
		__field(unsigned long, ino)
		__field(unsigned long, vm_flags)
		__field(unsigned long, address)
		__field(pgoff_t, pgoff)
		__field(dev_t, dev)
		__field(unsigned int, flags)
		__field(int, result)
	),
	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->vm_flags = vmf->vma->vm_flags;
		__entry->address = vmf->address;
		__entry->flags = vmf->flags;
		__entry->pgoff = vmf->pgoff;
		__entry->result = result;
	),
	TP_printk("dev %d:%d ino %#lx %s %s address %#lx pgoff %#lx %s",
		MAJOR(__entry->dev),
		MINOR(__entry->dev),
		__entry->ino,
		__entry->vm_flags & VM_SHARED ? "shared" : "private",
		__print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
		__entry->address,
		__entry->pgoff,
		__print_flags(__entry->result, "|", VM_FAULT_RESULT_TRACE)
	)
)

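/*
 * PTE-level events sharing the class above: dax_pte_fault/dax_pte_fault_done
 * bracket the fault handler, dax_load_hole covers a read fault over a hole,
 * and the dax_insert_pfn_mkwrite* pair covers ->pfn_mkwrite handling. A
 * sketch of a paired call site (illustrative variable names):
 *
 *	trace_dax_pte_fault(inode, vmf, 0);
 *	...
 *	trace_dax_pte_fault_done(inode, vmf, ret);
 */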
#define DEFINE_PTE_FAULT_EVENT(name) \
DEFINE_EVENT(dax_pte_fault_class, name, \
	TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result), \
	TP_ARGS(inode, vmf, result))

DEFINE_PTE_FAULT_EVENT(dax_pte_fault);
DEFINE_PTE_FAULT_EVENT(dax_pte_fault_done);
DEFINE_PTE_FAULT_EVENT(dax_load_hole);
DEFINE_PTE_FAULT_EVENT(dax_insert_pfn_mkwrite_no_entry);
DEFINE_PTE_FAULT_EVENT(dax_insert_pfn_mkwrite);

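/*
 * dax_insert_mapping: a single PTE-sized entry being installed in the
 * mapping's radix tree during fault handling.
 */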
TRACE_EVENT(dax_insert_mapping,
	TP_PROTO(struct inode *inode, struct vm_fault *vmf, void *radix_entry),
	TP_ARGS(inode, vmf, radix_entry),
	TP_STRUCT__entry(
		__field(unsigned long, ino)
		__field(unsigned long, vm_flags)
		__field(unsigned long, address)
		__field(void *, radix_entry)
		__field(dev_t, dev)
		__field(int, write)
	),
	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->vm_flags = vmf->vma->vm_flags;
		__entry->address = vmf->address;
		__entry->write = vmf->flags & FAULT_FLAG_WRITE;
		__entry->radix_entry = radix_entry;
	),
	TP_printk("dev %d:%d ino %#lx %s %s address %#lx radix_entry %#lx",
		MAJOR(__entry->dev),
		MINOR(__entry->dev),
		__entry->ino,
		__entry->vm_flags & VM_SHARED ? "shared" : "private",
		__entry->write ? "write" : "read",
		__entry->address,
		(unsigned long)__entry->radix_entry
	)
)

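/*
 * dax_writeback_range_class: bracket events for flushing a range of dirty
 * DAX entries during writeback; the range is reported as page offsets.
 */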
DECLARE_EVENT_CLASS(dax_writeback_range_class,
	TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),
	TP_ARGS(inode, start_index, end_index),
	TP_STRUCT__entry(
		__field(unsigned long, ino)
		__field(pgoff_t, start_index)
		__field(pgoff_t, end_index)
		__field(dev_t, dev)
	),
	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->start_index = start_index;
		__entry->end_index = end_index;
	),
	TP_printk("dev %d:%d ino %#lx pgoff %#lx-%#lx",
		MAJOR(__entry->dev),
		MINOR(__entry->dev),
		__entry->ino,
		__entry->start_index,
		__entry->end_index
	)
)

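/*
 * dax_writeback_range/dax_writeback_range_done bracket the writeback of a
 * mapping range. A sketch of a paired call site (illustrative variable
 * names):
 *
 *	trace_dax_writeback_range(inode, start_index, end_index);
 *	...
 *	trace_dax_writeback_range_done(inode, start_index, end_index);
 */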
#define DEFINE_WRITEBACK_RANGE_EVENT(name) \
DEFINE_EVENT(dax_writeback_range_class, name, \
	TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),\
	TP_ARGS(inode, start_index, end_index))

DEFINE_WRITEBACK_RANGE_EVENT(dax_writeback_range);
DEFINE_WRITEBACK_RANGE_EVENT(dax_writeback_range_done);

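/*
 * dax_writeback_one: one entry, covering pglen pages starting at pgoff,
 * being flushed during writeback.
 */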
TRACE_EVENT(dax_writeback_one,
	TP_PROTO(struct inode *inode, pgoff_t pgoff, pgoff_t pglen),
	TP_ARGS(inode, pgoff, pglen),
	TP_STRUCT__entry(
		__field(unsigned long, ino)
		__field(pgoff_t, pgoff)
		__field(pgoff_t, pglen)
		__field(dev_t, dev)
	),
	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->pgoff = pgoff;
		__entry->pglen = pglen;
	),
	TP_printk("dev %d:%d ino %#lx pgoff %#lx pglen %#lx",
		MAJOR(__entry->dev),
		MINOR(__entry->dev),
		__entry->ino,
		__entry->pgoff,
		__entry->pglen
	)
)

#endif /* _TRACE_FS_DAX_H */

/* This part must be outside protection */
#include <trace/define_trace.h>