Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2#undef TRACE_SYSTEM
3#define TRACE_SYSTEM writeback
4
5#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_WRITEBACK_H
7
8#include <linux/tracepoint.h>
9#include <linux/backing-dev.h>
10#include <linux/writeback.h>
11
/*
 * Decode an inode->i_state bitmask into a "|"-separated list of flag
 * names for trace output.
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_REFERENCED,		"I_REFERENCED"},	\
		{I_LINKABLE,		"I_LINKABLE"},		\
		{I_WB_SWITCH,		"I_WB_SWITCH"},		\
		{I_OVL_INUSE,		"I_OVL_INUSE"},		\
		{I_CREATING,		"I_CREATING"},		\
		{I_DONTCACHE,		"I_DONTCACHE"},		\
		{I_SYNC_QUEUED,		"I_SYNC_QUEUED"},	\
		{I_PINNING_NETFS_WB,	"I_PINNING_NETFS_WB"},	\
		{I_LRU_ISOLATING,	"I_LRU_ISOLATING"}	\
	)
33
/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)		TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

/*
 * Map each wb_reason enum value to the string printed in trace output.
 * Expanded twice: once (above) to export the enum values to user space,
 * then (below) as { value, name } pairs for __print_symbolic().
 */
#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,	"background")			\
	EM( WB_REASON_VMSCAN,		"vmscan")			\
	EM( WB_REASON_SYNC,		"sync")				\
	EM( WB_REASON_PERIODIC,		"periodic")			\
	EM( WB_REASON_FS_FREE_SPACE,	"fs_free_space")		\
	EM( WB_REASON_FORKER_THREAD,	"forker_thread")		\
	EMe(WB_REASON_FOREIGN_FLUSH,	"foreign_flush")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }

/* Opaque here; defined in fs/fs-writeback.c. */
struct wb_writeback_work;
61
62DECLARE_EVENT_CLASS(writeback_folio_template,
63
64 TP_PROTO(struct folio *folio, struct address_space *mapping),
65
66 TP_ARGS(folio, mapping),
67
68 TP_STRUCT__entry (
69 __array(char, name, 32)
70 __field(u64, ino)
71 __field(pgoff_t, index)
72 ),
73
74 TP_fast_assign(
75 strscpy_pad(__entry->name,
76 bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
77 NULL), 32);
78 __entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
79 __entry->index = folio->index;
80 ),
81
82 TP_printk("bdi %s: ino=%llu index=%lu",
83 __entry->name,
84 __entry->ino,
85 __entry->index
86 )
87);
88
89DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio,
90
91 TP_PROTO(struct folio *folio, struct address_space *mapping),
92
93 TP_ARGS(folio, mapping)
94);
95
96DEFINE_EVENT(writeback_folio_template, folio_wait_writeback,
97
98 TP_PROTO(struct folio *folio, struct address_space *mapping),
99
100 TP_ARGS(folio, mapping)
101);
102
103DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
104
105 TP_PROTO(struct inode *inode, int flags),
106
107 TP_ARGS(inode, flags),
108
109 TP_STRUCT__entry (
110 __array(char, name, 32)
111 __field(u64, ino)
112 __field(unsigned long, state)
113 __field(unsigned long, flags)
114 ),
115
116 TP_fast_assign(
117 struct backing_dev_info *bdi = inode_to_bdi(inode);
118
119 /* may be called for files on pseudo FSes w/ unregistered bdi */
120 strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
121 __entry->ino = inode->i_ino;
122 __entry->state = inode_state_read_once(inode);
123 __entry->flags = flags;
124 ),
125
126 TP_printk("bdi %s: ino=%llu state=%s flags=%s",
127 __entry->name,
128 __entry->ino,
129 show_inode_state(__entry->state),
130 show_inode_state(__entry->flags)
131 )
132);
133
134DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,
135
136 TP_PROTO(struct inode *inode, int flags),
137
138 TP_ARGS(inode, flags)
139);
140
141DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,
142
143 TP_PROTO(struct inode *inode, int flags),
144
145 TP_ARGS(inode, flags)
146);
147
148DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
149
150 TP_PROTO(struct inode *inode, int flags),
151
152 TP_ARGS(inode, flags)
153);
154
/*
 * Helpers mapping a bdi_writeback / writeback_control to the inode number
 * of its memcg cgroup for trace output.  Only needed when the events are
 * instantiated (CREATE_TRACE_POINTS); without cgroup writeback support
 * they return the constant 1 (the root cgroup ino).
 */
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline u64 __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return cgroup_ino(wb->memcg_css->cgroup);
}

static inline u64 __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	if (wbc->wb)
		return __trace_wb_assign_cgroup(wbc->wb);
	else
		return 1;
}
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline u64 __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline u64 __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
184
185#ifdef CONFIG_CGROUP_WRITEBACK
186TRACE_EVENT(inode_foreign_history,
187
188 TP_PROTO(struct inode *inode, struct writeback_control *wbc,
189 unsigned int history),
190
191 TP_ARGS(inode, wbc, history),
192
193 TP_STRUCT__entry(
194 __array(char, name, 32)
195 __field(u64, ino)
196 __field(u64, cgroup_ino)
197 __field(unsigned int, history)
198 ),
199
200 TP_fast_assign(
201 strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
202 __entry->ino = inode->i_ino;
203 __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
204 __entry->history = history;
205 ),
206
207 TP_printk("bdi %s: ino=%llu cgroup_ino=%llu history=0x%x",
208 __entry->name,
209 __entry->ino,
210 __entry->cgroup_ino,
211 __entry->history
212 )
213);
214
215TRACE_EVENT(inode_switch_wbs_queue,
216
217 TP_PROTO(struct bdi_writeback *old_wb, struct bdi_writeback *new_wb,
218 unsigned int count),
219
220 TP_ARGS(old_wb, new_wb, count),
221
222 TP_STRUCT__entry(
223 __array(char, name, 32)
224 __field(u64, old_cgroup_ino)
225 __field(u64, new_cgroup_ino)
226 __field(unsigned int, count)
227 ),
228
229 TP_fast_assign(
230 strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
231 __entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
232 __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
233 __entry->count = count;
234 ),
235
236 TP_printk("bdi %s: old_cgroup_ino=%llu new_cgroup_ino=%llu count=%u",
237 __entry->name,
238 __entry->old_cgroup_ino,
239 __entry->new_cgroup_ino,
240 __entry->count
241 )
242);
243
244TRACE_EVENT(inode_switch_wbs,
245
246 TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
247 struct bdi_writeback *new_wb),
248
249 TP_ARGS(inode, old_wb, new_wb),
250
251 TP_STRUCT__entry(
252 __array(char, name, 32)
253 __field(u64, ino)
254 __field(u64, old_cgroup_ino)
255 __field(u64, new_cgroup_ino)
256 ),
257
258 TP_fast_assign(
259 strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
260 __entry->ino = inode->i_ino;
261 __entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
262 __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
263 ),
264
265 TP_printk("bdi %s: ino=%llu old_cgroup_ino=%llu new_cgroup_ino=%llu",
266 __entry->name,
267 __entry->ino,
268 __entry->old_cgroup_ino,
269 __entry->new_cgroup_ino
270 )
271);
272
273TRACE_EVENT(track_foreign_dirty,
274
275 TP_PROTO(struct folio *folio, struct bdi_writeback *wb),
276
277 TP_ARGS(folio, wb),
278
279 TP_STRUCT__entry(
280 __array(char, name, 32)
281 __field(u64, bdi_id)
282 __field(u64, ino)
283 __field(u64, cgroup_ino)
284 __field(u64, page_cgroup_ino)
285 __field(unsigned int, memcg_id)
286 ),
287
288 TP_fast_assign(
289 struct address_space *mapping = folio_mapping(folio);
290 struct inode *inode = mapping ? mapping->host : NULL;
291
292 strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
293 __entry->bdi_id = wb->bdi->id;
294 __entry->ino = inode ? inode->i_ino : 0;
295 __entry->memcg_id = wb->memcg_css->id;
296 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
297
298 rcu_read_lock();
299 __entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
300 rcu_read_unlock();
301 ),
302
303 TP_printk("bdi %s[%llu]: ino=%llu memcg_id=%u cgroup_ino=%llu page_cgroup_ino=%llu",
304 __entry->name,
305 __entry->bdi_id,
306 __entry->ino,
307 __entry->memcg_id,
308 __entry->cgroup_ino,
309 __entry->page_cgroup_ino
310 )
311);
312
313TRACE_EVENT(flush_foreign,
314
315 TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
316 unsigned int frn_memcg_id),
317
318 TP_ARGS(wb, frn_bdi_id, frn_memcg_id),
319
320 TP_STRUCT__entry(
321 __array(char, name, 32)
322 __field(u64, cgroup_ino)
323 __field(unsigned int, frn_bdi_id)
324 __field(unsigned int, frn_memcg_id)
325 ),
326
327 TP_fast_assign(
328 strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
329 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
330 __entry->frn_bdi_id = frn_bdi_id;
331 __entry->frn_memcg_id = frn_memcg_id;
332 ),
333
334 TP_printk("bdi %s: cgroup_ino=%llu frn_bdi_id=%u frn_memcg_id=%u",
335 __entry->name,
336 __entry->cgroup_ino,
337 __entry->frn_bdi_id,
338 __entry->frn_memcg_id
339 )
340);
341#endif
342
343DECLARE_EVENT_CLASS(writeback_write_inode_template,
344
345 TP_PROTO(struct inode *inode, struct writeback_control *wbc),
346
347 TP_ARGS(inode, wbc),
348
349 TP_STRUCT__entry (
350 __array(char, name, 32)
351 __field(u64, ino)
352 __field(u64, cgroup_ino)
353 __field(int, sync_mode)
354 ),
355
356 TP_fast_assign(
357 strscpy_pad(__entry->name,
358 bdi_dev_name(inode_to_bdi(inode)), 32);
359 __entry->ino = inode->i_ino;
360 __entry->sync_mode = wbc->sync_mode;
361 __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
362 ),
363
364 TP_printk("bdi %s: ino=%llu sync_mode=%d cgroup_ino=%llu",
365 __entry->name,
366 __entry->ino,
367 __entry->sync_mode,
368 __entry->cgroup_ino
369 )
370);
371
372DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,
373
374 TP_PROTO(struct inode *inode, struct writeback_control *wbc),
375
376 TP_ARGS(inode, wbc)
377);
378
379DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,
380
381 TP_PROTO(struct inode *inode, struct writeback_control *wbc),
382
383 TP_ARGS(inode, wbc)
384);
385
386DECLARE_EVENT_CLASS(writeback_work_class,
387 TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
388 TP_ARGS(wb, work),
389 TP_STRUCT__entry(
390 __array(char, name, 32)
391 __field(u64, cgroup_ino)
392 __field(long, nr_pages)
393 __field(dev_t, sb_dev)
394 __field(int, sync_mode)
395 __field(int, for_kupdate)
396 __field(int, range_cyclic)
397 __field(int, for_background)
398 __field(int, reason)
399 ),
400 TP_fast_assign(
401 strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
402 __entry->nr_pages = work->nr_pages;
403 __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
404 __entry->sync_mode = work->sync_mode;
405 __entry->for_kupdate = work->for_kupdate;
406 __entry->range_cyclic = work->range_cyclic;
407 __entry->for_background = work->for_background;
408 __entry->reason = work->reason;
409 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
410 ),
411 TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
412 "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%llu",
413 __entry->name,
414 MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
415 __entry->nr_pages,
416 __entry->sync_mode,
417 __entry->for_kupdate,
418 __entry->range_cyclic,
419 __entry->for_background,
420 __print_symbolic(__entry->reason, WB_WORK_REASON),
421 __entry->cgroup_ino
422 )
423);
424#define DEFINE_WRITEBACK_WORK_EVENT(name) \
425DEFINE_EVENT(writeback_work_class, name, \
426 TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
427 TP_ARGS(wb, work))
428DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
429DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
430DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
431DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
432DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
433
434TRACE_EVENT(writeback_pages_written,
435 TP_PROTO(long pages_written),
436 TP_ARGS(pages_written),
437 TP_STRUCT__entry(
438 __field(long, pages)
439 ),
440 TP_fast_assign(
441 __entry->pages = pages_written;
442 ),
443 TP_printk("%ld", __entry->pages)
444);
445
446DECLARE_EVENT_CLASS(writeback_class,
447 TP_PROTO(struct bdi_writeback *wb),
448 TP_ARGS(wb),
449 TP_STRUCT__entry(
450 __array(char, name, 32)
451 __field(u64, cgroup_ino)
452 ),
453 TP_fast_assign(
454 strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
455 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
456 ),
457 TP_printk("bdi %s: cgroup_ino=%llu",
458 __entry->name,
459 __entry->cgroup_ino
460 )
461);
462#define DEFINE_WRITEBACK_EVENT(name) \
463DEFINE_EVENT(writeback_class, name, \
464 TP_PROTO(struct bdi_writeback *wb), \
465 TP_ARGS(wb))
466
467DEFINE_WRITEBACK_EVENT(writeback_wake_background);
468
469TRACE_EVENT(writeback_bdi_register,
470 TP_PROTO(struct backing_dev_info *bdi),
471 TP_ARGS(bdi),
472 TP_STRUCT__entry(
473 __array(char, name, 32)
474 ),
475 TP_fast_assign(
476 strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
477 ),
478 TP_printk("bdi %s",
479 __entry->name
480 )
481);
482
483DECLARE_EVENT_CLASS(wbc_class,
484 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
485 TP_ARGS(wbc, bdi),
486 TP_STRUCT__entry(
487 __array(char, name, 32)
488 __field(u64, cgroup_ino)
489 __field(long, nr_to_write)
490 __field(long, pages_skipped)
491 __field(long, range_start)
492 __field(long, range_end)
493 __field(int, sync_mode)
494 __field(int, for_kupdate)
495 __field(int, for_background)
496 __field(int, range_cyclic)
497 ),
498
499 TP_fast_assign(
500 strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
501 __entry->nr_to_write = wbc->nr_to_write;
502 __entry->pages_skipped = wbc->pages_skipped;
503 __entry->sync_mode = wbc->sync_mode;
504 __entry->for_kupdate = wbc->for_kupdate;
505 __entry->for_background = wbc->for_background;
506 __entry->range_cyclic = wbc->range_cyclic;
507 __entry->range_start = (long)wbc->range_start;
508 __entry->range_end = (long)wbc->range_end;
509 __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
510 ),
511
512 TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d bgrd=%d "
513 "cyclic=%d start=0x%lx end=0x%lx cgroup_ino=%llu",
514 __entry->name,
515 __entry->nr_to_write,
516 __entry->pages_skipped,
517 __entry->sync_mode,
518 __entry->for_kupdate,
519 __entry->for_background,
520 __entry->range_cyclic,
521 __entry->range_start,
522 __entry->range_end,
523 __entry->cgroup_ino
524 )
525)
526
527#define DEFINE_WBC_EVENT(name) \
528DEFINE_EVENT(wbc_class, name, \
529 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
530 TP_ARGS(wbc, bdi))
531DEFINE_WBC_EVENT(wbc_writepage);
532
533TRACE_EVENT(writeback_queue_io,
534 TP_PROTO(struct bdi_writeback *wb,
535 struct wb_writeback_work *work,
536 unsigned long dirtied_before,
537 int moved),
538 TP_ARGS(wb, work, dirtied_before, moved),
539 TP_STRUCT__entry(
540 __array(char, name, 32)
541 __field(u64, cgroup_ino)
542 __field(unsigned long, older)
543 __field(long, age)
544 __field(int, moved)
545 __field(int, reason)
546 ),
547 TP_fast_assign(
548 strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
549 __entry->older = dirtied_before;
550 __entry->age = (jiffies - dirtied_before) * 1000 / HZ;
551 __entry->moved = moved;
552 __entry->reason = work->reason;
553 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
554 ),
555 TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%llu",
556 __entry->name,
557 __entry->older, /* dirtied_before in jiffies */
558 __entry->age, /* dirtied_before in relative milliseconds */
559 __entry->moved,
560 __print_symbolic(__entry->reason, WB_WORK_REASON),
561 __entry->cgroup_ino
562 )
563);
564
565TRACE_EVENT(global_dirty_state,
566
567 TP_PROTO(unsigned long background_thresh,
568 unsigned long dirty_thresh
569 ),
570
571 TP_ARGS(background_thresh,
572 dirty_thresh
573 ),
574
575 TP_STRUCT__entry(
576 __field(unsigned long, nr_dirty)
577 __field(unsigned long, nr_writeback)
578 __field(unsigned long, background_thresh)
579 __field(unsigned long, dirty_thresh)
580 __field(unsigned long, dirty_limit)
581 __field(unsigned long, nr_dirtied)
582 __field(unsigned long, nr_written)
583 ),
584
585 TP_fast_assign(
586 __entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY);
587 __entry->nr_writeback = global_node_page_state(NR_WRITEBACK);
588 __entry->nr_dirtied = global_node_page_state(NR_DIRTIED);
589 __entry->nr_written = global_node_page_state(NR_WRITTEN);
590 __entry->background_thresh = background_thresh;
591 __entry->dirty_thresh = dirty_thresh;
592 __entry->dirty_limit = global_wb_domain.dirty_limit;
593 ),
594
595 TP_printk("dirty=%lu writeback=%lu "
596 "bg_thresh=%lu thresh=%lu limit=%lu "
597 "dirtied=%lu written=%lu",
598 __entry->nr_dirty,
599 __entry->nr_writeback,
600 __entry->background_thresh,
601 __entry->dirty_thresh,
602 __entry->dirty_limit,
603 __entry->nr_dirtied,
604 __entry->nr_written
605 )
606);
607
608#define KBps(x) ((x) << (PAGE_SHIFT - 10))
609
610TRACE_EVENT(bdi_dirty_ratelimit,
611
612 TP_PROTO(struct bdi_writeback *wb,
613 unsigned long dirty_rate,
614 unsigned long task_ratelimit),
615
616 TP_ARGS(wb, dirty_rate, task_ratelimit),
617
618 TP_STRUCT__entry(
619 __array(char, bdi, 32)
620 __field(u64, cgroup_ino)
621 __field(unsigned long, write_bw)
622 __field(unsigned long, avg_write_bw)
623 __field(unsigned long, dirty_rate)
624 __field(unsigned long, dirty_ratelimit)
625 __field(unsigned long, task_ratelimit)
626 __field(unsigned long, balanced_dirty_ratelimit)
627 ),
628
629 TP_fast_assign(
630 strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
631 __entry->write_bw = KBps(wb->write_bandwidth);
632 __entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
633 __entry->dirty_rate = KBps(dirty_rate);
634 __entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
635 __entry->task_ratelimit = KBps(task_ratelimit);
636 __entry->balanced_dirty_ratelimit =
637 KBps(wb->balanced_dirty_ratelimit);
638 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
639 ),
640
641 TP_printk("bdi %s: "
642 "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
643 "dirty_ratelimit=%lu task_ratelimit=%lu "
644 "balanced_dirty_ratelimit=%lu cgroup_ino=%llu",
645 __entry->bdi,
646 __entry->write_bw, /* write bandwidth */
647 __entry->avg_write_bw, /* avg write bandwidth */
648 __entry->dirty_rate, /* bdi dirty rate */
649 __entry->dirty_ratelimit, /* base ratelimit */
650 __entry->task_ratelimit, /* ratelimit with position control */
651 __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
652 __entry->cgroup_ino
653 )
654);
655
656TRACE_EVENT(balance_dirty_pages,
657
658 TP_PROTO(struct bdi_writeback *wb,
659 struct dirty_throttle_control *dtc,
660 unsigned long dirty_ratelimit,
661 unsigned long task_ratelimit,
662 unsigned long dirtied,
663 unsigned long period,
664 long pause,
665 unsigned long start_time),
666
667 TP_ARGS(wb, dtc,
668 dirty_ratelimit, task_ratelimit,
669 dirtied, period, pause, start_time),
670
671 TP_STRUCT__entry(
672 __array( char, bdi, 32)
673 __field(u64, cgroup_ino)
674 __field(unsigned long, limit)
675 __field(unsigned long, setpoint)
676 __field(unsigned long, dirty)
677 __field(unsigned long, wb_setpoint)
678 __field(unsigned long, wb_dirty)
679 __field(unsigned long, dirty_ratelimit)
680 __field(unsigned long, task_ratelimit)
681 __field(unsigned long, paused)
682 __field( long, pause)
683 __field(unsigned long, period)
684 __field( long, think)
685 __field(unsigned int, dirtied)
686 __field(unsigned int, dirtied_pause)
687 ),
688
689 TP_fast_assign(
690 unsigned long freerun = (dtc->thresh + dtc->bg_thresh) / 2;
691 strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
692
693 __entry->limit = dtc->limit;
694 __entry->setpoint = (dtc->limit + freerun) / 2;
695 __entry->dirty = dtc->dirty;
696 __entry->wb_setpoint = __entry->setpoint *
697 dtc->wb_thresh / (dtc->thresh + 1);
698 __entry->wb_dirty = dtc->wb_dirty;
699 __entry->dirty_ratelimit = KBps(dirty_ratelimit);
700 __entry->task_ratelimit = KBps(task_ratelimit);
701 __entry->dirtied = dirtied;
702 __entry->dirtied_pause = current->nr_dirtied_pause;
703 __entry->think = current->dirty_paused_when == 0 ? 0 :
704 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
705 __entry->period = period * 1000 / HZ;
706 __entry->pause = pause * 1000 / HZ;
707 __entry->paused = (jiffies - start_time) * 1000 / HZ;
708 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
709 ),
710
711
712 TP_printk("bdi %s: "
713 "limit=%lu setpoint=%lu dirty=%lu "
714 "wb_setpoint=%lu wb_dirty=%lu "
715 "dirty_ratelimit=%lu task_ratelimit=%lu "
716 "dirtied=%u dirtied_pause=%u "
717 "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%llu",
718 __entry->bdi,
719 __entry->limit,
720 __entry->setpoint,
721 __entry->dirty,
722 __entry->wb_setpoint,
723 __entry->wb_dirty,
724 __entry->dirty_ratelimit,
725 __entry->task_ratelimit,
726 __entry->dirtied,
727 __entry->dirtied_pause,
728 __entry->paused, /* ms */
729 __entry->pause, /* ms */
730 __entry->period, /* ms */
731 __entry->think, /* ms */
732 __entry->cgroup_ino
733 )
734);
735
736TRACE_EVENT(writeback_sb_inodes_requeue,
737
738 TP_PROTO(struct inode *inode),
739 TP_ARGS(inode),
740
741 TP_STRUCT__entry(
742 __array(char, name, 32)
743 __field(u64, ino)
744 __field(u64, cgroup_ino)
745 __field(unsigned long, state)
746 __field(unsigned long, dirtied_when)
747 ),
748
749 TP_fast_assign(
750 strscpy_pad(__entry->name,
751 bdi_dev_name(inode_to_bdi(inode)), 32);
752 __entry->ino = inode->i_ino;
753 __entry->state = inode_state_read_once(inode);
754 __entry->dirtied_when = inode->dirtied_when;
755 __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
756 ),
757
758 TP_printk("bdi %s: ino=%llu state=%s dirtied_when=%lu age=%lu cgroup_ino=%llu",
759 __entry->name,
760 __entry->ino,
761 show_inode_state(__entry->state),
762 __entry->dirtied_when,
763 (jiffies - __entry->dirtied_when) / HZ,
764 __entry->cgroup_ino
765 )
766);
767
768DECLARE_EVENT_CLASS(writeback_single_inode_template,
769
770 TP_PROTO(struct inode *inode,
771 struct writeback_control *wbc,
772 unsigned long nr_to_write
773 ),
774
775 TP_ARGS(inode, wbc, nr_to_write),
776
777 TP_STRUCT__entry(
778 __array(char, name, 32)
779 __field(u64, ino)
780 __field(u64, cgroup_ino)
781 __field(unsigned long, state)
782 __field(unsigned long, dirtied_when)
783 __field(unsigned long, writeback_index)
784 __field(unsigned long, wrote)
785 __field(long, nr_to_write)
786 ),
787
788 TP_fast_assign(
789 strscpy_pad(__entry->name,
790 bdi_dev_name(inode_to_bdi(inode)), 32);
791 __entry->ino = inode->i_ino;
792 __entry->state = inode_state_read_once(inode);
793 __entry->dirtied_when = inode->dirtied_when;
794 __entry->writeback_index = inode->i_mapping->writeback_index;
795 __entry->nr_to_write = nr_to_write;
796 __entry->wrote = nr_to_write - wbc->nr_to_write;
797 __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
798 ),
799
800 TP_printk("bdi %s: ino=%llu state=%s dirtied_when=%lu age=%lu "
801 "index=%lu to_write=%ld wrote=%lu cgroup_ino=%llu",
802 __entry->name,
803 __entry->ino,
804 show_inode_state(__entry->state),
805 __entry->dirtied_when,
806 (jiffies - __entry->dirtied_when) / HZ,
807 __entry->writeback_index,
808 __entry->nr_to_write,
809 __entry->wrote,
810 __entry->cgroup_ino
811 )
812);
813
814DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
815 TP_PROTO(struct inode *inode,
816 struct writeback_control *wbc,
817 unsigned long nr_to_write),
818 TP_ARGS(inode, wbc, nr_to_write)
819);
820
821DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
822 TP_PROTO(struct inode *inode,
823 struct writeback_control *wbc,
824 unsigned long nr_to_write),
825 TP_ARGS(inode, wbc, nr_to_write)
826);
827
828DECLARE_EVENT_CLASS(writeback_inode_template,
829 TP_PROTO(struct inode *inode),
830
831 TP_ARGS(inode),
832
833 TP_STRUCT__entry(
834 __field( u64, ino )
835 __field(unsigned long, state )
836 __field(unsigned long, dirtied_when )
837 __field( dev_t, dev )
838 __field( __u16, mode )
839 ),
840
841 TP_fast_assign(
842 __entry->dev = inode->i_sb->s_dev;
843 __entry->ino = inode->i_ino;
844 __entry->state = inode_state_read_once(inode);
845 __entry->mode = inode->i_mode;
846 __entry->dirtied_when = inode->dirtied_when;
847 ),
848
849 TP_printk("dev %d,%d ino %llu dirtied %lu state %s mode 0%o",
850 MAJOR(__entry->dev), MINOR(__entry->dev),
851 __entry->ino, __entry->dirtied_when,
852 show_inode_state(__entry->state), __entry->mode)
853);
854
855DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
856 TP_PROTO(struct inode *inode),
857
858 TP_ARGS(inode)
859);
860
861DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,
862
863 TP_PROTO(struct inode *inode),
864
865 TP_ARGS(inode)
866);
867
868/*
869 * Inode writeback list tracking.
870 */
871
872DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
873 TP_PROTO(struct inode *inode),
874 TP_ARGS(inode)
875);
876
877DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
878 TP_PROTO(struct inode *inode),
879 TP_ARGS(inode)
880);
881
882#endif /* _TRACE_WRITEBACK_H */
883
884/* This part must be outside protection */
885#include <trace/define_trace.h>