Diffstat (limited to 'include/trace')
-rw-r--r-- | include/trace/events/btrfs.h | 91
-rw-r--r-- | include/trace/events/compaction.h | 57
-rw-r--r-- | include/trace/events/cpuhp.h | 66
-rw-r--r-- | include/trace/events/f2fs.h | 12
-rw-r--r-- | include/trace/events/fib6.h | 2
-rw-r--r-- | include/trace/events/gfpflags.h | 43
-rw-r--r-- | include/trace/events/huge_memory.h | 2
-rw-r--r-- | include/trace/events/kmem.h | 44
-rw-r--r-- | include/trace/events/kvm.h | 9
-rw-r--r-- | include/trace/events/mmflags.h | 183
-rw-r--r-- | include/trace/events/page_isolation.h | 2
-rw-r--r-- | include/trace/events/page_ref.h | 134
-rw-r--r-- | include/trace/events/power.h | 22
-rw-r--r-- | include/trace/events/sunvnet.h | 139
-rw-r--r-- | include/trace/events/thermal.h | 16
-rw-r--r-- | include/trace/events/timer.h | 36
-rw-r--r-- | include/trace/events/tlb.h | 4
-rw-r--r-- | include/trace/events/vmscan.h | 2
-rw-r--r-- | include/trace/events/writeback.h | 121
19 files changed, 801 insertions, 184 deletions
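
A recurring change in this set is that <trace/events/gfpflags.h> is deleted and its show_gfp_flags() helper moves into the new <trace/events/mmflags.h>, which additionally defines show_page_flags() and show_vma_flags(); btrfs.h, compaction.h, kmem.h, vmscan.h and huge_memory.h switch (or drop) their include accordingly. For orientation only -- this is not part of the patch -- a tracepoint header that consumes the new helpers would look roughly like the sketch below. The "foo" system name, the foo_alloc event and its fields are made up for illustration, and like the in-tree headers it relies on the including .c file to pull in the usual mm headers.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FOO_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>	/* show_gfp_flags(), show_page_flags() */

/* Hypothetical event, only to illustrate the mmflags.h helpers. */
TRACE_EVENT(foo_alloc,

	TP_PROTO(struct page *page, gfp_t gfp_flags),

	TP_ARGS(page, gfp_flags),

	TP_STRUCT__entry(
		__field(unsigned long, pfn)
		__field(unsigned long, page_flags)
		__field(gfp_t, gfp_flags)
	),

	TP_fast_assign(
		__entry->pfn = page_to_pfn(page);
		__entry->page_flags = page->flags;
		__entry->gfp_flags = gfp_flags;
	),

	/*
	 * Mask with NR_PAGEFLAGS the way the new page_ref events do, so
	 * show_page_flags() only decodes real page flag bits.
	 */
	TP_printk("pfn=0x%lx flags=%s gfp=%s",
		__entry->pfn,
		show_page_flags(__entry->page_flags &
				((1UL << NR_PAGEFLAGS) - 1)),
		show_gfp_flags(__entry->gfp_flags))
);

#endif /* _TRACE_FOO_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
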
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index d866f21ef..e90e82ad6 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -6,7 +6,7 @@ #include <linux/writeback.h> #include <linux/tracepoint.h> -#include <trace/events/gfpflags.h> +#include <trace/events/mmflags.h> struct btrfs_root; struct btrfs_fs_info; @@ -23,7 +23,7 @@ struct map_lookup; struct extent_buffer; struct btrfs_work; struct __btrfs_workqueue; -struct btrfs_qgroup_operation; +struct btrfs_qgroup_extent_record; #define show_ref_type(type) \ __print_symbolic(type, \ @@ -1231,6 +1231,93 @@ DEFINE_EVENT(btrfs__qgroup_delayed_ref, btrfs_qgroup_free_delayed_ref, TP_ARGS(ref_root, reserved) ); + +DECLARE_EVENT_CLASS(btrfs_qgroup_extent, + TP_PROTO(struct btrfs_qgroup_extent_record *rec), + + TP_ARGS(rec), + + TP_STRUCT__entry( + __field( u64, bytenr ) + __field( u64, num_bytes ) + ), + + TP_fast_assign( + __entry->bytenr = rec->bytenr, + __entry->num_bytes = rec->num_bytes; + ), + + TP_printk("bytenr = %llu, num_bytes = %llu", + (unsigned long long)__entry->bytenr, + (unsigned long long)__entry->num_bytes) +); + +DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents, + + TP_PROTO(struct btrfs_qgroup_extent_record *rec), + + TP_ARGS(rec) +); + +DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent, + + TP_PROTO(struct btrfs_qgroup_extent_record *rec), + + TP_ARGS(rec) +); + +TRACE_EVENT(btrfs_qgroup_account_extent, + + TP_PROTO(u64 bytenr, u64 num_bytes, u64 nr_old_roots, u64 nr_new_roots), + + TP_ARGS(bytenr, num_bytes, nr_old_roots, nr_new_roots), + + TP_STRUCT__entry( + __field( u64, bytenr ) + __field( u64, num_bytes ) + __field( u64, nr_old_roots ) + __field( u64, nr_new_roots ) + ), + + TP_fast_assign( + __entry->bytenr = bytenr; + __entry->num_bytes = num_bytes; + __entry->nr_old_roots = nr_old_roots; + __entry->nr_new_roots = nr_new_roots; + ), + + TP_printk("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, " + "nr_new_roots = %llu", + __entry->bytenr, + __entry->num_bytes, + __entry->nr_old_roots, + __entry->nr_new_roots) +); + +TRACE_EVENT(qgroup_update_counters, + + TP_PROTO(u64 qgid, u64 cur_old_count, u64 cur_new_count), + + TP_ARGS(qgid, cur_old_count, cur_new_count), + + TP_STRUCT__entry( + __field( u64, qgid ) + __field( u64, cur_old_count ) + __field( u64, cur_new_count ) + ), + + TP_fast_assign( + __entry->qgid = qgid; + __entry->cur_old_count = cur_old_count; + __entry->cur_new_count = cur_new_count; + ), + + TP_printk("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu", + __entry->qgid, + __entry->cur_old_count, + __entry->cur_new_count) +); + #endif /* _TRACE_BTRFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index c92d1e1cb..e215bf68f 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h @@ -7,7 +7,7 @@ #include <linux/types.h> #include <linux/list.h> #include <linux/tracepoint.h> -#include <trace/events/gfpflags.h> +#include <trace/events/mmflags.h> #define COMPACTION_STATUS \ EM( COMPACT_DEFERRED, "deferred") \ @@ -350,6 +350,61 @@ DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset, ); #endif +TRACE_EVENT(mm_compaction_kcompactd_sleep, + + TP_PROTO(int nid), + + TP_ARGS(nid), + + TP_STRUCT__entry( + __field(int, nid) + ), + + TP_fast_assign( + __entry->nid = nid; + ), + + TP_printk("nid=%d", __entry->nid) +); + +DECLARE_EVENT_CLASS(kcompactd_wake_template, + + TP_PROTO(int nid, int 
order, enum zone_type classzone_idx), + + TP_ARGS(nid, order, classzone_idx), + + TP_STRUCT__entry( + __field(int, nid) + __field(int, order) + __field(enum zone_type, classzone_idx) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->order = order; + __entry->classzone_idx = classzone_idx; + ), + + TP_printk("nid=%d order=%d classzone_idx=%-8s", + __entry->nid, + __entry->order, + __print_symbolic(__entry->classzone_idx, ZONE_TYPE)) +); + +DEFINE_EVENT(kcompactd_wake_template, mm_compaction_wakeup_kcompactd, + + TP_PROTO(int nid, int order, enum zone_type classzone_idx), + + TP_ARGS(nid, order, classzone_idx) +); + +DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake, + + TP_PROTO(int nid, int order, enum zone_type classzone_idx), + + TP_ARGS(nid, order, classzone_idx) +); + #endif /* _TRACE_COMPACTION_H */ /* This part must be outside protection */ diff --git a/include/trace/events/cpuhp.h b/include/trace/events/cpuhp.h new file mode 100644 index 000000000..a72bd93ec --- /dev/null +++ b/include/trace/events/cpuhp.h @@ -0,0 +1,66 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cpuhp + +#if !defined(_TRACE_CPUHP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CPUHP_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(cpuhp_enter, + + TP_PROTO(unsigned int cpu, + int target, + int idx, + int (*fun)(unsigned int)), + + TP_ARGS(cpu, target, idx, fun), + + TP_STRUCT__entry( + __field( unsigned int, cpu ) + __field( int, target ) + __field( int, idx ) + __field( void *, fun ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->target = target; + __entry->idx = idx; + __entry->fun = fun; + ), + + TP_printk("cpu: %04u target: %3d step: %3d (%pf)", + __entry->cpu, __entry->target, __entry->idx, __entry->fun) +); + +TRACE_EVENT(cpuhp_exit, + + TP_PROTO(unsigned int cpu, + int state, + int idx, + int ret), + + TP_ARGS(cpu, state, idx, ret), + + TP_STRUCT__entry( + __field( unsigned int, cpu ) + __field( int, state ) + __field( int, idx ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->state = state; + __entry->idx = idx; + __entry->ret = ret; + ), + + TP_printk(" cpu: %04u state: %3d step: %3d ret: %d", + __entry->cpu, __entry->state, __entry->idx, __entry->ret) +); + +#endif + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index a1b488809..0f5658457 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -52,6 +52,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD); { META_FLUSH, "META_FLUSH" }, \ { INMEM, "INMEM" }, \ { INMEM_DROP, "INMEM_DROP" }, \ + { INMEM_REVOKE, "INMEM_REVOKE" }, \ { IPU, "IN-PLACE" }, \ { OPU, "OUT-OF-PLACE" }) @@ -727,7 +728,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, __field(dev_t, dev) __field(ino_t, ino) __field(pgoff_t, index) - __field(block_t, blkaddr) + __field(block_t, old_blkaddr) + __field(block_t, new_blkaddr) __field(int, rw) __field(int, type) ), @@ -736,16 +738,18 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, __entry->dev = page->mapping->host->i_sb->s_dev; __entry->ino = page->mapping->host->i_ino; __entry->index = page->index; - __entry->blkaddr = fio->blk_addr; + __entry->old_blkaddr = fio->old_blkaddr; + __entry->new_blkaddr = fio->new_blkaddr; __entry->rw = fio->rw; __entry->type = fio->type; ), TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " - "blkaddr = 0x%llx, rw = %s%s, type = %s", + "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%s, type = %s", show_dev_ino(__entry), 
(unsigned long)__entry->index, - (unsigned long long)__entry->blkaddr, + (unsigned long long)__entry->old_blkaddr, + (unsigned long long)__entry->new_blkaddr, show_bio_type(__entry->rw), show_block_type(__entry->type)) ); diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h index 4cf6bac46..d60096cdd 100644 --- a/include/trace/events/fib6.h +++ b/include/trace/events/fib6.h @@ -37,7 +37,7 @@ TRACE_EVENT(fib6_table_lookup, __entry->tb_id = tb_id; __entry->oif = flp->flowi6_oif; __entry->iif = flp->flowi6_iif; - __entry->tos = flp->flowi6_tos; + __entry->tos = ip6_tclass(flp->flowlabel); __entry->scope = flp->flowi6_scope; __entry->flags = flp->flowi6_flags; diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h deleted file mode 100644 index dde6bf092..000000000 --- a/include/trace/events/gfpflags.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * The order of these masks is important. Matching masks will be seen - * first and the left over flags will end up showing by themselves. - * - * For example, if we have GFP_KERNEL before GFP_USER we wil get: - * - * GFP_KERNEL|GFP_HARDWALL - * - * Thus most bits set go first. - */ -#define show_gfp_flags(flags) \ - (flags) ? __print_flags(flags, "|", \ - {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \ - {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ - {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ - {(unsigned long)GFP_USER, "GFP_USER"}, \ - {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \ - {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \ - {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \ - {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \ - {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \ - {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \ - {(unsigned long)__GFP_ATOMIC, "GFP_ATOMIC"}, \ - {(unsigned long)__GFP_IO, "GFP_IO"}, \ - {(unsigned long)__GFP_COLD, "GFP_COLD"}, \ - {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \ - {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \ - {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \ - {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \ - {(unsigned long)__GFP_COMP, "GFP_COMP"}, \ - {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \ - {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \ - {(unsigned long)__GFP_MEMALLOC, "GFP_MEMALLOC"}, \ - {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ - {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ - {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ - {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \ - {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ - {(unsigned long)__GFP_DIRECT_RECLAIM, "GFP_DIRECT_RECLAIM"}, \ - {(unsigned long)__GFP_KSWAPD_RECLAIM, "GFP_KSWAPD_RECLAIM"}, \ - {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \ - ) : "GFP_NOWAIT" - diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 47c6212d8..551ba4acd 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -6,8 +6,6 @@ #include <linux/tracepoint.h> -#include <trace/events/gfpflags.h> - #define SCAN_STATUS \ EM( SCAN_FAIL, "failed") \ EM( SCAN_SUCCEED, "succeeded") \ diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index f7554fd7f..6b2e154fd 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -6,7 +6,7 @@ #include <linux/types.h> #include <linux/tracepoint.h> -#include <trace/events/gfpflags.h> +#include <trace/events/mmflags.h> DECLARE_EVENT_CLASS(kmem_alloc, @@ -140,42 +140,19 @@ DEFINE_EVENT(kmem_free, kfree, TP_ARGS(call_site, ptr) ); 
-DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free, +DEFINE_EVENT(kmem_free, kmem_cache_free, TP_PROTO(unsigned long call_site, const void *ptr), - TP_ARGS(call_site, ptr), - - /* - * This trace can be potentially called from an offlined cpu. - * Since trace points use RCU and RCU should not be used from - * offline cpus, filter such calls out. - * While this trace can be called from a preemptable section, - * it has no impact on the condition since tasks can migrate - * only from online cpus to other online cpus. Thus its safe - * to use raw_smp_processor_id. - */ - TP_CONDITION(cpu_online(raw_smp_processor_id())) + TP_ARGS(call_site, ptr) ); -TRACE_EVENT_CONDITION(mm_page_free, +TRACE_EVENT(mm_page_free, TP_PROTO(struct page *page, unsigned int order), TP_ARGS(page, order), - - /* - * This trace can be potentially called from an offlined cpu. - * Since trace points use RCU and RCU should not be used from - * offline cpus, filter such calls out. - * While this trace can be called from a preemptable section, - * it has no impact on the condition since tasks can migrate - * only from online cpus to other online cpus. Thus its safe - * to use raw_smp_processor_id. - */ - TP_CONDITION(cpu_online(raw_smp_processor_id())), - TP_STRUCT__entry( __field( unsigned long, pfn ) __field( unsigned int, order ) @@ -276,23 +253,12 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked, TP_ARGS(page, order, migratetype) ); -TRACE_EVENT_CONDITION(mm_page_pcpu_drain, +TRACE_EVENT(mm_page_pcpu_drain, TP_PROTO(struct page *page, unsigned int order, int migratetype), TP_ARGS(page, order, migratetype), - /* - * This trace can be potentially called from an offlined cpu. - * Since trace points use RCU and RCU should not be used from - * offline cpus, filter such calls out. - * While this trace can be called from a preemptable section, - * it has no impact on the condition since tasks can migrate - * only from online cpus to other online cpus. Thus its safe - * to use raw_smp_processor_id. - */ - TP_CONDITION(cpu_online(raw_smp_processor_id())), - TP_STRUCT__entry( __field( unsigned long, pfn ) __field( unsigned int, order ) diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index d6f83222a..aa69253ec 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -359,14 +359,15 @@ TRACE_EVENT( #endif TRACE_EVENT(kvm_halt_poll_ns, - TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old), + TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new, + unsigned int old), TP_ARGS(grow, vcpu_id, new, old), TP_STRUCT__entry( __field(bool, grow) __field(unsigned int, vcpu_id) - __field(int, new) - __field(int, old) + __field(unsigned int, new) + __field(unsigned int, old) ), TP_fast_assign( @@ -376,7 +377,7 @@ TRACE_EVENT(kvm_halt_poll_ns, __entry->old = old; ), - TP_printk("vcpu %u: halt_poll_ns %d (%s %d)", + TP_printk("vcpu %u: halt_poll_ns %u (%s %u)", __entry->vcpu_id, __entry->new, __entry->grow ? "grow" : "shrink", diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h new file mode 100644 index 000000000..98930eae5 --- /dev/null +++ b/include/trace/events/mmflags.h @@ -0,0 +1,183 @@ +/* + * The order of these masks is important. Matching masks will be seen + * first and the left over flags will end up showing by themselves. + * + * For example, if we have GFP_KERNEL before GFP_USER we wil get: + * + * GFP_KERNEL|GFP_HARDWALL + * + * Thus most bits set go first. 
+ */ + +#define __def_gfpflag_names \ + {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \ + {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\ + {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ + {(unsigned long)GFP_USER, "GFP_USER"}, \ + {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \ + {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \ + {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \ + {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \ + {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \ + {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \ + {(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \ + {(unsigned long)GFP_DMA, "GFP_DMA"}, \ + {(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \ + {(unsigned long)GFP_DMA32, "GFP_DMA32"}, \ + {(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \ + {(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \ + {(unsigned long)__GFP_IO, "__GFP_IO"}, \ + {(unsigned long)__GFP_FS, "__GFP_FS"}, \ + {(unsigned long)__GFP_COLD, "__GFP_COLD"}, \ + {(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \ + {(unsigned long)__GFP_REPEAT, "__GFP_REPEAT"}, \ + {(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \ + {(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \ + {(unsigned long)__GFP_COMP, "__GFP_COMP"}, \ + {(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \ + {(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \ + {(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \ + {(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \ + {(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \ + {(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \ + {(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \ + {(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \ + {(unsigned long)__GFP_NOTRACK, "__GFP_NOTRACK"}, \ + {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \ + {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \ + {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\ + {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\ + {(unsigned long)__GFP_OTHER_NODE, "__GFP_OTHER_NODE"} \ + +#define show_gfp_flags(flags) \ + (flags) ? 
__print_flags(flags, "|", \ + __def_gfpflag_names \ + ) : "none" + +#ifdef CONFIG_MMU +#define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_MLOCK(flag,string) +#endif + +#ifdef CONFIG_ARCH_USES_PG_UNCACHED +#define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_UNCACHED(flag,string) +#endif + +#ifdef CONFIG_MEMORY_FAILURE +#define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_HWPOISON(flag,string) +#endif + +#ifdef CONFIG_TUXONICE_ +#define IF_HAVE_PG_TUXONICE(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_TUXONICE(flag,string) +#endif + +#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) +#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_IDLE(flag,string) +#endif + +#define __def_pageflag_names \ + {1UL << PG_locked, "locked" }, \ + {1UL << PG_error, "error" }, \ + {1UL << PG_referenced, "referenced" }, \ + {1UL << PG_uptodate, "uptodate" }, \ + {1UL << PG_dirty, "dirty" }, \ + {1UL << PG_lru, "lru" }, \ + {1UL << PG_active, "active" }, \ + {1UL << PG_slab, "slab" }, \ + {1UL << PG_owner_priv_1, "owner_priv_1" }, \ + {1UL << PG_arch_1, "arch_1" }, \ + {1UL << PG_reserved, "reserved" }, \ + {1UL << PG_private, "private" }, \ + {1UL << PG_private_2, "private_2" }, \ + {1UL << PG_writeback, "writeback" }, \ + {1UL << PG_head, "head" }, \ + {1UL << PG_swapcache, "swapcache" }, \ + {1UL << PG_mappedtodisk, "mappedtodisk" }, \ + {1UL << PG_reclaim, "reclaim" }, \ + {1UL << PG_swapbacked, "swapbacked" }, \ + {1UL << PG_unevictable, "unevictable" } \ +IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \ +IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \ +IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \ +IF_HAVE_PG_TUXONICE(PG_toi_untracked, "toi_untracked" ) \ +IF_HAVE_PG_TUXONICE(PG_toi_ro, "toi_ro" ) \ +IF_HAVE_PG_TUXONICE(PG_toi_cbw, "toi_cbw" ) \ +IF_HAVE_PG_TUXONICE(PG_toi_dirty, "toi_dirty" ) \ +IF_HAVE_PG_IDLE(PG_young, "young" ) \ +IF_HAVE_PG_IDLE(PG_idle, "idle" ) + +#define show_page_flags(flags) \ + (flags) ? 
__print_flags(flags, "|", \ + __def_pageflag_names \ + ) : "none" + +#if defined(CONFIG_X86) +#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } +#elif defined(CONFIG_PPC) +#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } +#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64) +#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } +#elif !defined(CONFIG_MMU) +#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" } +#else +#define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1" } +#endif + +#if defined(CONFIG_X86) +#define __VM_ARCH_SPECIFIC_2 {VM_MPX, "mpx" } +#else +#define __VM_ARCH_SPECIFIC_2 {VM_ARCH_2, "arch_2" } +#endif + +#ifdef CONFIG_MEM_SOFT_DIRTY +#define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name }, +#else +#define IF_HAVE_VM_SOFTDIRTY(flag,name) +#endif + +#define __def_vmaflag_names \ + {VM_READ, "read" }, \ + {VM_WRITE, "write" }, \ + {VM_EXEC, "exec" }, \ + {VM_SHARED, "shared" }, \ + {VM_MAYREAD, "mayread" }, \ + {VM_MAYWRITE, "maywrite" }, \ + {VM_MAYEXEC, "mayexec" }, \ + {VM_MAYSHARE, "mayshare" }, \ + {VM_GROWSDOWN, "growsdown" }, \ + {VM_UFFD_MISSING, "uffd_missing" }, \ + {VM_PFNMAP, "pfnmap" }, \ + {VM_DENYWRITE, "denywrite" }, \ + {VM_UFFD_WP, "uffd_wp" }, \ + {VM_LOCKED, "locked" }, \ + {VM_IO, "io" }, \ + {VM_SEQ_READ, "seqread" }, \ + {VM_RAND_READ, "randread" }, \ + {VM_DONTCOPY, "dontcopy" }, \ + {VM_DONTEXPAND, "dontexpand" }, \ + {VM_LOCKONFAULT, "lockonfault" }, \ + {VM_ACCOUNT, "account" }, \ + {VM_NORESERVE, "noreserve" }, \ + {VM_HUGETLB, "hugetlb" }, \ + __VM_ARCH_SPECIFIC_1 , \ + __VM_ARCH_SPECIFIC_2 , \ + {VM_DONTDUMP, "dontdump" }, \ +IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \ + {VM_MIXEDMAP, "mixedmap" }, \ + {VM_HUGEPAGE, "hugepage" }, \ + {VM_NOHUGEPAGE, "nohugepage" }, \ + {VM_MERGEABLE, "mergeable" } \ + +#define show_vma_flags(flags) \ + (flags) ? __print_flags(flags, "|", \ + __def_vmaflag_names \ + ) : "none" diff --git a/include/trace/events/page_isolation.h b/include/trace/events/page_isolation.h index 6fb644029..8738a78e6 100644 --- a/include/trace/events/page_isolation.h +++ b/include/trace/events/page_isolation.h @@ -29,7 +29,7 @@ TRACE_EVENT(test_pages_isolated, TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s", __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn, - __entry->end_pfn == __entry->fin_pfn ? "success" : "fail") + __entry->end_pfn <= __entry->fin_pfn ? 
"success" : "fail") ); #endif /* _TRACE_PAGE_ISOLATION_H */ diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h new file mode 100644 index 000000000..81001f8b0 --- /dev/null +++ b/include/trace/events/page_ref.h @@ -0,0 +1,134 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM page_ref + +#if !defined(_TRACE_PAGE_REF_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PAGE_REF_H + +#include <linux/types.h> +#include <linux/page_ref.h> +#include <linux/tracepoint.h> +#include <trace/events/mmflags.h> + +DECLARE_EVENT_CLASS(page_ref_mod_template, + + TP_PROTO(struct page *page, int v), + + TP_ARGS(page, v), + + TP_STRUCT__entry( + __field(unsigned long, pfn) + __field(unsigned long, flags) + __field(int, count) + __field(int, mapcount) + __field(void *, mapping) + __field(int, mt) + __field(int, val) + ), + + TP_fast_assign( + __entry->pfn = page_to_pfn(page); + __entry->flags = page->flags; + __entry->count = page_ref_count(page); + __entry->mapcount = page_mapcount(page); + __entry->mapping = page->mapping; + __entry->mt = get_pageblock_migratetype(page); + __entry->val = v; + ), + + TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d", + __entry->pfn, + show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)), + __entry->count, + __entry->mapcount, __entry->mapping, __entry->mt, + __entry->val) +); + +DEFINE_EVENT(page_ref_mod_template, page_ref_set, + + TP_PROTO(struct page *page, int v), + + TP_ARGS(page, v) +); + +DEFINE_EVENT(page_ref_mod_template, page_ref_mod, + + TP_PROTO(struct page *page, int v), + + TP_ARGS(page, v) +); + +DECLARE_EVENT_CLASS(page_ref_mod_and_test_template, + + TP_PROTO(struct page *page, int v, int ret), + + TP_ARGS(page, v, ret), + + TP_STRUCT__entry( + __field(unsigned long, pfn) + __field(unsigned long, flags) + __field(int, count) + __field(int, mapcount) + __field(void *, mapping) + __field(int, mt) + __field(int, val) + __field(int, ret) + ), + + TP_fast_assign( + __entry->pfn = page_to_pfn(page); + __entry->flags = page->flags; + __entry->count = page_ref_count(page); + __entry->mapcount = page_mapcount(page); + __entry->mapping = page->mapping; + __entry->mt = get_pageblock_migratetype(page); + __entry->val = v; + __entry->ret = ret; + ), + + TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d ret=%d", + __entry->pfn, + show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)), + __entry->count, + __entry->mapcount, __entry->mapping, __entry->mt, + __entry->val, __entry->ret) +); + +DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_and_test, + + TP_PROTO(struct page *page, int v, int ret), + + TP_ARGS(page, v, ret) +); + +DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_and_return, + + TP_PROTO(struct page *page, int v, int ret), + + TP_ARGS(page, v, ret) +); + +DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_unless, + + TP_PROTO(struct page *page, int v, int ret), + + TP_ARGS(page, v, ret) +); + +DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_freeze, + + TP_PROTO(struct page *page, int v, int ret), + + TP_ARGS(page, v, ret) +); + +DEFINE_EVENT(page_ref_mod_template, page_ref_unfreeze, + + TP_PROTO(struct page *page, int v), + + TP_ARGS(page, v) +); + +#endif /* _TRACE_PAGE_COUNT_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 284244ebf..19e50300c 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ 
-38,6 +38,28 @@ DEFINE_EVENT(cpu, cpu_idle, TP_ARGS(state, cpu_id) ); +TRACE_EVENT(powernv_throttle, + + TP_PROTO(int chip_id, const char *reason, int pmax), + + TP_ARGS(chip_id, reason, pmax), + + TP_STRUCT__entry( + __field(int, chip_id) + __string(reason, reason) + __field(int, pmax) + ), + + TP_fast_assign( + __entry->chip_id = chip_id; + __assign_str(reason, reason); + __entry->pmax = pmax; + ), + + TP_printk("Chip %d Pmax %d %s", __entry->chip_id, + __entry->pmax, __get_str(reason)) +); + TRACE_EVENT(pstate_sample, TP_PROTO(u32 core_busy, diff --git a/include/trace/events/sunvnet.h b/include/trace/events/sunvnet.h new file mode 100644 index 000000000..eb080b267 --- /dev/null +++ b/include/trace/events/sunvnet.h @@ -0,0 +1,139 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sunvnet + +#if !defined(_TRACE_SUNVNET_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SUNVNET_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(vnet_rx_one, + + TP_PROTO(int lsid, int rsid, int index, int needs_ack), + + TP_ARGS(lsid, rsid, index, needs_ack), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, index) + __field(int, needs_ack) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->index = index; + __entry->needs_ack = needs_ack; + ), + + TP_printk("(%x:%x) walk_rx_one index %d; needs_ack %d", + __entry->lsid, __entry->rsid, + __entry->index, __entry->needs_ack) +); + +DECLARE_EVENT_CLASS(vnet_tx_stopped_ack_template, + + TP_PROTO(int lsid, int rsid, int ack_end, int npkts), + + TP_ARGS(lsid, rsid, ack_end, npkts), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, ack_end) + __field(int, npkts) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->ack_end = ack_end; + __entry->npkts = npkts; + ), + + TP_printk("(%x:%x) stopped ack for %d; npkts %d", + __entry->lsid, __entry->rsid, + __entry->ack_end, __entry->npkts) +); +DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_send_stopped_ack, + TP_PROTO(int lsid, int rsid, int ack_end, int npkts), + TP_ARGS(lsid, rsid, ack_end, npkts)); +DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_defer_stopped_ack, + TP_PROTO(int lsid, int rsid, int ack_end, int npkts), + TP_ARGS(lsid, rsid, ack_end, npkts)); +DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_pending_stopped_ack, + TP_PROTO(int lsid, int rsid, int ack_end, int npkts), + TP_ARGS(lsid, rsid, ack_end, npkts)); + +TRACE_EVENT(vnet_rx_stopped_ack, + + TP_PROTO(int lsid, int rsid, int end), + + TP_ARGS(lsid, rsid, end), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, end) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->end = end; + ), + + TP_printk("(%x:%x) stopped ack for index %d", + __entry->lsid, __entry->rsid, __entry->end) +); + +TRACE_EVENT(vnet_tx_trigger, + + TP_PROTO(int lsid, int rsid, int start, int err), + + TP_ARGS(lsid, rsid, start, err), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, start) + __field(int, err) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->start = start; + __entry->err = err; + ), + + TP_printk("(%x:%x) Tx trigger for %d sent with err %d %s", + __entry->lsid, __entry->rsid, __entry->start, + __entry->err, __entry->err > 0 ? 
"(ok)" : " ") +); + +TRACE_EVENT(vnet_skip_tx_trigger, + + TP_PROTO(int lsid, int rsid, int last), + + TP_ARGS(lsid, rsid, last), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, last) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->last = last; + ), + + TP_printk("(%x:%x) Skip Tx trigger. Last trigger sent was %d", + __entry->lsid, __entry->rsid, __entry->last) +); +#endif /* _TRACE_SOCK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h index 5738bb3e2..2b4a8ff72 100644 --- a/include/trace/events/thermal.h +++ b/include/trace/events/thermal.h @@ -8,6 +8,18 @@ #include <linux/thermal.h> #include <linux/tracepoint.h> +TRACE_DEFINE_ENUM(THERMAL_TRIP_CRITICAL); +TRACE_DEFINE_ENUM(THERMAL_TRIP_HOT); +TRACE_DEFINE_ENUM(THERMAL_TRIP_PASSIVE); +TRACE_DEFINE_ENUM(THERMAL_TRIP_ACTIVE); + +#define show_tzt_type(type) \ + __print_symbolic(type, \ + { THERMAL_TRIP_CRITICAL, "CRITICAL"}, \ + { THERMAL_TRIP_HOT, "HOT"}, \ + { THERMAL_TRIP_PASSIVE, "PASSIVE"}, \ + { THERMAL_TRIP_ACTIVE, "ACTIVE"}) + TRACE_EVENT(thermal_temperature, TP_PROTO(struct thermal_zone_device *tz), @@ -73,9 +85,9 @@ TRACE_EVENT(thermal_zone_trip, __entry->trip_type = trip_type; ), - TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%d", + TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%s", __get_str(thermal_zone), __entry->id, __entry->trip, - __entry->trip_type) + show_tzt_type(__entry->trip_type)) ); TRACE_EVENT(thermal_power_cpu_get_power, diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 073b9ac24..51440131d 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -328,23 +328,49 @@ TRACE_EVENT(itimer_expire, ); #ifdef CONFIG_NO_HZ_COMMON + +#define TICK_DEP_NAMES \ + tick_dep_name(NONE) \ + tick_dep_name(POSIX_TIMER) \ + tick_dep_name(PERF_EVENTS) \ + tick_dep_name(SCHED) \ + tick_dep_name_end(CLOCK_UNSTABLE) + +#undef tick_dep_name +#undef tick_dep_name_end + +#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); +#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); + +TICK_DEP_NAMES + +#undef tick_dep_name +#undef tick_dep_name_end + +#define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep }, +#define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep } + +#define show_tick_dep_name(val) \ + __print_symbolic(val, TICK_DEP_NAMES) + TRACE_EVENT(tick_stop, - TP_PROTO(int success, char *error_msg), + TP_PROTO(int success, int dependency), - TP_ARGS(success, error_msg), + TP_ARGS(success, dependency), TP_STRUCT__entry( __field( int , success ) - __string( msg, error_msg ) + __field( int , dependency ) ), TP_fast_assign( __entry->success = success; - __assign_str(msg, error_msg); + __entry->dependency = dependency; ), - TP_printk("success=%s msg=%s", __entry->success ? 
"yes" : "no", __get_str(msg)) + TP_printk("success=%d dependency=%s", __entry->success, \ + show_tick_dep_name(__entry->dependency)) ); #endif diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h index bc8815f45..9d14b1992 100644 --- a/include/trace/events/tlb.h +++ b/include/trace/events/tlb.h @@ -34,13 +34,11 @@ TLB_FLUSH_REASON #define EM(a,b) { a, b }, #define EMe(a,b) { a, b } -TRACE_EVENT_CONDITION(tlb_flush, +TRACE_EVENT(tlb_flush, TP_PROTO(int reason, unsigned long pages), TP_ARGS(reason, pages), - TP_CONDITION(cpu_online(smp_processor_id())), - TP_STRUCT__entry( __field( int, reason) __field(unsigned long, pages) diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index 31763dd8d..0101ef37f 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -8,7 +8,7 @@ #include <linux/tracepoint.h> #include <linux/mm.h> #include <linux/memcontrol.h> -#include <trace/events/gfpflags.h> +#include <trace/events/mmflags.h> #define RECLAIM_WB_ANON 0x0001u #define RECLAIM_WB_FILE 0x0002u diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index fff846b51..73614ce1d 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -134,58 +134,28 @@ DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode, #ifdef CREATE_TRACE_POINTS #ifdef CONFIG_CGROUP_WRITEBACK -static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb) +static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb) { - return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1; + return wb->memcg_css->cgroup->kn->ino; } -static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb) -{ - struct cgroup *cgrp = wb->memcg_css->cgroup; - char *path; - - path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1); - WARN_ON_ONCE(path != buf); -} - -static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc) -{ - if (wbc->wb) - return __trace_wb_cgroup_size(wbc->wb); - else - return 2; -} - -static inline void __trace_wbc_assign_cgroup(char *buf, - struct writeback_control *wbc) +static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc) { if (wbc->wb) - __trace_wb_assign_cgroup(buf, wbc->wb); + return __trace_wb_assign_cgroup(wbc->wb); else - strcpy(buf, "/"); + return -1U; } - #else /* CONFIG_CGROUP_WRITEBACK */ -static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb) -{ - return 2; -} - -static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb) -{ - strcpy(buf, "/"); -} - -static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc) +static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb) { - return 2; + return -1U; } -static inline void __trace_wbc_assign_cgroup(char *buf, - struct writeback_control *wbc) +static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc) { - strcpy(buf, "/"); + return -1U; } #endif /* CONFIG_CGROUP_WRITEBACK */ @@ -201,7 +171,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template, __array(char, name, 32) __field(unsigned long, ino) __field(int, sync_mode) - __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc)) + __field(unsigned int, cgroup_ino) ), TP_fast_assign( @@ -209,14 +179,14 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template, dev_name(inode_to_bdi(inode)->dev), 32); __entry->ino = inode->i_ino; __entry->sync_mode = wbc->sync_mode; - __trace_wbc_assign_cgroup(__get_str(cgroup), wbc); + 
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); ), - TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s", + TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u", __entry->name, __entry->ino, __entry->sync_mode, - __get_str(cgroup) + __entry->cgroup_ino ) ); @@ -246,7 +216,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, __field(int, range_cyclic) __field(int, for_background) __field(int, reason) - __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) + __field(unsigned int, cgroup_ino) ), TP_fast_assign( strncpy(__entry->name, @@ -258,10 +228,10 @@ DECLARE_EVENT_CLASS(writeback_work_class, __entry->range_cyclic = work->range_cyclic; __entry->for_background = work->for_background; __entry->reason = work->reason; - __trace_wb_assign_cgroup(__get_str(cgroup), wb); + __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d " - "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s", + "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u", __entry->name, MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev), __entry->nr_pages, @@ -270,7 +240,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, __entry->range_cyclic, __entry->for_background, __print_symbolic(__entry->reason, WB_WORK_REASON), - __get_str(cgroup) + __entry->cgroup_ino ) ); #define DEFINE_WRITEBACK_WORK_EVENT(name) \ @@ -300,15 +270,15 @@ DECLARE_EVENT_CLASS(writeback_class, TP_ARGS(wb), TP_STRUCT__entry( __array(char, name, 32) - __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) + __field(unsigned int, cgroup_ino) ), TP_fast_assign( strncpy(__entry->name, dev_name(wb->bdi->dev), 32); - __trace_wb_assign_cgroup(__get_str(cgroup), wb); + __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), - TP_printk("bdi %s: cgroup=%s", + TP_printk("bdi %s: cgroup_ino=%u", __entry->name, - __get_str(cgroup) + __entry->cgroup_ino ) ); #define DEFINE_WRITEBACK_EVENT(name) \ @@ -347,7 +317,7 @@ DECLARE_EVENT_CLASS(wbc_class, __field(int, range_cyclic) __field(long, range_start) __field(long, range_end) - __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc)) + __field(unsigned int, cgroup_ino) ), TP_fast_assign( @@ -361,12 +331,12 @@ DECLARE_EVENT_CLASS(wbc_class, __entry->range_cyclic = wbc->range_cyclic; __entry->range_start = (long)wbc->range_start; __entry->range_end = (long)wbc->range_end; - __trace_wbc_assign_cgroup(__get_str(cgroup), wbc); + __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); ), TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " "bgrd=%d reclm=%d cyclic=%d " - "start=0x%lx end=0x%lx cgroup=%s", + "start=0x%lx end=0x%lx cgroup_ino=%u", __entry->name, __entry->nr_to_write, __entry->pages_skipped, @@ -377,7 +347,7 @@ DECLARE_EVENT_CLASS(wbc_class, __entry->range_cyclic, __entry->range_start, __entry->range_end, - __get_str(cgroup) + __entry->cgroup_ino ) ) @@ -398,7 +368,7 @@ TRACE_EVENT(writeback_queue_io, __field(long, age) __field(int, moved) __field(int, reason) - __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) + __field(unsigned int, cgroup_ino) ), TP_fast_assign( unsigned long *older_than_this = work->older_than_this; @@ -408,15 +378,15 @@ TRACE_EVENT(writeback_queue_io, (jiffies - *older_than_this) * 1000 / HZ : -1; __entry->moved = moved; __entry->reason = work->reason; - __trace_wb_assign_cgroup(__get_str(cgroup), wb); + __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), - TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s", + TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s 
cgroup_ino=%u", __entry->name, __entry->older, /* older_than_this in jiffies */ __entry->age, /* older_than_this in relative milliseconds */ __entry->moved, __print_symbolic(__entry->reason, WB_WORK_REASON), - __get_str(cgroup) + __entry->cgroup_ino ) ); @@ -484,7 +454,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, __field(unsigned long, dirty_ratelimit) __field(unsigned long, task_ratelimit) __field(unsigned long, balanced_dirty_ratelimit) - __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) + __field(unsigned int, cgroup_ino) ), TP_fast_assign( @@ -496,13 +466,13 @@ TRACE_EVENT(bdi_dirty_ratelimit, __entry->task_ratelimit = KBps(task_ratelimit); __entry->balanced_dirty_ratelimit = KBps(wb->balanced_dirty_ratelimit); - __trace_wb_assign_cgroup(__get_str(cgroup), wb); + __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: " "write_bw=%lu awrite_bw=%lu dirty_rate=%lu " "dirty_ratelimit=%lu task_ratelimit=%lu " - "balanced_dirty_ratelimit=%lu cgroup=%s", + "balanced_dirty_ratelimit=%lu cgroup_ino=%u", __entry->bdi, __entry->write_bw, /* write bandwidth */ __entry->avg_write_bw, /* avg write bandwidth */ @@ -510,7 +480,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, __entry->dirty_ratelimit, /* base ratelimit */ __entry->task_ratelimit, /* ratelimit with position control */ __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */ - __get_str(cgroup) + __entry->cgroup_ino ) ); @@ -548,7 +518,7 @@ TRACE_EVENT(balance_dirty_pages, __field( long, pause) __field(unsigned long, period) __field( long, think) - __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) + __field(unsigned int, cgroup_ino) ), TP_fast_assign( @@ -571,7 +541,7 @@ TRACE_EVENT(balance_dirty_pages, __entry->period = period * 1000 / HZ; __entry->pause = pause * 1000 / HZ; __entry->paused = (jiffies - start_time) * 1000 / HZ; - __trace_wb_assign_cgroup(__get_str(cgroup), wb); + __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), @@ -580,7 +550,7 @@ TRACE_EVENT(balance_dirty_pages, "bdi_setpoint=%lu bdi_dirty=%lu " "dirty_ratelimit=%lu task_ratelimit=%lu " "dirtied=%u dirtied_pause=%u " - "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s", + "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u", __entry->bdi, __entry->limit, __entry->setpoint, @@ -595,7 +565,7 @@ TRACE_EVENT(balance_dirty_pages, __entry->pause, /* ms */ __entry->period, /* ms */ __entry->think, /* ms */ - __get_str(cgroup) + __entry->cgroup_ino ) ); @@ -609,8 +579,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue, __field(unsigned long, ino) __field(unsigned long, state) __field(unsigned long, dirtied_when) - __dynamic_array(char, cgroup, - __trace_wb_cgroup_size(inode_to_wb(inode))) + __field(unsigned int, cgroup_ino) ), TP_fast_assign( @@ -619,16 +588,16 @@ TRACE_EVENT(writeback_sb_inodes_requeue, __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->dirtied_when = inode->dirtied_when; - __trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode)); + __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode)); ), - TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s", + TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u", __entry->name, __entry->ino, show_inode_state(__entry->state), __entry->dirtied_when, (jiffies - __entry->dirtied_when) / HZ, - __get_str(cgroup) + __entry->cgroup_ino ) ); @@ -684,7 +653,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, __field(unsigned long, writeback_index) __field(long, nr_to_write) __field(unsigned long, wrote) - 
__dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc)) + __field(unsigned int, cgroup_ino) ), TP_fast_assign( @@ -696,11 +665,11 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, __entry->writeback_index = inode->i_mapping->writeback_index; __entry->nr_to_write = nr_to_write; __entry->wrote = nr_to_write - wbc->nr_to_write; - __trace_wbc_assign_cgroup(__get_str(cgroup), wbc); + __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); ), TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " - "index=%lu to_write=%ld wrote=%lu cgroup=%s", + "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u", __entry->name, __entry->ino, show_inode_state(__entry->state), @@ -709,7 +678,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, __entry->writeback_index, __entry->nr_to_write, __entry->wrote, - __get_str(cgroup) + __entry->cgroup_ino ) );
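
The writeback changes at the end of the diff drop the dynamically sized cgroup path from every trace entry and record a fixed unsigned int cgroup_ino instead, taken from wb->memcg_css->cgroup->kn->ino (or -1U when no bdi_writeback/cgroup is available). The entries become smaller and fixed-size, but turning the number back into a cgroup path is now a post-processing job for userspace. A rough sketch of that step follows; it assumes the relevant cgroup hierarchy is mounted at /sys/fs/cgroup and that the kernfs inode number reported by the tracepoint is the same one stat() returns on that mount -- adjust the path and matching for the system actually being traced.

#define _XOPEN_SOURCE 500	/* for nftw() */
#include <ftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

static unsigned long target_ino;

/* Stop the walk as soon as a directory with the wanted inode is found. */
static int match_ino(const char *path, const struct stat *sb,
		     int typeflag, struct FTW *ftwbuf)
{
	if (typeflag == FTW_D && sb->st_ino == target_ino) {
		printf("cgroup_ino=%lu -> %s\n", target_ino, path);
		return 1;
	}
	return 0;
}

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <cgroup_ino>\n", argv[0]);
		return 1;
	}
	target_ino = strtoul(argv[1], NULL, 0);

	/* Assumed mount point of the cgroup hierarchy used by writeback. */
	if (nftw("/sys/fs/cgroup", match_ino, 16, FTW_PHYS) != 1)
		fprintf(stderr, "cgroup_ino %lu not found under /sys/fs/cgroup\n",
			target_ino);
	return 0;
}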