Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/define_trace.h       |   2
-rw-r--r--  include/trace/events/btrfs.h       | 113
-rw-r--r--  include/trace/events/compaction.h  |  72
-rw-r--r--  include/trace/events/f2fs.h        |  69
-rw-r--r--  include/trace/events/filelock.h    |  38
-rw-r--r--  include/trace/events/gfpflags.h    |   5
-rw-r--r--  include/trace/events/gpio.h        |   4
-rw-r--r--  include/trace/events/nilfs2.h      | 224
-rw-r--r--  include/trace/events/sched.h       |  22
-rw-r--r--  include/trace/events/thermal.h     |  53
-rw-r--r--  include/trace/events/v4l2.h        |  63
-rw-r--r--  include/trace/events/vb2.h         |  65
-rw-r--r--  include/trace/perf.h               | 258
-rw-r--r--  include/trace/trace_events.h       | 258
14 files changed, 919 insertions, 327 deletions
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 09b388010..2d8639ea6 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -86,7 +86,7 @@
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args)
-#ifdef CONFIG_EVENT_TRACING
+#ifdef TRACEPOINTS_ENABLED
#include <trace/trace_events.h>
#include <trace/perf.h>
#endif
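
For context, <trace/define_trace.h> is the final include in every per-subsystem trace header; per the hunk above, when TRACEPOINTS_ENABLED is defined (previously CONFIG_EVENT_TRACING) it pulls in <trace/trace_events.h> and <trace/perf.h>, which re-read the subsystem header and expand its TRACE_EVENT() macros into real code. A minimal sketch of that consumer pattern, using placeholder "sample" names that are not part of this diff:

	/* sketch only: canonical trace header layout, placeholder names */
	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM sample

	#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_SAMPLE_H

	#include <linux/tracepoint.h>

	TRACE_EVENT(sample_event,
		TP_PROTO(int value),
		TP_ARGS(value),
		TP_STRUCT__entry(__field(int, value)),
		TP_fast_assign(__entry->value = value;),
		TP_printk("value=%d", __entry->value)
	);

	#endif /* _TRACE_SAMPLE_H */

	/* This part must be outside protection */
	#include <trace/define_trace.h>

Exactly one .c file defines CREATE_TRACE_POINTS before including such a header so that the tracepoint instances themselves get emitted.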
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 0b73af9be..b4473dab3 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1117,6 +1117,119 @@ DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
TP_ARGS(wq)
);
+DECLARE_EVENT_CLASS(btrfs__qgroup_data_map,
+
+ TP_PROTO(struct inode *inode, u64 free_reserved),
+
+ TP_ARGS(inode, free_reserved),
+
+ TP_STRUCT__entry(
+ __field( u64, rootid )
+ __field( unsigned long, ino )
+ __field( u64, free_reserved )
+ ),
+
+ TP_fast_assign(
+ __entry->rootid = BTRFS_I(inode)->root->objectid;
+ __entry->ino = inode->i_ino;
+ __entry->free_reserved = free_reserved;
+ ),
+
+ TP_printk("rootid=%llu, ino=%lu, free_reserved=%llu",
+ __entry->rootid, __entry->ino, __entry->free_reserved)
+);
+
+DEFINE_EVENT(btrfs__qgroup_data_map, btrfs_qgroup_init_data_rsv_map,
+
+ TP_PROTO(struct inode *inode, u64 free_reserved),
+
+ TP_ARGS(inode, free_reserved)
+);
+
+DEFINE_EVENT(btrfs__qgroup_data_map, btrfs_qgroup_free_data_rsv_map,
+
+ TP_PROTO(struct inode *inode, u64 free_reserved),
+
+ TP_ARGS(inode, free_reserved)
+);
+
+#define BTRFS_QGROUP_OPERATIONS \
+ { QGROUP_RESERVE, "reserve" }, \
+ { QGROUP_RELEASE, "release" }, \
+ { QGROUP_FREE, "free" }
+
+DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
+
+ TP_PROTO(struct inode *inode, u64 start, u64 len, u64 reserved, int op),
+
+ TP_ARGS(inode, start, len, reserved, op),
+
+ TP_STRUCT__entry(
+ __field( u64, rootid )
+ __field( unsigned long, ino )
+ __field( u64, start )
+ __field( u64, len )
+ __field( u64, reserved )
+ __field( int, op )
+ ),
+
+ TP_fast_assign(
+ __entry->rootid = BTRFS_I(inode)->root->objectid;
+ __entry->ino = inode->i_ino;
+ __entry->start = start;
+ __entry->len = len;
+ __entry->reserved = reserved;
+ __entry->op = op;
+ ),
+
+ TP_printk("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s",
+ __entry->rootid, __entry->ino, __entry->start, __entry->len,
+ __entry->reserved,
+ __print_flags((unsigned long)__entry->op, "",
+ BTRFS_QGROUP_OPERATIONS)
+ )
+);
+
+DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_reserve_data,
+
+ TP_PROTO(struct inode *inode, u64 start, u64 len, u64 reserved, int op),
+
+ TP_ARGS(inode, start, len, reserved, op)
+);
+
+DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_release_data,
+
+ TP_PROTO(struct inode *inode, u64 start, u64 len, u64 reserved, int op),
+
+ TP_ARGS(inode, start, len, reserved, op)
+);
+
+DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref,
+
+ TP_PROTO(u64 ref_root, u64 reserved),
+
+ TP_ARGS(ref_root, reserved),
+
+ TP_STRUCT__entry(
+ __field( u64, ref_root )
+ __field( u64, reserved )
+ ),
+
+ TP_fast_assign(
+ __entry->ref_root = ref_root;
+ __entry->reserved = reserved;
+ ),
+
+ TP_printk("root=%llu, reserved=%llu, op=free",
+ __entry->ref_root, __entry->reserved)
+);
+
+DEFINE_EVENT(btrfs__qgroup_delayed_ref, btrfs_qgroup_free_delayed_ref,
+
+ TP_PROTO(u64 ref_root, u64 reserved),
+
+ TP_ARGS(ref_root, reserved)
+);
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
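
The new qgroup events follow the usual convention that a TRACE_EVENT()/DEFINE_EVENT() named <name> produces a trace_<name>() inline for callers. The actual call sites live in fs/btrfs and are not part of this diff; a purely illustrative invocation matching the TP_PROTO definitions above would be:

	/* sketch only: hypothetical callers, arguments per the TP_PROTO above */
	trace_btrfs_qgroup_reserve_data(inode, start, len, reserved, QGROUP_RESERVE);
	trace_btrfs_qgroup_free_delayed_ref(ref_root, reserved);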
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index 9a6a3fe0f..c92d1e1cb 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -9,6 +9,62 @@
#include <linux/tracepoint.h>
#include <trace/events/gfpflags.h>
+#define COMPACTION_STATUS \
+ EM( COMPACT_DEFERRED, "deferred") \
+ EM( COMPACT_SKIPPED, "skipped") \
+ EM( COMPACT_CONTINUE, "continue") \
+ EM( COMPACT_PARTIAL, "partial") \
+ EM( COMPACT_COMPLETE, "complete") \
+ EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \
+ EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \
+ EMe(COMPACT_CONTENDED, "contended")
+
+#ifdef CONFIG_ZONE_DMA
+#define IFDEF_ZONE_DMA(X) X
+#else
+#define IFDEF_ZONE_DMA(X)
+#endif
+
+#ifdef CONFIG_ZONE_DMA32
+#define IFDEF_ZONE_DMA32(X) X
+#else
+#define IFDEF_ZONE_DMA32(X)
+#endif
+
+#ifdef CONFIG_HIGHMEM
+#define IFDEF_ZONE_HIGHMEM(X) X
+#else
+#define IFDEF_ZONE_HIGHMEM(X)
+#endif
+
+#define ZONE_TYPE \
+ IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \
+ IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \
+ EM (ZONE_NORMAL, "Normal") \
+ IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM,"HighMem")) \
+ EMe(ZONE_MOVABLE,"Movable")
+
+/*
+ * First define the enums in the above macros to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+COMPACTION_STATUS
+ZONE_TYPE
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
TP_PROTO(
@@ -161,7 +217,7 @@ TRACE_EVENT(mm_compaction_end,
__entry->free_pfn,
__entry->zone_end,
__entry->sync ? "sync" : "async",
- compaction_status_string[__entry->status])
+ __print_symbolic(__entry->status, COMPACTION_STATUS))
);
TRACE_EVENT(mm_compaction_try_to_compact_pages,
@@ -201,23 +257,23 @@ DECLARE_EVENT_CLASS(mm_compaction_suitable_template,
TP_STRUCT__entry(
__field(int, nid)
- __field(char *, name)
+ __field(enum zone_type, idx)
__field(int, order)
__field(int, ret)
),
TP_fast_assign(
__entry->nid = zone_to_nid(zone);
- __entry->name = (char *)zone->name;
+ __entry->idx = zone_idx(zone);
__entry->order = order;
__entry->ret = ret;
),
TP_printk("node=%d zone=%-8s order=%d ret=%s",
__entry->nid,
- __entry->name,
+ __print_symbolic(__entry->idx, ZONE_TYPE),
__entry->order,
- compaction_status_string[__entry->ret])
+ __print_symbolic(__entry->ret, COMPACTION_STATUS))
);
DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_finished,
@@ -247,7 +303,7 @@ DECLARE_EVENT_CLASS(mm_compaction_defer_template,
TP_STRUCT__entry(
__field(int, nid)
- __field(char *, name)
+ __field(enum zone_type, idx)
__field(int, order)
__field(unsigned int, considered)
__field(unsigned int, defer_shift)
@@ -256,7 +312,7 @@ DECLARE_EVENT_CLASS(mm_compaction_defer_template,
TP_fast_assign(
__entry->nid = zone_to_nid(zone);
- __entry->name = (char *)zone->name;
+ __entry->idx = zone_idx(zone);
__entry->order = order;
__entry->considered = zone->compact_considered;
__entry->defer_shift = zone->compact_defer_shift;
@@ -265,7 +321,7 @@ DECLARE_EVENT_CLASS(mm_compaction_defer_template,
TP_printk("node=%d zone=%-8s order=%d order_failed=%d consider=%u limit=%lu",
__entry->nid,
- __entry->name,
+ __print_symbolic(__entry->idx, ZONE_TYPE),
__entry->order,
__entry->order_failed,
__entry->considered,
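
The EM()/EMe() trick in the compaction hunk above is worth spelling out: the list macros are expanded twice with different definitions, once to register the enum values for userspace and once to build the value/string table consumed by __print_symbolic(). A sketch of the two expansions implied by that hunk:

	/* pass 1: EM/EMe defined as TRACE_DEFINE_ENUM, so the lists become */
	TRACE_DEFINE_ENUM(COMPACT_DEFERRED);
	/* ... one line per COMPACTION_STATUS / ZONE_TYPE entry ... */

	/* pass 2: EM/EMe redefined as {value, "string"} pairs, so */
	__print_symbolic(__entry->status, COMPACTION_STATUS)
	/* expands to */
	__print_symbolic(__entry->status,
		{COMPACT_DEFERRED, "deferred"}, {COMPACT_SKIPPED, "skipped"},
		/* ... */ {COMPACT_CONTENDED, "contended"})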
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index a01946514..00b4a6308 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -514,6 +514,34 @@ TRACE_EVENT(f2fs_map_blocks,
__entry->ret)
);
+TRACE_EVENT(f2fs_background_gc,
+
+ TP_PROTO(struct super_block *sb, long wait_ms,
+ unsigned int prefree, unsigned int free),
+
+ TP_ARGS(sb, wait_ms, prefree, free),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(long, wait_ms)
+ __field(unsigned int, prefree)
+ __field(unsigned int, free)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->wait_ms = wait_ms;
+ __entry->prefree = prefree;
+ __entry->free = free;
+ ),
+
+ TP_printk("dev = (%d,%d), wait_ms = %ld, prefree = %u, free = %u",
+ show_dev(__entry),
+ __entry->wait_ms,
+ __entry->prefree,
+ __entry->free)
+);
+
TRACE_EVENT(f2fs_get_victim,
TP_PROTO(struct super_block *sb, int type, int gc_type,
@@ -1000,6 +1028,32 @@ TRACE_EVENT(f2fs_writepages,
__entry->for_sync)
);
+TRACE_EVENT(f2fs_readpages,
+
+ TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage),
+
+ TP_ARGS(inode, page, nrpage),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(pgoff_t, start)
+ __field(unsigned int, nrpage)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->start = page->index;
+ __entry->nrpage = nrpage;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, start = %lu nrpage = %u",
+ show_dev_ino(__entry),
+ (unsigned long)__entry->start,
+ __entry->nrpage)
+);
+
TRACE_EVENT(f2fs_write_checkpoint,
TP_PROTO(struct super_block *sb, int reason, char *msg),
@@ -1132,17 +1186,19 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
__entry->len)
);
-TRACE_EVENT(f2fs_update_extent_tree,
+TRACE_EVENT(f2fs_update_extent_tree_range,
- TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr),
+ TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr,
+ unsigned int len),
- TP_ARGS(inode, pgofs, blkaddr),
+ TP_ARGS(inode, pgofs, blkaddr, len),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
__field(unsigned int, pgofs)
__field(u32, blk)
+ __field(unsigned int, len)
),
TP_fast_assign(
@@ -1150,12 +1206,15 @@ TRACE_EVENT(f2fs_update_extent_tree,
__entry->ino = inode->i_ino;
__entry->pgofs = pgofs;
__entry->blk = blkaddr;
+ __entry->len = len;
),
- TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, blkaddr = %u",
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+ "blkaddr = %u, len = %u",
show_dev_ino(__entry),
__entry->pgofs,
- __entry->blk)
+ __entry->blk,
+ __entry->len)
);
TRACE_EVENT(f2fs_shrink_extent_tree,
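
As with the other events in this file, the new definitions generate trace_f2fs_background_gc() and trace_f2fs_readpages() hooks; the real call sites are in fs/f2fs and not shown here. An illustrative sketch matching the prototypes above (variable names are placeholders):

	/* sketch only: hypothetical callers */
	trace_f2fs_background_gc(sb, wait_ms, prefree, free);
	trace_f2fs_readpages(inode, page, nr_pages);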
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index a0d008070..c72f2dc01 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -81,15 +81,47 @@ DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, st
DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl));
-DEFINE_EVENT(filelock_lease, generic_add_lease, TP_PROTO(struct inode *inode, struct file_lock *fl),
- TP_ARGS(inode, fl));
-
DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl));
DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl));
+TRACE_EVENT(generic_add_lease,
+ TP_PROTO(struct inode *inode, struct file_lock *fl),
+
+ TP_ARGS(inode, fl),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(int, wcount)
+ __field(int, dcount)
+ __field(int, icount)
+ __field(dev_t, s_dev)
+ __field(fl_owner_t, fl_owner)
+ __field(unsigned int, fl_flags)
+ __field(unsigned char, fl_type)
+ ),
+
+ TP_fast_assign(
+ __entry->s_dev = inode->i_sb->s_dev;
+ __entry->i_ino = inode->i_ino;
+ __entry->wcount = atomic_read(&inode->i_writecount);
+ __entry->dcount = d_count(fl->fl_file->f_path.dentry);
+ __entry->icount = atomic_read(&inode->i_count);
+ __entry->fl_owner = fl ? fl->fl_owner : NULL;
+ __entry->fl_flags = fl ? fl->fl_flags : 0;
+ __entry->fl_type = fl ? fl->fl_type : 0;
+ ),
+
+ TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d dcount=%d icount=%d fl_owner=0x%p fl_flags=%s fl_type=%s",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
+ __entry->i_ino, __entry->wcount, __entry->dcount,
+ __entry->icount, __entry->fl_owner,
+ show_fl_flags(__entry->fl_flags),
+ show_fl_type(__entry->fl_type))
+);
+
#endif /* _TRACE_FILELOCK_H */
/* This part must be outside protection */
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h
index d6fd8e5b1..dde6bf092 100644
--- a/include/trace/events/gfpflags.h
+++ b/include/trace/events/gfpflags.h
@@ -20,7 +20,7 @@
{(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
{(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
{(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \
- {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \
+ {(unsigned long)__GFP_ATOMIC, "GFP_ATOMIC"}, \
{(unsigned long)__GFP_IO, "GFP_IO"}, \
{(unsigned long)__GFP_COLD, "GFP_COLD"}, \
{(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \
@@ -36,7 +36,8 @@
{(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
{(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \
{(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \
- {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \
+ {(unsigned long)__GFP_DIRECT_RECLAIM, "GFP_DIRECT_RECLAIM"}, \
+ {(unsigned long)__GFP_KSWAPD_RECLAIM, "GFP_KSWAPD_RECLAIM"}, \
{(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \
) : "GFP_NOWAIT"
diff --git a/include/trace/events/gpio.h b/include/trace/events/gpio.h
index 927a8ad9e..2da73b92d 100644
--- a/include/trace/events/gpio.h
+++ b/include/trace/events/gpio.h
@@ -1,6 +1,10 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gpio
+#ifndef CONFIG_TRACING_EVENTS_GPIO
+#define NOTRACE
+#endif
+
#if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GPIO_H
diff --git a/include/trace/events/nilfs2.h b/include/trace/events/nilfs2.h
new file mode 100644
index 000000000..c7805818f
--- /dev/null
+++ b/include/trace/events/nilfs2.h
@@ -0,0 +1,224 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nilfs2
+
+#if !defined(_TRACE_NILFS2_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NILFS2_H
+
+#include <linux/tracepoint.h>
+
+struct nilfs_sc_info;
+
+#define show_collection_stage(type) \
+ __print_symbolic(type, \
+ { NILFS_ST_INIT, "ST_INIT" }, \
+ { NILFS_ST_GC, "ST_GC" }, \
+ { NILFS_ST_FILE, "ST_FILE" }, \
+ { NILFS_ST_IFILE, "ST_IFILE" }, \
+ { NILFS_ST_CPFILE, "ST_CPFILE" }, \
+ { NILFS_ST_SUFILE, "ST_SUFILE" }, \
+ { NILFS_ST_DAT, "ST_DAT" }, \
+ { NILFS_ST_SR, "ST_SR" }, \
+ { NILFS_ST_DSYNC, "ST_DSYNC" }, \
+ { NILFS_ST_DONE, "ST_DONE"})
+
+TRACE_EVENT(nilfs2_collection_stage_transition,
+
+ TP_PROTO(struct nilfs_sc_info *sci),
+
+ TP_ARGS(sci),
+
+ TP_STRUCT__entry(
+ __field(void *, sci)
+ __field(int, stage)
+ ),
+
+ TP_fast_assign(
+ __entry->sci = sci;
+ __entry->stage = sci->sc_stage.scnt;
+ ),
+
+ TP_printk("sci = %p stage = %s",
+ __entry->sci,
+ show_collection_stage(__entry->stage))
+);
+
+#ifndef TRACE_HEADER_MULTI_READ
+enum nilfs2_transaction_transition_state {
+ TRACE_NILFS2_TRANSACTION_BEGIN,
+ TRACE_NILFS2_TRANSACTION_COMMIT,
+ TRACE_NILFS2_TRANSACTION_ABORT,
+ TRACE_NILFS2_TRANSACTION_TRYLOCK,
+ TRACE_NILFS2_TRANSACTION_LOCK,
+ TRACE_NILFS2_TRANSACTION_UNLOCK,
+};
+#endif
+
+#define show_transaction_state(type) \
+ __print_symbolic(type, \
+ { TRACE_NILFS2_TRANSACTION_BEGIN, "BEGIN" }, \
+ { TRACE_NILFS2_TRANSACTION_COMMIT, "COMMIT" }, \
+ { TRACE_NILFS2_TRANSACTION_ABORT, "ABORT" }, \
+ { TRACE_NILFS2_TRANSACTION_TRYLOCK, "TRYLOCK" }, \
+ { TRACE_NILFS2_TRANSACTION_LOCK, "LOCK" }, \
+ { TRACE_NILFS2_TRANSACTION_UNLOCK, "UNLOCK" })
+
+TRACE_EVENT(nilfs2_transaction_transition,
+ TP_PROTO(struct super_block *sb,
+ struct nilfs_transaction_info *ti,
+ int count,
+ unsigned int flags,
+ enum nilfs2_transaction_transition_state state),
+
+ TP_ARGS(sb, ti, count, flags, state),
+
+ TP_STRUCT__entry(
+ __field(void *, sb)
+ __field(void *, ti)
+ __field(int, count)
+ __field(unsigned int, flags)
+ __field(int, state)
+ ),
+
+ TP_fast_assign(
+ __entry->sb = sb;
+ __entry->ti = ti;
+ __entry->count = count;
+ __entry->flags = flags;
+ __entry->state = state;
+ ),
+
+ TP_printk("sb = %p ti = %p count = %d flags = %x state = %s",
+ __entry->sb,
+ __entry->ti,
+ __entry->count,
+ __entry->flags,
+ show_transaction_state(__entry->state))
+);
+
+TRACE_EVENT(nilfs2_segment_usage_check,
+ TP_PROTO(struct inode *sufile,
+ __u64 segnum,
+ unsigned long cnt),
+
+ TP_ARGS(sufile, segnum, cnt),
+
+ TP_STRUCT__entry(
+ __field(struct inode *, sufile)
+ __field(__u64, segnum)
+ __field(unsigned long, cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->sufile = sufile;
+ __entry->segnum = segnum;
+ __entry->cnt = cnt;
+ ),
+
+ TP_printk("sufile = %p segnum = %llu cnt = %lu",
+ __entry->sufile,
+ __entry->segnum,
+ __entry->cnt)
+);
+
+TRACE_EVENT(nilfs2_segment_usage_allocated,
+ TP_PROTO(struct inode *sufile,
+ __u64 segnum),
+
+ TP_ARGS(sufile, segnum),
+
+ TP_STRUCT__entry(
+ __field(struct inode *, sufile)
+ __field(__u64, segnum)
+ ),
+
+ TP_fast_assign(
+ __entry->sufile = sufile;
+ __entry->segnum = segnum;
+ ),
+
+ TP_printk("sufile = %p segnum = %llu",
+ __entry->sufile,
+ __entry->segnum)
+);
+
+TRACE_EVENT(nilfs2_segment_usage_freed,
+ TP_PROTO(struct inode *sufile,
+ __u64 segnum),
+
+ TP_ARGS(sufile, segnum),
+
+ TP_STRUCT__entry(
+ __field(struct inode *, sufile)
+ __field(__u64, segnum)
+ ),
+
+ TP_fast_assign(
+ __entry->sufile = sufile;
+ __entry->segnum = segnum;
+ ),
+
+ TP_printk("sufile = %p segnum = %llu",
+ __entry->sufile,
+ __entry->segnum)
+);
+
+TRACE_EVENT(nilfs2_mdt_insert_new_block,
+ TP_PROTO(struct inode *inode,
+ unsigned long ino,
+ unsigned long block),
+
+ TP_ARGS(inode, ino, block),
+
+ TP_STRUCT__entry(
+ __field(struct inode *, inode)
+ __field(unsigned long, ino)
+ __field(unsigned long, block)
+ ),
+
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->ino = ino;
+ __entry->block = block;
+ ),
+
+ TP_printk("inode = %p ino = %lu block = %lu",
+ __entry->inode,
+ __entry->ino,
+ __entry->block)
+);
+
+TRACE_EVENT(nilfs2_mdt_submit_block,
+ TP_PROTO(struct inode *inode,
+ unsigned long ino,
+ unsigned long blkoff,
+ int mode),
+
+ TP_ARGS(inode, ino, blkoff, mode),
+
+ TP_STRUCT__entry(
+ __field(struct inode *, inode)
+ __field(unsigned long, ino)
+ __field(unsigned long, blkoff)
+ __field(int, mode)
+ ),
+
+ TP_fast_assign(
+ __entry->inode = inode;
+ __entry->ino = ino;
+ __entry->blkoff = blkoff;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("inode = %p ino = %lu blkoff = %lu mode = %x",
+ __entry->inode,
+ __entry->ino,
+ __entry->blkoff,
+ __entry->mode)
+);
+
+#endif /* _TRACE_NILFS2_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE nilfs2
+#include <trace/define_trace.h>
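
Each TRACE_EVENT() in this new header yields a trace_nilfs2_*() hook for the filesystem to call; the callers live in fs/nilfs2 and are not part of this diff. A sketch matching the prototypes above (variable names are placeholders):

	/* sketch only: hypothetical callers, arguments per the TP_PROTO definitions */
	trace_nilfs2_transaction_transition(sb, ti, count, flags,
					    TRACE_NILFS2_TRANSACTION_BEGIN);
	trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff, mode);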
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 539d6bc32..9b90c5751 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -104,22 +104,17 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
TP_ARGS(p));
#ifdef CREATE_TRACE_POINTS
-static inline long __trace_sched_switch_state(struct task_struct *p)
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
- long state = p->state;
-
-#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SCHED_DEBUG
BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
+
/*
- * For all intents and purposes a preempted task is a running task.
+ * Preemption ignores task state, therefore preempted tasks are always
+ * RUNNING (we will not have dequeued if state != RUNNING).
*/
- if (preempt_count() & PREEMPT_ACTIVE)
- state = TASK_RUNNING | TASK_STATE_MAX;
-#endif /* CONFIG_PREEMPT */
-
- return state;
+ return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
#endif /* CREATE_TRACE_POINTS */
@@ -128,10 +123,11 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
*/
TRACE_EVENT(sched_switch,
- TP_PROTO(struct task_struct *prev,
+ TP_PROTO(bool preempt,
+ struct task_struct *prev,
struct task_struct *next),
- TP_ARGS(prev, next),
+ TP_ARGS(preempt, prev, next),
TP_STRUCT__entry(
__array( char, prev_comm, TASK_COMM_LEN )
@@ -147,7 +143,7 @@ TRACE_EVENT(sched_switch,
memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
__entry->prev_prio = prev->prio;
- __entry->prev_state = __trace_sched_switch_state(prev);
+ __entry->prev_state = __trace_sched_switch_state(preempt, prev);
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
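
The effect of the sched_switch change is that the caller (the scheduler's context-switch path) now states explicitly whether the switch is a preemption, instead of the tracepoint inferring it from PREEMPT_ACTIVE in the preempt count. A sketch of the updated call shape (not the exact kernel/sched/core.c code):

	/* sketch only: the scheduler passes the preemption decision to the tracepoint */
	trace_sched_switch(preempt, prev, next);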
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 8b1f80682..5738bb3e2 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -4,6 +4,7 @@
#if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_THERMAL_H
+#include <linux/devfreq.h>
#include <linux/thermal.h>
#include <linux/tracepoint.h>
@@ -135,6 +136,58 @@ TRACE_EVENT(thermal_power_cpu_limit,
__entry->power)
);
+TRACE_EVENT(thermal_power_devfreq_get_power,
+ TP_PROTO(struct thermal_cooling_device *cdev,
+ struct devfreq_dev_status *status, unsigned long freq,
+ u32 dynamic_power, u32 static_power),
+
+ TP_ARGS(cdev, status, freq, dynamic_power, static_power),
+
+ TP_STRUCT__entry(
+ __string(type, cdev->type )
+ __field(unsigned long, freq )
+ __field(u32, load )
+ __field(u32, dynamic_power )
+ __field(u32, static_power )
+ ),
+
+ TP_fast_assign(
+ __assign_str(type, cdev->type);
+ __entry->freq = freq;
+ __entry->load = (100 * status->busy_time) / status->total_time;
+ __entry->dynamic_power = dynamic_power;
+ __entry->static_power = static_power;
+ ),
+
+ TP_printk("type=%s freq=%lu load=%u dynamic_power=%u static_power=%u",
+ __get_str(type), __entry->freq,
+ __entry->load, __entry->dynamic_power, __entry->static_power)
+);
+
+TRACE_EVENT(thermal_power_devfreq_limit,
+ TP_PROTO(struct thermal_cooling_device *cdev, unsigned long freq,
+ unsigned long cdev_state, u32 power),
+
+ TP_ARGS(cdev, freq, cdev_state, power),
+
+ TP_STRUCT__entry(
+ __string(type, cdev->type)
+ __field(unsigned int, freq )
+ __field(unsigned long, cdev_state)
+ __field(u32, power )
+ ),
+
+ TP_fast_assign(
+ __assign_str(type, cdev->type);
+ __entry->freq = freq;
+ __entry->cdev_state = cdev_state;
+ __entry->power = power;
+ ),
+
+ TP_printk("type=%s freq=%u cdev_state=%lu power=%u",
+ __get_str(type), __entry->freq, __entry->cdev_state,
+ __entry->power)
+);
#endif /* _TRACE_THERMAL_H */
/* This part must be outside protection */
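
The new devfreq power events generate trace_thermal_power_devfreq_get_power() and trace_thermal_power_devfreq_limit() hooks; the real call sites are expected in the devfreq cooling code and are not part of this diff. An illustrative caller shape matching the prototypes above:

	/* sketch only: hypothetical caller */
	trace_thermal_power_devfreq_get_power(cdev, status, freq,
					      dyn_power, static_power);
	trace_thermal_power_devfreq_limit(cdev, freq, cdev_state, power);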
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index dbf017bfd..22afa26e3 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -5,6 +5,7 @@
#define _TRACE_V4L2_H
#include <linux/tracepoint.h>
+#include <media/videobuf2-v4l2.h>
/* Enums require being exported to userspace, for user tool parsing */
#undef EM
@@ -27,6 +28,7 @@
EM( V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" ) \
EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" ) \
EM( V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" ) \
+ EM( V4L2_BUF_TYPE_SDR_OUTPUT, "SDR_OUTPUT" ) \
EMe(V4L2_BUF_TYPE_PRIVATE, "PRIVATE" )
SHOW_TYPE
@@ -174,17 +176,12 @@ DEFINE_EVENT(v4l2_event_class, v4l2_qbuf,
TP_ARGS(minor, buf)
);
-DECLARE_EVENT_CLASS(vb2_event_class,
+DECLARE_EVENT_CLASS(vb2_v4l2_event_class,
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
TP_ARGS(q, vb),
TP_STRUCT__entry(
__field(int, minor)
- __field(u32, queued_count)
- __field(int, owned_by_drv_count)
- __field(u32, index)
- __field(u32, type)
- __field(u32, bytesused)
__field(u32, flags)
__field(u32, field)
__field(s64, timestamp)
@@ -202,38 +199,30 @@ DECLARE_EVENT_CLASS(vb2_event_class,
),
TP_fast_assign(
- __entry->minor = q->owner ? q->owner->vdev->minor : -1;
- __entry->queued_count = q->queued_count;
- __entry->owned_by_drv_count =
- atomic_read(&q->owned_by_drv_count);
- __entry->index = vb->v4l2_buf.index;
- __entry->type = vb->v4l2_buf.type;
- __entry->bytesused = vb->v4l2_planes[0].bytesused;
- __entry->flags = vb->v4l2_buf.flags;
- __entry->field = vb->v4l2_buf.field;
- __entry->timestamp = timeval_to_ns(&vb->v4l2_buf.timestamp);
- __entry->timecode_type = vb->v4l2_buf.timecode.type;
- __entry->timecode_flags = vb->v4l2_buf.timecode.flags;
- __entry->timecode_frames = vb->v4l2_buf.timecode.frames;
- __entry->timecode_seconds = vb->v4l2_buf.timecode.seconds;
- __entry->timecode_minutes = vb->v4l2_buf.timecode.minutes;
- __entry->timecode_hours = vb->v4l2_buf.timecode.hours;
- __entry->timecode_userbits0 = vb->v4l2_buf.timecode.userbits[0];
- __entry->timecode_userbits1 = vb->v4l2_buf.timecode.userbits[1];
- __entry->timecode_userbits2 = vb->v4l2_buf.timecode.userbits[2];
- __entry->timecode_userbits3 = vb->v4l2_buf.timecode.userbits[3];
- __entry->sequence = vb->v4l2_buf.sequence;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct v4l2_fh *owner = q->owner;
+
+ __entry->minor = owner ? owner->vdev->minor : -1;
+ __entry->flags = vbuf->flags;
+ __entry->field = vbuf->field;
+ __entry->timestamp = timeval_to_ns(&vbuf->timestamp);
+ __entry->timecode_type = vbuf->timecode.type;
+ __entry->timecode_flags = vbuf->timecode.flags;
+ __entry->timecode_frames = vbuf->timecode.frames;
+ __entry->timecode_seconds = vbuf->timecode.seconds;
+ __entry->timecode_minutes = vbuf->timecode.minutes;
+ __entry->timecode_hours = vbuf->timecode.hours;
+ __entry->timecode_userbits0 = vbuf->timecode.userbits[0];
+ __entry->timecode_userbits1 = vbuf->timecode.userbits[1];
+ __entry->timecode_userbits2 = vbuf->timecode.userbits[2];
+ __entry->timecode_userbits3 = vbuf->timecode.userbits[3];
+ __entry->sequence = vbuf->sequence;
),
- TP_printk("minor = %d, queued = %u, owned_by_drv = %d, index = %u, "
- "type = %s, bytesused = %u, flags = %s, field = %s, "
+ TP_printk("minor=%d flags = %s, field = %s, "
"timestamp = %llu, timecode = { type = %s, flags = %s, "
"frames = %u, seconds = %u, minutes = %u, hours = %u, "
"userbits = { %u %u %u %u } }, sequence = %u", __entry->minor,
- __entry->queued_count,
- __entry->owned_by_drv_count,
- __entry->index, show_type(__entry->type),
- __entry->bytesused,
show_flags(__entry->flags),
show_field(__entry->field),
__entry->timestamp,
@@ -251,22 +240,22 @@ DECLARE_EVENT_CLASS(vb2_event_class,
)
)
-DEFINE_EVENT(vb2_event_class, vb2_buf_done,
+DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_done,
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
TP_ARGS(q, vb)
);
-DEFINE_EVENT(vb2_event_class, vb2_buf_queue,
+DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_queue,
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
TP_ARGS(q, vb)
);
-DEFINE_EVENT(vb2_event_class, vb2_dqbuf,
+DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_dqbuf,
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
TP_ARGS(q, vb)
);
-DEFINE_EVENT(vb2_event_class, vb2_qbuf,
+DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_qbuf,
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
TP_ARGS(q, vb)
);
diff --git a/include/trace/events/vb2.h b/include/trace/events/vb2.h
new file mode 100644
index 000000000..bfeceeba3
--- /dev/null
+++ b/include/trace/events/vb2.h
@@ -0,0 +1,65 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vb2
+
+#if !defined(_TRACE_VB2_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VB2_H
+
+#include <linux/tracepoint.h>
+#include <media/videobuf2-core.h>
+
+DECLARE_EVENT_CLASS(vb2_event_class,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb),
+
+ TP_STRUCT__entry(
+ __field(void *, owner)
+ __field(u32, queued_count)
+ __field(int, owned_by_drv_count)
+ __field(u32, index)
+ __field(u32, type)
+ __field(u32, bytesused)
+ ),
+
+ TP_fast_assign(
+ __entry->owner = q->owner;
+ __entry->queued_count = q->queued_count;
+ __entry->owned_by_drv_count =
+ atomic_read(&q->owned_by_drv_count);
+ __entry->index = vb->index;
+ __entry->type = vb->type;
+ __entry->bytesused = vb->planes[0].bytesused;
+ ),
+
+ TP_printk("owner = %p, queued = %u, owned_by_drv = %d, index = %u, "
+ "type = %u, bytesused = %u", __entry->owner,
+ __entry->queued_count,
+ __entry->owned_by_drv_count,
+ __entry->index, __entry->type,
+ __entry->bytesused
+ )
+)
+
+DEFINE_EVENT(vb2_event_class, vb2_buf_done,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_buf_queue,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_dqbuf,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
+
+DEFINE_EVENT(vb2_event_class, vb2_qbuf,
+ TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
+ TP_ARGS(q, vb)
+);
+
+#endif /* if !defined(_TRACE_VB2_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/perf.h b/include/trace/perf.h
index 1b5443ceb..26486fcd7 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -1,261 +1,3 @@
-/*
- * Stage 4 of the trace events.
- *
- * Override the macros in <trace/trace_events.h> to include the following:
- *
- * For those macros defined with TRACE_EVENT:
- *
- * static struct trace_event_call event_<call>;
- *
- * static void trace_event_raw_event_<call>(void *__data, proto)
- * {
- * struct trace_event_file *trace_file = __data;
- * struct trace_event_call *event_call = trace_file->event_call;
- * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
- * unsigned long eflags = trace_file->flags;
- * enum event_trigger_type __tt = ETT_NONE;
- * struct ring_buffer_event *event;
- * struct trace_event_raw_<call> *entry; <-- defined in stage 1
- * struct ring_buffer *buffer;
- * unsigned long irq_flags;
- * int __data_size;
- * int pc;
- *
- * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
- * if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
- * event_triggers_call(trace_file, NULL);
- * if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
- * return;
- * }
- *
- * local_save_flags(irq_flags);
- * pc = preempt_count();
- *
- * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
- *
- * event = trace_event_buffer_lock_reserve(&buffer, trace_file,
- * event_<call>->event.type,
- * sizeof(*entry) + __data_size,
- * irq_flags, pc);
- * if (!event)
- * return;
- * entry = ring_buffer_event_data(event);
- *
- * { <assign>; } <-- Here we assign the entries by the __field and
- * __array macros.
- *
- * if (eflags & EVENT_FILE_FL_TRIGGER_COND)
- * __tt = event_triggers_call(trace_file, entry);
- *
- * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
- * &trace_file->flags))
- * ring_buffer_discard_commit(buffer, event);
- * else if (!filter_check_discard(trace_file, entry, buffer, event))
- * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
- *
- * if (__tt)
- * event_triggers_post_call(trace_file, __tt);
- * }
- *
- * static struct trace_event ftrace_event_type_<call> = {
- * .trace = trace_raw_output_<call>, <-- stage 2
- * };
- *
- * static char print_fmt_<call>[] = <TP_printk>;
- *
- * static struct trace_event_class __used event_class_<template> = {
- * .system = "<system>",
- * .define_fields = trace_event_define_fields_<call>,
- * .fields = LIST_HEAD_INIT(event_class_##call.fields),
- * .raw_init = trace_event_raw_init,
- * .probe = trace_event_raw_event_##call,
- * .reg = trace_event_reg,
- * };
- *
- * static struct trace_event_call event_<call> = {
- * .class = event_class_<template>,
- * {
- * .tp = &__tracepoint_<call>,
- * },
- * .event = &ftrace_event_type_<call>,
- * .print_fmt = print_fmt_<call>,
- * .flags = TRACE_EVENT_FL_TRACEPOINT,
- * };
- * // its only safe to use pointers when doing linker tricks to
- * // create an array.
- * static struct trace_event_call __used
- * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
- *
- */
-
-#ifdef CONFIG_PERF_EVENTS
-
-#define _TRACE_PERF_PROTO(call, proto) \
- static notrace void \
- perf_trace_##call(void *__data, proto);
-
-#define _TRACE_PERF_INIT(call) \
- .perf_probe = perf_trace_##call,
-
-#else
-#define _TRACE_PERF_PROTO(call, proto)
-#define _TRACE_PERF_INIT(call)
-#endif /* CONFIG_PERF_EVENTS */
-
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- __entry->__data_loc_##item = __data_offsets.item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __assign_str
-#define __assign_str(dst, src) \
- strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __assign_bitmask
-#define __assign_bitmask(dst, src, nr_bits) \
- memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef __perf_addr
-#define __perf_addr(a) (a)
-
-#undef __perf_count
-#define __perf_count(c) (c)
-
-#undef __perf_task
-#define __perf_task(t) (t)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
- \
-static notrace void \
-trace_event_raw_event_##call(void *__data, proto) \
-{ \
- struct trace_event_file *trace_file = __data; \
- struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
- struct trace_event_buffer fbuffer; \
- struct trace_event_raw_##call *entry; \
- int __data_size; \
- \
- if (trace_trigger_soft_disabled(trace_file)) \
- return; \
- \
- __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
- \
- entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
- sizeof(*entry) + __data_size); \
- \
- if (!entry) \
- return; \
- \
- tstruct \
- \
- { assign; } \
- \
- trace_event_buffer_commit(&fbuffer); \
-}
-/*
- * The ftrace_test_probe is compiled out, it is only here as a build time check
- * to make sure that if the tracepoint handling changes, the ftrace probe will
- * fail to compile unless it too is updated.
- */
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static inline void ftrace_test_probe_##call(void) \
-{ \
- check_trace_callback_type_##call(trace_event_raw_event_##template); \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#undef __entry
-#define __entry REC
-
-#undef __print_flags
-#undef __print_symbolic
-#undef __print_hex
-#undef __get_dynamic_array
-#undef __get_dynamic_array_len
-#undef __get_str
-#undef __get_bitmask
-#undef __print_array
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-_TRACE_PERF_PROTO(call, PARAMS(proto)); \
-static char print_fmt_##call[] = print; \
-static struct trace_event_class __used __refdata event_class_##call = { \
- .system = TRACE_SYSTEM_STRING, \
- .define_fields = trace_event_define_fields_##call, \
- .fields = LIST_HEAD_INIT(event_class_##call.fields),\
- .raw_init = trace_event_raw_init, \
- .probe = trace_event_raw_event_##call, \
- .reg = trace_event_reg, \
- _TRACE_PERF_INIT(call) \
-};
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
- \
-static struct trace_event_call __used event_##call = { \
- .class = &event_class_##template, \
- { \
- .tp = &__tracepoint_##call, \
- }, \
- .event.funcs = &trace_event_type_funcs_##template, \
- .print_fmt = print_fmt_##template, \
- .flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct trace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
- \
-static char print_fmt_##call[] = print; \
- \
-static struct trace_event_call __used event_##call = { \
- .class = &event_class_##template, \
- { \
- .tp = &__tracepoint_##call, \
- }, \
- .event.funcs = &trace_event_type_funcs_##call, \
- .print_fmt = print_fmt_##call, \
- .flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct trace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef TRACE_SYSTEM_VAR
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 43be3b0e4..de996cf61 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -506,3 +506,261 @@ static inline notrace int trace_event_get_offsets_##call( \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+/*
+ * Stage 4 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct trace_event_call event_<call>;
+ *
+ * static void trace_event_raw_event_<call>(void *__data, proto)
+ * {
+ * struct trace_event_file *trace_file = __data;
+ * struct trace_event_call *event_call = trace_file->event_call;
+ * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
+ * unsigned long eflags = trace_file->flags;
+ * enum event_trigger_type __tt = ETT_NONE;
+ * struct ring_buffer_event *event;
+ * struct trace_event_raw_<call> *entry; <-- defined in stage 1
+ * struct ring_buffer *buffer;
+ * unsigned long irq_flags;
+ * int __data_size;
+ * int pc;
+ *
+ * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ * if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+ * event_triggers_call(trace_file, NULL);
+ * if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+ * return;
+ * }
+ *
+ * local_save_flags(irq_flags);
+ * pc = preempt_count();
+ *
+ * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
+ *
+ * event = trace_event_buffer_lock_reserve(&buffer, trace_file,
+ * event_<call>->event.type,
+ * sizeof(*entry) + __data_size,
+ * irq_flags, pc);
+ * if (!event)
+ * return;
+ * entry = ring_buffer_event_data(event);
+ *
+ * { <assign>; } <-- Here we assign the entries by the __field and
+ * __array macros.
+ *
+ * if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+ * __tt = event_triggers_call(trace_file, entry);
+ *
+ * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
+ * &trace_file->flags))
+ * ring_buffer_discard_commit(buffer, event);
+ * else if (!filter_check_discard(trace_file, entry, buffer, event))
+ * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *
+ * if (__tt)
+ * event_triggers_post_call(trace_file, __tt);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ * .trace = trace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct trace_event_class __used event_class_<template> = {
+ * .system = "<system>",
+ * .define_fields = trace_event_define_fields_<call>,
+ * .fields = LIST_HEAD_INIT(event_class_##call.fields),
+ * .raw_init = trace_event_raw_init,
+ * .probe = trace_event_raw_event_##call,
+ * .reg = trace_event_reg,
+ * };
+ *
+ * static struct trace_event_call event_<call> = {
+ * .class = event_class_<template>,
+ * {
+ * .tp = &__tracepoint_<call>,
+ * },
+ * .event = &ftrace_event_type_<call>,
+ * .print_fmt = print_fmt_<call>,
+ * .flags = TRACE_EVENT_FL_TRACEPOINT,
+ * };
+ * // its only safe to use pointers when doing linker tricks to
+ * // create an array.
+ * static struct trace_event_call __used
+ * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
+ *
+ */
+
+#ifdef CONFIG_PERF_EVENTS
+
+#define _TRACE_PERF_PROTO(call, proto) \
+ static notrace void \
+ perf_trace_##call(void *__data, proto);
+
+#define _TRACE_PERF_INIT(call) \
+ .perf_probe = perf_trace_##call,
+
+#else
+#define _TRACE_PERF_PROTO(call, proto)
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __assign_str
+#define __assign_str(dst, src) \
+ strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits) \
+ memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef __perf_addr
+#define __perf_addr(a) (a)
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ \
+static notrace void \
+trace_event_raw_event_##call(void *__data, proto) \
+{ \
+ struct trace_event_file *trace_file = __data; \
+ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+ struct trace_event_buffer fbuffer; \
+ struct trace_event_raw_##call *entry; \
+ int __data_size; \
+ \
+ if (trace_trigger_soft_disabled(trace_file)) \
+ return; \
+ \
+ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+ \
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
+ sizeof(*entry) + __data_size); \
+ \
+ if (!entry) \
+ return; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ trace_event_buffer_commit(&fbuffer); \
+}
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void ftrace_test_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(trace_event_raw_event_##template); \
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __print_hex
+#undef __get_dynamic_array
+#undef __get_dynamic_array_len
+#undef __get_str
+#undef __get_bitmask
+#undef __print_array
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+_TRACE_PERF_PROTO(call, PARAMS(proto)); \
+static char print_fmt_##call[] = print; \
+static struct trace_event_class __used __refdata event_class_##call = { \
+ .system = TRACE_SYSTEM_STRING, \
+ .define_fields = trace_event_define_fields_##call, \
+ .fields = LIST_HEAD_INIT(event_class_##call.fields),\
+ .raw_init = trace_event_raw_init, \
+ .probe = trace_event_raw_event_##call, \
+ .reg = trace_event_reg, \
+ _TRACE_PERF_INIT(call) \
+};
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+ \
+static struct trace_event_call __used event_##call = { \
+ .class = &event_class_##template, \
+ { \
+ .tp = &__tracepoint_##call, \
+ }, \
+ .event.funcs = &trace_event_type_funcs_##template, \
+ .print_fmt = print_fmt_##template, \
+ .flags = TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
+ \
+static char print_fmt_##call[] = print; \
+ \
+static struct trace_event_call __used event_##call = { \
+ .class = &event_class_##template, \
+ { \
+ .tp = &__tracepoint_##call, \
+ }, \
+ .event.funcs = &trace_event_type_funcs_##call, \
+ .print_fmt = print_fmt_##call, \
+ .flags = TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)