author		André Fabian Silva Delgado <emulatorman@parabola.nu>	2016-10-20 00:10:27 -0300
committer	André Fabian Silva Delgado <emulatorman@parabola.nu>	2016-10-20 00:10:27 -0300
commit		d0b2f91bede3bd5e3d24dd6803e56eee959c1797 (patch)
tree		7fee4ab0509879c373c4f2cbd5b8a5be5b4041ee /tools/perf/util/evsel.c
parent		e914f8eb445e8f74b00303c19c2ffceaedd16a05 (diff)
Linux-libre 4.8.2-gnupck-4.8.2-gnu
Diffstat (limited to 'tools/perf/util/evsel.c')
-rw-r--r--	tools/perf/util/evsel.c	134
1 file changed, 103 insertions(+), 31 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 5d7037ef7..21fd57310 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -200,6 +200,24 @@ void perf_evsel__set_sample_id(struct perf_evsel *evsel,
evsel->attr.read_format |= PERF_FORMAT_ID;
}
+/**
+ * perf_evsel__is_function_event - Return whether given evsel is a function
+ * trace event
+ *
+ * @evsel - evsel selector to be tested
+ *
+ * Return %true if event is function trace event
+ */
+bool perf_evsel__is_function_event(struct perf_evsel *evsel)
+{
+#define FUNCTION_EVENT "ftrace:function"
+
+ return evsel->name &&
+ !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
+
+#undef FUNCTION_EVENT
+}
+
void perf_evsel__init(struct perf_evsel *evsel,
struct perf_event_attr *attr, int idx)
{
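
The comparison in the new helper uses sizeof() on the string literal, which counts the terminating NUL (16 bytes for "ftrace:function"), so the strncmp() effectively demands an exact match rather than a prefix match. A minimal standalone sketch of that behaviour (the longer test string is just a made-up name for illustration):

/* Sketch only: sizeof("ftrace:function") is 16 (15 chars + NUL), so the
 * strncmp() above also compares the NUL and rejects longer event names. */
#include <stdio.h>
#include <string.h>

#define FUNCTION_EVENT "ftrace:function"

int main(void)
{
	const char *exact  = "ftrace:function";
	const char *longer = "ftrace:function_graph";	/* made-up longer name */

	printf("%zu\n", sizeof(FUNCTION_EVENT));	/* 16 */
	printf("%d\n", !strncmp(FUNCTION_EVENT, exact, sizeof(FUNCTION_EVENT)));	/* 1 */
	printf("%d\n", !strncmp(FUNCTION_EVENT, longer, sizeof(FUNCTION_EVENT)));	/* 0 */
	return 0;
}
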
@@ -235,6 +253,34 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
return evsel;
}
+struct perf_evsel *perf_evsel__new_cycles(void)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ };
+ struct perf_evsel *evsel;
+
+ event_attr_init(&attr);
+
+ perf_event_attr__set_max_precise_ip(&attr);
+
+ evsel = perf_evsel__new(&attr);
+ if (evsel == NULL)
+ goto out;
+
+ /* use asprintf() because free(evsel) assumes name is allocated */
+ if (asprintf(&evsel->name, "cycles%.*s",
+ attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
+ goto error_free;
+out:
+ return evsel;
+error_free:
+ perf_evsel__delete(evsel);
+ evsel = NULL;
+ goto out;
+}
+
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
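
perf_evsel__new_cycles() builds the event name with asprintf("%.*s"), where the precision argument decides how many characters of ":ppp" survive, so precise_ip values 0..3 produce "cycles", "cycles:p", "cycles:pp" and "cycles:ppp". A self-contained sketch of just that formatting trick (not the perf code itself):

/* Sketch of the "%.*s" precision trick used above; asprintf() is a GNU
 * extension, hence _GNU_SOURCE. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	for (int precise_ip = 0; precise_ip <= 3; precise_ip++) {
		char *name;

		if (asprintf(&name, "cycles%.*s",
			     precise_ip ? precise_ip + 1 : 0, ":ppp") < 0)
			return 1;
		puts(name);	/* cycles, cycles:p, cycles:pp, cycles:ppp */
		free(name);
	}
	return 0;
}
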
@@ -461,17 +507,17 @@ static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
u8 op, result, type = (config >> 0) & 0xff;
const char *err = "unknown-ext-hardware-cache-type";
- if (type > PERF_COUNT_HW_CACHE_MAX)
+ if (type >= PERF_COUNT_HW_CACHE_MAX)
goto out_err;
op = (config >> 8) & 0xff;
err = "unknown-ext-hardware-cache-op";
- if (op > PERF_COUNT_HW_CACHE_OP_MAX)
+ if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
goto out_err;
result = (config >> 16) & 0xff;
err = "unknown-ext-hardware-cache-result";
- if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
+ if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
goto out_err;
err = "invalid-cache";
@@ -572,6 +618,8 @@ void perf_evsel__config_callchain(struct perf_evsel *evsel,
perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+ attr->sample_max_stack = param->max_stack;
+
if (param->record_mode == CALLCHAIN_LBR) {
if (!opts->branch_stack) {
if (attr->exclude_user) {
@@ -635,7 +683,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
struct perf_event_attr *attr = &evsel->attr;
struct callchain_param param;
u32 dump_size = 0;
- char *callgraph_buf = NULL;
+ int max_stack = 0;
+ const char *callgraph_buf = NULL;
/* callgraph default */
param.record_mode = callchain_param.record_mode;
@@ -662,6 +711,9 @@ static void apply_config_terms(struct perf_evsel *evsel,
case PERF_EVSEL__CONFIG_TERM_STACK_USER:
dump_size = term->val.stack_user;
break;
+ case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
+ max_stack = term->val.max_stack;
+ break;
case PERF_EVSEL__CONFIG_TERM_INHERIT:
/*
* attr->inherit should has already been set by
@@ -671,13 +723,21 @@ static void apply_config_terms(struct perf_evsel *evsel,
*/
attr->inherit = term->val.inherit ? 1 : 0;
break;
+ case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
+ attr->write_backward = term->val.overwrite ? 1 : 0;
+ break;
default:
break;
}
}
/* User explicitly set per-event callgraph, clear the old setting and reset. */
- if ((callgraph_buf != NULL) || (dump_size > 0)) {
+ if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
+ if (max_stack) {
+ param.max_stack = max_stack;
+ if (callgraph_buf == NULL)
+ callgraph_buf = "fp";
+ }
/* parse callgraph parameters */
if (callgraph_buf != NULL) {
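
The max_stack handling above merges a per-event max-stack term into the callchain parameters: it only takes effect once some callgraph setting is active, so when the user gave max-stack without choosing a call-graph mode, frame-pointer ("fp") unwinding is assumed as the default. A stripped-down sketch of that merge rule, using hypothetical types and names rather than the perf structures:

/* Sketch with made-up types: per-event terms override the global callchain
 * defaults, and a max-stack term without an explicit mode implies "fp". */
#include <stddef.h>

struct callchain_opts {
	const char *mode;	/* "fp", "dwarf", "lbr", or NULL when unset */
	int max_stack;
};

static void apply_per_event_terms(struct callchain_opts *param,
				  const char *term_mode, int term_max_stack)
{
	if (term_max_stack) {
		param->max_stack = term_max_stack;
		if (term_mode == NULL)
			term_mode = "fp";	/* default mode when only max-stack is given */
	}
	if (term_mode != NULL)
		param->mode = term_mode;
}
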
@@ -747,6 +807,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
attr->inherit = !opts->no_inherit;
+ attr->write_backward = opts->overwrite ? 1 : 0;
perf_evsel__set_sample_bit(evsel, IP);
perf_evsel__set_sample_bit(evsel, TID);
@@ -821,7 +882,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
perf_evsel__set_sample_bit(evsel, REGS_INTR);
}
- if (target__has_cpu(&opts->target))
+ if (target__has_cpu(&opts->target) || opts->sample_cpu)
perf_evsel__set_sample_bit(evsel, CPU);
if (opts->period)
@@ -1329,6 +1390,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(clockid, p_signed);
PRINT_ATTRf(sample_regs_intr, p_hex);
PRINT_ATTRf(aux_watermark, p_unsigned);
+ PRINT_ATTRf(sample_max_stack, p_unsigned);
return ret;
}
@@ -1347,6 +1409,9 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
int pid = -1, err;
enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
+ if (perf_missing_features.write_backward && evsel->attr.write_backward)
+ return -EINVAL;
+
if (evsel->system_wide)
nthreads = 1;
else
@@ -1377,8 +1442,6 @@ fallback_missing_features:
if (perf_missing_features.lbr_flags)
evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
PERF_SAMPLE_BRANCH_NO_CYCLES);
- if (perf_missing_features.write_backward)
- evsel->attr.write_backward = false;
retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0;
@@ -1441,12 +1504,6 @@ retry_open:
err = -EINVAL;
goto out_close;
}
-
- if (evsel->overwrite &&
- perf_missing_features.write_backward) {
- err = -EINVAL;
- goto out_close;
- }
}
}
@@ -1484,7 +1541,10 @@ try_fallback:
* Must probe features in the order they were added to the
* perf_event_attr interface.
*/
- if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
+ if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
+ perf_missing_features.write_backward = true;
+ goto out_close;
+ } else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
perf_missing_features.clockid_wrong = true;
goto fallback_missing_features;
} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
@@ -1509,12 +1569,7 @@ try_fallback:
PERF_SAMPLE_BRANCH_NO_FLAGS))) {
perf_missing_features.lbr_flags = true;
goto fallback_missing_features;
- } else if (!perf_missing_features.write_backward &&
- evsel->attr.write_backward) {
- perf_missing_features.write_backward = true;
- goto fallback_missing_features;
}
-
out_close:
do {
while (--thread >= 0) {
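
The two hunks above move write_backward out of the fallback chain: silently clearing the bit would change what the user asked for, so a kernel without the feature now fails the open with -EINVAL instead of retrying, while optional attr fields such as use_clockid are still cleared and the open retried. A self-contained sketch of that probe-and-fallback pattern, offered as an illustration (it assumes 4.8-era kernel headers and is not the tool's own code):

/* Sketch only: probe newer perf_event_attr features first; record a missing
 * feature, then either fail hard (write_backward) or drop it and retry
 * (use_clockid). */
#include <errno.h>
#include <stdbool.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

static bool missing_write_backward, missing_clockid;

static int open_counter(struct perf_event_attr *attr)
{
	int fd;

retry:
	fd = syscall(__NR_perf_event_open, attr, 0, -1, -1, 0);
	if (fd >= 0)
		return fd;

	if (!missing_write_backward && attr->write_backward) {
		missing_write_backward = true;	/* feature missing: hard error */
		return -EINVAL;
	}
	if (!missing_clockid && attr->use_clockid) {
		missing_clockid = true;		/* feature missing: retry without it */
		attr->use_clockid = 0;
		attr->clockid = 0;
		goto retry;
	}
	return -errno;
}
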
@@ -2239,17 +2294,11 @@ void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
return sample->raw_data + offset;
}
-u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
- const char *name)
+u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
+ bool needs_swap)
{
- struct format_field *field = perf_evsel__field(evsel, name);
- void *ptr;
u64 value;
-
- if (!field)
- return 0;
-
- ptr = sample->raw_data + field->offset;
+ void *ptr = sample->raw_data + field->offset;
switch (field->size) {
case 1:
@@ -2267,7 +2316,7 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
return 0;
}
- if (!evsel->needs_swap)
+ if (!needs_swap)
return value;
switch (field->size) {
@@ -2284,6 +2333,17 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
return 0;
}
+u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name)
+{
+ struct format_field *field = perf_evsel__field(evsel, name);
+
+ if (!field)
+ return 0;
+
+ return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
+}
+
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
char *msg, size_t msgsize)
{
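
format_field__intval(), split out in the previous hunk, is a size-switched read of a tracepoint field from sample->raw_data, with an optional byte swap when the recorded data and the host differ in endianness. A standalone sketch of the same access pattern using only libc (not the perf code itself):

/* Sketch only: read a 1/2/4/8-byte field at a given offset and byte-swap it
 * when the data was recorded on a machine of the opposite endianness. */
#include <byteswap.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static uint64_t read_raw_field(const void *raw_data, unsigned int offset,
			       unsigned int size, bool needs_swap)
{
	const unsigned char *ptr = (const unsigned char *)raw_data + offset;
	uint16_t v16;
	uint32_t v32;
	uint64_t v64;

	switch (size) {
	case 1:
		return *ptr;	/* single bytes never need swapping */
	case 2:
		memcpy(&v16, ptr, sizeof(v16));
		return needs_swap ? bswap_16(v16) : v16;
	case 4:
		memcpy(&v32, ptr, sizeof(v32));
		return needs_swap ? bswap_32(v32) : v32;
	case 8:
		memcpy(&v64, ptr, sizeof(v64));
		return needs_swap ? bswap_64(v64) : v64;
	default:
		return 0;	/* unsupported field size */
	}
}
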
@@ -2372,6 +2432,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
"No such device - did you specify an out-of-range profile CPU?");
break;
case EOPNOTSUPP:
+ if (evsel->attr.sample_period != 0)
+ return scnprintf(msg, size, "%s",
+ "PMU Hardware doesn't support sampling/overflow-interrupts.");
if (evsel->attr.precise_ip)
return scnprintf(msg, size, "%s",
"\'precise\' request may not be supported. Try removing 'p' modifier.");
@@ -2389,6 +2452,8 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
"We found oprofile daemon running, please stop it and try again.");
break;
case EINVAL:
+ if (evsel->attr.write_backward && perf_missing_features.write_backward)
+ return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
if (perf_missing_features.clockid)
return scnprintf(msg, size, "clockid feature not supported.");
if (perf_missing_features.clockid_wrong)
@@ -2402,6 +2467,13 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
"/bin/dmesg may provide additional information.\n"
"No CONFIG_PERF_EVENTS=y kernel support configured?",
- err, strerror_r(err, sbuf, sizeof(sbuf)),
+ err, str_error_r(err, sbuf, sizeof(sbuf)),
perf_evsel__name(evsel));
}
+
+char *perf_evsel__env_arch(struct perf_evsel *evsel)
+{
+ if (evsel && evsel->evlist && evsel->evlist->env)
+ return evsel->evlist->env->arch;
+ return NULL;
+}