Diffstat (limited to 'drivers/hwtracing')
26 files changed, 4993 insertions, 2938 deletions
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index db0541031..130cb2114 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -78,4 +78,15 @@ config CORESIGHT_QCOM_REPLICATOR
 	  programmable ATB replicator sends the ATB trace stream from the
 	  ETB/ETF to the TPIUi and ETR.
 
+config CORESIGHT_STM
+	bool "CoreSight System Trace Macrocell driver"
+	depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
+	select CORESIGHT_LINKS_AND_SINKS
+	select STM
+	help
+	  This driver provides support for hardware assisted software
+	  instrumentation based tracing. This is primarily used for
+	  logging useful software events or data coming from various entities
+	  in the system, possibly running different OSs
+
 endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index cf8c6d689..af480d9c1 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -1,15 +1,18 @@
 #
 # Makefile for CoreSight drivers.
 #
-obj-$(CONFIG_CORESIGHT) += coresight.o
+obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o
 obj-$(CONFIG_OF) += of_coresight.o
-obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
+obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \
+					     coresight-tmc-etf.o \
+					     coresight-tmc-etr.o
 obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
 obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
 obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
 					   coresight-replicator.o
 obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
-					coresight-etm3x-sysfs.o \
-					coresight-etm-perf.o
-obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
+					coresight-etm3x-sysfs.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
+					coresight-etm4x-sysfs.o
 obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
+obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index acbce7993..4d20b0be0 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -71,26 +71,6 @@
 #define ETB_FRAME_SIZE_WORDS	4
 
 /**
- * struct cs_buffer - keep track of a recording session' specifics
- * @cur:	index of the current buffer
- * @nr_pages:	max number of pages granted to us
- * @offset:	offset within the current buffer
- * @data_size:	how much we collected in this run
- * @lost:	other than zero if we had a HW buffer wrap around
- * @snapshot:	is this run in snapshot mode
- * @data_pages:	a handle the ring buffer
- */
-struct cs_buffers {
-	unsigned int	cur;
-	unsigned int	nr_pages;
-	unsigned long	offset;
-	local_t		data_size;
-	local_t		lost;
-	bool		snapshot;
-	void		**data_pages;
-};
-
-/**
  * struct etb_drvdata - specifics associated to an ETB component
  * @base:	memory mapped base address for this component.
  * @dev:	the device entity associated to this component.
@@ -440,7 +420,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
 		u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);
 
 		/* The new read pointer must be frame size aligned */
-		to_read -= handle->size & mask;
+		to_read = handle->size & mask;
 		/*
 		 * Move the RAM read pointer up, keeping in mind that
 		 * everything is in frame size units.
@@ -448,7 +428,8 @@ static void etb_update_buffer(struct coresight_device *csdev,
 		read_ptr = (write_ptr + drvdata->buffer_depth) -
 					to_read / ETB_FRAME_SIZE_WORDS;
 		/* Wrap around if need be*/
-		read_ptr &= ~(drvdata->buffer_depth - 1);
+		if (read_ptr > (drvdata->buffer_depth - 1))
+			read_ptr -= drvdata->buffer_depth;
 		/* let the decoder know we've skipped ahead */
 		local_inc(&buf->lost);
 	}
@@ -579,47 +560,29 @@ static const struct file_operations etb_fops = {
 	.llseek		= no_llseek,
 };
 
-static ssize_t status_show(struct device *dev,
-			   struct device_attribute *attr, char *buf)
-{
-	unsigned long flags;
-	u32 etb_rdr, etb_sr, etb_rrp, etb_rwp;
-	u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr;
-	struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
-	pm_runtime_get_sync(drvdata->dev);
-	spin_lock_irqsave(&drvdata->spinlock, flags);
-	CS_UNLOCK(drvdata->base);
-
-	etb_rdr = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
-	etb_sr = readl_relaxed(drvdata->base + ETB_STATUS_REG);
-	etb_rrp = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
-	etb_rwp = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
-	etb_trg = readl_relaxed(drvdata->base + ETB_TRG);
-	etb_cr = readl_relaxed(drvdata->base + ETB_CTL_REG);
-	etb_ffsr = readl_relaxed(drvdata->base + ETB_FFSR);
-	etb_ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
-
-	CS_LOCK(drvdata->base);
-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
-	pm_runtime_put(drvdata->dev);
-
-	return sprintf(buf,
-		       "Depth:\t\t0x%x\n"
-		       "Status:\t\t0x%x\n"
-		       "RAM read ptr:\t0x%x\n"
-		       "RAM wrt ptr:\t0x%x\n"
-		       "Trigger cnt:\t0x%x\n"
-		       "Control:\t0x%x\n"
-		       "Flush status:\t0x%x\n"
-		       "Flush ctrl:\t0x%x\n",
-		       etb_rdr, etb_sr, etb_rrp, etb_rwp,
-		       etb_trg, etb_cr, etb_ffsr, etb_ffcr);
-
-	return -EINVAL;
-}
-static DEVICE_ATTR_RO(status);
+#define coresight_etb10_simple_func(name, offset) \
+	coresight_simple_func(struct etb_drvdata, name, offset)
+
+coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG);
+coresight_etb10_simple_func(sts, ETB_STATUS_REG);
+coresight_etb10_simple_func(rrp, ETB_RAM_READ_POINTER);
+coresight_etb10_simple_func(rwp, ETB_RAM_WRITE_POINTER);
+coresight_etb10_simple_func(trg, ETB_TRG);
+coresight_etb10_simple_func(ctl, ETB_CTL_REG);
+coresight_etb10_simple_func(ffsr, ETB_FFSR);
+coresight_etb10_simple_func(ffcr, ETB_FFCR);
+
+static struct attribute *coresight_etb_mgmt_attrs[] = {
+	&dev_attr_rdp.attr,
+	&dev_attr_sts.attr,
+	&dev_attr_rrp.attr,
+	&dev_attr_rwp.attr,
+	&dev_attr_trg.attr,
+	&dev_attr_ctl.attr,
+	&dev_attr_ffsr.attr,
+	&dev_attr_ffcr.attr,
+	NULL,
+};
 
 static ssize_t trigger_cntr_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
@@ -649,10 +612,23 @@ static DEVICE_ATTR_RW(trigger_cntr);
 
 static struct attribute *coresight_etb_attrs[] = {
 	&dev_attr_trigger_cntr.attr,
-	&dev_attr_status.attr,
 	NULL,
 };
-ATTRIBUTE_GROUPS(coresight_etb);
+
+static const struct attribute_group coresight_etb_group = {
+	.attrs = coresight_etb_attrs,
+};
+
+static const struct attribute_group coresight_etb_mgmt_group = {
+	.attrs = coresight_etb_mgmt_attrs,
+	.name = "mgmt",
+};
+
+const struct attribute_group *coresight_etb_groups[] = {
+	&coresight_etb_group,
+	&coresight_etb_mgmt_group,
+	NULL,
+};
 
 static int etb_probe(struct amba_device *adev, const struct amba_id *id)
 {
@@ -729,7 +705,6 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
 	if (ret)
 		goto err_misc_register;
 
-	dev_info(dev, "ETB initialized\n");
 	return 0;
 
 err_misc_register:
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index cbb4046c1..02d4b6298 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -1221,26 +1221,19 @@ static struct attribute *coresight_etm_attrs[] = {
 	NULL,
 };
 
-#define coresight_simple_func(name, offset) \
-static ssize_t name##_show(struct device *_dev, \
-			   struct device_attribute *attr, char *buf) \
-{ \
-	struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
-	return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
-			 readl_relaxed(drvdata->base + offset)); \
-} \
-DEVICE_ATTR_RO(name)
-
-coresight_simple_func(etmccr, ETMCCR);
-coresight_simple_func(etmccer, ETMCCER);
-coresight_simple_func(etmscr, ETMSCR);
-coresight_simple_func(etmidr, ETMIDR);
-coresight_simple_func(etmcr, ETMCR);
-coresight_simple_func(etmtraceidr, ETMTRACEIDR);
-coresight_simple_func(etmteevr, ETMTEEVR);
-coresight_simple_func(etmtssvr, ETMTSSCR);
-coresight_simple_func(etmtecr1, ETMTECR1);
-coresight_simple_func(etmtecr2, ETMTECR2);
+#define coresight_etm3x_simple_func(name, offset) \
+	coresight_simple_func(struct etm_drvdata, name, offset)
+
+coresight_etm3x_simple_func(etmccr, ETMCCR);
+coresight_etm3x_simple_func(etmccer, ETMCCER);
+coresight_etm3x_simple_func(etmscr, ETMSCR);
+coresight_etm3x_simple_func(etmidr, ETMIDR);
+coresight_etm3x_simple_func(etmcr, ETMCR);
+coresight_etm3x_simple_func(etmtraceidr, ETMTRACEIDR);
+coresight_etm3x_simple_func(etmteevr, ETMTEEVR);
+coresight_etm3x_simple_func(etmtssvr, ETMTSSCR);
+coresight_etm3x_simple_func(etmtecr1, ETMTECR1);
+coresight_etm3x_simple_func(etmtecr2, ETMTECR2);
 
 static struct attribute *coresight_etm_mgmt_attrs[] = {
 	&dev_attr_etmccr.attr,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
new file mode 100644
index 000000000..7c84308c5
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -0,0 +1,2126 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
+#include "coresight-etm4x.h"
+
+static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
+{
+	u8 idx;
+	struct etmv4_config *config = &drvdata->config;
+
+	idx = config->addr_idx;
+
+	/*
+	 * TRCACATRn.TYPE bit[1:0]: type of comparison
+	 * the trace unit performs
+	 */
+	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
+		if (idx % 2 != 0)
+			return -EINVAL;
+
+		/*
+		 * We are performing instruction address comparison. Set the
+		 * relevant bit of ViewInst Include/Exclude Control register
+		 * for corresponding address comparator pair.
+ */ + if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE || + config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE) + return -EINVAL; + + if (exclude == true) { + /* + * Set exclude bit and unset the include bit + * corresponding to comparator pair + */ + config->viiectlr |= BIT(idx / 2 + 16); + config->viiectlr &= ~BIT(idx / 2); + } else { + /* + * Set include bit and unset exclude bit + * corresponding to comparator pair + */ + config->viiectlr |= BIT(idx / 2); + config->viiectlr &= ~BIT(idx / 2 + 16); + } + } + return 0; +} + +static ssize_t nr_pe_cmp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_pe_cmp; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_pe_cmp); + +static ssize_t nr_addr_cmp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_addr_cmp; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_addr_cmp); + +static ssize_t nr_cntr_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_cntr; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_cntr); + +static ssize_t nr_ext_inp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_ext_inp; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_ext_inp); + +static ssize_t numcidc_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->numcidc; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(numcidc); + +static ssize_t numvmidc_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->numvmidc; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(numvmidc); + +static ssize_t nrseqstate_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nrseqstate; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nrseqstate); + +static ssize_t nr_resource_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_resource; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_resource); + +static ssize_t nr_ss_cmp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_ss_cmp; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_ss_cmp); + +static ssize_t reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int i; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + 
if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + if (val) + config->mode = 0x0; + + /* Disable data tracing: do not trace load and store data transfers */ + config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE); + config->cfg &= ~(BIT(1) | BIT(2)); + + /* Disable data value and data address tracing */ + config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR | + ETM_MODE_DATA_TRACE_VAL); + config->cfg &= ~(BIT(16) | BIT(17)); + + /* Disable all events tracing */ + config->eventctrl0 = 0x0; + config->eventctrl1 = 0x0; + + /* Disable timestamp event */ + config->ts_ctrl = 0x0; + + /* Disable stalling */ + config->stall_ctrl = 0x0; + + /* Reset trace synchronization period to 2^8 = 256 bytes*/ + if (drvdata->syncpr == false) + config->syncfreq = 0x8; + + /* + * Enable ViewInst to trace everything with start-stop logic in + * started state. ARM recommends start-stop logic is set before + * each trace run. + */ + config->vinst_ctrl |= BIT(0); + if (drvdata->nr_addr_cmp == true) { + config->mode |= ETM_MODE_VIEWINST_STARTSTOP; + /* SSSTATUS, bit[9] */ + config->vinst_ctrl |= BIT(9); + } + + /* No address range filtering for ViewInst */ + config->viiectlr = 0x0; + + /* No start-stop filtering for ViewInst */ + config->vissctlr = 0x0; + + /* Disable seq events */ + for (i = 0; i < drvdata->nrseqstate-1; i++) + config->seq_ctrl[i] = 0x0; + config->seq_rst = 0x0; + config->seq_state = 0x0; + + /* Disable external input events */ + config->ext_inp = 0x0; + + config->cntr_idx = 0x0; + for (i = 0; i < drvdata->nr_cntr; i++) { + config->cntrldvr[i] = 0x0; + config->cntr_ctrl[i] = 0x0; + config->cntr_val[i] = 0x0; + } + + config->res_idx = 0x0; + for (i = 0; i < drvdata->nr_resource; i++) + config->res_ctrl[i] = 0x0; + + for (i = 0; i < drvdata->nr_ss_cmp; i++) { + config->ss_ctrl[i] = 0x0; + config->ss_pe_cmp[i] = 0x0; + } + + config->addr_idx = 0x0; + for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) { + config->addr_val[i] = 0x0; + config->addr_acc[i] = 0x0; + config->addr_type[i] = ETM_ADDR_TYPE_NONE; + } + + config->ctxid_idx = 0x0; + for (i = 0; i < drvdata->numcidc; i++) { + config->ctxid_pid[i] = 0x0; + config->ctxid_vpid[i] = 0x0; + } + + config->ctxid_mask0 = 0x0; + config->ctxid_mask1 = 0x0; + + config->vmid_idx = 0x0; + for (i = 0; i < drvdata->numvmidc; i++) + config->vmid_val[i] = 0x0; + config->vmid_mask0 = 0x0; + config->vmid_mask1 = 0x0; + + drvdata->trcid = drvdata->cpu + 1; + + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_WO(reset); + +static ssize_t mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->mode; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val, mode; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + config->mode = val & ETMv4_MODE_ALL; + + if (config->mode & ETM_MODE_EXCLUDE) + etm4_set_mode_exclude(drvdata, true); + else + etm4_set_mode_exclude(drvdata, false); + + if (drvdata->instrp0 == true) { + /* start by clearing instruction P0 field */ + config->cfg &= ~(BIT(1) | BIT(2)); + if (config->mode & ETM_MODE_LOAD) + /* 0b01 Trace load instructions as P0 instructions 
*/ + config->cfg |= BIT(1); + if (config->mode & ETM_MODE_STORE) + /* 0b10 Trace store instructions as P0 instructions */ + config->cfg |= BIT(2); + if (config->mode & ETM_MODE_LOAD_STORE) + /* + * 0b11 Trace load and store instructions + * as P0 instructions + */ + config->cfg |= BIT(1) | BIT(2); + } + + /* bit[3], Branch broadcast mode */ + if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true)) + config->cfg |= BIT(3); + else + config->cfg &= ~BIT(3); + + /* bit[4], Cycle counting instruction trace bit */ + if ((config->mode & ETMv4_MODE_CYCACC) && + (drvdata->trccci == true)) + config->cfg |= BIT(4); + else + config->cfg &= ~BIT(4); + + /* bit[6], Context ID tracing bit */ + if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size)) + config->cfg |= BIT(6); + else + config->cfg &= ~BIT(6); + + if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size)) + config->cfg |= BIT(7); + else + config->cfg &= ~BIT(7); + + /* bits[10:8], Conditional instruction tracing bit */ + mode = ETM_MODE_COND(config->mode); + if (drvdata->trccond == true) { + config->cfg &= ~(BIT(8) | BIT(9) | BIT(10)); + config->cfg |= mode << 8; + } + + /* bit[11], Global timestamp tracing bit */ + if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size)) + config->cfg |= BIT(11); + else + config->cfg &= ~BIT(11); + + /* bit[12], Return stack enable bit */ + if ((config->mode & ETM_MODE_RETURNSTACK) && + (drvdata->retstack == true)) + config->cfg |= BIT(12); + else + config->cfg &= ~BIT(12); + + /* bits[14:13], Q element enable field */ + mode = ETM_MODE_QELEM(config->mode); + /* start by clearing QE bits */ + config->cfg &= ~(BIT(13) | BIT(14)); + /* if supported, Q elements with instruction counts are enabled */ + if ((mode & BIT(0)) && (drvdata->q_support & BIT(0))) + config->cfg |= BIT(13); + /* + * if supported, Q elements with and without instruction + * counts are enabled + */ + if ((mode & BIT(1)) && (drvdata->q_support & BIT(1))) + config->cfg |= BIT(14); + + /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */ + if ((config->mode & ETM_MODE_ATB_TRIGGER) && + (drvdata->atbtrig == true)) + config->eventctrl1 |= BIT(11); + else + config->eventctrl1 &= ~BIT(11); + + /* bit[12], Low-power state behavior override bit */ + if ((config->mode & ETM_MODE_LPOVERRIDE) && + (drvdata->lpoverride == true)) + config->eventctrl1 |= BIT(12); + else + config->eventctrl1 &= ~BIT(12); + + /* bit[8], Instruction stall bit */ + if (config->mode & ETM_MODE_ISTALL_EN) + config->stall_ctrl |= BIT(8); + else + config->stall_ctrl &= ~BIT(8); + + /* bit[10], Prioritize instruction trace bit */ + if (config->mode & ETM_MODE_INSTPRIO) + config->stall_ctrl |= BIT(10); + else + config->stall_ctrl &= ~BIT(10); + + /* bit[13], Trace overflow prevention bit */ + if ((config->mode & ETM_MODE_NOOVERFLOW) && + (drvdata->nooverflow == true)) + config->stall_ctrl |= BIT(13); + else + config->stall_ctrl &= ~BIT(13); + + /* bit[9] Start/stop logic control bit */ + if (config->mode & ETM_MODE_VIEWINST_STARTSTOP) + config->vinst_ctrl |= BIT(9); + else + config->vinst_ctrl &= ~BIT(9); + + /* bit[10], Whether a trace unit must trace a Reset exception */ + if (config->mode & ETM_MODE_TRACE_RESET) + config->vinst_ctrl |= BIT(10); + else + config->vinst_ctrl &= ~BIT(10); + + /* bit[11], Whether a trace unit must trace a system error exception */ + if ((config->mode & ETM_MODE_TRACE_ERR) && + (drvdata->trc_error == true)) + config->vinst_ctrl |= BIT(11); + else + config->vinst_ctrl &= ~BIT(11); + + if (config->mode & (ETM_MODE_EXCL_KERN | 
ETM_MODE_EXCL_USER)) + etm4_config_trace_mode(config); + + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(mode); + +static ssize_t pe_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->pe_sel; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t pe_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + if (val > drvdata->nr_pe) { + spin_unlock(&drvdata->spinlock); + return -EINVAL; + } + + config->pe_sel = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(pe); + +static ssize_t event_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->eventctrl0; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + switch (drvdata->nr_event) { + case 0x0: + /* EVENT0, bits[7:0] */ + config->eventctrl0 = val & 0xFF; + break; + case 0x1: + /* EVENT1, bits[15:8] */ + config->eventctrl0 = val & 0xFFFF; + break; + case 0x2: + /* EVENT2, bits[23:16] */ + config->eventctrl0 = val & 0xFFFFFF; + break; + case 0x3: + /* EVENT3, bits[31:24] */ + config->eventctrl0 = val; + break; + default: + break; + } + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(event); + +static ssize_t event_instren_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = BMVAL(config->eventctrl1, 0, 3); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t event_instren_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + /* start by clearing all instruction event enable bits */ + config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3)); + switch (drvdata->nr_event) { + case 0x0: + /* generate Event element for event 1 */ + config->eventctrl1 |= val & BIT(1); + break; + case 0x1: + /* generate Event element for event 1 and 2 */ + config->eventctrl1 |= val & (BIT(0) | BIT(1)); + break; + case 0x2: + /* generate Event element for event 1, 2 and 3 */ + config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2)); + break; + case 0x3: + /* generate Event element for all 4 events */ + config->eventctrl1 |= val & 0xF; + break; + default: + break; + } + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(event_instren); + +static ssize_t 
event_ts_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->ts_ctrl; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t event_ts_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (!drvdata->ts_size) + return -EINVAL; + + config->ts_ctrl = val & ETMv4_EVENT_MASK; + return size; +} +static DEVICE_ATTR_RW(event_ts); + +static ssize_t syncfreq_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->syncfreq; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t syncfreq_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (drvdata->syncpr == true) + return -EINVAL; + + config->syncfreq = val & ETMv4_SYNC_MASK; + return size; +} +static DEVICE_ATTR_RW(syncfreq); + +static ssize_t cyc_threshold_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->ccctlr; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t cyc_threshold_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val < drvdata->ccitmin) + return -EINVAL; + + config->ccctlr = val & ETM_CYC_THRESHOLD_MASK; + return size; +} +static DEVICE_ATTR_RW(cyc_threshold); + +static ssize_t bb_ctrl_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->bb_ctrl; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t bb_ctrl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (drvdata->trcbb == false) + return -EINVAL; + if (!drvdata->nr_addr_cmp) + return -EINVAL; + /* + * Bit[7:0] selects which address range comparator is used for + * branch broadcast control. 
+ */ + if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp) + return -EINVAL; + + config->bb_ctrl = val; + return size; +} +static DEVICE_ATTR_RW(bb_ctrl); + +static ssize_t event_vinst_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->vinst_ctrl & ETMv4_EVENT_MASK; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t event_vinst_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + val &= ETMv4_EVENT_MASK; + config->vinst_ctrl &= ~ETMv4_EVENT_MASK; + config->vinst_ctrl |= val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(event_vinst); + +static ssize_t s_exlevel_vinst_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = BMVAL(config->vinst_ctrl, 16, 19); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t s_exlevel_vinst_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + /* clear all EXLEVEL_S bits (bit[18] is never implemented) */ + config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19)); + /* enable instruction tracing for corresponding exception level */ + val &= drvdata->s_ex_level; + config->vinst_ctrl |= (val << 16); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(s_exlevel_vinst); + +static ssize_t ns_exlevel_vinst_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + /* EXLEVEL_NS, bits[23:20] */ + val = BMVAL(config->vinst_ctrl, 20, 23); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t ns_exlevel_vinst_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + /* clear EXLEVEL_NS bits (bit[23] is never implemented */ + config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22)); + /* enable instruction tracing for corresponding exception level */ + val &= drvdata->ns_ex_level; + config->vinst_ctrl |= (val << 20); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(ns_exlevel_vinst); + +static ssize_t addr_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->addr_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t addr_idx_store(struct device *dev, + struct device_attribute *attr, + const char 
*buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->nr_addr_cmp * 2) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. + */ + spin_lock(&drvdata->spinlock); + config->addr_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_idx); + +static ssize_t addr_instdatatype_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t len; + u8 val, idx; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + val = BMVAL(config->addr_acc[idx], 0, 1); + len = scnprintf(buf, PAGE_SIZE, "%s\n", + val == ETM_INSTR_ADDR ? "instr" : + (val == ETM_DATA_LOAD_ADDR ? "data_load" : + (val == ETM_DATA_STORE_ADDR ? "data_store" : + "data_load_store"))); + spin_unlock(&drvdata->spinlock); + return len; +} + +static ssize_t addr_instdatatype_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + char str[20] = ""; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (strlen(buf) >= 20) + return -EINVAL; + if (sscanf(buf, "%s", str) != 1) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!strcmp(str, "instr")) + /* TYPE, bits[1:0] */ + config->addr_acc[idx] &= ~(BIT(0) | BIT(1)); + + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_instdatatype); + +static ssize_t addr_single_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + idx = config->addr_idx; + spin_lock(&drvdata->spinlock); + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + val = (unsigned long)config->addr_val[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t addr_single_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + config->addr_val[idx] = (u64)val; + config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_single); + +static ssize_t addr_range_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val1, val2; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (idx % 2 != 0) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + if (!((config->addr_type[idx] == 
ETM_ADDR_TYPE_NONE && + config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || + (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && + config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + val1 = (unsigned long)config->addr_val[idx]; + val2 = (unsigned long)config->addr_val[idx + 1]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); +} + +static ssize_t addr_range_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val1, val2; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) + return -EINVAL; + /* lower address comparator cannot have a higher address value */ + if (val1 > val2) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (idx % 2 != 0) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && + config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || + (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && + config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + config->addr_val[idx] = (u64)val1; + config->addr_type[idx] = ETM_ADDR_TYPE_RANGE; + config->addr_val[idx + 1] = (u64)val2; + config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; + /* + * Program include or exclude control bits for vinst or vdata + * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE + */ + if (config->mode & ETM_MODE_EXCLUDE) + etm4_set_mode_exclude(drvdata, true); + else + etm4_set_mode_exclude(drvdata, false); + + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_range); + +static ssize_t addr_start_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_START)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + val = (unsigned long)config->addr_val[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t addr_start_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!drvdata->nr_addr_cmp) { + spin_unlock(&drvdata->spinlock); + return -EINVAL; + } + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_START)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + config->addr_val[idx] = (u64)val; + config->addr_type[idx] = ETM_ADDR_TYPE_START; + config->vissctlr |= BIT(idx); + /* SSSTATUS, bit[9] - turn on start/stop logic */ + config->vinst_ctrl |= BIT(9); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_start); + +static ssize_t addr_stop_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = 
dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + val = (unsigned long)config->addr_val[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t addr_stop_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!drvdata->nr_addr_cmp) { + spin_unlock(&drvdata->spinlock); + return -EINVAL; + } + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + config->addr_val[idx] = (u64)val; + config->addr_type[idx] = ETM_ADDR_TYPE_STOP; + config->vissctlr |= BIT(idx + 16); + /* SSSTATUS, bit[9] - turn on start/stop logic */ + config->vinst_ctrl |= BIT(9); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_stop); + +static ssize_t addr_ctxtype_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t len; + u8 idx, val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + /* CONTEXTTYPE, bits[3:2] */ + val = BMVAL(config->addr_acc[idx], 2, 3); + len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" : + (val == ETM_CTX_CTXID ? "ctxid" : + (val == ETM_CTX_VMID ? 
"vmid" : "all"))); + spin_unlock(&drvdata->spinlock); + return len; +} + +static ssize_t addr_ctxtype_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + char str[10] = ""; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (strlen(buf) >= 10) + return -EINVAL; + if (sscanf(buf, "%s", str) != 1) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!strcmp(str, "none")) + /* start by clearing context type bits */ + config->addr_acc[idx] &= ~(BIT(2) | BIT(3)); + else if (!strcmp(str, "ctxid")) { + /* 0b01 The trace unit performs a Context ID */ + if (drvdata->numcidc) { + config->addr_acc[idx] |= BIT(2); + config->addr_acc[idx] &= ~BIT(3); + } + } else if (!strcmp(str, "vmid")) { + /* 0b10 The trace unit performs a VMID */ + if (drvdata->numvmidc) { + config->addr_acc[idx] &= ~BIT(2); + config->addr_acc[idx] |= BIT(3); + } + } else if (!strcmp(str, "all")) { + /* + * 0b11 The trace unit performs a Context ID + * comparison and a VMID + */ + if (drvdata->numcidc) + config->addr_acc[idx] |= BIT(2); + if (drvdata->numvmidc) + config->addr_acc[idx] |= BIT(3); + } + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_ctxtype); + +static ssize_t addr_context_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + /* context ID comparator bits[6:4] */ + val = BMVAL(config->addr_acc[idx], 4, 6); + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t addr_context_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1)) + return -EINVAL; + if (val >= (drvdata->numcidc >= drvdata->numvmidc ? + drvdata->numcidc : drvdata->numvmidc)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + /* clear context ID comparator bits[6:4] */ + config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6)); + config->addr_acc[idx] |= (val << 4); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_context); + +static ssize_t seq_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->seq_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t seq_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->nrseqstate - 1) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. 
+ */ + spin_lock(&drvdata->spinlock); + config->seq_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(seq_idx); + +static ssize_t seq_state_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->seq_state; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t seq_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->nrseqstate) + return -EINVAL; + + config->seq_state = val; + return size; +} +static DEVICE_ATTR_RW(seq_state); + +static ssize_t seq_event_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->seq_idx; + val = config->seq_ctrl[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t seq_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->seq_idx; + /* RST, bits[7:0] */ + config->seq_ctrl[idx] = val & 0xFF; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(seq_event); + +static ssize_t seq_reset_event_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->seq_rst; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t seq_reset_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (!(drvdata->nrseqstate)) + return -EINVAL; + + config->seq_rst = val & ETMv4_EVENT_MASK; + return size; +} +static DEVICE_ATTR_RW(seq_reset_event); + +static ssize_t cntr_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->cntr_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t cntr_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->nr_cntr) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. 
+ */ + spin_lock(&drvdata->spinlock); + config->cntr_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(cntr_idx); + +static ssize_t cntrldvr_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->cntr_idx; + val = config->cntrldvr[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t cntrldvr_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val > ETM_CNTR_MAX_VAL) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->cntr_idx; + config->cntrldvr[idx] = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(cntrldvr); + +static ssize_t cntr_val_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->cntr_idx; + val = config->cntr_val[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t cntr_val_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val > ETM_CNTR_MAX_VAL) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->cntr_idx; + config->cntr_val[idx] = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(cntr_val); + +static ssize_t cntr_ctrl_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->cntr_idx; + val = config->cntr_ctrl[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t cntr_ctrl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->cntr_idx; + config->cntr_ctrl[idx] = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(cntr_ctrl); + +static ssize_t res_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->res_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t res_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct 
etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + /* Resource selector pair 0 is always implemented and reserved */ + if ((val == 0) || (val >= drvdata->nr_resource)) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. + */ + spin_lock(&drvdata->spinlock); + config->res_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(res_idx); + +static ssize_t res_ctrl_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->res_idx; + val = config->res_ctrl[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t res_ctrl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->res_idx; + /* For odd idx pair inversal bit is RES0 */ + if (idx % 2 != 0) + /* PAIRINV, bit[21] */ + val &= ~BIT(21); + config->res_ctrl[idx] = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(res_ctrl); + +static ssize_t ctxid_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->ctxid_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t ctxid_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->numcidc) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. + */ + spin_lock(&drvdata->spinlock); + config->ctxid_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(ctxid_idx); + +static ssize_t ctxid_pid_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->ctxid_idx; + val = (unsigned long)config->ctxid_vpid[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t ctxid_pid_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long vpid, pid; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + /* + * only implemented when ctxid tracing is enabled, i.e. 
at least one + * ctxid comparator is implemented and ctxid is greater than 0 bits + * in length + */ + if (!drvdata->ctxid_size || !drvdata->numcidc) + return -EINVAL; + if (kstrtoul(buf, 16, &vpid)) + return -EINVAL; + + pid = coresight_vpid_to_pid(vpid); + + spin_lock(&drvdata->spinlock); + idx = config->ctxid_idx; + config->ctxid_pid[idx] = (u64)pid; + config->ctxid_vpid[idx] = (u64)vpid; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(ctxid_pid); + +static ssize_t ctxid_masks_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val1, val2; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + val1 = config->ctxid_mask0; + val2 = config->ctxid_mask1; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); +} + +static ssize_t ctxid_masks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 i, j, maskbyte; + unsigned long val1, val2, mask; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + /* + * only implemented when ctxid tracing is enabled, i.e. at least one + * ctxid comparator is implemented and ctxid is greater than 0 bits + * in length + */ + if (!drvdata->ctxid_size || !drvdata->numcidc) + return -EINVAL; + if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + /* + * each byte[0..3] controls mask value applied to ctxid + * comparator[0..3] + */ + switch (drvdata->numcidc) { + case 0x1: + /* COMP0, bits[7:0] */ + config->ctxid_mask0 = val1 & 0xFF; + break; + case 0x2: + /* COMP1, bits[15:8] */ + config->ctxid_mask0 = val1 & 0xFFFF; + break; + case 0x3: + /* COMP2, bits[23:16] */ + config->ctxid_mask0 = val1 & 0xFFFFFF; + break; + case 0x4: + /* COMP3, bits[31:24] */ + config->ctxid_mask0 = val1; + break; + case 0x5: + /* COMP4, bits[7:0] */ + config->ctxid_mask0 = val1; + config->ctxid_mask1 = val2 & 0xFF; + break; + case 0x6: + /* COMP5, bits[15:8] */ + config->ctxid_mask0 = val1; + config->ctxid_mask1 = val2 & 0xFFFF; + break; + case 0x7: + /* COMP6, bits[23:16] */ + config->ctxid_mask0 = val1; + config->ctxid_mask1 = val2 & 0xFFFFFF; + break; + case 0x8: + /* COMP7, bits[31:24] */ + config->ctxid_mask0 = val1; + config->ctxid_mask1 = val2; + break; + default: + break; + } + /* + * If software sets a mask bit to 1, it must program relevant byte + * of ctxid comparator value 0x0, otherwise behavior is unpredictable. + * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24] + * of ctxid comparator0 value (corresponding to byte 0) register. 
+ */ + mask = config->ctxid_mask0; + for (i = 0; i < drvdata->numcidc; i++) { + /* mask value of corresponding ctxid comparator */ + maskbyte = mask & ETMv4_EVENT_MASK; + /* + * each bit corresponds to a byte of respective ctxid comparator + * value register + */ + for (j = 0; j < 8; j++) { + if (maskbyte & 1) + config->ctxid_pid[i] &= ~(0xFF << (j * 8)); + maskbyte >>= 1; + } + /* Select the next ctxid comparator mask value */ + if (i == 3) + /* ctxid comparators[4-7] */ + mask = config->ctxid_mask1; + else + mask >>= 0x8; + } + + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(ctxid_masks); + +static ssize_t vmid_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->vmid_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t vmid_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->numvmidc) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. + */ + spin_lock(&drvdata->spinlock); + config->vmid_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(vmid_idx); + +static ssize_t vmid_val_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = (unsigned long)config->vmid_val[config->vmid_idx]; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t vmid_val_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + /* + * only implemented when vmid tracing is enabled, i.e. at least one + * vmid comparator is implemented and at least 8 bit vmid size + */ + if (!drvdata->vmid_size || !drvdata->numvmidc) + return -EINVAL; + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + config->vmid_val[config->vmid_idx] = (u64)val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(vmid_val); + +static ssize_t vmid_masks_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val1, val2; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + val1 = config->vmid_mask0; + val2 = config->vmid_mask1; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); +} + +static ssize_t vmid_masks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 i, j, maskbyte; + unsigned long val1, val2, mask; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + /* + * only implemented when vmid tracing is enabled, i.e. 
at least one + * vmid comparator is implemented and at least 8 bit vmid size + */ + if (!drvdata->vmid_size || !drvdata->numvmidc) + return -EINVAL; + if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + + /* + * each byte[0..3] controls mask value applied to vmid + * comparator[0..3] + */ + switch (drvdata->numvmidc) { + case 0x1: + /* COMP0, bits[7:0] */ + config->vmid_mask0 = val1 & 0xFF; + break; + case 0x2: + /* COMP1, bits[15:8] */ + config->vmid_mask0 = val1 & 0xFFFF; + break; + case 0x3: + /* COMP2, bits[23:16] */ + config->vmid_mask0 = val1 & 0xFFFFFF; + break; + case 0x4: + /* COMP3, bits[31:24] */ + config->vmid_mask0 = val1; + break; + case 0x5: + /* COMP4, bits[7:0] */ + config->vmid_mask0 = val1; + config->vmid_mask1 = val2 & 0xFF; + break; + case 0x6: + /* COMP5, bits[15:8] */ + config->vmid_mask0 = val1; + config->vmid_mask1 = val2 & 0xFFFF; + break; + case 0x7: + /* COMP6, bits[23:16] */ + config->vmid_mask0 = val1; + config->vmid_mask1 = val2 & 0xFFFFFF; + break; + case 0x8: + /* COMP7, bits[31:24] */ + config->vmid_mask0 = val1; + config->vmid_mask1 = val2; + break; + default: + break; + } + + /* + * If software sets a mask bit to 1, it must program relevant byte + * of vmid comparator value 0x0, otherwise behavior is unpredictable. + * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24] + * of vmid comparator0 value (corresponding to byte 0) register. + */ + mask = config->vmid_mask0; + for (i = 0; i < drvdata->numvmidc; i++) { + /* mask value of corresponding vmid comparator */ + maskbyte = mask & ETMv4_EVENT_MASK; + /* + * each bit corresponds to a byte of respective vmid comparator + * value register + */ + for (j = 0; j < 8; j++) { + if (maskbyte & 1) + config->vmid_val[i] &= ~(0xFF << (j * 8)); + maskbyte >>= 1; + } + /* Select the next vmid comparator mask value */ + if (i == 3) + /* vmid comparators[4-7] */ + mask = config->vmid_mask1; + else + mask >>= 0x8; + } + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(vmid_masks); + +static ssize_t cpu_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->cpu; + return scnprintf(buf, PAGE_SIZE, "%d\n", val); + +} +static DEVICE_ATTR_RO(cpu); + +static struct attribute *coresight_etmv4_attrs[] = { + &dev_attr_nr_pe_cmp.attr, + &dev_attr_nr_addr_cmp.attr, + &dev_attr_nr_cntr.attr, + &dev_attr_nr_ext_inp.attr, + &dev_attr_numcidc.attr, + &dev_attr_numvmidc.attr, + &dev_attr_nrseqstate.attr, + &dev_attr_nr_resource.attr, + &dev_attr_nr_ss_cmp.attr, + &dev_attr_reset.attr, + &dev_attr_mode.attr, + &dev_attr_pe.attr, + &dev_attr_event.attr, + &dev_attr_event_instren.attr, + &dev_attr_event_ts.attr, + &dev_attr_syncfreq.attr, + &dev_attr_cyc_threshold.attr, + &dev_attr_bb_ctrl.attr, + &dev_attr_event_vinst.attr, + &dev_attr_s_exlevel_vinst.attr, + &dev_attr_ns_exlevel_vinst.attr, + &dev_attr_addr_idx.attr, + &dev_attr_addr_instdatatype.attr, + &dev_attr_addr_single.attr, + &dev_attr_addr_range.attr, + &dev_attr_addr_start.attr, + &dev_attr_addr_stop.attr, + &dev_attr_addr_ctxtype.attr, + &dev_attr_addr_context.attr, + &dev_attr_seq_idx.attr, + &dev_attr_seq_state.attr, + &dev_attr_seq_event.attr, + &dev_attr_seq_reset_event.attr, + &dev_attr_cntr_idx.attr, + &dev_attr_cntrldvr.attr, + &dev_attr_cntr_val.attr, + &dev_attr_cntr_ctrl.attr, + &dev_attr_res_idx.attr, + &dev_attr_res_ctrl.attr, + &dev_attr_ctxid_idx.attr, + 
&dev_attr_ctxid_pid.attr, + &dev_attr_ctxid_masks.attr, + &dev_attr_vmid_idx.attr, + &dev_attr_vmid_val.attr, + &dev_attr_vmid_masks.attr, + &dev_attr_cpu.attr, + NULL, +}; + +#define coresight_etm4x_simple_func(name, offset) \ + coresight_simple_func(struct etmv4_drvdata, name, offset) + +coresight_etm4x_simple_func(trcoslsr, TRCOSLSR); +coresight_etm4x_simple_func(trcpdcr, TRCPDCR); +coresight_etm4x_simple_func(trcpdsr, TRCPDSR); +coresight_etm4x_simple_func(trclsr, TRCLSR); +coresight_etm4x_simple_func(trcconfig, TRCCONFIGR); +coresight_etm4x_simple_func(trctraceid, TRCTRACEIDR); +coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS); +coresight_etm4x_simple_func(trcdevid, TRCDEVID); +coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE); +coresight_etm4x_simple_func(trcpidr0, TRCPIDR0); +coresight_etm4x_simple_func(trcpidr1, TRCPIDR1); +coresight_etm4x_simple_func(trcpidr2, TRCPIDR2); +coresight_etm4x_simple_func(trcpidr3, TRCPIDR3); + +static struct attribute *coresight_etmv4_mgmt_attrs[] = { + &dev_attr_trcoslsr.attr, + &dev_attr_trcpdcr.attr, + &dev_attr_trcpdsr.attr, + &dev_attr_trclsr.attr, + &dev_attr_trcconfig.attr, + &dev_attr_trctraceid.attr, + &dev_attr_trcauthstatus.attr, + &dev_attr_trcdevid.attr, + &dev_attr_trcdevtype.attr, + &dev_attr_trcpidr0.attr, + &dev_attr_trcpidr1.attr, + &dev_attr_trcpidr2.attr, + &dev_attr_trcpidr3.attr, + NULL, +}; + +coresight_etm4x_simple_func(trcidr0, TRCIDR0); +coresight_etm4x_simple_func(trcidr1, TRCIDR1); +coresight_etm4x_simple_func(trcidr2, TRCIDR2); +coresight_etm4x_simple_func(trcidr3, TRCIDR3); +coresight_etm4x_simple_func(trcidr4, TRCIDR4); +coresight_etm4x_simple_func(trcidr5, TRCIDR5); +/* trcidr[6,7] are reserved */ +coresight_etm4x_simple_func(trcidr8, TRCIDR8); +coresight_etm4x_simple_func(trcidr9, TRCIDR9); +coresight_etm4x_simple_func(trcidr10, TRCIDR10); +coresight_etm4x_simple_func(trcidr11, TRCIDR11); +coresight_etm4x_simple_func(trcidr12, TRCIDR12); +coresight_etm4x_simple_func(trcidr13, TRCIDR13); + +static struct attribute *coresight_etmv4_trcidr_attrs[] = { + &dev_attr_trcidr0.attr, + &dev_attr_trcidr1.attr, + &dev_attr_trcidr2.attr, + &dev_attr_trcidr3.attr, + &dev_attr_trcidr4.attr, + &dev_attr_trcidr5.attr, + /* trcidr[6,7] are reserved */ + &dev_attr_trcidr8.attr, + &dev_attr_trcidr9.attr, + &dev_attr_trcidr10.attr, + &dev_attr_trcidr11.attr, + &dev_attr_trcidr12.attr, + &dev_attr_trcidr13.attr, + NULL, +}; + +static const struct attribute_group coresight_etmv4_group = { + .attrs = coresight_etmv4_attrs, +}; + +static const struct attribute_group coresight_etmv4_mgmt_group = { + .attrs = coresight_etmv4_mgmt_attrs, + .name = "mgmt", +}; + +static const struct attribute_group coresight_etmv4_trcidr_group = { + .attrs = coresight_etmv4_trcidr_attrs, + .name = "trcidr", +}; + +const struct attribute_group *coresight_etmv4_groups[] = { + &coresight_etmv4_group, + &coresight_etmv4_mgmt_group, + &coresight_etmv4_trcidr_group, + NULL, +}; diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 1c59bd368..462f0dc15 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -26,15 +26,19 @@ #include <linux/clk.h> #include <linux/cpu.h> #include <linux/coresight.h> +#include <linux/coresight-pmu.h> #include <linux/pm_wakeup.h> #include <linux/amba/bus.h> #include <linux/seq_file.h> #include <linux/uaccess.h> +#include <linux/perf_event.h> #include <linux/pm_runtime.h> #include <linux/perf_event.h> #include 
<asm/sections.h> +#include <asm/local.h> #include "coresight-etm4x.h" +#include "coresight-etm-perf.h" static int boot_enable; module_param_named(boot_enable, boot_enable, int, S_IRUGO); @@ -42,13 +46,13 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO); /* The number of ETMv4 currently registered */ static int etm4_count; static struct etmv4_drvdata *etmdrvdata[NR_CPUS]; +static void etm4_set_default(struct etmv4_config *config); -static void etm4_os_unlock(void *info) +static void etm4_os_unlock(struct etmv4_drvdata *drvdata) { - struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info; - /* Writing any value to ETMOSLAR unlocks the trace registers */ writel_relaxed(0x0, drvdata->base + TRCOSLAR); + drvdata->os_unlock = true; isb(); } @@ -76,7 +80,7 @@ static int etm4_trace_id(struct coresight_device *csdev) unsigned long flags; int trace_id = -1; - if (!drvdata->enable) + if (!local_read(&drvdata->mode)) return drvdata->trcid; spin_lock_irqsave(&drvdata->spinlock, flags); @@ -95,6 +99,7 @@ static void etm4_enable_hw(void *info) { int i; struct etmv4_drvdata *drvdata = info; + struct etmv4_config *config = &drvdata->config; CS_UNLOCK(drvdata->base); @@ -109,69 +114,69 @@ static void etm4_enable_hw(void *info) "timeout observed when probing at offset %#x\n", TRCSTATR); - writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR); - writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR); + writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR); + writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR); /* nothing specific implemented */ writel_relaxed(0x0, drvdata->base + TRCAUXCTLR); - writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R); - writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R); - writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR); - writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR); - writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR); - writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR); - writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR); + writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R); + writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R); + writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR); + writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR); + writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR); + writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR); + writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR); writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR); - writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR); - writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR); - writel_relaxed(drvdata->vissctlr, + writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR); + writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR); + writel_relaxed(config->vissctlr, drvdata->base + TRCVISSCTLR); - writel_relaxed(drvdata->vipcssctlr, + writel_relaxed(config->vipcssctlr, drvdata->base + TRCVIPCSSCTLR); for (i = 0; i < drvdata->nrseqstate - 1; i++) - writel_relaxed(drvdata->seq_ctrl[i], + writel_relaxed(config->seq_ctrl[i], drvdata->base + TRCSEQEVRn(i)); - writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR); - writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR); - writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR); + writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR); + writel_relaxed(config->seq_state, drvdata->base + 
TRCSEQSTR); + writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR); for (i = 0; i < drvdata->nr_cntr; i++) { - writel_relaxed(drvdata->cntrldvr[i], + writel_relaxed(config->cntrldvr[i], drvdata->base + TRCCNTRLDVRn(i)); - writel_relaxed(drvdata->cntr_ctrl[i], + writel_relaxed(config->cntr_ctrl[i], drvdata->base + TRCCNTCTLRn(i)); - writel_relaxed(drvdata->cntr_val[i], + writel_relaxed(config->cntr_val[i], drvdata->base + TRCCNTVRn(i)); } /* Resource selector pair 0 is always implemented and reserved */ - for (i = 2; i < drvdata->nr_resource * 2; i++) - writel_relaxed(drvdata->res_ctrl[i], + for (i = 0; i < drvdata->nr_resource * 2; i++) + writel_relaxed(config->res_ctrl[i], drvdata->base + TRCRSCTLRn(i)); for (i = 0; i < drvdata->nr_ss_cmp; i++) { - writel_relaxed(drvdata->ss_ctrl[i], + writel_relaxed(config->ss_ctrl[i], drvdata->base + TRCSSCCRn(i)); - writel_relaxed(drvdata->ss_status[i], + writel_relaxed(config->ss_status[i], drvdata->base + TRCSSCSRn(i)); - writel_relaxed(drvdata->ss_pe_cmp[i], + writel_relaxed(config->ss_pe_cmp[i], drvdata->base + TRCSSPCICRn(i)); } for (i = 0; i < drvdata->nr_addr_cmp; i++) { - writeq_relaxed(drvdata->addr_val[i], + writeq_relaxed(config->addr_val[i], drvdata->base + TRCACVRn(i)); - writeq_relaxed(drvdata->addr_acc[i], + writeq_relaxed(config->addr_acc[i], drvdata->base + TRCACATRn(i)); } for (i = 0; i < drvdata->numcidc; i++) - writeq_relaxed(drvdata->ctxid_pid[i], + writeq_relaxed(config->ctxid_pid[i], drvdata->base + TRCCIDCVRn(i)); - writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0); - writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1); + writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0); + writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1); for (i = 0; i < drvdata->numvmidc; i++) - writeq_relaxed(drvdata->vmid_val[i], + writeq_relaxed(config->vmid_val[i], drvdata->base + TRCVMIDCVRn(i)); - writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0); - writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1); + writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0); + writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1); /* Enable the trace unit */ writel_relaxed(1, drvdata->base + TRCPRGCTLR); @@ -187,2120 +192,210 @@ static void etm4_enable_hw(void *info) dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); } -static int etm4_enable(struct coresight_device *csdev, - struct perf_event_attr *attr, u32 mode) +static int etm4_parse_event_config(struct etmv4_drvdata *drvdata, + struct perf_event_attr *attr) { - struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - int ret; - - spin_lock(&drvdata->spinlock); - - /* - * Executing etm4_enable_hw on the cpu whose ETM is being enabled - * ensures that register writes occur when cpu is powered. 
- */ - ret = smp_call_function_single(drvdata->cpu, - etm4_enable_hw, drvdata, 1); - if (ret) - goto err; - drvdata->enable = true; - drvdata->sticky_enable = true; - - spin_unlock(&drvdata->spinlock); + struct etmv4_config *config = &drvdata->config; - dev_info(drvdata->dev, "ETM tracing enabled\n"); - return 0; -err: - spin_unlock(&drvdata->spinlock); - return ret; -} + if (!attr) + return -EINVAL; -static void etm4_disable_hw(void *info) -{ - u32 control; - struct etmv4_drvdata *drvdata = info; + /* Clear configuration from previous run */ + memset(config, 0, sizeof(struct etmv4_config)); - CS_UNLOCK(drvdata->base); + if (attr->exclude_kernel) + config->mode = ETM_MODE_EXCL_KERN; - control = readl_relaxed(drvdata->base + TRCPRGCTLR); + if (attr->exclude_user) + config->mode = ETM_MODE_EXCL_USER; - /* EN, bit[0] Trace unit enable bit */ - control &= ~0x1; - - /* make sure everything completes before disabling */ - mb(); - isb(); - writel_relaxed(control, drvdata->base + TRCPRGCTLR); - - CS_LOCK(drvdata->base); - - dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); -} - -static void etm4_disable(struct coresight_device *csdev) -{ - struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + /* Always start from the default config */ + etm4_set_default(config); /* - * Taking hotplug lock here protects from clocks getting disabled - * with tracing being left on (crash scenario) if user disable occurs - * after cpu online mask indicates the cpu is offline but before the - * DYING hotplug callback is serviced by the ETM driver. + * By default the tracers are configured to trace the whole address + * range. Narrow the field only if requested by user space. */ - get_online_cpus(); - spin_lock(&drvdata->spinlock); - - /* - * Executing etm4_disable_hw on the cpu whose ETM is being disabled - * ensures that register writes occur when cpu is powered. - */ - smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1); - drvdata->enable = false; - - spin_unlock(&drvdata->spinlock); - put_online_cpus(); - - dev_info(drvdata->dev, "ETM tracing disabled\n"); -} - -static const struct coresight_ops_source etm4_source_ops = { - .cpu_id = etm4_cpu_id, - .trace_id = etm4_trace_id, - .enable = etm4_enable, - .disable = etm4_disable, -}; - -static const struct coresight_ops etm4_cs_ops = { - .source_ops = &etm4_source_ops, -}; + if (config->mode) + etm4_config_trace_mode(config); -static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude) -{ - u8 idx = drvdata->addr_idx; + /* Go from generic option to ETMv4 specifics */ + if (attr->config & BIT(ETM_OPT_CYCACC)) + config->cfg |= ETMv4_MODE_CYCACC; + if (attr->config & BIT(ETM_OPT_TS)) + config->cfg |= ETMv4_MODE_TIMESTAMP; - /* - * TRCACATRn.TYPE bit[1:0]: type of comparison - * the trace unit performs - */ - if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) { - if (idx % 2 != 0) - return -EINVAL; - - /* - * We are performing instruction address comparison. Set the - * relevant bit of ViewInst Include/Exclude Control register - * for corresponding address comparator pair. 
- */ - if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE || - drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE) - return -EINVAL; - - if (exclude == true) { - /* - * Set exclude bit and unset the include bit - * corresponding to comparator pair - */ - drvdata->viiectlr |= BIT(idx / 2 + 16); - drvdata->viiectlr &= ~BIT(idx / 2); - } else { - /* - * Set include bit and unset exclude bit - * corresponding to comparator pair - */ - drvdata->viiectlr |= BIT(idx / 2); - drvdata->viiectlr &= ~BIT(idx / 2 + 16); - } - } return 0; } -static ssize_t nr_pe_cmp_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static int etm4_enable_perf(struct coresight_device *csdev, + struct perf_event_attr *attr) { - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_pe_cmp; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_pe_cmp); - -static ssize_t nr_addr_cmp_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_addr_cmp; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_addr_cmp); - -static ssize_t nr_cntr_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_cntr; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_cntr); - -static ssize_t nr_ext_inp_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_ext_inp; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_ext_inp); - -static ssize_t numcidc_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->numcidc; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(numcidc); - -static ssize_t numvmidc_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->numvmidc; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(numvmidc); - -static ssize_t nrseqstate_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nrseqstate; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nrseqstate); - -static ssize_t nr_resource_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_resource; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_resource); - -static ssize_t nr_ss_cmp_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_ss_cmp; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_ss_cmp); - -static ssize_t reset_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int i; - unsigned long val; - struct etmv4_drvdata 
*drvdata = dev_get_drvdata(dev->parent); + struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - if (kstrtoul(buf, 16, &val)) + if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) return -EINVAL; - spin_lock(&drvdata->spinlock); - if (val) - drvdata->mode = 0x0; - - /* Disable data tracing: do not trace load and store data transfers */ - drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE); - drvdata->cfg &= ~(BIT(1) | BIT(2)); - - /* Disable data value and data address tracing */ - drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR | - ETM_MODE_DATA_TRACE_VAL); - drvdata->cfg &= ~(BIT(16) | BIT(17)); - - /* Disable all events tracing */ - drvdata->eventctrl0 = 0x0; - drvdata->eventctrl1 = 0x0; - - /* Disable timestamp event */ - drvdata->ts_ctrl = 0x0; - - /* Disable stalling */ - drvdata->stall_ctrl = 0x0; - - /* Reset trace synchronization period to 2^8 = 256 bytes*/ - if (drvdata->syncpr == false) - drvdata->syncfreq = 0x8; - - /* - * Enable ViewInst to trace everything with start-stop logic in - * started state. ARM recommends start-stop logic is set before - * each trace run. - */ - drvdata->vinst_ctrl |= BIT(0); - if (drvdata->nr_addr_cmp == true) { - drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP; - /* SSSTATUS, bit[9] */ - drvdata->vinst_ctrl |= BIT(9); - } - - /* No address range filtering for ViewInst */ - drvdata->viiectlr = 0x0; - - /* No start-stop filtering for ViewInst */ - drvdata->vissctlr = 0x0; - - /* Disable seq events */ - for (i = 0; i < drvdata->nrseqstate-1; i++) - drvdata->seq_ctrl[i] = 0x0; - drvdata->seq_rst = 0x0; - drvdata->seq_state = 0x0; - - /* Disable external input events */ - drvdata->ext_inp = 0x0; - - drvdata->cntr_idx = 0x0; - for (i = 0; i < drvdata->nr_cntr; i++) { - drvdata->cntrldvr[i] = 0x0; - drvdata->cntr_ctrl[i] = 0x0; - drvdata->cntr_val[i] = 0x0; - } - - /* Resource selector pair 0 is always implemented and reserved */ - drvdata->res_idx = 0x2; - for (i = 2; i < drvdata->nr_resource * 2; i++) - drvdata->res_ctrl[i] = 0x0; - - for (i = 0; i < drvdata->nr_ss_cmp; i++) { - drvdata->ss_ctrl[i] = 0x0; - drvdata->ss_pe_cmp[i] = 0x0; - } - - drvdata->addr_idx = 0x0; - for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) { - drvdata->addr_val[i] = 0x0; - drvdata->addr_acc[i] = 0x0; - drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE; - } - - drvdata->ctxid_idx = 0x0; - for (i = 0; i < drvdata->numcidc; i++) { - drvdata->ctxid_pid[i] = 0x0; - drvdata->ctxid_vpid[i] = 0x0; - } - - drvdata->ctxid_mask0 = 0x0; - drvdata->ctxid_mask1 = 0x0; - - drvdata->vmid_idx = 0x0; - for (i = 0; i < drvdata->numvmidc; i++) - drvdata->vmid_val[i] = 0x0; - drvdata->vmid_mask0 = 0x0; - drvdata->vmid_mask1 = 0x0; - - drvdata->trcid = drvdata->cpu + 1; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_WO(reset); + /* Configure the tracer based on the session's specifics */ + etm4_parse_event_config(drvdata, attr); + /* And enable it */ + etm4_enable_hw(drvdata); -static ssize_t mode_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->mode; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); + return 0; } -static ssize_t mode_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) +static int etm4_enable_sysfs(struct coresight_device *csdev) { - unsigned long val, mode; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; + struct 
etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + int ret; spin_lock(&drvdata->spinlock); - drvdata->mode = val & ETMv4_MODE_ALL; - - if (drvdata->mode & ETM_MODE_EXCLUDE) - etm4_set_mode_exclude(drvdata, true); - else - etm4_set_mode_exclude(drvdata, false); - - if (drvdata->instrp0 == true) { - /* start by clearing instruction P0 field */ - drvdata->cfg &= ~(BIT(1) | BIT(2)); - if (drvdata->mode & ETM_MODE_LOAD) - /* 0b01 Trace load instructions as P0 instructions */ - drvdata->cfg |= BIT(1); - if (drvdata->mode & ETM_MODE_STORE) - /* 0b10 Trace store instructions as P0 instructions */ - drvdata->cfg |= BIT(2); - if (drvdata->mode & ETM_MODE_LOAD_STORE) - /* - * 0b11 Trace load and store instructions - * as P0 instructions - */ - drvdata->cfg |= BIT(1) | BIT(2); - } - - /* bit[3], Branch broadcast mode */ - if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true)) - drvdata->cfg |= BIT(3); - else - drvdata->cfg &= ~BIT(3); - - /* bit[4], Cycle counting instruction trace bit */ - if ((drvdata->mode & ETMv4_MODE_CYCACC) && - (drvdata->trccci == true)) - drvdata->cfg |= BIT(4); - else - drvdata->cfg &= ~BIT(4); - - /* bit[6], Context ID tracing bit */ - if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size)) - drvdata->cfg |= BIT(6); - else - drvdata->cfg &= ~BIT(6); - - if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size)) - drvdata->cfg |= BIT(7); - else - drvdata->cfg &= ~BIT(7); - - /* bits[10:8], Conditional instruction tracing bit */ - mode = ETM_MODE_COND(drvdata->mode); - if (drvdata->trccond == true) { - drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10)); - drvdata->cfg |= mode << 8; - } - - /* bit[11], Global timestamp tracing bit */ - if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size)) - drvdata->cfg |= BIT(11); - else - drvdata->cfg &= ~BIT(11); - /* bit[12], Return stack enable bit */ - if ((drvdata->mode & ETM_MODE_RETURNSTACK) && - (drvdata->retstack == true)) - drvdata->cfg |= BIT(12); - else - drvdata->cfg &= ~BIT(12); - - /* bits[14:13], Q element enable field */ - mode = ETM_MODE_QELEM(drvdata->mode); - /* start by clearing QE bits */ - drvdata->cfg &= ~(BIT(13) | BIT(14)); - /* if supported, Q elements with instruction counts are enabled */ - if ((mode & BIT(0)) && (drvdata->q_support & BIT(0))) - drvdata->cfg |= BIT(13); /* - * if supported, Q elements with and without instruction - * counts are enabled + * Executing etm4_enable_hw on the cpu whose ETM is being enabled + * ensures that register writes occur when cpu is powered. 
*/ - if ((mode & BIT(1)) && (drvdata->q_support & BIT(1))) - drvdata->cfg |= BIT(14); - - /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */ - if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) && - (drvdata->atbtrig == true)) - drvdata->eventctrl1 |= BIT(11); - else - drvdata->eventctrl1 &= ~BIT(11); - - /* bit[12], Low-power state behavior override bit */ - if ((drvdata->mode & ETM_MODE_LPOVERRIDE) && - (drvdata->lpoverride == true)) - drvdata->eventctrl1 |= BIT(12); - else - drvdata->eventctrl1 &= ~BIT(12); - - /* bit[8], Instruction stall bit */ - if (drvdata->mode & ETM_MODE_ISTALL_EN) - drvdata->stall_ctrl |= BIT(8); - else - drvdata->stall_ctrl &= ~BIT(8); - - /* bit[10], Prioritize instruction trace bit */ - if (drvdata->mode & ETM_MODE_INSTPRIO) - drvdata->stall_ctrl |= BIT(10); - else - drvdata->stall_ctrl &= ~BIT(10); - - /* bit[13], Trace overflow prevention bit */ - if ((drvdata->mode & ETM_MODE_NOOVERFLOW) && - (drvdata->nooverflow == true)) - drvdata->stall_ctrl |= BIT(13); - else - drvdata->stall_ctrl &= ~BIT(13); - - /* bit[9] Start/stop logic control bit */ - if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP) - drvdata->vinst_ctrl |= BIT(9); - else - drvdata->vinst_ctrl &= ~BIT(9); - - /* bit[10], Whether a trace unit must trace a Reset exception */ - if (drvdata->mode & ETM_MODE_TRACE_RESET) - drvdata->vinst_ctrl |= BIT(10); - else - drvdata->vinst_ctrl &= ~BIT(10); - - /* bit[11], Whether a trace unit must trace a system error exception */ - if ((drvdata->mode & ETM_MODE_TRACE_ERR) && - (drvdata->trc_error == true)) - drvdata->vinst_ctrl |= BIT(11); - else - drvdata->vinst_ctrl &= ~BIT(11); - - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(mode); - -static ssize_t pe_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->pe_sel; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t pe_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - if (val > drvdata->nr_pe) { - spin_unlock(&drvdata->spinlock); - return -EINVAL; - } + ret = smp_call_function_single(drvdata->cpu, + etm4_enable_hw, drvdata, 1); + if (ret) + goto err; - drvdata->pe_sel = val; + drvdata->sticky_enable = true; spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(pe); - -static ssize_t event_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->eventctrl0; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static ssize_t event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; + dev_info(drvdata->dev, "ETM tracing enabled\n"); + return 0; - spin_lock(&drvdata->spinlock); - switch (drvdata->nr_event) { - case 0x0: - /* EVENT0, bits[7:0] */ - drvdata->eventctrl0 = val & 0xFF; - break; - case 0x1: - /* EVENT1, bits[15:8] */ - drvdata->eventctrl0 = val & 0xFFFF; - break; - case 0x2: - /* EVENT2, bits[23:16] */ - drvdata->eventctrl0 = val & 0xFFFFFF; - break; - case 0x3: - /* EVENT3, bits[31:24] */ - 
drvdata->eventctrl0 = val; - break; - default: - break; - } +err: spin_unlock(&drvdata->spinlock); - return size; + return ret; } -static DEVICE_ATTR_RW(event); -static ssize_t event_instren_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static int etm4_enable(struct coresight_device *csdev, + struct perf_event_attr *attr, u32 mode) { - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = BMVAL(drvdata->eventctrl1, 0, 3); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} + int ret; + u32 val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); -static ssize_t event_instren_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode); - if (kstrtoul(buf, 16, &val)) - return -EINVAL; + /* Someone is already using the tracer */ + if (val) + return -EBUSY; - spin_lock(&drvdata->spinlock); - /* start by clearing all instruction event enable bits */ - drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3)); - switch (drvdata->nr_event) { - case 0x0: - /* generate Event element for event 1 */ - drvdata->eventctrl1 |= val & BIT(1); + switch (mode) { + case CS_MODE_SYSFS: + ret = etm4_enable_sysfs(csdev); break; - case 0x1: - /* generate Event element for event 1 and 2 */ - drvdata->eventctrl1 |= val & (BIT(0) | BIT(1)); - break; - case 0x2: - /* generate Event element for event 1, 2 and 3 */ - drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2)); - break; - case 0x3: - /* generate Event element for all 4 events */ - drvdata->eventctrl1 |= val & 0xF; + case CS_MODE_PERF: + ret = etm4_enable_perf(csdev, attr); break; default: - break; - } - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(event_instren); - -static ssize_t event_ts_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->ts_ctrl; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t event_ts_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (!drvdata->ts_size) - return -EINVAL; - - drvdata->ts_ctrl = val & ETMv4_EVENT_MASK; - return size; -} -static DEVICE_ATTR_RW(event_ts); - -static ssize_t syncfreq_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->syncfreq; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t syncfreq_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (drvdata->syncpr == true) - return -EINVAL; - - drvdata->syncfreq = val & ETMv4_SYNC_MASK; - return size; -} -static DEVICE_ATTR_RW(syncfreq); - -static ssize_t cyc_threshold_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->ccctlr; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t 
cyc_threshold_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val < drvdata->ccitmin) - return -EINVAL; - - drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK; - return size; -} -static DEVICE_ATTR_RW(cyc_threshold); - -static ssize_t bb_ctrl_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->bb_ctrl; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t bb_ctrl_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (drvdata->trcbb == false) - return -EINVAL; - if (!drvdata->nr_addr_cmp) - return -EINVAL; - /* - * Bit[7:0] selects which address range comparator is used for - * branch broadcast control. - */ - if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp) - return -EINVAL; - - drvdata->bb_ctrl = val; - return size; -} -static DEVICE_ATTR_RW(bb_ctrl); - -static ssize_t event_vinst_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t event_vinst_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - val &= ETMv4_EVENT_MASK; - drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK; - drvdata->vinst_ctrl |= val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(event_vinst); - -static ssize_t s_exlevel_vinst_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = BMVAL(drvdata->vinst_ctrl, 16, 19); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t s_exlevel_vinst_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - /* clear all EXLEVEL_S bits (bit[18] is never implemented) */ - drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19)); - /* enable instruction tracing for corresponding exception level */ - val &= drvdata->s_ex_level; - drvdata->vinst_ctrl |= (val << 16); - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(s_exlevel_vinst); - -static ssize_t ns_exlevel_vinst_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - /* EXLEVEL_NS, bits[23:20] */ - val = BMVAL(drvdata->vinst_ctrl, 20, 23); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t ns_exlevel_vinst_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 
16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - /* clear EXLEVEL_NS bits (bit[23] is never implemented */ - drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22)); - /* enable instruction tracing for corresponding exception level */ - val &= drvdata->ns_ex_level; - drvdata->vinst_ctrl |= (val << 20); - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(ns_exlevel_vinst); - -static ssize_t addr_idx_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->addr_idx; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t addr_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val >= drvdata->nr_addr_cmp * 2) - return -EINVAL; - - /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. - */ - spin_lock(&drvdata->spinlock); - drvdata->addr_idx = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_idx); - -static ssize_t addr_instdatatype_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - ssize_t len; - u8 val, idx; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - val = BMVAL(drvdata->addr_acc[idx], 0, 1); - len = scnprintf(buf, PAGE_SIZE, "%s\n", - val == ETM_INSTR_ADDR ? "instr" : - (val == ETM_DATA_LOAD_ADDR ? "data_load" : - (val == ETM_DATA_STORE_ADDR ? "data_store" : - "data_load_store"))); - spin_unlock(&drvdata->spinlock); - return len; -} - -static ssize_t addr_instdatatype_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - char str[20] = ""; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (strlen(buf) >= 20) - return -EINVAL; - if (sscanf(buf, "%s", str) != 1) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!strcmp(str, "instr")) - /* TYPE, bits[1:0] */ - drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1)); - - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_instdatatype); - -static ssize_t addr_single_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - idx = drvdata->addr_idx; - spin_lock(&drvdata->spinlock); - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - val = (unsigned long)drvdata->addr_val[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t addr_single_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - drvdata->addr_val[idx] = (u64)val; - drvdata->addr_type[idx] 
= ETM_ADDR_TYPE_SINGLE; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_single); - -static ssize_t addr_range_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val1, val2; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (idx % 2 != 0) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && - drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || - (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && - drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - val1 = (unsigned long)drvdata->addr_val[idx]; - val2 = (unsigned long)drvdata->addr_val[idx + 1]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); -} - -static ssize_t addr_range_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val1, val2; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) - return -EINVAL; - /* lower address comparator cannot have a higher address value */ - if (val1 > val2) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (idx % 2 != 0) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && - drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || - (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && - drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - drvdata->addr_val[idx] = (u64)val1; - drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE; - drvdata->addr_val[idx + 1] = (u64)val2; - drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; - /* - * Program include or exclude control bits for vinst or vdata - * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE - */ - if (drvdata->mode & ETM_MODE_EXCLUDE) - etm4_set_mode_exclude(drvdata, true); - else - etm4_set_mode_exclude(drvdata, false); - - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_range); - -static ssize_t addr_start_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - val = (unsigned long)drvdata->addr_val[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t addr_start_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!drvdata->nr_addr_cmp) { - spin_unlock(&drvdata->spinlock); - return -EINVAL; - } - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - drvdata->addr_val[idx] = (u64)val; - drvdata->addr_type[idx] = ETM_ADDR_TYPE_START; - 
drvdata->vissctlr |= BIT(idx); - /* SSSTATUS, bit[9] - turn on start/stop logic */ - drvdata->vinst_ctrl |= BIT(9); - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_start); - -static ssize_t addr_stop_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - val = (unsigned long)drvdata->addr_val[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t addr_stop_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!drvdata->nr_addr_cmp) { - spin_unlock(&drvdata->spinlock); - return -EINVAL; - } - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - drvdata->addr_val[idx] = (u64)val; - drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP; - drvdata->vissctlr |= BIT(idx + 16); - /* SSSTATUS, bit[9] - turn on start/stop logic */ - drvdata->vinst_ctrl |= BIT(9); - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_stop); - -static ssize_t addr_ctxtype_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - ssize_t len; - u8 idx, val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - /* CONTEXTTYPE, bits[3:2] */ - val = BMVAL(drvdata->addr_acc[idx], 2, 3); - len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" : - (val == ETM_CTX_CTXID ? "ctxid" : - (val == ETM_CTX_VMID ? 
"vmid" : "all"))); - spin_unlock(&drvdata->spinlock); - return len; -} - -static ssize_t addr_ctxtype_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - char str[10] = ""; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (strlen(buf) >= 10) - return -EINVAL; - if (sscanf(buf, "%s", str) != 1) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!strcmp(str, "none")) - /* start by clearing context type bits */ - drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3)); - else if (!strcmp(str, "ctxid")) { - /* 0b01 The trace unit performs a Context ID */ - if (drvdata->numcidc) { - drvdata->addr_acc[idx] |= BIT(2); - drvdata->addr_acc[idx] &= ~BIT(3); - } - } else if (!strcmp(str, "vmid")) { - /* 0b10 The trace unit performs a VMID */ - if (drvdata->numvmidc) { - drvdata->addr_acc[idx] &= ~BIT(2); - drvdata->addr_acc[idx] |= BIT(3); - } - } else if (!strcmp(str, "all")) { - /* - * 0b11 The trace unit performs a Context ID - * comparison and a VMID - */ - if (drvdata->numcidc) - drvdata->addr_acc[idx] |= BIT(2); - if (drvdata->numvmidc) - drvdata->addr_acc[idx] |= BIT(3); + ret = -EINVAL; } - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_ctxtype); - -static ssize_t addr_context_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - /* context ID comparator bits[6:4] */ - val = BMVAL(drvdata->addr_acc[idx], 4, 6); - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t addr_context_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1)) - return -EINVAL; - if (val >= (drvdata->numcidc >= drvdata->numvmidc ? - drvdata->numcidc : drvdata->numvmidc)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - /* clear context ID comparator bits[6:4] */ - drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6)); - drvdata->addr_acc[idx] |= (val << 4); - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_context); - -static ssize_t seq_idx_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->seq_idx; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t seq_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val >= drvdata->nrseqstate - 1) - return -EINVAL; - - /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. 
- */ - spin_lock(&drvdata->spinlock); - drvdata->seq_idx = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(seq_idx); - -static ssize_t seq_state_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - val = drvdata->seq_state; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t seq_state_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val >= drvdata->nrseqstate) - return -EINVAL; - - drvdata->seq_state = val; - return size; -} -static DEVICE_ATTR_RW(seq_state); - -static ssize_t seq_event_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->seq_idx; - val = drvdata->seq_ctrl[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t seq_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->seq_idx; - /* RST, bits[7:0] */ - drvdata->seq_ctrl[idx] = val & 0xFF; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(seq_event); - -static ssize_t seq_reset_event_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->seq_rst; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t seq_reset_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (!(drvdata->nrseqstate)) - return -EINVAL; - - drvdata->seq_rst = val & ETMv4_EVENT_MASK; - return size; -} -static DEVICE_ATTR_RW(seq_reset_event); - -static ssize_t cntr_idx_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->cntr_idx; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t cntr_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val >= drvdata->nr_cntr) - return -EINVAL; - - /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. 
- */ - spin_lock(&drvdata->spinlock); - drvdata->cntr_idx = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(cntr_idx); - -static ssize_t cntrldvr_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->cntr_idx; - val = drvdata->cntrldvr[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t cntrldvr_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val > ETM_CNTR_MAX_VAL) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->cntr_idx; - drvdata->cntrldvr[idx] = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(cntrldvr); - -static ssize_t cntr_val_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->cntr_idx; - val = drvdata->cntr_val[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t cntr_val_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val > ETM_CNTR_MAX_VAL) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->cntr_idx; - drvdata->cntr_val[idx] = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(cntr_val); - -static ssize_t cntr_ctrl_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->cntr_idx; - val = drvdata->cntr_ctrl[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t cntr_ctrl_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->cntr_idx; - drvdata->cntr_ctrl[idx] = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(cntr_ctrl); - -static ssize_t res_idx_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->res_idx; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t res_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - /* Resource selector pair 0 is always implemented and reserved */ - if (val < 2 || val >= drvdata->nr_resource * 2) - return -EINVAL; + /* The tracer didn't start */ + if (ret) + local_set(&drvdata->mode, CS_MODE_DISABLED); - /* - * Use spinlock to ensure index doesn't change while it gets - * 
dereferenced multiple times within a spinlock block elsewhere. - */ - spin_lock(&drvdata->spinlock); - drvdata->res_idx = val; - spin_unlock(&drvdata->spinlock); - return size; + return ret; } -static DEVICE_ATTR_RW(res_idx); -static ssize_t res_ctrl_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static void etm4_disable_hw(void *info) { - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + u32 control; + struct etmv4_drvdata *drvdata = info; - spin_lock(&drvdata->spinlock); - idx = drvdata->res_idx; - val = drvdata->res_ctrl[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} + CS_UNLOCK(drvdata->base); -static ssize_t res_ctrl_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + control = readl_relaxed(drvdata->base + TRCPRGCTLR); - if (kstrtoul(buf, 16, &val)) - return -EINVAL; + /* EN, bit[0] Trace unit enable bit */ + control &= ~0x1; - spin_lock(&drvdata->spinlock); - idx = drvdata->res_idx; - /* For odd idx pair inversal bit is RES0 */ - if (idx % 2 != 0) - /* PAIRINV, bit[21] */ - val &= ~BIT(21); - drvdata->res_ctrl[idx] = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(res_ctrl); + /* make sure everything completes before disabling */ + mb(); + isb(); + writel_relaxed(control, drvdata->base + TRCPRGCTLR); -static ssize_t ctxid_idx_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + CS_LOCK(drvdata->base); - val = drvdata->ctxid_idx; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); + dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); } -static ssize_t ctxid_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) +static int etm4_disable_perf(struct coresight_device *csdev) { - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val >= drvdata->numcidc) + if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) return -EINVAL; - /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. - */ - spin_lock(&drvdata->spinlock); - drvdata->ctxid_idx = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(ctxid_idx); - -static ssize_t ctxid_pid_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->ctxid_idx; - val = (unsigned long)drvdata->ctxid_vpid[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); + etm4_disable_hw(drvdata); + return 0; } -static ssize_t ctxid_pid_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) +static void etm4_disable_sysfs(struct coresight_device *csdev) { - u8 idx; - unsigned long vpid, pid; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); /* - * only implemented when ctxid tracing is enabled, i.e. 
at least one - * ctxid comparator is implemented and ctxid is greater than 0 bits - * in length + * Taking hotplug lock here protects from clocks getting disabled + * with tracing being left on (crash scenario) if user disable occurs + * after cpu online mask indicates the cpu is offline but before the + * DYING hotplug callback is serviced by the ETM driver. */ - if (!drvdata->ctxid_size || !drvdata->numcidc) - return -EINVAL; - if (kstrtoul(buf, 16, &vpid)) - return -EINVAL; - - pid = coresight_vpid_to_pid(vpid); - - spin_lock(&drvdata->spinlock); - idx = drvdata->ctxid_idx; - drvdata->ctxid_pid[idx] = (u64)pid; - drvdata->ctxid_vpid[idx] = (u64)vpid; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(ctxid_pid); - -static ssize_t ctxid_masks_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val1, val2; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - + get_online_cpus(); spin_lock(&drvdata->spinlock); - val1 = drvdata->ctxid_mask0; - val2 = drvdata->ctxid_mask1; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); -} -static ssize_t ctxid_masks_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 i, j, maskbyte; - unsigned long val1, val2, mask; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - /* - * only implemented when ctxid tracing is enabled, i.e. at least one - * ctxid comparator is implemented and ctxid is greater than 0 bits - * in length - */ - if (!drvdata->ctxid_size || !drvdata->numcidc) - return -EINVAL; - if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - /* - * each byte[0..3] controls mask value applied to ctxid - * comparator[0..3] - */ - switch (drvdata->numcidc) { - case 0x1: - /* COMP0, bits[7:0] */ - drvdata->ctxid_mask0 = val1 & 0xFF; - break; - case 0x2: - /* COMP1, bits[15:8] */ - drvdata->ctxid_mask0 = val1 & 0xFFFF; - break; - case 0x3: - /* COMP2, bits[23:16] */ - drvdata->ctxid_mask0 = val1 & 0xFFFFFF; - break; - case 0x4: - /* COMP3, bits[31:24] */ - drvdata->ctxid_mask0 = val1; - break; - case 0x5: - /* COMP4, bits[7:0] */ - drvdata->ctxid_mask0 = val1; - drvdata->ctxid_mask1 = val2 & 0xFF; - break; - case 0x6: - /* COMP5, bits[15:8] */ - drvdata->ctxid_mask0 = val1; - drvdata->ctxid_mask1 = val2 & 0xFFFF; - break; - case 0x7: - /* COMP6, bits[23:16] */ - drvdata->ctxid_mask0 = val1; - drvdata->ctxid_mask1 = val2 & 0xFFFFFF; - break; - case 0x8: - /* COMP7, bits[31:24] */ - drvdata->ctxid_mask0 = val1; - drvdata->ctxid_mask1 = val2; - break; - default: - break; - } /* - * If software sets a mask bit to 1, it must program relevant byte - * of ctxid comparator value 0x0, otherwise behavior is unpredictable. - * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24] - * of ctxid comparator0 value (corresponding to byte 0) register. + * Executing etm4_disable_hw on the cpu whose ETM is being disabled + * ensures that register writes occur when cpu is powered. 
*/ - mask = drvdata->ctxid_mask0; - for (i = 0; i < drvdata->numcidc; i++) { - /* mask value of corresponding ctxid comparator */ - maskbyte = mask & ETMv4_EVENT_MASK; - /* - * each bit corresponds to a byte of respective ctxid comparator - * value register - */ - for (j = 0; j < 8; j++) { - if (maskbyte & 1) - drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8)); - maskbyte >>= 1; - } - /* Select the next ctxid comparator mask value */ - if (i == 3) - /* ctxid comparators[4-7] */ - mask = drvdata->ctxid_mask1; - else - mask >>= 0x8; - } + smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1); spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(ctxid_masks); - -static ssize_t vmid_idx_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->vmid_idx; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t vmid_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val >= drvdata->numvmidc) - return -EINVAL; - - /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. - */ - spin_lock(&drvdata->spinlock); - drvdata->vmid_idx = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(vmid_idx); - -static ssize_t vmid_val_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx]; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t vmid_val_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - /* - * only implemented when vmid tracing is enabled, i.e. at least one - * vmid comparator is implemented and at least 8 bit vmid size - */ - if (!drvdata->vmid_size || !drvdata->numvmidc) - return -EINVAL; - if (kstrtoul(buf, 16, &val)) - return -EINVAL; + put_online_cpus(); - spin_lock(&drvdata->spinlock); - drvdata->vmid_val[drvdata->vmid_idx] = (u64)val; - spin_unlock(&drvdata->spinlock); - return size; + dev_info(drvdata->dev, "ETM tracing disabled\n"); } -static DEVICE_ATTR_RW(vmid_val); -static ssize_t vmid_masks_show(struct device *dev, - struct device_attribute *attr, char *buf) +static void etm4_disable(struct coresight_device *csdev) { - unsigned long val1, val2; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - val1 = drvdata->vmid_mask0; - val2 = drvdata->vmid_mask1; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); -} + u32 mode; + struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); -static ssize_t vmid_masks_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 i, j, maskbyte; - unsigned long val1, val2, mask; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); /* - * only implemented when vmid tracing is enabled, i.e. 
at least one - * vmid comparator is implemented and at least 8 bit vmid size + * For as long as the tracer isn't disabled another entity can't + * change its status. As such we can read the status here without + * fearing it will change under us. */ - if (!drvdata->vmid_size || !drvdata->numvmidc) - return -EINVAL; - if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) - return -EINVAL; - - spin_lock(&drvdata->spinlock); + mode = local_read(&drvdata->mode); - /* - * each byte[0..3] controls mask value applied to vmid - * comparator[0..3] - */ - switch (drvdata->numvmidc) { - case 0x1: - /* COMP0, bits[7:0] */ - drvdata->vmid_mask0 = val1 & 0xFF; - break; - case 0x2: - /* COMP1, bits[15:8] */ - drvdata->vmid_mask0 = val1 & 0xFFFF; - break; - case 0x3: - /* COMP2, bits[23:16] */ - drvdata->vmid_mask0 = val1 & 0xFFFFFF; + switch (mode) { + case CS_MODE_DISABLED: break; - case 0x4: - /* COMP3, bits[31:24] */ - drvdata->vmid_mask0 = val1; + case CS_MODE_SYSFS: + etm4_disable_sysfs(csdev); break; - case 0x5: - /* COMP4, bits[7:0] */ - drvdata->vmid_mask0 = val1; - drvdata->vmid_mask1 = val2 & 0xFF; - break; - case 0x6: - /* COMP5, bits[15:8] */ - drvdata->vmid_mask0 = val1; - drvdata->vmid_mask1 = val2 & 0xFFFF; - break; - case 0x7: - /* COMP6, bits[23:16] */ - drvdata->vmid_mask0 = val1; - drvdata->vmid_mask1 = val2 & 0xFFFFFF; - break; - case 0x8: - /* COMP7, bits[31:24] */ - drvdata->vmid_mask0 = val1; - drvdata->vmid_mask1 = val2; - break; - default: + case CS_MODE_PERF: + etm4_disable_perf(csdev); break; } - /* - * If software sets a mask bit to 1, it must program relevant byte - * of vmid comparator value 0x0, otherwise behavior is unpredictable. - * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24] - * of vmid comparator0 value (corresponding to byte 0) register. 
- */ - mask = drvdata->vmid_mask0; - for (i = 0; i < drvdata->numvmidc; i++) { - /* mask value of corresponding vmid comparator */ - maskbyte = mask & ETMv4_EVENT_MASK; - /* - * each bit corresponds to a byte of respective vmid comparator - * value register - */ - for (j = 0; j < 8; j++) { - if (maskbyte & 1) - drvdata->vmid_val[i] &= ~(0xFF << (j * 8)); - maskbyte >>= 1; - } - /* Select the next vmid comparator mask value */ - if (i == 3) - /* vmid comparators[4-7] */ - mask = drvdata->vmid_mask1; - else - mask >>= 0x8; - } - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(vmid_masks); - -static ssize_t cpu_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - int val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->cpu; - return scnprintf(buf, PAGE_SIZE, "%d\n", val); - + if (mode) + local_set(&drvdata->mode, CS_MODE_DISABLED); } -static DEVICE_ATTR_RO(cpu); - -static struct attribute *coresight_etmv4_attrs[] = { - &dev_attr_nr_pe_cmp.attr, - &dev_attr_nr_addr_cmp.attr, - &dev_attr_nr_cntr.attr, - &dev_attr_nr_ext_inp.attr, - &dev_attr_numcidc.attr, - &dev_attr_numvmidc.attr, - &dev_attr_nrseqstate.attr, - &dev_attr_nr_resource.attr, - &dev_attr_nr_ss_cmp.attr, - &dev_attr_reset.attr, - &dev_attr_mode.attr, - &dev_attr_pe.attr, - &dev_attr_event.attr, - &dev_attr_event_instren.attr, - &dev_attr_event_ts.attr, - &dev_attr_syncfreq.attr, - &dev_attr_cyc_threshold.attr, - &dev_attr_bb_ctrl.attr, - &dev_attr_event_vinst.attr, - &dev_attr_s_exlevel_vinst.attr, - &dev_attr_ns_exlevel_vinst.attr, - &dev_attr_addr_idx.attr, - &dev_attr_addr_instdatatype.attr, - &dev_attr_addr_single.attr, - &dev_attr_addr_range.attr, - &dev_attr_addr_start.attr, - &dev_attr_addr_stop.attr, - &dev_attr_addr_ctxtype.attr, - &dev_attr_addr_context.attr, - &dev_attr_seq_idx.attr, - &dev_attr_seq_state.attr, - &dev_attr_seq_event.attr, - &dev_attr_seq_reset_event.attr, - &dev_attr_cntr_idx.attr, - &dev_attr_cntrldvr.attr, - &dev_attr_cntr_val.attr, - &dev_attr_cntr_ctrl.attr, - &dev_attr_res_idx.attr, - &dev_attr_res_ctrl.attr, - &dev_attr_ctxid_idx.attr, - &dev_attr_ctxid_pid.attr, - &dev_attr_ctxid_masks.attr, - &dev_attr_vmid_idx.attr, - &dev_attr_vmid_val.attr, - &dev_attr_vmid_masks.attr, - &dev_attr_cpu.attr, - NULL, -}; - -#define coresight_simple_func(name, offset) \ -static ssize_t name##_show(struct device *_dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \ - return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ - readl_relaxed(drvdata->base + offset)); \ -} \ -static DEVICE_ATTR_RO(name) - -coresight_simple_func(trcoslsr, TRCOSLSR); -coresight_simple_func(trcpdcr, TRCPDCR); -coresight_simple_func(trcpdsr, TRCPDSR); -coresight_simple_func(trclsr, TRCLSR); -coresight_simple_func(trcauthstatus, TRCAUTHSTATUS); -coresight_simple_func(trcdevid, TRCDEVID); -coresight_simple_func(trcdevtype, TRCDEVTYPE); -coresight_simple_func(trcpidr0, TRCPIDR0); -coresight_simple_func(trcpidr1, TRCPIDR1); -coresight_simple_func(trcpidr2, TRCPIDR2); -coresight_simple_func(trcpidr3, TRCPIDR3); - -static struct attribute *coresight_etmv4_mgmt_attrs[] = { - &dev_attr_trcoslsr.attr, - &dev_attr_trcpdcr.attr, - &dev_attr_trcpdsr.attr, - &dev_attr_trclsr.attr, - &dev_attr_trcauthstatus.attr, - &dev_attr_trcdevid.attr, - &dev_attr_trcdevtype.attr, - &dev_attr_trcpidr0.attr, - &dev_attr_trcpidr1.attr, - &dev_attr_trcpidr2.attr, - &dev_attr_trcpidr3.attr, - NULL, -}; 
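For reference, the per-driver coresight_simple_func() macro and the register-dump attributes removed here are superseded by the generalized coresight_simple_func(type, name, offset) macro this patch adds to coresight-priv.h. A minimal sketch of what that macro expands to for one register (using struct etmv4_drvdata and TRCOSLSR purely as an illustration; the usual kernel device/sysfs headers are assumed):

static ssize_t trcoslsr_show(struct device *_dev,
			     struct device_attribute *attr, char *buf)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent);

	/* dump the raw register value; the offset is baked in by the macro */
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",
			 readl_relaxed(drvdata->base + TRCOSLSR));
}
static DEVICE_ATTR_RO(trcoslsr);

Each such attribute is then listed in an attribute_group so it shows up under the device's sysfs "mgmt" directory, as the ETB and STM portions of this patch do.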
-coresight_simple_func(trcidr0, TRCIDR0); -coresight_simple_func(trcidr1, TRCIDR1); -coresight_simple_func(trcidr2, TRCIDR2); -coresight_simple_func(trcidr3, TRCIDR3); -coresight_simple_func(trcidr4, TRCIDR4); -coresight_simple_func(trcidr5, TRCIDR5); -/* trcidr[6,7] are reserved */ -coresight_simple_func(trcidr8, TRCIDR8); -coresight_simple_func(trcidr9, TRCIDR9); -coresight_simple_func(trcidr10, TRCIDR10); -coresight_simple_func(trcidr11, TRCIDR11); -coresight_simple_func(trcidr12, TRCIDR12); -coresight_simple_func(trcidr13, TRCIDR13); - -static struct attribute *coresight_etmv4_trcidr_attrs[] = { - &dev_attr_trcidr0.attr, - &dev_attr_trcidr1.attr, - &dev_attr_trcidr2.attr, - &dev_attr_trcidr3.attr, - &dev_attr_trcidr4.attr, - &dev_attr_trcidr5.attr, - /* trcidr[6,7] are reserved */ - &dev_attr_trcidr8.attr, - &dev_attr_trcidr9.attr, - &dev_attr_trcidr10.attr, - &dev_attr_trcidr11.attr, - &dev_attr_trcidr12.attr, - &dev_attr_trcidr13.attr, - NULL, -}; - -static const struct attribute_group coresight_etmv4_group = { - .attrs = coresight_etmv4_attrs, -}; - -static const struct attribute_group coresight_etmv4_mgmt_group = { - .attrs = coresight_etmv4_mgmt_attrs, - .name = "mgmt", -}; - -static const struct attribute_group coresight_etmv4_trcidr_group = { - .attrs = coresight_etmv4_trcidr_attrs, - .name = "trcidr", +static const struct coresight_ops_source etm4_source_ops = { + .cpu_id = etm4_cpu_id, + .trace_id = etm4_trace_id, + .enable = etm4_enable, + .disable = etm4_disable, }; -static const struct attribute_group *coresight_etmv4_groups[] = { - &coresight_etmv4_group, - &coresight_etmv4_mgmt_group, - &coresight_etmv4_trcidr_group, - NULL, +static const struct coresight_ops etm4_cs_ops = { + .source_ops = &etm4_source_ops, }; static void etm4_init_arch_data(void *info) @@ -2313,6 +408,9 @@ static void etm4_init_arch_data(void *info) u32 etmidr5; struct etmv4_drvdata *drvdata = info; + /* Make sure all registers are accessible */ + etm4_os_unlock(drvdata); + CS_UNLOCK(drvdata->base); /* find all capabilities of the tracing unit */ @@ -2464,93 +562,115 @@ static void etm4_init_arch_data(void *info) CS_LOCK(drvdata->base); } -static void etm4_init_default_data(struct etmv4_drvdata *drvdata) +static void etm4_set_default(struct etmv4_config *config) { - int i; + if (WARN_ON_ONCE(!config)) + return; - drvdata->pe_sel = 0x0; - drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID | - ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK); + /* + * Make default initialisation trace everything + * + * Select the "always true" resource selector on the + * "Enabling Event" line and configure address range comparator + * '0' to trace the whole address range. From there + * configure the "include/exclude" engine to include address + * range comparator '0'. 
+ */ /* disable all events tracing */ - drvdata->eventctrl0 = 0x0; - drvdata->eventctrl1 = 0x0; + config->eventctrl0 = 0x0; + config->eventctrl1 = 0x0; /* disable stalling */ - drvdata->stall_ctrl = 0x0; + config->stall_ctrl = 0x0; + + /* enable trace synchronization every 4096 bytes, if available */ + config->syncfreq = 0xC; /* disable timestamp event */ - drvdata->ts_ctrl = 0x0; + config->ts_ctrl = 0x0; - /* enable trace synchronization every 4096 bytes for trace */ - if (drvdata->syncpr == false) - drvdata->syncfreq = 0xC; + /* TRCVICTLR::EVENT = 0x01, select the always on logic */ + config->vinst_ctrl |= BIT(0); /* - * enable viewInst to trace everything with start-stop logic in - * started state + * TRCVICTLR::SSSTATUS == 1, the start-stop logic is + * in the started state */ - drvdata->vinst_ctrl |= BIT(0); - /* set initial state of start-stop logic */ - if (drvdata->nr_addr_cmp) - drvdata->vinst_ctrl |= BIT(9); + config->vinst_ctrl |= BIT(9); - /* no address range filtering for ViewInst */ - drvdata->viiectlr = 0x0; - /* no start-stop filtering for ViewInst */ - drvdata->vissctlr = 0x0; + /* + * Configure address range comparator '0' to encompass all + * possible addresses. + */ - /* disable seq events */ - for (i = 0; i < drvdata->nrseqstate-1; i++) - drvdata->seq_ctrl[i] = 0x0; - drvdata->seq_rst = 0x0; - drvdata->seq_state = 0x0; + /* First half of default address comparator: start at address 0 */ + config->addr_val[ETM_DEFAULT_ADDR_COMP] = 0x0; + /* trace instruction addresses */ + config->addr_acc[ETM_DEFAULT_ADDR_COMP] &= ~(BIT(0) | BIT(1)); + /* EXLEVEL_NS, bits[12:15], only trace application and kernel space */ + config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= ETM_EXLEVEL_NS_HYP; + /* EXLEVEL_S, bits[11:8], don't trace anything in secure state */ + config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= (ETM_EXLEVEL_S_APP | + ETM_EXLEVEL_S_OS | + ETM_EXLEVEL_S_HYP); + config->addr_type[ETM_DEFAULT_ADDR_COMP] = ETM_ADDR_TYPE_RANGE; - /* disable external input events */ - drvdata->ext_inp = 0x0; + /* + * Second half of default address comparator: go all + * the way to the top. + */ + config->addr_val[ETM_DEFAULT_ADDR_COMP + 1] = ~0x0; + /* trace instruction addresses */ + config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] &= ~(BIT(0) | BIT(1)); + /* Address comparator type must be equal for both halves */ + config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = + config->addr_acc[ETM_DEFAULT_ADDR_COMP]; + config->addr_type[ETM_DEFAULT_ADDR_COMP + 1] = ETM_ADDR_TYPE_RANGE; - for (i = 0; i < drvdata->nr_cntr; i++) { - drvdata->cntrldvr[i] = 0x0; - drvdata->cntr_ctrl[i] = 0x0; - drvdata->cntr_val[i] = 0x0; - } + /* + * Configure the ViewInst function to filter on address range + * comparator '0'. 
+ */ + config->viiectlr = BIT(0); - /* Resource selector pair 0 is always implemented and reserved */ - drvdata->res_idx = 0x2; - for (i = 2; i < drvdata->nr_resource * 2; i++) - drvdata->res_ctrl[i] = 0x0; + /* no start-stop filtering for ViewInst */ + config->vissctlr = 0x0; +} - for (i = 0; i < drvdata->nr_ss_cmp; i++) { - drvdata->ss_ctrl[i] = 0x0; - drvdata->ss_pe_cmp[i] = 0x0; - } +void etm4_config_trace_mode(struct etmv4_config *config) +{ + u32 addr_acc, mode; - if (drvdata->nr_addr_cmp >= 1) { - drvdata->addr_val[0] = (unsigned long)_stext; - drvdata->addr_val[1] = (unsigned long)_etext; - drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE; - drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE; - } + mode = config->mode; + mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER); - for (i = 0; i < drvdata->numcidc; i++) { - drvdata->ctxid_pid[i] = 0x0; - drvdata->ctxid_vpid[i] = 0x0; - } + /* excluding kernel AND user space doesn't make sense */ + WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)); - drvdata->ctxid_mask0 = 0x0; - drvdata->ctxid_mask1 = 0x0; + /* nothing to do if neither flags are set */ + if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER)) + return; - for (i = 0; i < drvdata->numvmidc; i++) - drvdata->vmid_val[i] = 0x0; - drvdata->vmid_mask0 = 0x0; - drvdata->vmid_mask1 = 0x0; + addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP]; + /* clear default config */ + addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS); /* - * A trace ID value of 0 is invalid, so let's start at some - * random value that fits in 7 bits. ETMv3.x has 0x10 so let's - * start at 0x20. + * EXLEVEL_NS, bits[15:12] + * The Exception levels are: + * Bit[12] Exception level 0 - Application + * Bit[13] Exception level 1 - OS + * Bit[14] Exception level 2 - Hypervisor + * Bit[15] Never implemented */ - drvdata->trcid = 0x20 + drvdata->cpu; + if (mode & ETM_MODE_EXCL_KERN) + addr_acc |= ETM_EXLEVEL_NS_OS; + else + addr_acc |= ETM_EXLEVEL_NS_APP; + + config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc; + config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc; } static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, @@ -2569,7 +689,7 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, etmdrvdata[cpu]->os_unlock = true; } - if (etmdrvdata[cpu]->enable) + if (local_read(&etmdrvdata[cpu]->mode)) etm4_enable_hw(etmdrvdata[cpu]); spin_unlock(&etmdrvdata[cpu]->spinlock); break; @@ -2582,7 +702,7 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, case CPU_DYING: spin_lock(&etmdrvdata[cpu]->spinlock); - if (etmdrvdata[cpu]->enable) + if (local_read(&etmdrvdata[cpu]->mode)) etm4_disable_hw(etmdrvdata[cpu]); spin_unlock(&etmdrvdata[cpu]->spinlock); break; @@ -2595,6 +715,11 @@ static struct notifier_block etm4_cpu_notifier = { .notifier_call = etm4_cpu_callback, }; +static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) +{ + drvdata->trcid = coresight_get_trace_id(drvdata->cpu); +} + static int etm4_probe(struct amba_device *adev, const struct amba_id *id) { int ret; @@ -2638,9 +763,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) get_online_cpus(); etmdrvdata[drvdata->cpu] = drvdata; - if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1)) - drvdata->os_unlock = true; - if (smp_call_function_single(drvdata->cpu, etm4_init_arch_data, drvdata, 1)) dev_err(dev, "ETM arch init failed\n"); @@ -2654,9 +776,9 @@ static int etm4_probe(struct amba_device *adev, const struct 
amba_id *id) ret = -EINVAL; goto err_arch_supported; } - etm4_init_default_data(drvdata); - pm_runtime_put(&adev->dev); + etm4_init_trace_id(drvdata); + etm4_set_default(&drvdata->config); desc->type = CORESIGHT_DEV_TYPE_SOURCE; desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; @@ -2667,9 +789,16 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) drvdata->csdev = coresight_register(desc); if (IS_ERR(drvdata->csdev)) { ret = PTR_ERR(drvdata->csdev); - goto err_coresight_register; + goto err_arch_supported; } + ret = etm_perf_symlink(drvdata->csdev, true); + if (ret) { + coresight_unregister(drvdata->csdev); + goto err_arch_supported; + } + + pm_runtime_put(&adev->dev); dev_info(dev, "%s initialized\n", (char *)id->data); if (boot_enable) { @@ -2680,8 +809,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) return 0; err_arch_supported: - pm_runtime_put(&adev->dev); -err_coresight_register: if (--etm4_count == 0) unregister_hotcpu_notifier(&etm4_cpu_notifier); return ret; @@ -2698,6 +825,11 @@ static struct amba_id etm4_ids[] = { .mask = 0x000fffff, .data = "ETM 4.0", }, + { /* ETM 4.0 - A72, Maia, HiSilicon */ + .id = 0x000bb95a, + .mask = 0x000fffff, + .data = "ETM 4.0", + }, { 0, 0}, }; diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index c34100205..5359c5197 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -13,6 +13,7 @@ #ifndef _CORESIGHT_CORESIGHT_ETM_H #define _CORESIGHT_CORESIGHT_ETM_H +#include <asm/local.h> #include <linux/spinlock.h> #include "coresight-priv.h" @@ -175,71 +176,38 @@ #define ETM_MODE_TRACE_RESET BIT(25) #define ETM_MODE_TRACE_ERR BIT(26) #define ETM_MODE_VIEWINST_STARTSTOP BIT(27) -#define ETMv4_MODE_ALL 0xFFFFFFF +#define ETMv4_MODE_ALL (GENMASK(27, 0) | \ + ETM_MODE_EXCL_KERN | \ + ETM_MODE_EXCL_USER) #define TRCSTATR_IDLE_BIT 0 +#define ETM_DEFAULT_ADDR_COMP 0 + +/* secure state access levels */ +#define ETM_EXLEVEL_S_APP BIT(8) +#define ETM_EXLEVEL_S_OS BIT(9) +#define ETM_EXLEVEL_S_NA BIT(10) +#define ETM_EXLEVEL_S_HYP BIT(11) +/* non-secure state access levels */ +#define ETM_EXLEVEL_NS_APP BIT(12) +#define ETM_EXLEVEL_NS_OS BIT(13) +#define ETM_EXLEVEL_NS_HYP BIT(14) +#define ETM_EXLEVEL_NS_NA BIT(15) /** - * struct etm4_drvdata - specifics associated to an ETM component - * @base: Memory mapped base address for this component. - * @dev: The device entity associated to this component. - * @csdev: Component vitals needed by the framework. - * @spinlock: Only one at a time pls. - * @cpu: The cpu this component is affined to. - * @arch: ETM version number. - * @enable: Is this ETM currently tracing. - * @sticky_enable: true if ETM base configuration has been done. - * @boot_enable:True if we should start tracing at boot time. - * @os_unlock: True if access to management registers is allowed. - * @nr_pe: The number of processing entity available for tracing. - * @nr_pe_cmp: The number of processing entity comparator inputs that are - * available for tracing. - * @nr_addr_cmp:Number of pairs of address comparators available - * as found in ETMIDR4 0-3. - * @nr_cntr: Number of counters as found in ETMIDR5 bit 28-30. - * @nr_ext_inp: Number of external input. - * @numcidc: Number of contextID comparators. - * @numvmidc: Number of VMID comparators. - * @nrseqstate: The number of sequencer states that are implemented. 
- * @nr_event: Indicates how many events the trace unit support. - * @nr_resource:The number of resource selection pairs available for tracing. - * @nr_ss_cmp: Number of single-shot comparator controls that are available. + * struct etmv4_config - configuration information related to an ETMv4 * @mode: Controls various modes supported by this ETM. - * @trcid: value of the current ID for this component. - * @trcid_size: Indicates the trace ID width. - * @instrp0: Tracing of load and store instructions - * as P0 elements is supported. - * @trccond: If the trace unit supports conditional - * instruction tracing. - * @retstack: Indicates if the implementation supports a return stack. - * @trc_error: Whether a trace unit can trace a system - * error exception. - * @atbtrig: If the implementation can support ATB triggers - * @lpoverride: If the implementation can support low-power state over. * @pe_sel: Controls which PE to trace. * @cfg: Controls the tracing options. * @eventctrl0: Controls the tracing of arbitrary events. * @eventctrl1: Controls the behavior of the events that @event_ctrl0 selects. * @stallctl: If functionality that prevents trace unit buffer overflows * is available. - * @sysstall: Does the system support stall control of the PE? - * @nooverflow: Indicate if overflow prevention is supported. - * @stall_ctrl: Enables trace unit functionality that prevents trace - * unit buffer overflows. - * @ts_size: Global timestamp size field. * @ts_ctrl: Controls the insertion of global timestamps in the * trace streams. - * @syncpr: Indicates if an implementation has a fixed - * synchronization period. * @syncfreq: Controls how often trace synchronization requests occur. - * @trccci: Indicates if the trace unit supports cycle counting - * for instruction. - * @ccsize: Indicates the size of the cycle counter in bits. - * @ccitmin: minimum value that can be programmed in * the TRCCCCTLR register. * @ccctlr: Sets the threshold value for cycle counting. - * @trcbb: Indicates if the trace unit supports branch broadcast tracing. - * @q_support: Q element support characteristics. * @vinst_ctrl: Controls instruction trace filtering. * @viiectlr: Set or read, the address range comparators. * @vissctlr: Set, or read, the single address comparators that control the @@ -264,73 +232,28 @@ * @addr_acc: Address comparator access type. * @addr_type: Current status of the comparator register. * @ctxid_idx: Context ID index selector. - * @ctxid_size: Size of the context ID field to consider. * @ctxid_pid: Value of the context ID comparator. * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise * the same value of ctxid_pid. * @ctxid_mask0:Context ID comparator mask for comparator 0-3. * @ctxid_mask1:Context ID comparator mask for comparator 4-7. * @vmid_idx: VM ID index selector. - * @vmid_size: Size of the VM ID comparator to consider. * @vmid_val: Value of the VM ID comparator. * @vmid_mask0: VM ID comparator mask for comparator 0-3. * @vmid_mask1: VM ID comparator mask for comparator 4-7. - * @s_ex_level: In secure state, indicates whether instruction tracing is - * supported for the corresponding Exception level. - * @ns_ex_level:In non-secure state, indicates whether instruction tracing is - * supported for the corresponding Exception level. * @ext_inp: External input selection. 
*/ -struct etmv4_drvdata { - void __iomem *base; - struct device *dev; - struct coresight_device *csdev; - spinlock_t spinlock; - int cpu; - u8 arch; - bool enable; - bool sticky_enable; - bool boot_enable; - bool os_unlock; - u8 nr_pe; - u8 nr_pe_cmp; - u8 nr_addr_cmp; - u8 nr_cntr; - u8 nr_ext_inp; - u8 numcidc; - u8 numvmidc; - u8 nrseqstate; - u8 nr_event; - u8 nr_resource; - u8 nr_ss_cmp; +struct etmv4_config { u32 mode; - u8 trcid; - u8 trcid_size; - bool instrp0; - bool trccond; - bool retstack; - bool trc_error; - bool atbtrig; - bool lpoverride; u32 pe_sel; u32 cfg; u32 eventctrl0; u32 eventctrl1; - bool stallctl; - bool sysstall; - bool nooverflow; u32 stall_ctrl; - u8 ts_size; u32 ts_ctrl; - bool syncpr; u32 syncfreq; - bool trccci; - u8 ccsize; - u8 ccitmin; u32 ccctlr; - bool trcbb; u32 bb_ctrl; - bool q_support; u32 vinst_ctrl; u32 viiectlr; u32 vissctlr; @@ -353,19 +276,119 @@ struct etmv4_drvdata { u64 addr_acc[ETM_MAX_SINGLE_ADDR_CMP]; u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP]; u8 ctxid_idx; - u8 ctxid_size; u64 ctxid_pid[ETMv4_MAX_CTXID_CMP]; u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP]; u32 ctxid_mask0; u32 ctxid_mask1; u8 vmid_idx; - u8 vmid_size; u64 vmid_val[ETM_MAX_VMID_CMP]; u32 vmid_mask0; u32 vmid_mask1; + u32 ext_inp; +}; + +/** + * struct etm4_drvdata - specifics associated to an ETM component + * @base: Memory mapped base address for this component. + * @dev: The device entity associated to this component. + * @csdev: Component vitals needed by the framework. + * @spinlock: Only one at a time pls. + * @mode: This tracer's mode, i.e sysFS, Perf or disabled. + * @cpu: The cpu this component is affined to. + * @arch: ETM version number. + * @nr_pe: The number of processing entity available for tracing. + * @nr_pe_cmp: The number of processing entity comparator inputs that are + * available for tracing. + * @nr_addr_cmp:Number of pairs of address comparators available + * as found in ETMIDR4 0-3. + * @nr_cntr: Number of counters as found in ETMIDR5 bit 28-30. + * @nr_ext_inp: Number of external input. + * @numcidc: Number of contextID comparators. + * @numvmidc: Number of VMID comparators. + * @nrseqstate: The number of sequencer states that are implemented. + * @nr_event: Indicates how many events the trace unit support. + * @nr_resource:The number of resource selection pairs available for tracing. + * @nr_ss_cmp: Number of single-shot comparator controls that are available. + * @trcid: value of the current ID for this component. + * @trcid_size: Indicates the trace ID width. + * @ts_size: Global timestamp size field. + * @ctxid_size: Size of the context ID field to consider. + * @vmid_size: Size of the VM ID comparator to consider. + * @ccsize: Indicates the size of the cycle counter in bits. + * @ccitmin: minimum value that can be programmed in + * @s_ex_level: In secure state, indicates whether instruction tracing is + * supported for the corresponding Exception level. + * @ns_ex_level:In non-secure state, indicates whether instruction tracing is + * supported for the corresponding Exception level. + * @sticky_enable: true if ETM base configuration has been done. + * @boot_enable:True if we should start tracing at boot time. + * @os_unlock: True if access to management registers is allowed. + * @instrp0: Tracing of load and store instructions + * as P0 elements is supported. + * @trcbb: Indicates if the trace unit supports branch broadcast tracing. + * @trccond: If the trace unit supports conditional + * instruction tracing. 
+ * @retstack: Indicates if the implementation supports a return stack. + * @trccci: Indicates if the trace unit supports cycle counting + * for instruction. + * @q_support: Q element support characteristics. + * @trc_error: Whether a trace unit can trace a system + * error exception. + * @syncpr: Indicates if an implementation has a fixed + * synchronization period. + * @stall_ctrl: Enables trace unit functionality that prevents trace + * unit buffer overflows. + * @sysstall: Does the system support stall control of the PE? + * @nooverflow: Indicate if overflow prevention is supported. + * @atbtrig: If the implementation can support ATB triggers + * @lpoverride: If the implementation can support low-power state over. + * @config: structure holding configuration parameters. + */ +struct etmv4_drvdata { + void __iomem *base; + struct device *dev; + struct coresight_device *csdev; + spinlock_t spinlock; + local_t mode; + int cpu; + u8 arch; + u8 nr_pe; + u8 nr_pe_cmp; + u8 nr_addr_cmp; + u8 nr_cntr; + u8 nr_ext_inp; + u8 numcidc; + u8 numvmidc; + u8 nrseqstate; + u8 nr_event; + u8 nr_resource; + u8 nr_ss_cmp; + u8 trcid; + u8 trcid_size; + u8 ts_size; + u8 ctxid_size; + u8 vmid_size; + u8 ccsize; + u8 ccitmin; u8 s_ex_level; u8 ns_ex_level; - u32 ext_inp; + u8 q_support; + bool sticky_enable; + bool boot_enable; + bool os_unlock; + bool instrp0; + bool trcbb; + bool trccond; + bool retstack; + bool trccci; + bool trc_error; + bool syncpr; + bool stallctl; + bool sysstall; + bool nooverflow; + bool atbtrig; + bool lpoverride; + struct etmv4_config config; }; /* Address comparator access types */ @@ -391,4 +414,7 @@ enum etm_addr_type { ETM_ADDR_TYPE_START, ETM_ADDR_TYPE_STOP, }; + +extern const struct attribute_group *coresight_etmv4_groups[]; +void etm4_config_trace_mode(struct etmv4_config *config); #endif diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index 0600ca306..05df78905 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -221,7 +221,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id) if (IS_ERR(drvdata->csdev)) return PTR_ERR(drvdata->csdev); - dev_info(dev, "FUNNEL initialized\n"); return 0; } diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 333eddaed..ad975c580 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -37,12 +37,42 @@ #define ETM_MODE_EXCL_KERN BIT(30) #define ETM_MODE_EXCL_USER BIT(31) +#define coresight_simple_func(type, name, offset) \ +static ssize_t name##_show(struct device *_dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + type *drvdata = dev_get_drvdata(_dev->parent); \ + return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ + readl_relaxed(drvdata->base + offset)); \ +} \ +static DEVICE_ATTR_RO(name) + enum cs_mode { CS_MODE_DISABLED, CS_MODE_SYSFS, CS_MODE_PERF, }; +/** + * struct cs_buffer - keep track of a recording session' specifics + * @cur: index of the current buffer + * @nr_pages: max number of pages granted to us + * @offset: offset within the current buffer + * @data_size: how much we collected in this run + * @lost: other than zero if we had a HW buffer wrap around + * @snapshot: is this run in snapshot mode + * @data_pages: a handle the ring buffer + */ +struct cs_buffers { + unsigned int cur; + unsigned int nr_pages; + unsigned long offset; + local_t data_size; + local_t 
lost; + bool snapshot; + void **data_pages; +}; + static inline void CS_LOCK(void __iomem *addr) { do { diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 4299c0569..c6982e312 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -114,7 +114,6 @@ static int replicator_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); - dev_info(dev, "REPLICATOR initialized\n"); return 0; out_disable_pm: diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c new file mode 100644 index 000000000..73be58a11 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -0,0 +1,920 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * Description: CoreSight System Trace Macrocell driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Initial implementation by Pratik Patel + * (C) 2014-2015 Pratik Patel <pratikp@codeaurora.org> + * + * Serious refactoring, code cleanup and upgrading to the Coresight upstream + * framework by Mathieu Poirier + * (C) 2015-2016 Mathieu Poirier <mathieu.poirier@linaro.org> + * + * Guaranteed timing and support for various packet type coming from the + * generic STM API by Chunyan Zhang + * (C) 2015-2016 Chunyan Zhang <zhang.chunyan@linaro.org> + */ +#include <asm/local.h> +#include <linux/amba/bus.h> +#include <linux/bitmap.h> +#include <linux/clk.h> +#include <linux/coresight.h> +#include <linux/coresight-stm.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/moduleparam.h> +#include <linux/of_address.h> +#include <linux/perf_event.h> +#include <linux/pm_runtime.h> +#include <linux/stm.h> + +#include "coresight-priv.h" + +#define STMDMASTARTR 0xc04 +#define STMDMASTOPR 0xc08 +#define STMDMASTATR 0xc0c +#define STMDMACTLR 0xc10 +#define STMDMAIDR 0xcfc +#define STMHEER 0xd00 +#define STMHETER 0xd20 +#define STMHEBSR 0xd60 +#define STMHEMCR 0xd64 +#define STMHEMASTR 0xdf4 +#define STMHEFEAT1R 0xdf8 +#define STMHEIDR 0xdfc +#define STMSPER 0xe00 +#define STMSPTER 0xe20 +#define STMPRIVMASKR 0xe40 +#define STMSPSCR 0xe60 +#define STMSPMSCR 0xe64 +#define STMSPOVERRIDER 0xe68 +#define STMSPMOVERRIDER 0xe6c +#define STMSPTRIGCSR 0xe70 +#define STMTCSR 0xe80 +#define STMTSSTIMR 0xe84 +#define STMTSFREQR 0xe8c +#define STMSYNCR 0xe90 +#define STMAUXCR 0xe94 +#define STMSPFEAT1R 0xea0 +#define STMSPFEAT2R 0xea4 +#define STMSPFEAT3R 0xea8 +#define STMITTRIGGER 0xee8 +#define STMITATBDATA0 0xeec +#define STMITATBCTR2 0xef0 +#define STMITATBID 0xef4 +#define STMITATBCTR0 0xef8 + +#define STM_32_CHANNEL 32 +#define BYTES_PER_CHANNEL 256 +#define STM_TRACE_BUF_SIZE 4096 +#define STM_SW_MASTER_END 127 + +/* Register bit definition */ +#define STMTCSR_BUSY_BIT 23 +/* Reserve the first 10 channels for kernel usage */ +#define STM_CHANNEL_OFFSET 0 + +enum stm_pkt_type { + STM_PKT_TYPE_DATA = 0x98, + STM_PKT_TYPE_FLAG = 0xE8, + STM_PKT_TYPE_TRIG = 0xF8, +}; + +#define stm_channel_addr(drvdata, ch) (drvdata->chs.base + \ + (ch * 
BYTES_PER_CHANNEL)) +#define stm_channel_off(type, opts) (type & ~opts) + +static int boot_nr_channel; + +/* + * Not really modular but using module_param is the easiest way to + * remain consistent with existing use cases for now. + */ +module_param_named( + boot_nr_channel, boot_nr_channel, int, S_IRUGO +); + +/** + * struct channel_space - central management entity for extended ports + * @base: memory mapped base address where channels start. + * @guaranteed: is the channel delivery guaranteed. + */ +struct channel_space { + void __iomem *base; + unsigned long *guaranteed; +}; + +/** + * struct stm_drvdata - specifics associated to an STM component + * @base: memory mapped base address for this component. + * @dev: the device entity associated to this component. + * @atclk: optional clock for the core parts of the STM. + * @csdev: component vitals needed by the framework. + * @spinlock: only one at a time pls. + * @chs: the channels associated to this STM. + * @stm: structure associated to the generic STM interface. + * @mode: this tracer's mode, i.e sysFS, or disabled. + * @traceid: value of the current ID for this component. + * @write_bytes: Maximum bytes this STM can write at a time. + * @stmsper: settings for register STMSPER. + * @stmspscr: settings for register STMSPSCR. + * @numsp: the total number of stimulus ports supported by this STM. + * @stmheer: settings for register STMHEER. + * @stmheter: settings for register STMHETER. + * @stmhebsr: settings for register STMHEBSR. + */ +struct stm_drvdata { + void __iomem *base; + struct device *dev; + struct clk *atclk; + struct coresight_device *csdev; + spinlock_t spinlock; + struct channel_space chs; + struct stm_data stm; + local_t mode; + u8 traceid; + u32 write_bytes; + u32 stmsper; + u32 stmspscr; + u32 numsp; + u32 stmheer; + u32 stmheter; + u32 stmhebsr; +}; + +static void stm_hwevent_enable_hw(struct stm_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + writel_relaxed(drvdata->stmhebsr, drvdata->base + STMHEBSR); + writel_relaxed(drvdata->stmheter, drvdata->base + STMHETER); + writel_relaxed(drvdata->stmheer, drvdata->base + STMHEER); + writel_relaxed(0x01 | /* Enable HW event tracing */ + 0x04, /* Error detection on event tracing */ + drvdata->base + STMHEMCR); + + CS_LOCK(drvdata->base); +} + +static void stm_port_enable_hw(struct stm_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + /* ATB trigger enable on direct writes to TRIG locations */ + writel_relaxed(0x10, + drvdata->base + STMSPTRIGCSR); + writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR); + writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER); + + CS_LOCK(drvdata->base); +} + +static void stm_enable_hw(struct stm_drvdata *drvdata) +{ + if (drvdata->stmheer) + stm_hwevent_enable_hw(drvdata); + + stm_port_enable_hw(drvdata); + + CS_UNLOCK(drvdata->base); + + /* 4096 bytes between synchronisation packets */ + writel_relaxed(0xFFF, drvdata->base + STMSYNCR); + writel_relaxed((drvdata->traceid << 16 | /* trace id */ + 0x02 | /* timestamp enable */ + 0x01), /* global STM enable */ + drvdata->base + STMTCSR); + + CS_LOCK(drvdata->base); +} + +static int stm_enable(struct coresight_device *csdev, + struct perf_event_attr *attr, u32 mode) +{ + u32 val; + struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + if (mode != CS_MODE_SYSFS) + return -EINVAL; + + val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode); + + /* Someone is already using the tracer */ + if (val) + return -EBUSY; + + pm_runtime_get_sync(drvdata->dev); + 
spin_lock(&drvdata->spinlock); + stm_enable_hw(drvdata); + spin_unlock(&drvdata->spinlock); + + dev_info(drvdata->dev, "STM tracing enabled\n"); + return 0; +} + +static void stm_hwevent_disable_hw(struct stm_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + writel_relaxed(0x0, drvdata->base + STMHEMCR); + writel_relaxed(0x0, drvdata->base + STMHEER); + writel_relaxed(0x0, drvdata->base + STMHETER); + + CS_LOCK(drvdata->base); +} + +static void stm_port_disable_hw(struct stm_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + writel_relaxed(0x0, drvdata->base + STMSPER); + writel_relaxed(0x0, drvdata->base + STMSPTRIGCSR); + + CS_LOCK(drvdata->base); +} + +static void stm_disable_hw(struct stm_drvdata *drvdata) +{ + u32 val; + + CS_UNLOCK(drvdata->base); + + val = readl_relaxed(drvdata->base + STMTCSR); + val &= ~0x1; /* clear global STM enable [0] */ + writel_relaxed(val, drvdata->base + STMTCSR); + + CS_LOCK(drvdata->base); + + stm_port_disable_hw(drvdata); + if (drvdata->stmheer) + stm_hwevent_disable_hw(drvdata); +} + +static void stm_disable(struct coresight_device *csdev) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + /* + * For as long as the tracer isn't disabled another entity can't + * change its status. As such we can read the status here without + * fearing it will change under us. + */ + if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { + spin_lock(&drvdata->spinlock); + stm_disable_hw(drvdata); + spin_unlock(&drvdata->spinlock); + + /* Wait until the engine has completely stopped */ + coresight_timeout(drvdata, STMTCSR, STMTCSR_BUSY_BIT, 0); + + pm_runtime_put(drvdata->dev); + + local_set(&drvdata->mode, CS_MODE_DISABLED); + dev_info(drvdata->dev, "STM tracing disabled\n"); + } +} + +static int stm_trace_id(struct coresight_device *csdev) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + return drvdata->traceid; +} + +static const struct coresight_ops_source stm_source_ops = { + .trace_id = stm_trace_id, + .enable = stm_enable, + .disable = stm_disable, +}; + +static const struct coresight_ops stm_cs_ops = { + .source_ops = &stm_source_ops, +}; + +static inline bool stm_addr_unaligned(const void *addr, u8 write_bytes) +{ + return ((unsigned long)addr & (write_bytes - 1)); +} + +static void stm_send(void *addr, const void *data, u32 size, u8 write_bytes) +{ + u8 paload[8]; + + if (stm_addr_unaligned(data, write_bytes)) { + memcpy(paload, data, size); + data = paload; + } + + /* now we are 64bit/32bit aligned */ + switch (size) { +#ifdef CONFIG_64BIT + case 8: + writeq_relaxed(*(u64 *)data, addr); + break; +#endif + case 4: + writel_relaxed(*(u32 *)data, addr); + break; + case 2: + writew_relaxed(*(u16 *)data, addr); + break; + case 1: + writeb_relaxed(*(u8 *)data, addr); + break; + default: + break; + } +} + +static int stm_generic_link(struct stm_data *stm_data, + unsigned int master, unsigned int channel) +{ + struct stm_drvdata *drvdata = container_of(stm_data, + struct stm_drvdata, stm); + if (!drvdata || !drvdata->csdev) + return -EINVAL; + + return coresight_enable(drvdata->csdev); +} + +static void stm_generic_unlink(struct stm_data *stm_data, + unsigned int master, unsigned int channel) +{ + struct stm_drvdata *drvdata = container_of(stm_data, + struct stm_drvdata, stm); + if (!drvdata || !drvdata->csdev) + return; + + stm_disable(drvdata->csdev); +} + +static long stm_generic_set_options(struct stm_data *stm_data, + unsigned int master, + unsigned int channel, + unsigned int nr_chans, + unsigned long options) 
+{ + struct stm_drvdata *drvdata = container_of(stm_data, + struct stm_drvdata, stm); + if (!(drvdata && local_read(&drvdata->mode))) + return -EINVAL; + + if (channel >= drvdata->numsp) + return -EINVAL; + + switch (options) { + case STM_OPTION_GUARANTEED: + set_bit(channel, drvdata->chs.guaranteed); + break; + + case STM_OPTION_INVARIANT: + clear_bit(channel, drvdata->chs.guaranteed); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static ssize_t stm_generic_packet(struct stm_data *stm_data, + unsigned int master, + unsigned int channel, + unsigned int packet, + unsigned int flags, + unsigned int size, + const unsigned char *payload) +{ + unsigned long ch_addr; + struct stm_drvdata *drvdata = container_of(stm_data, + struct stm_drvdata, stm); + + if (!(drvdata && local_read(&drvdata->mode))) + return 0; + + if (channel >= drvdata->numsp) + return 0; + + ch_addr = (unsigned long)stm_channel_addr(drvdata, channel); + + flags = (flags == STP_PACKET_TIMESTAMPED) ? STM_FLAG_TIMESTAMPED : 0; + flags |= test_bit(channel, drvdata->chs.guaranteed) ? + STM_FLAG_GUARANTEED : 0; + + if (size > drvdata->write_bytes) + size = drvdata->write_bytes; + else + size = rounddown_pow_of_two(size); + + switch (packet) { + case STP_PACKET_FLAG: + ch_addr |= stm_channel_off(STM_PKT_TYPE_FLAG, flags); + + /* + * The generic STM core sets a size of '0' on flag packets. + * As such send a flag packet of size '1' and tell the + * core we did so. + */ + stm_send((void *)ch_addr, payload, 1, drvdata->write_bytes); + size = 1; + break; + + case STP_PACKET_DATA: + ch_addr |= stm_channel_off(STM_PKT_TYPE_DATA, flags); + stm_send((void *)ch_addr, payload, size, + drvdata->write_bytes); + break; + + default: + return -ENOTSUPP; + } + + return size; +} + +static ssize_t hwevent_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val = drvdata->stmheer; + + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t hwevent_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + int ret = 0; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return -EINVAL; + + drvdata->stmheer = val; + /* HW event enable and trigger go hand in hand */ + drvdata->stmheter = val; + + return size; +} +static DEVICE_ATTR_RW(hwevent_enable); + +static ssize_t hwevent_select_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val = drvdata->stmhebsr; + + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t hwevent_select_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + int ret = 0; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return -EINVAL; + + drvdata->stmhebsr = val; + + return size; +} +static DEVICE_ATTR_RW(hwevent_select); + +static ssize_t port_select_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if (!local_read(&drvdata->mode)) { + val = drvdata->stmspscr; + } else { + spin_lock(&drvdata->spinlock); + val = readl_relaxed(drvdata->base + STMSPSCR); + spin_unlock(&drvdata->spinlock); + } + + return scnprintf(buf, PAGE_SIZE, 
"%#lx\n", val); +} + +static ssize_t port_select_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val, stmsper; + int ret = 0; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + spin_lock(&drvdata->spinlock); + drvdata->stmspscr = val; + + if (local_read(&drvdata->mode)) { + CS_UNLOCK(drvdata->base); + /* Process as per ARM's TRM recommendation */ + stmsper = readl_relaxed(drvdata->base + STMSPER); + writel_relaxed(0x0, drvdata->base + STMSPER); + writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR); + writel_relaxed(stmsper, drvdata->base + STMSPER); + CS_LOCK(drvdata->base); + } + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(port_select); + +static ssize_t port_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if (!local_read(&drvdata->mode)) { + val = drvdata->stmsper; + } else { + spin_lock(&drvdata->spinlock); + val = readl_relaxed(drvdata->base + STMSPER); + spin_unlock(&drvdata->spinlock); + } + + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t port_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + int ret = 0; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + spin_lock(&drvdata->spinlock); + drvdata->stmsper = val; + + if (local_read(&drvdata->mode)) { + CS_UNLOCK(drvdata->base); + writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER); + CS_LOCK(drvdata->base); + } + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(port_enable); + +static ssize_t traceid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->traceid; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t traceid_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + /* traceid field is 7bit wide on STM32 */ + drvdata->traceid = val & 0x7f; + return size; +} +static DEVICE_ATTR_RW(traceid); + +#define coresight_stm_simple_func(name, offset) \ + coresight_simple_func(struct stm_drvdata, name, offset) + +coresight_stm_simple_func(tcsr, STMTCSR); +coresight_stm_simple_func(tsfreqr, STMTSFREQR); +coresight_stm_simple_func(syncr, STMSYNCR); +coresight_stm_simple_func(sper, STMSPER); +coresight_stm_simple_func(spter, STMSPTER); +coresight_stm_simple_func(privmaskr, STMPRIVMASKR); +coresight_stm_simple_func(spscr, STMSPSCR); +coresight_stm_simple_func(spmscr, STMSPMSCR); +coresight_stm_simple_func(spfeat1r, STMSPFEAT1R); +coresight_stm_simple_func(spfeat2r, STMSPFEAT2R); +coresight_stm_simple_func(spfeat3r, STMSPFEAT3R); +coresight_stm_simple_func(devid, CORESIGHT_DEVID); + +static struct attribute *coresight_stm_attrs[] = { + &dev_attr_hwevent_enable.attr, + &dev_attr_hwevent_select.attr, + &dev_attr_port_enable.attr, + &dev_attr_port_select.attr, + &dev_attr_traceid.attr, + NULL, +}; + +static struct attribute *coresight_stm_mgmt_attrs[] = { + &dev_attr_tcsr.attr, + &dev_attr_tsfreqr.attr, + &dev_attr_syncr.attr, + 
&dev_attr_sper.attr, + &dev_attr_spter.attr, + &dev_attr_privmaskr.attr, + &dev_attr_spscr.attr, + &dev_attr_spmscr.attr, + &dev_attr_spfeat1r.attr, + &dev_attr_spfeat2r.attr, + &dev_attr_spfeat3r.attr, + &dev_attr_devid.attr, + NULL, +}; + +static const struct attribute_group coresight_stm_group = { + .attrs = coresight_stm_attrs, +}; + +static const struct attribute_group coresight_stm_mgmt_group = { + .attrs = coresight_stm_mgmt_attrs, + .name = "mgmt", +}; + +static const struct attribute_group *coresight_stm_groups[] = { + &coresight_stm_group, + &coresight_stm_mgmt_group, + NULL, +}; + +static int stm_get_resource_byname(struct device_node *np, + char *ch_base, struct resource *res) +{ + const char *name = NULL; + int index = 0, found = 0; + + while (!of_property_read_string_index(np, "reg-names", index, &name)) { + if (strcmp(ch_base, name)) { + index++; + continue; + } + + /* We have a match and @index is where it's at */ + found = 1; + break; + } + + if (!found) + return -EINVAL; + + return of_address_to_resource(np, index, res); +} + +static u32 stm_fundamental_data_size(struct stm_drvdata *drvdata) +{ + u32 stmspfeat2r; + + if (!IS_ENABLED(CONFIG_64BIT)) + return 4; + + stmspfeat2r = readl_relaxed(drvdata->base + STMSPFEAT2R); + + /* + * bit[15:12] represents the fundamental data size + * 0 - 32-bit data + * 1 - 64-bit data + */ + return BMVAL(stmspfeat2r, 12, 15) ? 8 : 4; +} + +static u32 stm_num_stimulus_port(struct stm_drvdata *drvdata) +{ + u32 numsp; + + numsp = readl_relaxed(drvdata->base + CORESIGHT_DEVID); + /* + * NUMPS in STMDEVID is 17 bits long and if equal to 0x0, + * 32 stimulus ports are supported. + */ + numsp &= 0x1ffff; + if (!numsp) + numsp = STM_32_CHANNEL; + return numsp; +} + +static void stm_init_default_data(struct stm_drvdata *drvdata) +{ + /* Don't use port selection */ + drvdata->stmspscr = 0x0; + /* + * Enable all channels regardless of their number. When port + * selection isn't used (see above) STMSPER applies to all + * 32 channels available, hence setting all 32 bits to 1 + */ + drvdata->stmsper = ~0x0; + + /* + * The trace ID value for *ETM* tracers starts at CPU_ID * 2 + 0x10 and + * anything equal to or higher than 0x70 is reserved. Since 0x00 is + * also reserved the STM trace ID needs to be higher than 0x00 and + * lower than 0x10. + */ + drvdata->traceid = 0x1; + + /* Set invariant transaction timing on all channels */ + bitmap_clear(drvdata->chs.guaranteed, 0, drvdata->numsp); +} + +static void stm_init_generic_data(struct stm_drvdata *drvdata) +{ + drvdata->stm.name = dev_name(drvdata->dev); + + /* + * MasterIDs are assigned at HW design phase. As such the core is + * using a single master for interaction with this device. 
+ */ + drvdata->stm.sw_start = 1; + drvdata->stm.sw_end = 1; + drvdata->stm.hw_override = true; + drvdata->stm.sw_nchannels = drvdata->numsp; + drvdata->stm.packet = stm_generic_packet; + drvdata->stm.link = stm_generic_link; + drvdata->stm.unlink = stm_generic_unlink; + drvdata->stm.set_options = stm_generic_set_options; +} + +static int stm_probe(struct amba_device *adev, const struct amba_id *id) +{ + int ret; + void __iomem *base; + unsigned long *guaranteed; + struct device *dev = &adev->dev; + struct coresight_platform_data *pdata = NULL; + struct stm_drvdata *drvdata; + struct resource *res = &adev->res; + struct resource ch_res; + size_t res_size, bitmap_size; + struct coresight_desc *desc; + struct device_node *np = adev->dev.of_node; + + if (np) { + pdata = of_get_coresight_platform_data(dev, np); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + adev->dev.platform_data = pdata; + } + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + drvdata->dev = &adev->dev; + drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */ + if (!IS_ERR(drvdata->atclk)) { + ret = clk_prepare_enable(drvdata->atclk); + if (ret) + return ret; + } + dev_set_drvdata(dev, drvdata); + + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + drvdata->base = base; + + ret = stm_get_resource_byname(np, "stm-stimulus-base", &ch_res); + if (ret) + return ret; + + base = devm_ioremap_resource(dev, &ch_res); + if (IS_ERR(base)) + return PTR_ERR(base); + drvdata->chs.base = base; + + drvdata->write_bytes = stm_fundamental_data_size(drvdata); + + if (boot_nr_channel) { + drvdata->numsp = boot_nr_channel; + res_size = min((resource_size_t)(boot_nr_channel * + BYTES_PER_CHANNEL), resource_size(res)); + } else { + drvdata->numsp = stm_num_stimulus_port(drvdata); + res_size = min((resource_size_t)(drvdata->numsp * + BYTES_PER_CHANNEL), resource_size(res)); + } + bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long); + + guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL); + if (!guaranteed) + return -ENOMEM; + drvdata->chs.guaranteed = guaranteed; + + spin_lock_init(&drvdata->spinlock); + + stm_init_default_data(drvdata); + stm_init_generic_data(drvdata); + + if (stm_register_device(dev, &drvdata->stm, THIS_MODULE)) { + dev_info(dev, + "stm_register_device failed, probing deferred\n"); + return -EPROBE_DEFER; + } + + desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); + if (!desc) { + ret = -ENOMEM; + goto stm_unregister; + } + + desc->type = CORESIGHT_DEV_TYPE_SOURCE; + desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE; + desc->ops = &stm_cs_ops; + desc->pdata = pdata; + desc->dev = dev; + desc->groups = coresight_stm_groups; + drvdata->csdev = coresight_register(desc); + if (IS_ERR(drvdata->csdev)) { + ret = PTR_ERR(drvdata->csdev); + goto stm_unregister; + } + + pm_runtime_put(&adev->dev); + + dev_info(dev, "%s initialized\n", (char *)id->data); + return 0; + +stm_unregister: + stm_unregister_device(&drvdata->stm); + return ret; +} + +#ifdef CONFIG_PM +static int stm_runtime_suspend(struct device *dev) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev); + + if (drvdata && !IS_ERR(drvdata->atclk)) + clk_disable_unprepare(drvdata->atclk); + + return 0; +} + +static int stm_runtime_resume(struct device *dev) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev); + + if (drvdata && !IS_ERR(drvdata->atclk)) + clk_prepare_enable(drvdata->atclk); + + return 0; +} +#endif + +static const struct dev_pm_ops 
stm_dev_pm_ops = { + SET_RUNTIME_PM_OPS(stm_runtime_suspend, stm_runtime_resume, NULL) +}; + +static struct amba_id stm_ids[] = { + { + .id = 0x0003b962, + .mask = 0x0003ffff, + .data = "STM32", + }, + { 0, 0}, +}; + +static struct amba_driver stm_driver = { + .drv = { + .name = "coresight-stm", + .owner = THIS_MODULE, + .pm = &stm_dev_pm_ops, + .suppress_bind_attrs = true, + }, + .probe = stm_probe, + .id_table = stm_ids, +}; + +builtin_amba_driver(stm_driver); diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c new file mode 100644 index 000000000..466af86fd --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -0,0 +1,604 @@ +/* + * Copyright(C) 2016 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/circ_buf.h> +#include <linux/coresight.h> +#include <linux/perf_event.h> +#include <linux/slab.h> +#include "coresight-priv.h" +#include "coresight-tmc.h" + +void tmc_etb_enable_hw(struct tmc_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + /* Wait for TMCSReady bit to be set */ + tmc_wait_for_tmcready(drvdata); + + writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); + writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | + TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | + TMC_FFCR_TRIGON_TRIGIN, + drvdata->base + TMC_FFCR); + + writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); + tmc_enable_hw(drvdata); + + CS_LOCK(drvdata->base); +} + +static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata) +{ + char *bufp; + u32 read_data; + int i; + + bufp = drvdata->buf; + while (1) { + for (i = 0; i < drvdata->memwidth; i++) { + read_data = readl_relaxed(drvdata->base + TMC_RRD); + if (read_data == 0xFFFFFFFF) + return; + memcpy(bufp, &read_data, 4); + bufp += 4; + } + } +} + +static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + tmc_flush_and_stop(drvdata); + /* + * When operating in sysFS mode the content of the buffer needs to be + * read before the TMC is disabled. 
+ */ + if (local_read(&drvdata->mode) == CS_MODE_SYSFS) + tmc_etb_dump_hw(drvdata); + tmc_disable_hw(drvdata); + + CS_LOCK(drvdata->base); +} + +static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + /* Wait for TMCSReady bit to be set */ + tmc_wait_for_tmcready(drvdata); + + writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE); + writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI, + drvdata->base + TMC_FFCR); + writel_relaxed(0x0, drvdata->base + TMC_BUFWM); + tmc_enable_hw(drvdata); + + CS_LOCK(drvdata->base); +} + +static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + tmc_flush_and_stop(drvdata); + tmc_disable_hw(drvdata); + + CS_LOCK(drvdata->base); +} + +static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode) +{ + int ret = 0; + bool used = false; + char *buf = NULL; + long val; + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + /* This shouldn't be happening */ + if (WARN_ON(mode != CS_MODE_SYSFS)) + return -EINVAL; + + /* + * If we don't have a buffer release the lock and allocate memory. + * Otherwise keep the lock and move along. + */ + spin_lock_irqsave(&drvdata->spinlock, flags); + if (!drvdata->buf) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + /* Allocating the memory here while outside of the spinlock */ + buf = kzalloc(drvdata->size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Let's try again */ + spin_lock_irqsave(&drvdata->spinlock, flags); + } + + if (drvdata->reading) { + ret = -EBUSY; + goto out; + } + + val = local_xchg(&drvdata->mode, mode); + /* + * In sysFS mode we can have multiple writers per sink. Since this + * sink is already enabled no memory is needed and the HW need not be + * touched. + */ + if (val == CS_MODE_SYSFS) + goto out; + + /* + * If drvdata::buf isn't NULL, memory was allocated for a previous + * trace run but wasn't read. If so simply zero-out the memory. + * Otherwise use the memory allocated above. + * + * The memory is freed when users read the buffer using the + * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for + * details. + */ + if (drvdata->buf) { + memset(drvdata->buf, 0, drvdata->size); + } else { + used = true; + drvdata->buf = buf; + } + + tmc_etb_enable_hw(drvdata); +out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + /* Free memory outside the spinlock if need be */ + if (!used && buf) + kfree(buf); + + if (!ret) + dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n"); + + return ret; +} + +static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode) +{ + int ret = 0; + long val; + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + /* This shouldn't be happening */ + if (WARN_ON(mode != CS_MODE_PERF)) + return -EINVAL; + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + ret = -EINVAL; + goto out; + } + + val = local_xchg(&drvdata->mode, mode); + /* + * In Perf mode there can be only one writer per sink. There + * is also no need to continue if the ETB/ETR is already operated + * from sysFS. 
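The perf path above claims the sink by atomically swapping drvdata->mode and refusing to continue unless the old value was CS_MODE_DISABLED. A minimal user-space analogy of that claim-by-exchange check, using C11 atomics instead of the kernel's local_t (all names hypothetical, not part of the patch):

#include <stdatomic.h>
#include <stdio.h>

enum { MODE_DISABLED, MODE_SYSFS, MODE_PERF };

static atomic_int sink_mode = MODE_DISABLED;	/* stand-in for drvdata->mode */

/* Whoever swaps MODE_DISABLED out owns the sink; everyone else backs off. */
static int claim_sink_for_perf(void)
{
	int old = atomic_exchange(&sink_mode, MODE_PERF);

	return (old == MODE_DISABLED) ? 0 : -1;
}

int main(void)
{
	printf("first claim:  %d\n", claim_sink_for_perf());	/* 0: sink is ours */
	printf("second claim: %d\n", claim_sink_for_perf());	/* -1: already in use */
	return 0;
}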
+ */ + if (val != CS_MODE_DISABLED) { + ret = -EINVAL; + goto out; + } + + tmc_etb_enable_hw(drvdata); +out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + return ret; +} + +static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode) +{ + switch (mode) { + case CS_MODE_SYSFS: + return tmc_enable_etf_sink_sysfs(csdev, mode); + case CS_MODE_PERF: + return tmc_enable_etf_sink_perf(csdev, mode); + } + + /* We shouldn't be here */ + return -EINVAL; +} + +static void tmc_disable_etf_sink(struct coresight_device *csdev) +{ + long val; + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return; + } + + val = local_xchg(&drvdata->mode, CS_MODE_DISABLED); + /* Disable the TMC only if it needs to */ + if (val != CS_MODE_DISABLED) + tmc_etb_disable_hw(drvdata); + + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n"); +} + +static int tmc_enable_etf_link(struct coresight_device *csdev, + int inport, int outport) +{ + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return -EBUSY; + } + + tmc_etf_enable_hw(drvdata); + local_set(&drvdata->mode, CS_MODE_SYSFS); + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + dev_info(drvdata->dev, "TMC-ETF enabled\n"); + return 0; +} + +static void tmc_disable_etf_link(struct coresight_device *csdev, + int inport, int outport) +{ + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return; + } + + tmc_etf_disable_hw(drvdata); + local_set(&drvdata->mode, CS_MODE_DISABLED); + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + dev_info(drvdata->dev, "TMC disabled\n"); +} + +static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu, + void **pages, int nr_pages, bool overwrite) +{ + int node; + struct cs_buffers *buf; + + if (cpu == -1) + cpu = smp_processor_id(); + node = cpu_to_node(cpu); + + /* Allocate memory structure for interaction with Perf */ + buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node); + if (!buf) + return NULL; + + buf->snapshot = overwrite; + buf->nr_pages = nr_pages; + buf->data_pages = pages; + + return buf; +} + +static void tmc_free_etf_buffer(void *config) +{ + struct cs_buffers *buf = config; + + kfree(buf); +} + +static int tmc_set_etf_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config) +{ + int ret = 0; + unsigned long head; + struct cs_buffers *buf = sink_config; + + /* wrap head around to the amount of space we have */ + head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); + + /* find the page to write to */ + buf->cur = head / PAGE_SIZE; + + /* and offset within that page */ + buf->offset = head % PAGE_SIZE; + + local_set(&buf->data_size, 0); + + return ret; +} + +static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config, bool *lost) +{ + long size = 0; + struct cs_buffers *buf = sink_config; + + if (buf) { + /* + * In snapshot mode ->data_size holds the new address of the + * ring buffer's head. 
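tmc_set_etf_buffer() above turns the free-running perf handle->head into a page index and an intra-page offset. A stand-alone check of that arithmetic, assuming a 4K page and a hypothetical eight-page AUX buffer (nr_pages must be a power of two for the wrap mask to hold):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long nr_pages = 8;			/* 32K of AUX space */
	unsigned long head = 0x9123;			/* perf head keeps growing */

	head &= (nr_pages << PAGE_SHIFT) - 1;		/* wrap into the buffer: 0x1123 */
	printf("cur = %lu, offset = 0x%lx\n",
	       head / PAGE_SIZE, head % PAGE_SIZE);	/* cur = 1, offset = 0x123 */
	return 0;
}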
The size itself is the whole address + * range since we want the latest information. + */ + if (buf->snapshot) + handle->head = local_xchg(&buf->data_size, + buf->nr_pages << PAGE_SHIFT); + /* + * Tell the tracer PMU how much we got in this run and if + * something went wrong along the way. Nobody else can use + * this cs_buffers instance until we are done. As such + * resetting parameters here and squaring off with the ring + * buffer API in the tracer PMU is fine. + */ + *lost = !!local_xchg(&buf->lost, 0); + size = local_xchg(&buf->data_size, 0); + } + + return size; +} + +static void tmc_update_etf_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config) +{ + int i, cur; + u32 *buf_ptr; + u32 read_ptr, write_ptr; + u32 status, to_read; + unsigned long offset; + struct cs_buffers *buf = sink_config; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + if (!buf) + return; + + /* This shouldn't happen */ + if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF)) + return; + + CS_UNLOCK(drvdata->base); + + tmc_flush_and_stop(drvdata); + + read_ptr = readl_relaxed(drvdata->base + TMC_RRP); + write_ptr = readl_relaxed(drvdata->base + TMC_RWP); + + /* + * Get a hold of the status register and see if a wrap around + * has occurred. If so adjust things accordingly. + */ + status = readl_relaxed(drvdata->base + TMC_STS); + if (status & TMC_STS_FULL) { + local_inc(&buf->lost); + to_read = drvdata->size; + } else { + to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size); + } + + /* + * The TMC RAM buffer may be bigger than the space available in the + * perf ring buffer (handle->size). If so advance the RRP so that we + * get the latest trace data. + */ + if (to_read > handle->size) { + u32 mask = 0; + + /* + * The value written to RRP must be byte-address aligned to + * the width of the trace memory databus _and_ to a frame + * boundary (16 byte), whichever is the biggest. For example, + * for 32-bit, 64-bit and 128-bit wide trace memory, the four + * LSBs must be 0s. For 256-bit wide trace memory, the five + * LSBs must be 0s. + */ + switch (drvdata->memwidth) { + case TMC_MEM_INTF_WIDTH_32BITS: + case TMC_MEM_INTF_WIDTH_64BITS: + case TMC_MEM_INTF_WIDTH_128BITS: + mask = GENMASK(31, 5); + break; + case TMC_MEM_INTF_WIDTH_256BITS: + mask = GENMASK(31, 6); + break; + } + + /* + * Make sure the new size is aligned in accordance with the + * requirement explained above. + */ + to_read = handle->size & mask; + /* Move the RAM read pointer up */ + read_ptr = (write_ptr + drvdata->size) - to_read; + /* Make sure we are still within our limits */ + if (read_ptr > (drvdata->size - 1)) + read_ptr -= drvdata->size; + /* Tell the HW */ + writel_relaxed(read_ptr, drvdata->base + TMC_RRP); + local_inc(&buf->lost); + } + + cur = buf->cur; + offset = buf->offset; + + /* for every byte to read */ + for (i = 0; i < to_read; i += 4) { + buf_ptr = buf->data_pages[cur] + offset; + *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD); + + offset += 4; + if (offset >= PAGE_SIZE) { + offset = 0; + cur++; + /* wrap around at the end of the buffer */ + cur &= buf->nr_pages - 1; + } + } + + /* + * In snapshot mode all we have to do is communicate to + * perf_aux_output_end() the address of the current head. In full + * trace mode the same function expects a size to move rb->aux_head + * forward. 
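When the TMC RAM holds more trace data than the perf buffer has room for, the update routine above rounds the read size down to the bus-width/frame alignment and walks RRP back from RWP, wrapping by hand. A compilable sketch of that pointer arithmetic with hypothetical sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size = 0x1000;			/* TMC RAM size, bytes */
	uint32_t handle_size = 0x02f0;		/* room left in the perf buffer */
	uint32_t write_ptr = 0x0040;		/* current RWP */
	uint32_t mask = 0xffffffe0;		/* GENMASK(31, 5): 32/64/128-bit bus */
	uint32_t to_read, read_ptr;

	to_read = handle_size & mask;		/* 0x02e0, frame aligned */
	read_ptr = (write_ptr + size) - to_read;
	if (read_ptr > size - 1)		/* stay within the RAM address range */
		read_ptr -= size;

	printf("to_read = 0x%x, new RRP = 0x%x\n", to_read, read_ptr);	/* 0x2e0, 0xd60 */
	return 0;
}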
+ */ + if (buf->snapshot) + local_set(&buf->data_size, (cur * PAGE_SIZE) + offset); + else + local_add(to_read, &buf->data_size); + + CS_LOCK(drvdata->base); +} + +static const struct coresight_ops_sink tmc_etf_sink_ops = { + .enable = tmc_enable_etf_sink, + .disable = tmc_disable_etf_sink, + .alloc_buffer = tmc_alloc_etf_buffer, + .free_buffer = tmc_free_etf_buffer, + .set_buffer = tmc_set_etf_buffer, + .reset_buffer = tmc_reset_etf_buffer, + .update_buffer = tmc_update_etf_buffer, +}; + +static const struct coresight_ops_link tmc_etf_link_ops = { + .enable = tmc_enable_etf_link, + .disable = tmc_disable_etf_link, +}; + +const struct coresight_ops tmc_etb_cs_ops = { + .sink_ops = &tmc_etf_sink_ops, +}; + +const struct coresight_ops tmc_etf_cs_ops = { + .sink_ops = &tmc_etf_sink_ops, + .link_ops = &tmc_etf_link_ops, +}; + +int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) +{ + long val; + enum tmc_mode mode; + int ret = 0; + unsigned long flags; + + /* config types are set a boot time and never change */ + if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB && + drvdata->config_type != TMC_CONFIG_TYPE_ETF)) + return -EINVAL; + + spin_lock_irqsave(&drvdata->spinlock, flags); + + if (drvdata->reading) { + ret = -EBUSY; + goto out; + } + + /* There is no point in reading a TMC in HW FIFO mode */ + mode = readl_relaxed(drvdata->base + TMC_MODE); + if (mode != TMC_MODE_CIRCULAR_BUFFER) { + ret = -EINVAL; + goto out; + } + + val = local_read(&drvdata->mode); + /* Don't interfere if operated from Perf */ + if (val == CS_MODE_PERF) { + ret = -EINVAL; + goto out; + } + + /* If drvdata::buf is NULL the trace data has been read already */ + if (drvdata->buf == NULL) { + ret = -EINVAL; + goto out; + } + + /* Disable the TMC if need be */ + if (val == CS_MODE_SYSFS) + tmc_etb_disable_hw(drvdata); + + drvdata->reading = true; +out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + return ret; +} + +int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) +{ + char *buf = NULL; + enum tmc_mode mode; + unsigned long flags; + + /* config types are set a boot time and never change */ + if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB && + drvdata->config_type != TMC_CONFIG_TYPE_ETF)) + return -EINVAL; + + spin_lock_irqsave(&drvdata->spinlock, flags); + + /* There is no point in reading a TMC in HW FIFO mode */ + mode = readl_relaxed(drvdata->base + TMC_MODE); + if (mode != TMC_MODE_CIRCULAR_BUFFER) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return -EINVAL; + } + + /* Re-enable the TMC if need be */ + if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { + /* + * The trace run will continue with the same allocated trace + * buffer. As such zero-out the buffer so that we don't end + * up with stale data. + * + * Since the tracer is still enabled drvdata::buf + * can't be NULL. + */ + memset(drvdata->buf, 0, drvdata->size); + tmc_etb_enable_hw(drvdata); + } else { + /* + * The ETB/ETF is not tracing and the buffer was just read. + * As such prepare to free the trace buffer. + */ + buf = drvdata->buf; + drvdata->buf = NULL; + } + + drvdata->reading = false; + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + /* + * Free allocated memory outside of the spinlock. There is no need + * to assert the validity of 'buf' since calling kfree(NULL) is safe. 
+ */ + kfree(buf); + + return 0; +} diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c new file mode 100644 index 000000000..688be9e06 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -0,0 +1,326 @@ +/* + * Copyright(C) 2016 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/coresight.h> +#include <linux/dma-mapping.h> +#include "coresight-priv.h" +#include "coresight-tmc.h" + +void tmc_etr_enable_hw(struct tmc_drvdata *drvdata) +{ + u32 axictl; + + /* Zero out the memory to help with debug */ + memset(drvdata->vaddr, 0, drvdata->size); + + CS_UNLOCK(drvdata->base); + + /* Wait for TMCSReady bit to be set */ + tmc_wait_for_tmcready(drvdata); + + writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ); + writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); + + axictl = readl_relaxed(drvdata->base + TMC_AXICTL); + axictl |= TMC_AXICTL_WR_BURST_16; + writel_relaxed(axictl, drvdata->base + TMC_AXICTL); + axictl &= ~TMC_AXICTL_SCT_GAT_MODE; + writel_relaxed(axictl, drvdata->base + TMC_AXICTL); + axictl = (axictl & + ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) | + TMC_AXICTL_PROT_CTL_B1; + writel_relaxed(axictl, drvdata->base + TMC_AXICTL); + + writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO); + writel_relaxed(0x0, drvdata->base + TMC_DBAHI); + writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | + TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | + TMC_FFCR_TRIGON_TRIGIN, + drvdata->base + TMC_FFCR); + writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); + tmc_enable_hw(drvdata); + + CS_LOCK(drvdata->base); +} + +static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata) +{ + u32 rwp, val; + + rwp = readl_relaxed(drvdata->base + TMC_RWP); + val = readl_relaxed(drvdata->base + TMC_STS); + + /* How much memory do we still have */ + if (val & BIT(0)) + drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr; + else + drvdata->buf = drvdata->vaddr; +} + +static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + tmc_flush_and_stop(drvdata); + /* + * When operating in sysFS mode the content of the buffer needs to be + * read before the TMC is disabled. + */ + if (local_read(&drvdata->mode) == CS_MODE_SYSFS) + tmc_etr_dump_hw(drvdata); + tmc_disable_hw(drvdata); + + CS_LOCK(drvdata->base); +} + +static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode) +{ + int ret = 0; + bool used = false; + long val; + unsigned long flags; + void __iomem *vaddr = NULL; + dma_addr_t paddr; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + /* This shouldn't be happening */ + if (WARN_ON(mode != CS_MODE_SYSFS)) + return -EINVAL; + + /* + * If we don't have a buffer release the lock and allocate memory. + * Otherwise keep the lock and move along. 
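The sysFS enable paths (ETF and ETR alike) follow the same shape: drop the spinlock, allocate, retake the lock, re-check, and free the candidate buffer if another writer supplied one in the meantime. A small user-space model of that pattern, with a pthread mutex standing in for the spinlock (names hypothetical):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_buf;

static int ensure_buffer(size_t size)
{
	void *candidate = NULL;

	pthread_mutex_lock(&lock);
	if (!shared_buf) {
		pthread_mutex_unlock(&lock);
		candidate = calloc(1, size);	/* sleeping allocation, lock dropped */
		if (!candidate)
			return -1;
		pthread_mutex_lock(&lock);	/* let's try again */
	}
	if (!shared_buf) {			/* nobody beat us to it */
		shared_buf = candidate;
		candidate = NULL;
	}
	pthread_mutex_unlock(&lock);
	free(candidate);			/* free(NULL) is a no-op */
	return 0;
}

int main(void)
{
	return ensure_buffer(4096);
}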
+ */ + spin_lock_irqsave(&drvdata->spinlock, flags); + if (!drvdata->vaddr) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + /* + * Contiguous memory can't be allocated while a spinlock is + * held. As such allocate memory here and free it if a buffer + * has already been allocated (from a previous session). + */ + vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size, + &paddr, GFP_KERNEL); + if (!vaddr) + return -ENOMEM; + + /* Let's try again */ + spin_lock_irqsave(&drvdata->spinlock, flags); + } + + if (drvdata->reading) { + ret = -EBUSY; + goto out; + } + + val = local_xchg(&drvdata->mode, mode); + /* + * In sysFS mode we can have multiple writers per sink. Since this + * sink is already enabled no memory is needed and the HW need not be + * touched. + */ + if (val == CS_MODE_SYSFS) + goto out; + + /* + * If drvdata::buf == NULL, use the memory allocated above. + * Otherwise a buffer still exists from a previous session, so + * simply use that. + */ + if (drvdata->buf == NULL) { + used = true; + drvdata->vaddr = vaddr; + drvdata->paddr = paddr; + drvdata->buf = drvdata->vaddr; + } + + memset(drvdata->vaddr, 0, drvdata->size); + + tmc_etr_enable_hw(drvdata); +out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + /* Free memory outside the spinlock if need be */ + if (!used && vaddr) + dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr); + + if (!ret) + dev_info(drvdata->dev, "TMC-ETR enabled\n"); + + return ret; +} + +static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode) +{ + int ret = 0; + long val; + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + /* This shouldn't be happening */ + if (WARN_ON(mode != CS_MODE_PERF)) + return -EINVAL; + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + ret = -EINVAL; + goto out; + } + + val = local_xchg(&drvdata->mode, mode); + /* + * In Perf mode there can be only one writer per sink. There + * is also no need to continue if the ETR is already operated + * from sysFS. 
+ */ + if (val != CS_MODE_DISABLED) { + ret = -EINVAL; + goto out; + } + + tmc_etr_enable_hw(drvdata); +out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + return ret; +} + +static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode) +{ + switch (mode) { + case CS_MODE_SYSFS: + return tmc_enable_etr_sink_sysfs(csdev, mode); + case CS_MODE_PERF: + return tmc_enable_etr_sink_perf(csdev, mode); + } + + /* We shouldn't be here */ + return -EINVAL; +} + +static void tmc_disable_etr_sink(struct coresight_device *csdev) +{ + long val; + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return; + } + + val = local_xchg(&drvdata->mode, CS_MODE_DISABLED); + /* Disable the TMC only if it needs to */ + if (val != CS_MODE_DISABLED) + tmc_etr_disable_hw(drvdata); + + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + dev_info(drvdata->dev, "TMC-ETR disabled\n"); +} + +static const struct coresight_ops_sink tmc_etr_sink_ops = { + .enable = tmc_enable_etr_sink, + .disable = tmc_disable_etr_sink, +}; + +const struct coresight_ops tmc_etr_cs_ops = { + .sink_ops = &tmc_etr_sink_ops, +}; + +int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) +{ + int ret = 0; + long val; + unsigned long flags; + + /* config types are set a boot time and never change */ + if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) + return -EINVAL; + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + ret = -EBUSY; + goto out; + } + + val = local_read(&drvdata->mode); + /* Don't interfere if operated from Perf */ + if (val == CS_MODE_PERF) { + ret = -EINVAL; + goto out; + } + + /* If drvdata::buf is NULL the trace data has been read already */ + if (drvdata->buf == NULL) { + ret = -EINVAL; + goto out; + } + + /* Disable the TMC if need be */ + if (val == CS_MODE_SYSFS) + tmc_etr_disable_hw(drvdata); + + drvdata->reading = true; +out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + return ret; +} + +int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) +{ + unsigned long flags; + dma_addr_t paddr; + void __iomem *vaddr = NULL; + + /* config types are set a boot time and never change */ + if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) + return -EINVAL; + + spin_lock_irqsave(&drvdata->spinlock, flags); + + /* RE-enable the TMC if need be */ + if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { + /* + * The trace run will continue with the same allocated trace + * buffer. The trace buffer is cleared in tmc_etr_enable_hw(), + * so we don't have to explicitly clear it. Also, since the + * tracer is still enabled drvdata::buf can't be NULL. + */ + tmc_etr_enable_hw(drvdata); + } else { + /* + * The ETR is not tracing and the buffer was just read. + * As such prepare to free the trace buffer. 
+ */ + vaddr = drvdata->vaddr; + paddr = drvdata->paddr; + drvdata->buf = drvdata->vaddr = NULL; + } + + drvdata->reading = false; + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + /* Free allocated memory out side of the spinlock */ + if (vaddr) + dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr); + + return 0; +} diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 1be191f5d..9e02ac963 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -30,127 +30,27 @@ #include <linux/amba/bus.h> #include "coresight-priv.h" +#include "coresight-tmc.h" -#define TMC_RSZ 0x004 -#define TMC_STS 0x00c -#define TMC_RRD 0x010 -#define TMC_RRP 0x014 -#define TMC_RWP 0x018 -#define TMC_TRG 0x01c -#define TMC_CTL 0x020 -#define TMC_RWD 0x024 -#define TMC_MODE 0x028 -#define TMC_LBUFLEVEL 0x02c -#define TMC_CBUFLEVEL 0x030 -#define TMC_BUFWM 0x034 -#define TMC_RRPHI 0x038 -#define TMC_RWPHI 0x03c -#define TMC_AXICTL 0x110 -#define TMC_DBALO 0x118 -#define TMC_DBAHI 0x11c -#define TMC_FFSR 0x300 -#define TMC_FFCR 0x304 -#define TMC_PSCR 0x308 -#define TMC_ITMISCOP0 0xee0 -#define TMC_ITTRFLIN 0xee8 -#define TMC_ITATBDATA0 0xeec -#define TMC_ITATBCTR2 0xef0 -#define TMC_ITATBCTR1 0xef4 -#define TMC_ITATBCTR0 0xef8 - -/* register description */ -/* TMC_CTL - 0x020 */ -#define TMC_CTL_CAPT_EN BIT(0) -/* TMC_STS - 0x00C */ -#define TMC_STS_TRIGGERED BIT(1) -/* TMC_AXICTL - 0x110 */ -#define TMC_AXICTL_PROT_CTL_B0 BIT(0) -#define TMC_AXICTL_PROT_CTL_B1 BIT(1) -#define TMC_AXICTL_SCT_GAT_MODE BIT(7) -#define TMC_AXICTL_WR_BURST_LEN 0xF00 -/* TMC_FFCR - 0x304 */ -#define TMC_FFCR_EN_FMT BIT(0) -#define TMC_FFCR_EN_TI BIT(1) -#define TMC_FFCR_FON_FLIN BIT(4) -#define TMC_FFCR_FON_TRIG_EVT BIT(5) -#define TMC_FFCR_FLUSHMAN BIT(6) -#define TMC_FFCR_TRIGON_TRIGIN BIT(8) -#define TMC_FFCR_STOP_ON_FLUSH BIT(12) - -#define TMC_STS_TRIGGERED_BIT 2 -#define TMC_FFCR_FLUSHMAN_BIT 6 - -enum tmc_config_type { - TMC_CONFIG_TYPE_ETB, - TMC_CONFIG_TYPE_ETR, - TMC_CONFIG_TYPE_ETF, -}; - -enum tmc_mode { - TMC_MODE_CIRCULAR_BUFFER, - TMC_MODE_SOFTWARE_FIFO, - TMC_MODE_HARDWARE_FIFO, -}; - -enum tmc_mem_intf_width { - TMC_MEM_INTF_WIDTH_32BITS = 0x2, - TMC_MEM_INTF_WIDTH_64BITS = 0x3, - TMC_MEM_INTF_WIDTH_128BITS = 0x4, - TMC_MEM_INTF_WIDTH_256BITS = 0x5, -}; - -/** - * struct tmc_drvdata - specifics associated to an TMC component - * @base: memory mapped base address for this component. - * @dev: the device entity associated to this component. - * @csdev: component vitals needed by the framework. - * @miscdev: specifics to handle "/dev/xyz.tmc" entry. - * @spinlock: only one at a time pls. - * @read_count: manages preparation of buffer for reading. - * @buf: area of memory where trace data get sent. - * @paddr: DMA start location in RAM. - * @vaddr: virtual representation of @paddr. - * @size: @buf size. - * @enable: this TMC is being used. - * @config_type: TMC variant, must be of type @tmc_config_type. - * @trigger_cntr: amount of words to store after a trigger. 
- */ -struct tmc_drvdata { - void __iomem *base; - struct device *dev; - struct coresight_device *csdev; - struct miscdevice miscdev; - spinlock_t spinlock; - int read_count; - bool reading; - char *buf; - dma_addr_t paddr; - void *vaddr; - u32 size; - bool enable; - enum tmc_config_type config_type; - u32 trigger_cntr; -}; - -static void tmc_wait_for_ready(struct tmc_drvdata *drvdata) +void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata) { /* Ensure formatter, unformatter and hardware fifo are empty */ if (coresight_timeout(drvdata->base, - TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) { + TMC_STS, TMC_STS_TMCREADY_BIT, 1)) { dev_err(drvdata->dev, "timeout observed when probing at offset %#x\n", TMC_STS); } } -static void tmc_flush_and_stop(struct tmc_drvdata *drvdata) +void tmc_flush_and_stop(struct tmc_drvdata *drvdata) { u32 ffcr; ffcr = readl_relaxed(drvdata->base + TMC_FFCR); ffcr |= TMC_FFCR_STOP_ON_FLUSH; writel_relaxed(ffcr, drvdata->base + TMC_FFCR); - ffcr |= TMC_FFCR_FLUSHMAN; + ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT); writel_relaxed(ffcr, drvdata->base + TMC_FFCR); /* Ensure flush completes */ if (coresight_timeout(drvdata->base, @@ -160,338 +60,73 @@ static void tmc_flush_and_stop(struct tmc_drvdata *drvdata) TMC_FFCR); } - tmc_wait_for_ready(drvdata); + tmc_wait_for_tmcready(drvdata); } -static void tmc_enable_hw(struct tmc_drvdata *drvdata) +void tmc_enable_hw(struct tmc_drvdata *drvdata) { writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL); } -static void tmc_disable_hw(struct tmc_drvdata *drvdata) +void tmc_disable_hw(struct tmc_drvdata *drvdata) { writel_relaxed(0x0, drvdata->base + TMC_CTL); } -static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata) -{ - /* Zero out the memory to help with debug */ - memset(drvdata->buf, 0, drvdata->size); - - CS_UNLOCK(drvdata->base); - - writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); - writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | - TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | - TMC_FFCR_TRIGON_TRIGIN, - drvdata->base + TMC_FFCR); - - writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); - tmc_enable_hw(drvdata); - - CS_LOCK(drvdata->base); -} - -static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata) -{ - u32 axictl; - - /* Zero out the memory to help with debug */ - memset(drvdata->vaddr, 0, drvdata->size); - - CS_UNLOCK(drvdata->base); - - writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ); - writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); - - axictl = readl_relaxed(drvdata->base + TMC_AXICTL); - axictl |= TMC_AXICTL_WR_BURST_LEN; - writel_relaxed(axictl, drvdata->base + TMC_AXICTL); - axictl &= ~TMC_AXICTL_SCT_GAT_MODE; - writel_relaxed(axictl, drvdata->base + TMC_AXICTL); - axictl = (axictl & - ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) | - TMC_AXICTL_PROT_CTL_B1; - writel_relaxed(axictl, drvdata->base + TMC_AXICTL); - - writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO); - writel_relaxed(0x0, drvdata->base + TMC_DBAHI); - writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | - TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | - TMC_FFCR_TRIGON_TRIGIN, - drvdata->base + TMC_FFCR); - writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); - tmc_enable_hw(drvdata); - - CS_LOCK(drvdata->base); -} - -static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata) -{ - CS_UNLOCK(drvdata->base); - - writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE); - writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI, - drvdata->base + TMC_FFCR); - writel_relaxed(0x0, 
drvdata->base + TMC_BUFWM); - tmc_enable_hw(drvdata); - - CS_LOCK(drvdata->base); -} - -static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) -{ - unsigned long flags; - - spin_lock_irqsave(&drvdata->spinlock, flags); - if (drvdata->reading) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); - return -EBUSY; - } - - if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { - tmc_etb_enable_hw(drvdata); - } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { - tmc_etr_enable_hw(drvdata); - } else { - if (mode == TMC_MODE_CIRCULAR_BUFFER) - tmc_etb_enable_hw(drvdata); - else - tmc_etf_enable_hw(drvdata); - } - drvdata->enable = true; - spin_unlock_irqrestore(&drvdata->spinlock, flags); - - dev_info(drvdata->dev, "TMC enabled\n"); - return 0; -} - -static int tmc_enable_sink(struct coresight_device *csdev, u32 mode) -{ - struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - - return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER); -} - -static int tmc_enable_link(struct coresight_device *csdev, int inport, - int outport) +static int tmc_read_prepare(struct tmc_drvdata *drvdata) { - struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - - return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO); -} + int ret = 0; -static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata) -{ - enum tmc_mem_intf_width memwidth; - u8 memwords; - char *bufp; - u32 read_data; - int i; - - memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10); - if (memwidth == TMC_MEM_INTF_WIDTH_32BITS) - memwords = 1; - else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS) - memwords = 2; - else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS) - memwords = 4; - else - memwords = 8; - - bufp = drvdata->buf; - while (1) { - for (i = 0; i < memwords; i++) { - read_data = readl_relaxed(drvdata->base + TMC_RRD); - if (read_data == 0xFFFFFFFF) - return; - memcpy(bufp, &read_data, 4); - bufp += 4; - } + switch (drvdata->config_type) { + case TMC_CONFIG_TYPE_ETB: + case TMC_CONFIG_TYPE_ETF: + ret = tmc_read_prepare_etb(drvdata); + break; + case TMC_CONFIG_TYPE_ETR: + ret = tmc_read_prepare_etr(drvdata); + break; + default: + ret = -EINVAL; } -} - -static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata) -{ - CS_UNLOCK(drvdata->base); - - tmc_flush_and_stop(drvdata); - tmc_etb_dump_hw(drvdata); - tmc_disable_hw(drvdata); - - CS_LOCK(drvdata->base); -} - -static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata) -{ - u32 rwp, val; - rwp = readl_relaxed(drvdata->base + TMC_RWP); - val = readl_relaxed(drvdata->base + TMC_STS); - - /* How much memory do we still have */ - if (val & BIT(0)) - drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr; - else - drvdata->buf = drvdata->vaddr; -} + if (!ret) + dev_info(drvdata->dev, "TMC read start\n"); -static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) -{ - CS_UNLOCK(drvdata->base); - - tmc_flush_and_stop(drvdata); - tmc_etr_dump_hw(drvdata); - tmc_disable_hw(drvdata); - - CS_LOCK(drvdata->base); -} - -static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata) -{ - CS_UNLOCK(drvdata->base); - - tmc_flush_and_stop(drvdata); - tmc_disable_hw(drvdata); - - CS_LOCK(drvdata->base); + return ret; } -static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode) +static int tmc_read_unprepare(struct tmc_drvdata *drvdata) { - unsigned long flags; - - spin_lock_irqsave(&drvdata->spinlock, flags); - if (drvdata->reading) - goto out; + int ret = 0; - if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { - tmc_etb_disable_hw(drvdata); - } else if 
(drvdata->config_type == TMC_CONFIG_TYPE_ETR) { - tmc_etr_disable_hw(drvdata); - } else { - if (mode == TMC_MODE_CIRCULAR_BUFFER) - tmc_etb_disable_hw(drvdata); - else - tmc_etf_disable_hw(drvdata); + switch (drvdata->config_type) { + case TMC_CONFIG_TYPE_ETB: + case TMC_CONFIG_TYPE_ETF: + ret = tmc_read_unprepare_etb(drvdata); + break; + case TMC_CONFIG_TYPE_ETR: + ret = tmc_read_unprepare_etr(drvdata); + break; + default: + ret = -EINVAL; } -out: - drvdata->enable = false; - spin_unlock_irqrestore(&drvdata->spinlock, flags); - - dev_info(drvdata->dev, "TMC disabled\n"); -} - -static void tmc_disable_sink(struct coresight_device *csdev) -{ - struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - - tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER); -} - -static void tmc_disable_link(struct coresight_device *csdev, int inport, - int outport) -{ - struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - - tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO); -} - -static const struct coresight_ops_sink tmc_sink_ops = { - .enable = tmc_enable_sink, - .disable = tmc_disable_sink, -}; - -static const struct coresight_ops_link tmc_link_ops = { - .enable = tmc_enable_link, - .disable = tmc_disable_link, -}; - -static const struct coresight_ops tmc_etb_cs_ops = { - .sink_ops = &tmc_sink_ops, -}; - -static const struct coresight_ops tmc_etr_cs_ops = { - .sink_ops = &tmc_sink_ops, -}; - -static const struct coresight_ops tmc_etf_cs_ops = { - .sink_ops = &tmc_sink_ops, - .link_ops = &tmc_link_ops, -}; - -static int tmc_read_prepare(struct tmc_drvdata *drvdata) -{ - int ret; - unsigned long flags; - enum tmc_mode mode; - spin_lock_irqsave(&drvdata->spinlock, flags); - if (!drvdata->enable) - goto out; + if (!ret) + dev_info(drvdata->dev, "TMC read end\n"); - if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { - tmc_etb_disable_hw(drvdata); - } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { - tmc_etr_disable_hw(drvdata); - } else { - mode = readl_relaxed(drvdata->base + TMC_MODE); - if (mode == TMC_MODE_CIRCULAR_BUFFER) { - tmc_etb_disable_hw(drvdata); - } else { - ret = -ENODEV; - goto err; - } - } -out: - drvdata->reading = true; - spin_unlock_irqrestore(&drvdata->spinlock, flags); - - dev_info(drvdata->dev, "TMC read start\n"); - return 0; -err: - spin_unlock_irqrestore(&drvdata->spinlock, flags); return ret; } -static void tmc_read_unprepare(struct tmc_drvdata *drvdata) -{ - unsigned long flags; - enum tmc_mode mode; - - spin_lock_irqsave(&drvdata->spinlock, flags); - if (!drvdata->enable) - goto out; - - if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { - tmc_etb_enable_hw(drvdata); - } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { - tmc_etr_enable_hw(drvdata); - } else { - mode = readl_relaxed(drvdata->base + TMC_MODE); - if (mode == TMC_MODE_CIRCULAR_BUFFER) - tmc_etb_enable_hw(drvdata); - } -out: - drvdata->reading = false; - spin_unlock_irqrestore(&drvdata->spinlock, flags); - - dev_info(drvdata->dev, "TMC read end\n"); -} - static int tmc_open(struct inode *inode, struct file *file) { + int ret; struct tmc_drvdata *drvdata = container_of(file->private_data, struct tmc_drvdata, miscdev); - int ret = 0; - - if (drvdata->read_count++) - goto out; ret = tmc_read_prepare(drvdata); if (ret) return ret; -out: + nonseekable_open(inode, file); dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__); @@ -531,19 +166,14 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len, static int tmc_release(struct inode *inode, struct file *file) { + int 
ret; struct tmc_drvdata *drvdata = container_of(file->private_data, struct tmc_drvdata, miscdev); - if (--drvdata->read_count) { - if (drvdata->read_count < 0) { - dev_err(drvdata->dev, "mismatched close\n"); - drvdata->read_count = 0; - } - goto out; - } + ret = tmc_read_unprepare(drvdata); + if (ret) + return ret; - tmc_read_unprepare(drvdata); -out: dev_dbg(drvdata->dev, "%s: released\n", __func__); return 0; } @@ -556,56 +186,71 @@ static const struct file_operations tmc_fops = { .llseek = no_llseek, }; -static ssize_t status_show(struct device *dev, - struct device_attribute *attr, char *buf) +static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid) { - unsigned long flags; - u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg; - u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr; - u32 devid; - struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); + enum tmc_mem_intf_width memwidth; - pm_runtime_get_sync(drvdata->dev); - spin_lock_irqsave(&drvdata->spinlock, flags); - CS_UNLOCK(drvdata->base); - - tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ); - tmc_sts = readl_relaxed(drvdata->base + TMC_STS); - tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP); - tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP); - tmc_trg = readl_relaxed(drvdata->base + TMC_TRG); - tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL); - tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR); - tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR); - tmc_mode = readl_relaxed(drvdata->base + TMC_MODE); - tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR); - devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); + /* + * Excerpt from the TRM: + * + * DEVID::MEMWIDTH[10:8] + * 0x2 Memory interface databus is 32 bits wide. + * 0x3 Memory interface databus is 64 bits wide. + * 0x4 Memory interface databus is 128 bits wide. + * 0x5 Memory interface databus is 256 bits wide. 
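The switch that follows decodes DEVID[10:8] exactly as the excerpt describes. A stand-alone illustration of that bit-field extraction, with a hypothetical helper playing the role of the kernel's BMVAL() macro:

#include <stdint.h>
#include <stdio.h>

/* Extract bits [msb:lsb] of a register value, BMVAL()-style. */
static uint32_t bits(uint32_t val, unsigned int lsb, unsigned int msb)
{
	return (val >> lsb) & ((1U << (msb - lsb + 1)) - 1);
}

int main(void)
{
	uint32_t devid = 0x300;		/* example DEVID with MEMWIDTH = 0x3 */

	printf("MEMWIDTH = %u\n", bits(devid, 8, 10));	/* 3: 64-bit databus */
	return 0;
}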
+ */ + switch (BMVAL(devid, 8, 10)) { + case 0x2: + memwidth = TMC_MEM_INTF_WIDTH_32BITS; + break; + case 0x3: + memwidth = TMC_MEM_INTF_WIDTH_64BITS; + break; + case 0x4: + memwidth = TMC_MEM_INTF_WIDTH_128BITS; + break; + case 0x5: + memwidth = TMC_MEM_INTF_WIDTH_256BITS; + break; + default: + memwidth = 0; + } - CS_LOCK(drvdata->base); - spin_unlock_irqrestore(&drvdata->spinlock, flags); - pm_runtime_put(drvdata->dev); - - return sprintf(buf, - "Depth:\t\t0x%x\n" - "Status:\t\t0x%x\n" - "RAM read ptr:\t0x%x\n" - "RAM wrt ptr:\t0x%x\n" - "Trigger cnt:\t0x%x\n" - "Control:\t0x%x\n" - "Flush status:\t0x%x\n" - "Flush ctrl:\t0x%x\n" - "Mode:\t\t0x%x\n" - "PSRC:\t\t0x%x\n" - "DEVID:\t\t0x%x\n", - tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg, - tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid); - - return -EINVAL; + return memwidth; } -static DEVICE_ATTR_RO(status); -static ssize_t trigger_cntr_show(struct device *dev, - struct device_attribute *attr, char *buf) +#define coresight_tmc_simple_func(name, offset) \ + coresight_simple_func(struct tmc_drvdata, name, offset) + +coresight_tmc_simple_func(rsz, TMC_RSZ); +coresight_tmc_simple_func(sts, TMC_STS); +coresight_tmc_simple_func(rrp, TMC_RRP); +coresight_tmc_simple_func(rwp, TMC_RWP); +coresight_tmc_simple_func(trg, TMC_TRG); +coresight_tmc_simple_func(ctl, TMC_CTL); +coresight_tmc_simple_func(ffsr, TMC_FFSR); +coresight_tmc_simple_func(ffcr, TMC_FFCR); +coresight_tmc_simple_func(mode, TMC_MODE); +coresight_tmc_simple_func(pscr, TMC_PSCR); +coresight_tmc_simple_func(devid, CORESIGHT_DEVID); + +static struct attribute *coresight_tmc_mgmt_attrs[] = { + &dev_attr_rsz.attr, + &dev_attr_sts.attr, + &dev_attr_rrp.attr, + &dev_attr_rwp.attr, + &dev_attr_trg.attr, + &dev_attr_ctl.attr, + &dev_attr_ffsr.attr, + &dev_attr_ffcr.attr, + &dev_attr_mode.attr, + &dev_attr_pscr.attr, + &dev_attr_devid.attr, + NULL, +}; + +ssize_t trigger_cntr_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); unsigned long val = drvdata->trigger_cntr; @@ -630,26 +275,25 @@ static ssize_t trigger_cntr_store(struct device *dev, } static DEVICE_ATTR_RW(trigger_cntr); -static struct attribute *coresight_etb_attrs[] = { +static struct attribute *coresight_tmc_attrs[] = { &dev_attr_trigger_cntr.attr, - &dev_attr_status.attr, NULL, }; -ATTRIBUTE_GROUPS(coresight_etb); -static struct attribute *coresight_etr_attrs[] = { - &dev_attr_trigger_cntr.attr, - &dev_attr_status.attr, - NULL, +static const struct attribute_group coresight_tmc_group = { + .attrs = coresight_tmc_attrs, }; -ATTRIBUTE_GROUPS(coresight_etr); -static struct attribute *coresight_etf_attrs[] = { - &dev_attr_trigger_cntr.attr, - &dev_attr_status.attr, +static const struct attribute_group coresight_tmc_mgmt_group = { + .attrs = coresight_tmc_mgmt_attrs, + .name = "mgmt", +}; + +const struct attribute_group *coresight_tmc_groups[] = { + &coresight_tmc_group, + &coresight_tmc_mgmt_group, NULL, }; -ATTRIBUTE_GROUPS(coresight_etf); static int tmc_probe(struct amba_device *adev, const struct amba_id *id) { @@ -688,6 +332,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); drvdata->config_type = BMVAL(devid, 6, 7); + drvdata->memwidth = tmc_get_memwidth(devid); if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { if (np) @@ -702,20 +347,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) pm_runtime_put(&adev->dev); - if 
(drvdata->config_type == TMC_CONFIG_TYPE_ETR) { - drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size, - &drvdata->paddr, GFP_KERNEL); - if (!drvdata->vaddr) - return -ENOMEM; - - memset(drvdata->vaddr, 0, drvdata->size); - drvdata->buf = drvdata->vaddr; - } else { - drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL); - if (!drvdata->buf) - return -ENOMEM; - } - desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); if (!desc) { ret = -ENOMEM; @@ -725,20 +356,18 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) desc->pdata = pdata; desc->dev = dev; desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER; + desc->groups = coresight_tmc_groups; if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { desc->type = CORESIGHT_DEV_TYPE_SINK; desc->ops = &tmc_etb_cs_ops; - desc->groups = coresight_etb_groups; } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { desc->type = CORESIGHT_DEV_TYPE_SINK; desc->ops = &tmc_etr_cs_ops; - desc->groups = coresight_etr_groups; } else { desc->type = CORESIGHT_DEV_TYPE_LINKSINK; desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO; desc->ops = &tmc_etf_cs_ops; - desc->groups = coresight_etf_groups; } drvdata->csdev = coresight_register(desc); @@ -754,7 +383,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) if (ret) goto err_misc_register; - dev_info(dev, "TMC initialized\n"); return 0; err_misc_register: diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h new file mode 100644 index 000000000..5c5fe2ad2 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-tmc.h @@ -0,0 +1,140 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef _CORESIGHT_TMC_H +#define _CORESIGHT_TMC_H + +#include <linux/miscdevice.h> + +#define TMC_RSZ 0x004 +#define TMC_STS 0x00c +#define TMC_RRD 0x010 +#define TMC_RRP 0x014 +#define TMC_RWP 0x018 +#define TMC_TRG 0x01c +#define TMC_CTL 0x020 +#define TMC_RWD 0x024 +#define TMC_MODE 0x028 +#define TMC_LBUFLEVEL 0x02c +#define TMC_CBUFLEVEL 0x030 +#define TMC_BUFWM 0x034 +#define TMC_RRPHI 0x038 +#define TMC_RWPHI 0x03c +#define TMC_AXICTL 0x110 +#define TMC_DBALO 0x118 +#define TMC_DBAHI 0x11c +#define TMC_FFSR 0x300 +#define TMC_FFCR 0x304 +#define TMC_PSCR 0x308 +#define TMC_ITMISCOP0 0xee0 +#define TMC_ITTRFLIN 0xee8 +#define TMC_ITATBDATA0 0xeec +#define TMC_ITATBCTR2 0xef0 +#define TMC_ITATBCTR1 0xef4 +#define TMC_ITATBCTR0 0xef8 + +/* register description */ +/* TMC_CTL - 0x020 */ +#define TMC_CTL_CAPT_EN BIT(0) +/* TMC_STS - 0x00C */ +#define TMC_STS_TMCREADY_BIT 2 +#define TMC_STS_FULL BIT(0) +#define TMC_STS_TRIGGERED BIT(1) +/* TMC_AXICTL - 0x110 */ +#define TMC_AXICTL_PROT_CTL_B0 BIT(0) +#define TMC_AXICTL_PROT_CTL_B1 BIT(1) +#define TMC_AXICTL_SCT_GAT_MODE BIT(7) +#define TMC_AXICTL_WR_BURST_16 0xF00 +/* TMC_FFCR - 0x304 */ +#define TMC_FFCR_FLUSHMAN_BIT 6 +#define TMC_FFCR_EN_FMT BIT(0) +#define TMC_FFCR_EN_TI BIT(1) +#define TMC_FFCR_FON_FLIN BIT(4) +#define TMC_FFCR_FON_TRIG_EVT BIT(5) +#define TMC_FFCR_TRIGON_TRIGIN BIT(8) +#define TMC_FFCR_STOP_ON_FLUSH BIT(12) + + +enum tmc_config_type { + TMC_CONFIG_TYPE_ETB, + TMC_CONFIG_TYPE_ETR, + TMC_CONFIG_TYPE_ETF, +}; + +enum tmc_mode { + TMC_MODE_CIRCULAR_BUFFER, + TMC_MODE_SOFTWARE_FIFO, + TMC_MODE_HARDWARE_FIFO, +}; + +enum tmc_mem_intf_width { + TMC_MEM_INTF_WIDTH_32BITS = 1, + TMC_MEM_INTF_WIDTH_64BITS = 2, + TMC_MEM_INTF_WIDTH_128BITS = 4, + TMC_MEM_INTF_WIDTH_256BITS = 8, +}; + +/** + * struct tmc_drvdata - specifics associated to an TMC component + * @base: memory mapped base address for this component. + * @dev: the device entity associated to this component. + * @csdev: component vitals needed by the framework. + * @miscdev: specifics to handle "/dev/xyz.tmc" entry. + * @spinlock: only one at a time pls. + * @buf: area of memory where trace data get sent. + * @paddr: DMA start location in RAM. + * @vaddr: virtual representation of @paddr. + * @size: @buf size. + * @mode: how this TMC is being used. + * @config_type: TMC variant, must be of type @tmc_config_type. + * @memwidth: width of the memory interface databus, in bytes. + * @trigger_cntr: amount of words to store after a trigger. 
+ */ +struct tmc_drvdata { + void __iomem *base; + struct device *dev; + struct coresight_device *csdev; + struct miscdevice miscdev; + spinlock_t spinlock; + bool reading; + char *buf; + dma_addr_t paddr; + void __iomem *vaddr; + u32 size; + local_t mode; + enum tmc_config_type config_type; + enum tmc_mem_intf_width memwidth; + u32 trigger_cntr; +}; + +/* Generic functions */ +void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata); +void tmc_flush_and_stop(struct tmc_drvdata *drvdata); +void tmc_enable_hw(struct tmc_drvdata *drvdata); +void tmc_disable_hw(struct tmc_drvdata *drvdata); + +/* ETB/ETF functions */ +int tmc_read_prepare_etb(struct tmc_drvdata *drvdata); +int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata); +extern const struct coresight_ops tmc_etb_cs_ops; +extern const struct coresight_ops tmc_etf_cs_ops; + +/* ETR functions */ +int tmc_read_prepare_etr(struct tmc_drvdata *drvdata); +int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata); +extern const struct coresight_ops tmc_etr_cs_ops; +#endif diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 8fb09d923..4e471e2e9 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -167,7 +167,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) if (IS_ERR(drvdata->csdev)) return PTR_ERR(drvdata->csdev); - dev_info(dev, "TPIU initialized\n"); return 0; } diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 2ea596109..d08d1ab9b 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -43,7 +43,15 @@ struct coresight_node { * When operating Coresight drivers from the sysFS interface, only a single * path can exist from a tracer (associated to a CPU) to a sink. */ -static DEFINE_PER_CPU(struct list_head *, sysfs_path); +static DEFINE_PER_CPU(struct list_head *, tracer_path); + +/* + * As of this writing only a single STM can be found in CS topologies. Since + * there is no way to know if we'll ever see more and what kind of + * configuration they will enact, for the time being only define a single path + * for STM. + */ +static struct list_head *stm_path; static int coresight_id_match(struct device *dev, void *data) { @@ -257,15 +265,27 @@ static void coresight_disable_source(struct coresight_device *csdev) void coresight_disable_path(struct list_head *path) { + u32 type; struct coresight_node *nd; struct coresight_device *csdev, *parent, *child; list_for_each_entry(nd, path, link) { csdev = nd->csdev; + type = csdev->type; + + /* + * ETF devices are tricky... They can be a link or a sink, + * depending on how they are configured. If an ETF has been + * "activated" it will be configured as a sink, otherwise + * go ahead with the link configuration. + */ + if (type == CORESIGHT_DEV_TYPE_LINKSINK) + type = (csdev == coresight_get_sink(path)) ? + CORESIGHT_DEV_TYPE_SINK : + CORESIGHT_DEV_TYPE_LINK; - switch (csdev->type) { + switch (type) { case CORESIGHT_DEV_TYPE_SINK: - case CORESIGHT_DEV_TYPE_LINKSINK: coresight_disable_sink(csdev); break; case CORESIGHT_DEV_TYPE_SOURCE: @@ -286,15 +306,27 @@ int coresight_enable_path(struct list_head *path, u32 mode) { int ret = 0; + u32 type; struct coresight_node *nd; struct coresight_device *csdev, *parent, *child; list_for_each_entry_reverse(nd, path, link) { csdev = nd->csdev; + type = csdev->type; - switch (csdev->type) { + /* + * ETF devices are tricky... 
They can be a link or a sink, + * depending on how they are configured. If an ETF has been + * "activated" it will be configured as a sink, otherwise + * go ahead with the link configuration. + */ + if (type == CORESIGHT_DEV_TYPE_LINKSINK) + type = (csdev == coresight_get_sink(path)) ? + CORESIGHT_DEV_TYPE_SINK : + CORESIGHT_DEV_TYPE_LINK; + + switch (type) { case CORESIGHT_DEV_TYPE_SINK: - case CORESIGHT_DEV_TYPE_LINKSINK: ret = coresight_enable_sink(csdev, mode); if (ret) goto err; @@ -353,7 +385,6 @@ static int _coresight_build_path(struct coresight_device *csdev, int i; bool found = false; struct coresight_node *node; - struct coresight_connection *conn; /* An activated sink has been found. Enqueue the element */ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || @@ -362,8 +393,9 @@ static int _coresight_build_path(struct coresight_device *csdev, /* Not a sink - recursively explore each port found on this element */ for (i = 0; i < csdev->nr_outport; i++) { - conn = &csdev->conns[i]; - if (_coresight_build_path(conn->child_dev, path) == 0) { + struct coresight_device *child_dev = csdev->conns[i].child_dev; + + if (child_dev && _coresight_build_path(child_dev, path) == 0) { found = true; break; } @@ -393,6 +425,7 @@ out: struct list_head *coresight_build_path(struct coresight_device *csdev) { struct list_head *path; + int rc; path = kzalloc(sizeof(struct list_head), GFP_KERNEL); if (!path) @@ -400,9 +433,10 @@ struct list_head *coresight_build_path(struct coresight_device *csdev) INIT_LIST_HEAD(path); - if (_coresight_build_path(csdev, path)) { + rc = _coresight_build_path(csdev, path); + if (rc) { kfree(path); - path = NULL; + return ERR_PTR(rc); } return path; @@ -432,24 +466,52 @@ void coresight_release_path(struct list_head *path) path = NULL; } +/** coresight_validate_source - make sure a source has the right credentials + * @csdev: the device structure for a source. + * @function: the function this was called from. + * + * Assumes the coresight_mutex is held. + */ +static int coresight_validate_source(struct coresight_device *csdev, + const char *function) +{ + u32 type, subtype; + + type = csdev->type; + subtype = csdev->subtype.source_subtype; + + if (type != CORESIGHT_DEV_TYPE_SOURCE) { + dev_err(&csdev->dev, "wrong device type in %s\n", function); + return -EINVAL; + } + + if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC && + subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE) { + dev_err(&csdev->dev, "wrong device subtype in %s\n", function); + return -EINVAL; + } + + return 0; +} + int coresight_enable(struct coresight_device *csdev) { - int ret = 0; - int cpu; + int cpu, ret = 0; struct list_head *path; mutex_lock(&coresight_mutex); - if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { - ret = -EINVAL; - dev_err(&csdev->dev, "wrong device type in %s\n", __func__); + + ret = coresight_validate_source(csdev, __func__); + if (ret) goto out; - } + if (csdev->enable) goto out; path = coresight_build_path(csdev); - if (!path) { + if (IS_ERR(path)) { pr_err("building path(s) failed\n"); + ret = PTR_ERR(path); goto out; } @@ -461,15 +523,25 @@ int coresight_enable(struct coresight_device *csdev) if (ret) goto err_source; - /* - * When working from sysFS it is important to keep track - * of the paths that were created so that they can be - * undone in 'coresight_disable()'. Since there can only - * be a single session per tracer (when working from sysFS) - * a per-cpu variable will do just fine. 
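coresight_build_path() now reports failures with the kernel's ERR_PTR() convention and coresight_enable() checks the result with IS_ERR()/PTR_ERR(). A user-space model of that encoding, purely for illustration (the real macros live in include/linux/err.h):

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)		{ return (void *)err; }
static int   is_err(const void *p)	{ return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static long  ptr_err(const void *p)	{ return (long)p; }

static int path_storage;

/* Return a valid pointer on success, an encoded -errno on failure. */
static void *build_path(int fail)
{
	return fail ? err_ptr(-12 /* -ENOMEM */) : (void *)&path_storage;
}

int main(void)
{
	void *path = build_path(1);

	if (is_err(path))
		printf("building path(s) failed: %ld\n", ptr_err(path));
	return 0;
}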
- */ - cpu = source_ops(csdev)->cpu_id(csdev); - per_cpu(sysfs_path, cpu) = path; + switch (csdev->subtype.source_subtype) { + case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC: + /* + * When working from sysFS it is important to keep track + * of the paths that were created so that they can be + * undone in 'coresight_disable()'. Since there can only + * be a single session per tracer (when working from sysFS) + * a per-cpu variable will do just fine. + */ + cpu = source_ops(csdev)->cpu_id(csdev); + per_cpu(tracer_path, cpu) = path; + break; + case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE: + stm_path = path; + break; + default: + /* We can't be here */ + break; + } out: mutex_unlock(&coresight_mutex); @@ -486,23 +558,36 @@ EXPORT_SYMBOL_GPL(coresight_enable); void coresight_disable(struct coresight_device *csdev) { - int cpu; - struct list_head *path; + int cpu, ret; + struct list_head *path = NULL; mutex_lock(&coresight_mutex); - if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { - dev_err(&csdev->dev, "wrong device type in %s\n", __func__); + + ret = coresight_validate_source(csdev, __func__); + if (ret) goto out; - } + if (!csdev->enable) goto out; - cpu = source_ops(csdev)->cpu_id(csdev); - path = per_cpu(sysfs_path, cpu); + switch (csdev->subtype.source_subtype) { + case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC: + cpu = source_ops(csdev)->cpu_id(csdev); + path = per_cpu(tracer_path, cpu); + per_cpu(tracer_path, cpu) = NULL; + break; + case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE: + path = stm_path; + stm_path = NULL; + break; + default: + /* We can't be here */ + break; + } + coresight_disable_source(csdev); coresight_disable_path(path); coresight_release_path(path); - per_cpu(sysfs_path, cpu) = NULL; out: mutex_unlock(&coresight_mutex); @@ -514,7 +599,7 @@ static ssize_t enable_sink_show(struct device *dev, { struct coresight_device *csdev = to_coresight_device(dev); - return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->activated); + return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->activated); } static ssize_t enable_sink_store(struct device *dev, @@ -544,7 +629,7 @@ static ssize_t enable_source_show(struct device *dev, { struct coresight_device *csdev = to_coresight_device(dev); - return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable); + return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->enable); } static ssize_t enable_source_store(struct device *dev, diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index 4272f2ce5..1be543e8e 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -71,6 +71,15 @@ static int intel_th_probe(struct device *dev) if (ret) return ret; + if (thdrv->attr_group) { + ret = sysfs_create_group(&thdev->dev.kobj, thdrv->attr_group); + if (ret) { + thdrv->remove(thdev); + + return ret; + } + } + if (thdev->type == INTEL_TH_OUTPUT && !intel_th_output_assigned(thdev)) ret = hubdrv->assign(hub, thdev); @@ -91,6 +100,9 @@ static int intel_th_remove(struct device *dev) return err; } + if (thdrv->attr_group) + sysfs_remove_group(&thdev->dev.kobj, thdrv->attr_group); + thdrv->remove(thdev); if (intel_th_output_assigned(thdev)) { @@ -171,7 +183,14 @@ static DEVICE_ATTR_RO(port); static int intel_th_output_activate(struct intel_th_device *thdev) { - struct intel_th_driver *thdrv = to_intel_th_driver(thdev->dev.driver); + struct intel_th_driver *thdrv = + to_intel_th_driver_or_null(thdev->dev.driver); + + if (!thdrv) + return -ENODEV; + + if (!try_module_get(thdrv->driver.owner)) + return -ENODEV; if (thdrv->activate) 
return thdrv->activate(thdev); @@ -183,12 +202,18 @@ static int intel_th_output_activate(struct intel_th_device *thdev) static void intel_th_output_deactivate(struct intel_th_device *thdev) { - struct intel_th_driver *thdrv = to_intel_th_driver(thdev->dev.driver); + struct intel_th_driver *thdrv = + to_intel_th_driver_or_null(thdev->dev.driver); + + if (!thdrv) + return; if (thdrv->deactivate) thdrv->deactivate(thdev); else intel_th_trace_disable(thdev); + + module_put(thdrv->driver.owner); } static ssize_t active_show(struct device *dev, struct device_attribute *attr, diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index eedd09332..0df22e306 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h @@ -115,6 +115,7 @@ intel_th_output_assigned(struct intel_th_device *thdev) * @enable: enable tracing for a given output device * @disable: disable tracing for a given output device * @fops: file operations for device nodes + * @attr_group: attributes provided by the driver * * Callbacks @probe and @remove are required for all device types. * Switch device driver needs to fill in @assign, @enable and @disable @@ -139,6 +140,8 @@ struct intel_th_driver { void (*deactivate)(struct intel_th_device *thdev); /* file_operations for those who want a device node */ const struct file_operations *fops; + /* optional attributes */ + struct attribute_group *attr_group; /* source ops */ int (*set_output)(struct intel_th_device *thdev, @@ -148,6 +151,9 @@ struct intel_th_driver { #define to_intel_th_driver(_d) \ container_of((_d), struct intel_th_driver, driver) +#define to_intel_th_driver_or_null(_d) \ + ((_d) ? to_intel_th_driver(_d) : NULL) + static inline struct intel_th_device * to_intel_th_hub(struct intel_th_device *thdev) { diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index d9d6022c5..e8d55a153 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -122,7 +122,6 @@ struct msc { atomic_t mmap_count; struct mutex buf_mutex; - struct mutex iter_mutex; struct list_head iter_list; /* config */ @@ -257,23 +256,37 @@ static struct msc_iter *msc_iter_install(struct msc *msc) iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) - return NULL; + return ERR_PTR(-ENOMEM); + + mutex_lock(&msc->buf_mutex); + + /* + * Reading and tracing are mutually exclusive; if msc is + * enabled, open() will fail; otherwise existing readers + * will prevent enabling the msc and the rest of fops don't + * need to worry about it. 
+ */ + if (msc->enabled) { + kfree(iter); + iter = ERR_PTR(-EBUSY); + goto unlock; + } msc_iter_init(iter); iter->msc = msc; - mutex_lock(&msc->iter_mutex); list_add_tail(&iter->entry, &msc->iter_list); - mutex_unlock(&msc->iter_mutex); +unlock: + mutex_unlock(&msc->buf_mutex); return iter; } static void msc_iter_remove(struct msc_iter *iter, struct msc *msc) { - mutex_lock(&msc->iter_mutex); + mutex_lock(&msc->buf_mutex); list_del(&iter->entry); - mutex_unlock(&msc->iter_mutex); + mutex_unlock(&msc->buf_mutex); kfree(iter); } @@ -454,7 +467,6 @@ static void msc_buffer_clear_hw_header(struct msc *msc) { struct msc_window *win; - mutex_lock(&msc->buf_mutex); list_for_each_entry(win, &msc->win_list, entry) { unsigned int blk; size_t hw_sz = sizeof(struct msc_block_desc) - @@ -466,7 +478,6 @@ static void msc_buffer_clear_hw_header(struct msc *msc) memset(&bdesc->hw_tag, 0, hw_sz); } } - mutex_unlock(&msc->buf_mutex); } /** @@ -474,12 +485,15 @@ static void msc_buffer_clear_hw_header(struct msc *msc) * @msc: the MSC device to configure * * Program storage mode, wrapping, burst length and trace buffer address - * into a given MSC. If msc::enabled is set, enable the trace, too. + * into a given MSC. Then, enable tracing and set msc::enabled. + * The latter is serialized on msc::buf_mutex, so make sure to hold it. */ static int msc_configure(struct msc *msc) { u32 reg; + lockdep_assert_held(&msc->buf_mutex); + if (msc->mode > MSC_MODE_MULTI) return -ENOTSUPP; @@ -497,21 +511,19 @@ static int msc_configure(struct msc *msc) reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD); + reg |= MSC_EN; reg |= msc->mode << __ffs(MSC_MODE); reg |= msc->burst_len << __ffs(MSC_LEN); - /*if (msc->mode == MSC_MODE_MULTI) - reg |= MSC_RD_HDR_OVRD; */ + if (msc->wrap) reg |= MSC_WRAPEN; - if (msc->enabled) - reg |= MSC_EN; iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); - if (msc->enabled) { - msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; - intel_th_trace_enable(msc->thdev); - } + msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; + intel_th_trace_enable(msc->thdev); + msc->enabled = 1; + return 0; } @@ -521,15 +533,14 @@ static int msc_configure(struct msc *msc) * @msc: MSC device to disable * * If @msc is enabled, disable tracing on the switch and then disable MSC - * storage. + * storage. Caller must hold msc::buf_mutex. 
*/ static void msc_disable(struct msc *msc) { unsigned long count; u32 reg; - if (!msc->enabled) - return; + lockdep_assert_held(&msc->buf_mutex); intel_th_trace_disable(msc->thdev); @@ -569,33 +580,35 @@ static void msc_disable(struct msc *msc) static int intel_th_msc_activate(struct intel_th_device *thdev) { struct msc *msc = dev_get_drvdata(&thdev->dev); - int ret = 0; + int ret = -EBUSY; if (!atomic_inc_unless_negative(&msc->user_count)) return -ENODEV; - mutex_lock(&msc->iter_mutex); - if (!list_empty(&msc->iter_list)) - ret = -EBUSY; - mutex_unlock(&msc->iter_mutex); + mutex_lock(&msc->buf_mutex); - if (ret) { - atomic_dec(&msc->user_count); - return ret; - } + /* if there are readers, refuse */ + if (list_empty(&msc->iter_list)) + ret = msc_configure(msc); - msc->enabled = 1; + mutex_unlock(&msc->buf_mutex); + + if (ret) + atomic_dec(&msc->user_count); - return msc_configure(msc); + return ret; } static void intel_th_msc_deactivate(struct intel_th_device *thdev) { struct msc *msc = dev_get_drvdata(&thdev->dev); - msc_disable(msc); - - atomic_dec(&msc->user_count); + mutex_lock(&msc->buf_mutex); + if (msc->enabled) { + msc_disable(msc); + atomic_dec(&msc->user_count); + } + mutex_unlock(&msc->buf_mutex); } /** @@ -1035,8 +1048,8 @@ static int intel_th_msc_open(struct inode *inode, struct file *file) return -EPERM; iter = msc_iter_install(msc); - if (!iter) - return -ENOMEM; + if (IS_ERR(iter)) + return PTR_ERR(iter); file->private_data = iter; @@ -1101,11 +1114,6 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf, if (!atomic_inc_unless_negative(&msc->user_count)) return 0; - if (msc->enabled) { - ret = -EBUSY; - goto put_count; - } - if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap) size = msc->single_sz; else @@ -1164,7 +1172,7 @@ static void msc_mmap_close(struct vm_area_struct *vma) if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex)) return; - /* drop page _counts */ + /* drop page _refcounts */ for (pg = 0; pg < msc->nr_pages; pg++) { struct page *page = msc_buffer_get_page(msc, pg); @@ -1245,6 +1253,7 @@ static const struct file_operations intel_th_msc_fops = { .read = intel_th_msc_read, .mmap = intel_th_msc_mmap, .llseek = no_llseek, + .owner = THIS_MODULE, }; static int intel_th_msc_init(struct msc *msc) @@ -1254,8 +1263,6 @@ static int intel_th_msc_init(struct msc *msc) msc->mode = MSC_MODE_MULTI; mutex_init(&msc->buf_mutex); INIT_LIST_HEAD(&msc->win_list); - - mutex_init(&msc->iter_mutex); INIT_LIST_HEAD(&msc->iter_list); msc->burst_len = @@ -1393,6 +1400,11 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, do { end = memchr(p, ',', len); s = kstrndup(p, end ? end - p : len, GFP_KERNEL); + if (!s) { + ret = -ENOMEM; + goto free_win; + } + ret = kstrtoul(s, 10, &val); kfree(s); @@ -1473,10 +1485,6 @@ static int intel_th_msc_probe(struct intel_th_device *thdev) if (err) return err; - err = sysfs_create_group(&dev->kobj, &msc_output_group); - if (err) - return err; - dev_set_drvdata(dev, msc); return 0; @@ -1484,7 +1492,18 @@ static int intel_th_msc_probe(struct intel_th_device *thdev) static void intel_th_msc_remove(struct intel_th_device *thdev) { - sysfs_remove_group(&thdev->dev.kobj, &msc_output_group); + struct msc *msc = dev_get_drvdata(&thdev->dev); + int ret; + + intel_th_msc_deactivate(thdev); + + /* + * Buffers should not be used at this point except if the + * output character device is still open and the parent + * device gets detached from its bus, which is a FIXME. 
+ */ + ret = msc_buffer_free_unless_used(msc); + WARN_ON_ONCE(ret); } static struct intel_th_driver intel_th_msc_driver = { @@ -1493,6 +1512,7 @@ static struct intel_th_driver intel_th_msc_driver = { .activate = intel_th_msc_activate, .deactivate = intel_th_msc_deactivate, .fops = &intel_th_msc_fops, + .attr_group = &msc_output_group, .driver = { .name = "msc", .owner = THIS_MODULE, diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index bca7a2ac0..5e25c7eb3 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -75,6 +75,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80), .driver_data = (kernel_ulong_t)0, }, + { + /* Broxton B-step */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1a8e), + .driver_data = (kernel_ulong_t)0, + }, { 0 }, }; diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c index 57cbfdcc7..35738b5bf 100644 --- a/drivers/hwtracing/intel_th/pti.c +++ b/drivers/hwtracing/intel_th/pti.c @@ -200,7 +200,6 @@ static int intel_th_pti_probe(struct intel_th_device *thdev) struct resource *res; struct pti_device *pti; void __iomem *base; - int ret; res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0); if (!res) @@ -219,10 +218,6 @@ static int intel_th_pti_probe(struct intel_th_device *thdev) read_hw_config(pti); - ret = sysfs_create_group(&dev->kobj, &pti_output_group); - if (ret) - return ret; - dev_set_drvdata(dev, pti); return 0; @@ -237,6 +232,7 @@ static struct intel_th_driver intel_th_pti_driver = { .remove = intel_th_pti_remove, .activate = intel_th_pti_activate, .deactivate = intel_th_pti_deactivate, + .attr_group = &pti_output_group, .driver = { .name = "pti", .owner = THIS_MODULE, diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index de80d45d8..ff31108b0 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -67,9 +67,24 @@ static ssize_t channels_show(struct device *dev, static DEVICE_ATTR_RO(channels); +static ssize_t hw_override_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct stm_device *stm = to_stm_device(dev); + int ret; + + ret = sprintf(buf, "%u\n", stm->data->hw_override); + + return ret; +} + +static DEVICE_ATTR_RO(hw_override); + static struct attribute *stm_attrs[] = { &dev_attr_masters.attr, &dev_attr_channels.attr, + &dev_attr_hw_override.attr, NULL, }; @@ -546,8 +561,6 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg) if (ret) goto err_free; - ret = 0; - if (stm->data->link) ret = stm->data->link(stm->data, stmf->output.master, stmf->output.channel); @@ -668,18 +681,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, stm->dev.parent = parent; stm->dev.release = stm_device_release; - err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name); - if (err) - goto err_device; - - err = device_add(&stm->dev); - if (err) - goto err_device; - mutex_init(&stm->link_mutex); spin_lock_init(&stm->link_lock); INIT_LIST_HEAD(&stm->link_list); + /* initialize the object before it is accessible via sysfs */ spin_lock_init(&stm->mc_lock); mutex_init(&stm->policy_mutex); stm->sw_nmasters = nmasters; @@ -687,9 +693,19 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, stm->data = stm_data; stm_data->stm = stm; + err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name); + if (err) + goto err_device; + + err = device_add(&stm->dev); + if (err) + goto err_device; 
+ return 0; err_device: + unregister_chrdev(stm->major, stm_data->name); + /* matches device_initialize() above */ put_device(&stm->dev); err_free: diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c index 310adf57e..a86612d98 100644 --- a/drivers/hwtracing/stm/dummy_stm.c +++ b/drivers/hwtracing/stm/dummy_stm.c @@ -46,9 +46,7 @@ static struct stm_data dummy_stm[DUMMY_STM_MAX]; static int nr_dummies = 4; -module_param(nr_dummies, int, 0600); - -static unsigned int dummy_stm_nr; +module_param(nr_dummies, int, 0400); static unsigned int fail_mode; @@ -65,12 +63,12 @@ static int dummy_stm_link(struct stm_data *data, unsigned int master, static int dummy_stm_init(void) { - int i, ret = -ENOMEM, __nr_dummies = ACCESS_ONCE(nr_dummies); + int i, ret = -ENOMEM; - if (__nr_dummies < 0 || __nr_dummies > DUMMY_STM_MAX) + if (nr_dummies < 0 || nr_dummies > DUMMY_STM_MAX) return -EINVAL; - for (i = 0; i < __nr_dummies; i++) { + for (i = 0; i < nr_dummies; i++) { dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i); if (!dummy_stm[i].name) goto fail_unregister; @@ -86,8 +84,6 @@ static int dummy_stm_init(void) goto fail_free; } - dummy_stm_nr = __nr_dummies; - return 0; fail_unregister: @@ -105,7 +101,7 @@ static void dummy_stm_exit(void) { int i; - for (i = 0; i < dummy_stm_nr; i++) { + for (i = 0; i < nr_dummies; i++) { stm_unregister_device(&dummy_stm[i]); kfree(dummy_stm[i].name); } diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c index 0133571b5..3da7b673a 100644 --- a/drivers/hwtracing/stm/heartbeat.c +++ b/drivers/hwtracing/stm/heartbeat.c @@ -26,7 +26,7 @@ static int nr_devs = 4; static int interval_ms = 10; -module_param(nr_devs, int, 0600); +module_param(nr_devs, int, 0400); module_param(interval_ms, int, 0600); static struct stm_heartbeat { @@ -35,8 +35,6 @@ static struct stm_heartbeat { unsigned int active; } stm_heartbeat[STM_HEARTBEAT_MAX]; -static unsigned int nr_instances; - static const char str[] = "heartbeat stm source driver is here to serve you"; static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr) @@ -74,12 +72,12 @@ static void stm_heartbeat_unlink(struct stm_source_data *data) static int stm_heartbeat_init(void) { - int i, ret = -ENOMEM, __nr_instances = ACCESS_ONCE(nr_devs); + int i, ret = -ENOMEM; - if (__nr_instances < 0 || __nr_instances > STM_HEARTBEAT_MAX) + if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX) return -EINVAL; - for (i = 0; i < __nr_instances; i++) { + for (i = 0; i < nr_devs; i++) { stm_heartbeat[i].data.name = kasprintf(GFP_KERNEL, "heartbeat.%d", i); if (!stm_heartbeat[i].data.name) @@ -98,8 +96,6 @@ static int stm_heartbeat_init(void) goto fail_free; } - nr_instances = __nr_instances; - return 0; fail_unregister: @@ -116,7 +112,7 @@ static void stm_heartbeat_exit(void) { int i; - for (i = 0; i < nr_instances; i++) { + for (i = 0; i < nr_devs; i++) { stm_source_unregister_device(&stm_heartbeat[i].data); kfree(stm_heartbeat[i].data.name); } diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c index 1db189657..6c0ae2996 100644 --- a/drivers/hwtracing/stm/policy.c +++ b/drivers/hwtracing/stm/policy.c @@ -107,8 +107,7 @@ stp_policy_node_masters_store(struct config_item *item, const char *page, goto unlock; /* must be within [sw_start..sw_end], which is an inclusive range */ - if (first > INT_MAX || last > INT_MAX || first > last || - first < stm->data->sw_start || + if (first > last || first < stm->data->sw_start || last > stm->data->sw_end) { 
ret = -ERANGE; goto unlock; @@ -342,7 +341,7 @@ stp_policies_make(struct config_group *group, const char *name) return ERR_PTR(-EINVAL); } - *p++ = '\0'; + *p = '\0'; stm = stm_find_device(devname); kfree(devname);
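
Aside: the intel_th hunks above drop the per-driver sysfs_create_group()/sysfs_remove_group() calls from msu.c and pti.c in favour of a single attr_group pointer in struct intel_th_driver, which the bus core now creates after a successful probe() and removes before calling remove(). A minimal sketch of what an output driver looks like under this scheme is shown below; the example_* and wrap names are hypothetical and driver registration is omitted, so this is an illustration of the hook rather than code from the series.

	/*
	 * Hypothetical intel_th output driver using the new @attr_group
	 * field; the core handles group creation/removal around probe/remove.
	 */
	#include <linux/device.h>
	#include <linux/module.h>
	#include <linux/sysfs.h>

	#include "intel_th.h"

	static ssize_t wrap_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
	{
		/* a real driver would report its own state here */
		return scnprintf(buf, PAGE_SIZE, "%d\n", 0);
	}
	static DEVICE_ATTR_RO(wrap);

	static struct attribute *example_output_attrs[] = {
		&dev_attr_wrap.attr,
		NULL,
	};

	static struct attribute_group example_output_group = {
		.attrs	= example_output_attrs,
	};

	static int example_probe(struct intel_th_device *thdev)
	{
		/* no sysfs_create_group() needed here any more */
		return 0;
	}

	static void example_remove(struct intel_th_device *thdev)
	{
		/* ...and no sysfs_remove_group() here either */
	}

	static struct intel_th_driver example_output_driver = {
		.probe		= example_probe,
		.remove		= example_remove,
		.attr_group	= &example_output_group,
		.driver	= {
			.name	= "example",
			.owner	= THIS_MODULE,
		},
	};

Note that, per the core.c hunk, the group is removed before ->remove() runs, so a driver no longer has to worry about its attributes being visible while it tears down its state.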