author		André Fabian Silva Delgado <emulatorman@parabola.nu>	2016-10-20 00:10:27 -0300
committer	André Fabian Silva Delgado <emulatorman@parabola.nu>	2016-10-20 00:10:27 -0300
commit		d0b2f91bede3bd5e3d24dd6803e56eee959c1797 (patch)
tree		7fee4ab0509879c373c4f2cbd5b8a5be5b4041ee /drivers/perf/arm_pmu.c
parent		e914f8eb445e8f74b00303c19c2ffceaedd16a05 (diff)
Linux-libre 4.8.2-gnupck-4.8.2-gnu
Diffstat (limited to 'drivers/perf/arm_pmu.c')
-rw-r--r--	drivers/perf/arm_pmu.c	88
1 file changed, 61 insertions(+), 27 deletions(-)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 140436a04..f5e1008a2 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -603,7 +603,8 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
 	irq = platform_get_irq(pmu_device, 0);
 	if (irq >= 0 && irq_is_percpu(irq)) {
-		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
+		on_each_cpu_mask(&cpu_pmu->supported_cpus,
+				 cpu_pmu_disable_percpu_irq, &irq, 1);
 		free_percpu_irq(irq, &hw_events->percpu_pmu);
 	} else {
 		for (i = 0; i < irqs; ++i) {
@@ -645,7 +646,9 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 				irq);
 			return err;
 		}
-		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+
+		on_each_cpu_mask(&cpu_pmu->supported_cpus,
+				 cpu_pmu_enable_percpu_irq, &irq, 1);
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			int cpu = i;
@@ -685,30 +688,29 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	return 0;
 }
 
+static DEFINE_SPINLOCK(arm_pmu_lock);
+static LIST_HEAD(arm_pmu_list);
+
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
-static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
-			  void *hcpu)
+static int arm_perf_starting_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
-	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
-
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
+	struct arm_pmu *pmu;
 
-	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-		return NOTIFY_DONE;
+	spin_lock(&arm_pmu_lock);
+	list_for_each_entry(pmu, &arm_pmu_list, entry) {
 
-	if (pmu->reset)
-		pmu->reset(pmu);
-	else
-		return NOTIFY_DONE;
-
-	return NOTIFY_OK;
+		if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+			continue;
+		if (pmu->reset)
+			pmu->reset(pmu);
+	}
+	spin_unlock(&arm_pmu_lock);
+	return 0;
 }
 
 #ifdef CONFIG_CPU_PM
@@ -819,10 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	if (!cpu_hw_events)
 		return -ENOMEM;
 
-	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
-	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
-	if (err)
-		goto out_hw_events;
+	spin_lock(&arm_pmu_lock);
+	list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
+	spin_unlock(&arm_pmu_lock);
 
 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
@@ -858,8 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	return 0;
 
 out_unregister:
-	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
-out_hw_events:
+	spin_lock(&arm_pmu_lock);
+	list_del(&cpu_pmu->entry);
+	spin_unlock(&arm_pmu_lock);
 	free_percpu(cpu_hw_events);
 	return err;
 }
@@ -867,7 +869,9 @@ out_hw_events:
 
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
 	cpu_pm_pmu_unregister(cpu_pmu);
-	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+	spin_lock(&arm_pmu_lock);
+	list_del(&cpu_pmu->entry);
+	spin_unlock(&arm_pmu_lock);
 	free_percpu(cpu_pmu->hw_events);
 }
 
@@ -921,6 +925,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 			if (i > 0 && spi != using_spi) {
 				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
 					dn->name);
+				of_node_put(dn);
 				kfree(irqs);
 				return -EINVAL;
 			}
@@ -961,9 +966,24 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 		i++;
 	} while (1);
 
-	/* If we didn't manage to parse anything, claim to support all CPUs */
-	if (cpumask_weight(&pmu->supported_cpus) == 0)
-		cpumask_setall(&pmu->supported_cpus);
+	/* If we didn't manage to parse anything, try the interrupt affinity */
+	if (cpumask_weight(&pmu->supported_cpus) == 0) {
+		int irq = platform_get_irq(pdev, 0);
+
+		if (irq >= 0 && irq_is_percpu(irq)) {
+			/* If using PPIs, check the affinity of the partition */
+			int ret;
+
+			ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
+			if (ret) {
+				kfree(irqs);
+				return ret;
+			}
+		} else {
+			/* Otherwise default to all CPUs */
+			cpumask_setall(&pmu->supported_cpus);
+		}
+	}
 
 	/* If we matched up the IRQ affinities, use them to route the SPIs */
 	if (using_spi && i == pdev->num_resources)
@@ -1044,3 +1064,17 @@ out_free:
 	kfree(pmu);
 	return ret;
 }
+
+static int arm_pmu_hp_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+					"AP_PERF_ARM_STARTING",
+					arm_perf_starting_cpu, NULL);
+	if (ret)
+		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
+		       ret);
+	return ret;
+}
+subsys_initcall(arm_pmu_hp_init);
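
The central change above is the conversion from the old register_cpu_notifier() interface to the CPU hotplug state machine (cpuhp) introduced in the 4.7/4.8 cycle: instead of each PMU instance carrying its own notifier_block, a single CPUHP_AP_PERF_ARM_STARTING callback is installed once and walks a global list of registered PMUs. A minimal sketch of that registration pattern, using hypothetical mydrv_* names rather than the patch's own code:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/printk.h>

/*
 * Sketch of the cpuhp pattern the patch adopts; the mydrv_* names are
 * illustrative. A STARTING-phase callback runs on the hotplugged CPU
 * itself with interrupts still disabled, which is why the patch can
 * safely walk arm_pmu_list under a plain spinlock and reset only the
 * PMUs whose supported_cpus mask contains the incoming CPU.
 */
static int mydrv_starting_cpu(unsigned int cpu)
{
	pr_info("mydrv: restoring per-CPU hardware state on CPU %u\n", cpu);
	return 0;	/* a non-zero return would abort the CPU bring-up */
}

static int __init mydrv_hp_init(void)
{
	/*
	 * The _nocalls variant only installs the callback for future
	 * hotplug events; CPUs that are already online are not called
	 * back, matching the patch's use at subsys_initcall time.
	 */
	return cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					 "AP_PERF_ARM_STARTING",
					 mydrv_starting_cpu, NULL);
}
subsys_initcall(mydrv_hp_init);

The design trade-off: the notifier version stored per-PMU state (hotplug_nb) and filtered on CPU_STARTING at each event, while the cpuhp version registers one ordered, named state and moves the per-instance bookkeeping into the arm_pmu_list/arm_pmu_lock pair touched by cpu_pmu_init() and cpu_pmu_destroy().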