From b4b7ff4b08e691656c9d77c758fc355833128ac0 Mon Sep 17 00:00:00 2001 From: André Fabian Silva Delgado Date: Wed, 20 Jan 2016 14:01:31 -0300 Subject: Linux-libre 4.4-gnu --- drivers/base/power/Makefile | 2 +- drivers/base/power/clock_ops.c | 12 +- drivers/base/power/domain.c | 407 +++------ drivers/base/power/domain_governor.c | 9 +- drivers/base/power/generic_ops.c | 23 +- drivers/base/power/opp.c | 1603 ---------------------------------- drivers/base/power/opp/Makefile | 2 + drivers/base/power/opp/core.c | 1316 ++++++++++++++++++++++++++++ drivers/base/power/opp/cpu.c | 270 ++++++ drivers/base/power/opp/opp.h | 146 ++++ drivers/base/power/wakeirq.c | 6 + drivers/base/power/wakeup.c | 16 +- 12 files changed, 1882 insertions(+), 1930 deletions(-) delete mode 100644 drivers/base/power/opp.c create mode 100644 drivers/base/power/opp/Makefile create mode 100644 drivers/base/power/opp/core.c create mode 100644 drivers/base/power/opp/cpu.c create mode 100644 drivers/base/power/opp/opp.h (limited to 'drivers/base/power') diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index f94a6ccfe..5998c5328 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -1,7 +1,7 @@ obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o -obj-$(CONFIG_PM_OPP) += opp.o +obj-$(CONFIG_PM_OPP) += opp/ obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o obj-$(CONFIG_HAVE_CLK) += clock_ops.o diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 652b5a367..60ee5591e 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -17,7 +17,7 @@ #include #include -#ifdef CONFIG_PM +#ifdef CONFIG_PM_CLK enum pce_status { PCE_STATUS_NONE = 0, @@ -93,7 +93,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id, return -ENOMEM; } } else { - if (IS_ERR(clk) || !__clk_get(clk)) { + if (IS_ERR(clk)) { kfree(ce); return -ENOENT; } @@ -127,7 +127,9 @@ int pm_clk_add(struct device *dev, const char *con_id) * @clk: Clock pointer * * Add the clock to the list of clocks used for the power management of @dev. - * It will increment refcount on clock pointer, use clk_put() on it when done. + * The power-management code will take control of the clock reference, so + * callers should not call clk_put() on @clk after this function successfully + * returned. */ int pm_clk_add_clk(struct device *dev, struct clk *clk) { @@ -404,7 +406,7 @@ int pm_clk_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } -#else /* !CONFIG_PM */ +#else /* !CONFIG_PM_CLK */ /** * enable_clock - Enable a device clock. @@ -484,7 +486,7 @@ static int pm_clk_notify(struct notifier_block *nb, return 0; } -#endif /* !CONFIG_PM */ +#endif /* !CONFIG_PM_CLK */ /** * pm_clk_add_notifier - Add bus type notifier for power management clocks.
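[Editor's note] To make the reference-ownership change in pm_clk_add_clk() above concrete, here is a minimal usage sketch. The driver name, the "bus" clock id and the error-handling shape are hypothetical assumptions, not part of this patch; the one firm point, per the updated comment, is that a successful pm_clk_add_clk() call consumes the clk_get() reference.

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* The driver owns one reference after clk_get(). */
	clk = clk_get(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = pm_clk_create(&pdev->dev);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/*
	 * Illustrative: on success the PM clock list takes over the
	 * reference, so the driver must not clk_put() it afterwards;
	 * on failure the reference still belongs to the caller.
	 */
	ret = pm_clk_add_clk(&pdev->dev, clk);
	if (ret) {
		clk_put(clk);
		pm_clk_destroy(&pdev->dev);
	}
	return ret;
}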
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 16550c63d..65f50eccd 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -34,43 +34,9 @@ __ret; \ }) -#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \ -({ \ - ktime_t __start = ktime_get(); \ - type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \ - s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \ - struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ - if (!__retval && __elapsed > __td->field) { \ - __td->field = __elapsed; \ - dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \ - __elapsed); \ - genpd->max_off_time_changed = true; \ - __td->constraint_changed = true; \ - } \ - __retval; \ -}) - static LIST_HEAD(gpd_list); static DEFINE_MUTEX(gpd_list_lock); -static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name) -{ - struct generic_pm_domain *genpd = NULL, *gpd; - - if (IS_ERR_OR_NULL(domain_name)) - return NULL; - - mutex_lock(&gpd_list_lock); - list_for_each_entry(gpd, &gpd_list, gpd_list_node) { - if (!strcmp(gpd->name, domain_name)) { - genpd = gpd; - break; - } - } - mutex_unlock(&gpd_list_lock); - return genpd; -} - /* * Get the generic PM domain for a particular struct device. * This validates the struct device pointer, the PM domain pointer, @@ -110,18 +76,12 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev) static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) { - return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev, - stop_latency_ns, "stop"); + return GENPD_DEV_CALLBACK(genpd, int, stop, dev); } -static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev, - bool timed) +static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) { - if (!timed) - return GENPD_DEV_CALLBACK(genpd, int, start, dev); - - return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, - start_latency_ns, "start"); + return GENPD_DEV_CALLBACK(genpd, int, start, dev); } static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) @@ -140,19 +100,6 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) smp_mb__after_atomic(); } -static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) -{ - s64 usecs64; - - if (!genpd->cpuidle_data) - return; - - usecs64 = genpd->power_on_latency_ns; - do_div(usecs64, NSEC_PER_USEC); - usecs64 += genpd->cpuidle_data->saved_exit_latency; - genpd->cpuidle_data->idle_state->exit_latency = usecs64; -} - static int genpd_power_on(struct generic_pm_domain *genpd, bool timed) { ktime_t time_start; @@ -176,7 +123,6 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed) genpd->power_on_latency_ns = elapsed_ns; genpd->max_off_time_changed = true; - genpd_recalc_cpu_exit_latency(genpd); pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", genpd->name, "on", elapsed_ns); @@ -213,10 +159,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) } /** - * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). + * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff(). * @genpd: PM domain to power off. * - * Queue up the execution of pm_genpd_poweroff() unless it's already been done + * Queue up the execution of genpd_poweroff() unless it's already been done * before.
*/ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) @@ -224,14 +170,16 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) queue_work(pm_wq, &genpd->power_off_work); } +static int genpd_poweron(struct generic_pm_domain *genpd); + /** - * __pm_genpd_poweron - Restore power to a given PM domain and its masters. + * __genpd_poweron - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. * * Restore power to @genpd and all of its masters so that it is possible to * resume a device belonging to it. */ -static int __pm_genpd_poweron(struct generic_pm_domain *genpd) +static int __genpd_poweron(struct generic_pm_domain *genpd) { struct gpd_link *link; int ret = 0; @@ -240,13 +188,6 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) || (genpd->prepared_count > 0 && genpd->suspend_power_off)) return 0; - if (genpd->cpuidle_data) { - cpuidle_pause_and_lock(); - genpd->cpuidle_data->idle_state->disabled = true; - cpuidle_resume_and_unlock(); - goto out; - } - /* * The list is guaranteed not to change while the loop below is being * executed, unless one of the masters' .power_on() callbacks fiddles @@ -255,7 +196,7 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_inc(link->master); - ret = pm_genpd_poweron(link->master); + ret = genpd_poweron(link->master); if (ret) { genpd_sd_counter_dec(link->master); goto err; @@ -266,7 +207,6 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) if (ret) goto err; - out: genpd->status = GPD_STATE_ACTIVE; return 0; @@ -282,46 +222,28 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) } /** - * pm_genpd_poweron - Restore power to a given PM domain and its masters. + * genpd_poweron - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. */ -int pm_genpd_poweron(struct generic_pm_domain *genpd) +static int genpd_poweron(struct generic_pm_domain *genpd) { int ret; mutex_lock(&genpd->lock); - ret = __pm_genpd_poweron(genpd); + ret = __genpd_poweron(genpd); mutex_unlock(&genpd->lock); return ret; } -/** - * pm_genpd_name_poweron - Restore power to a given PM domain and its masters. - * @domain_name: Name of the PM domain to power up. - */ -int pm_genpd_name_poweron(const char *domain_name) -{ - struct generic_pm_domain *genpd; - - genpd = pm_genpd_lookup_name(domain_name); - return genpd ? pm_genpd_poweron(genpd) : -EINVAL; -} - static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) { - return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, - save_state_latency_ns, "state save"); + return GENPD_DEV_CALLBACK(genpd, int, save_state, dev); } static int genpd_restore_dev(struct generic_pm_domain *genpd, - struct device *dev, bool timed) + struct device *dev) { - if (!timed) - return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev); - - return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, - restore_state_latency_ns, - "state restore"); + return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev); } static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, @@ -365,13 +287,14 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, } /** - * pm_genpd_poweroff - Remove power from a given PM domain. + * genpd_poweroff - Remove power from a given PM domain. * @genpd: PM domain to power down. 
+ * @is_async: PM domain is powered down from a scheduled work * * If all of the @genpd's devices have been suspended and all of its subdomains * have been powered down, remove power from @genpd. */ -static int pm_genpd_poweroff(struct generic_pm_domain *genpd) +static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async) { struct pm_domain_data *pdd; struct gpd_link *link; @@ -398,12 +321,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) if (stat > PM_QOS_FLAGS_NONE) return -EBUSY; - if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) - || pdd->dev->power.irq_safe)) + if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe) not_suspended++; } - if (not_suspended > genpd->in_progress) + if (not_suspended > 1 || (not_suspended == 1 && is_async)) return -EBUSY; if (genpd->gov && genpd->gov->power_down_ok) { @@ -411,21 +333,6 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) return -EAGAIN; } - if (genpd->cpuidle_data) { - /* - * If cpuidle_data is set, cpuidle should turn the domain off - * when the CPU in it is idle. In that case we don't decrement - * the subdomain counts of the master domains, so that power is - * not removed from the current domain prematurely as a result - * of cutting off the masters' power. - */ - genpd->status = GPD_STATE_POWER_OFF; - cpuidle_pause_and_lock(); - genpd->cpuidle_data->idle_state->disabled = false; - cpuidle_resume_and_unlock(); - return 0; - } - if (genpd->power_off) { int ret; @@ -434,10 +341,10 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) /* * If sd_count > 0 at this point, one of the subdomains hasn't - * managed to call pm_genpd_poweron() for the master yet after - * incrementing it. In that case pm_genpd_poweron() will wait + * managed to call genpd_poweron() for the master yet after + * incrementing it. In that case genpd_poweron() will wait * for us to drop the lock, so we can call .power_off() and let - * the pm_genpd_poweron() restore power for us (this shouldn't + * the genpd_poweron() restore power for us (this shouldn't * happen very often). */ ret = genpd_power_off(genpd, true); @@ -466,7 +373,7 @@ static void genpd_power_off_work_fn(struct work_struct *work) genpd = container_of(work, struct generic_pm_domain, power_off_work); mutex_lock(&genpd->lock); - pm_genpd_poweroff(genpd); + genpd_poweroff(genpd, true); mutex_unlock(&genpd->lock); } @@ -482,6 +389,10 @@ static int pm_genpd_runtime_suspend(struct device *dev) { struct generic_pm_domain *genpd; bool (*stop_ok)(struct device *__dev); + struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + bool runtime_pm = pm_runtime_enabled(dev); + ktime_t time_start; + s64 elapsed_ns; int ret; dev_dbg(dev, "%s()\n", __func__); @@ -490,20 +401,42 @@ static int pm_genpd_runtime_suspend(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; + /* + * A runtime PM centric subsystem/driver may re-use the runtime PM + * callbacks for other purposes than runtime PM. In those scenarios + * runtime PM is disabled. Under these circumstances, we shall skip + * validating/measuring the PM QoS latency. + */ stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; - if (stop_ok && !stop_ok(dev)) + if (runtime_pm && stop_ok && !stop_ok(dev)) return -EBUSY; + /* Measure suspend latency. 
*/ + if (runtime_pm) + time_start = ktime_get(); + ret = genpd_save_dev(genpd, dev); if (ret) return ret; ret = genpd_stop_dev(genpd, dev); if (ret) { - genpd_restore_dev(genpd, dev, true); + genpd_restore_dev(genpd, dev); return ret; } + /* Update suspend latency value if the measured time exceeds it. */ + if (runtime_pm) { + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > td->suspend_latency_ns) { + td->suspend_latency_ns = elapsed_ns; + dev_dbg(dev, "suspend latency exceeded, %lld ns\n", + elapsed_ns); + genpd->max_off_time_changed = true; + td->constraint_changed = true; + } + } + /* * If power.irq_safe is set, this routine will be run with interrupts * off, so it can't use mutexes. @@ -512,9 +445,7 @@ static int pm_genpd_runtime_suspend(struct device *dev) return 0; mutex_lock(&genpd->lock); - genpd->in_progress++; - pm_genpd_poweroff(genpd); - genpd->in_progress--; + genpd_poweroff(genpd, false); mutex_unlock(&genpd->lock); return 0; @@ -531,6 +462,10 @@ static int pm_genpd_runtime_suspend(struct device *dev) static int pm_genpd_runtime_resume(struct device *dev) { struct generic_pm_domain *genpd; + struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + bool runtime_pm = pm_runtime_enabled(dev); + ktime_t time_start; + s64 elapsed_ns; int ret; bool timed = true; @@ -547,15 +482,31 @@ static int pm_genpd_runtime_resume(struct device *dev) } mutex_lock(&genpd->lock); - ret = __pm_genpd_poweron(genpd); + ret = __genpd_poweron(genpd); mutex_unlock(&genpd->lock); if (ret) return ret; out: - genpd_start_dev(genpd, dev, timed); - genpd_restore_dev(genpd, dev, timed); + /* Measure resume latency. */ + if (timed && runtime_pm) + time_start = ktime_get(); + + genpd_start_dev(genpd, dev); + genpd_restore_dev(genpd, dev); + + /* Update resume latency value if the measured time exceeds it. */ + if (timed && runtime_pm) { + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > td->resume_latency_ns) { + td->resume_latency_ns = elapsed_ns; + dev_dbg(dev, "resume latency exceeded, %lld ns\n", + elapsed_ns); + genpd->max_off_time_changed = true; + td->constraint_changed = true; + } + } return 0; } @@ -569,15 +520,15 @@ static int __init pd_ignore_unused_setup(char *__unused) __setup("pd_ignore_unused", pd_ignore_unused_setup); /** - * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. + * genpd_poweroff_unused - Power off all PM domains with no devices in use. */ -void pm_genpd_poweroff_unused(void) +static int __init genpd_poweroff_unused(void) { struct generic_pm_domain *genpd; if (pd_ignore_unused) { pr_warn("genpd: Not disabling unused power domains\n"); - return; + return 0; } mutex_lock(&gpd_list_lock); @@ -586,11 +537,7 @@ void pm_genpd_poweroff_unused(void) genpd_queue_power_off_work(genpd); mutex_unlock(&gpd_list_lock); -} -static int __init genpd_poweroff_unused(void) -{ - pm_genpd_poweroff_unused(); return 0; } late_initcall(genpd_poweroff_unused); @@ -764,7 +711,7 @@ static int pm_genpd_prepare(struct device *dev) /* * The PM domain must be in the GPD_STATE_ACTIVE state at this point, - * so pm_genpd_poweron() will return immediately, but if the device + * so genpd_poweron() will return immediately, but if the device * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need * to make it operational. 
*/ @@ -890,7 +837,7 @@ static int pm_genpd_resume_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); genpd->suspended_count--; - return genpd_start_dev(genpd, dev, true); + return genpd_start_dev(genpd, dev); } /** @@ -1018,7 +965,8 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev, true); + return genpd->suspend_power_off ? + 0 : genpd_start_dev(genpd, dev); } /** @@ -1112,7 +1060,7 @@ static int pm_genpd_restore_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); - return genpd_start_dev(genpd, dev, true); + return genpd_start_dev(genpd, dev); } /** @@ -1316,18 +1264,6 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, return ret; } -/** - * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. - * @domain_name: Name of the PM domain to add the device to. - * @dev: Device to be added. - * @td: Set of PM QoS timing parameters to attach to the device. - */ -int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, - struct gpd_timing_data *td) -{ - return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td); -} - /** * pm_genpd_remove_device - Remove a device from an I/O PM domain. * @genpd: PM domain to remove the device from. @@ -1386,13 +1322,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *subdomain) { - struct gpd_link *link; + struct gpd_link *link, *itr; int ret = 0; if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) || genpd == subdomain) return -EINVAL; + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) + return -ENOMEM; + mutex_lock(&genpd->lock); mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); @@ -1402,18 +1342,13 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, goto out; } - list_for_each_entry(link, &genpd->master_links, master_node) { - if (link->slave == subdomain && link->master == genpd) { + list_for_each_entry(itr, &genpd->master_links, master_node) { + if (itr->slave == subdomain && itr->master == genpd) { ret = -EINVAL; goto out; } } - link = kzalloc(sizeof(*link), GFP_KERNEL); - if (!link) { - ret = -ENOMEM; - goto out; - } link->master = genpd; list_add_tail(&link->master_node, &genpd->master_links); link->slave = subdomain; @@ -1424,38 +1359,11 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, out: mutex_unlock(&subdomain->lock); mutex_unlock(&genpd->lock); - + if (ret) + kfree(link); return ret; } - -/** - * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain. - * @master_name: Name of the master PM domain to add the subdomain to. - * @subdomain_name: Name of the subdomain to be added. 
- */ -int pm_genpd_add_subdomain_names(const char *master_name, - const char *subdomain_name) -{ - struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd; - - if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name)) - return -EINVAL; - - mutex_lock(&gpd_list_lock); - list_for_each_entry(gpd, &gpd_list, gpd_list_node) { - if (!master && !strcmp(gpd->name, master_name)) - master = gpd; - - if (!subdomain && !strcmp(gpd->name, subdomain_name)) - subdomain = gpd; - - if (master && subdomain) - break; - } - mutex_unlock(&gpd_list_lock); - - return pm_genpd_add_subdomain(master, subdomain); -} +EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); /** * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. @@ -1503,124 +1411,7 @@ out: return ret; } - -/** - * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. - * @genpd: PM domain to be connected with cpuidle. - * @state: cpuidle state this domain can disable/enable. - * - * Make a PM domain behave as though it contained a CPU core, that is, instead - * of calling its power down routine it will enable the given cpuidle state so - * that the cpuidle subsystem can power it down (if possible and desirable). - */ -int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) -{ - struct cpuidle_driver *cpuidle_drv; - struct gpd_cpuidle_data *cpuidle_data; - struct cpuidle_state *idle_state; - int ret = 0; - - if (IS_ERR_OR_NULL(genpd) || state < 0) - return -EINVAL; - - mutex_lock(&genpd->lock); - - if (genpd->cpuidle_data) { - ret = -EEXIST; - goto out; - } - cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL); - if (!cpuidle_data) { - ret = -ENOMEM; - goto out; - } - cpuidle_drv = cpuidle_driver_ref(); - if (!cpuidle_drv) { - ret = -ENODEV; - goto err_drv; - } - if (cpuidle_drv->state_count <= state) { - ret = -EINVAL; - goto err; - } - idle_state = &cpuidle_drv->states[state]; - if (!idle_state->disabled) { - ret = -EAGAIN; - goto err; - } - cpuidle_data->idle_state = idle_state; - cpuidle_data->saved_exit_latency = idle_state->exit_latency; - genpd->cpuidle_data = cpuidle_data; - genpd_recalc_cpu_exit_latency(genpd); - - out: - mutex_unlock(&genpd->lock); - return ret; - - err: - cpuidle_driver_unref(); - - err_drv: - kfree(cpuidle_data); - goto out; -} - -/** - * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it. - * @name: Name of the domain to connect to cpuidle. - * @state: cpuidle state this domain can manipulate. - */ -int pm_genpd_name_attach_cpuidle(const char *name, int state) -{ - return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state); -} - -/** - * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain. - * @genpd: PM domain to remove the cpuidle connection from. - * - * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the - * given PM domain. 
- */ -int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) -{ - struct gpd_cpuidle_data *cpuidle_data; - struct cpuidle_state *idle_state; - int ret = 0; - - if (IS_ERR_OR_NULL(genpd)) - return -EINVAL; - - mutex_lock(&genpd->lock); - - cpuidle_data = genpd->cpuidle_data; - if (!cpuidle_data) { - ret = -ENODEV; - goto out; - } - idle_state = cpuidle_data->idle_state; - if (!idle_state->disabled) { - ret = -EAGAIN; - goto out; - } - idle_state->exit_latency = cpuidle_data->saved_exit_latency; - cpuidle_driver_unref(); - genpd->cpuidle_data = NULL; - kfree(cpuidle_data); - - out: - mutex_unlock(&genpd->lock); - return ret; -} - -/** - * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it. - * @name: Name of the domain to disconnect cpuidle from. - */ -int pm_genpd_name_detach_cpuidle(const char *name) -{ - return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name)); -} +EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); /* Default device callbacks for generic PM domains. */ @@ -1688,7 +1479,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd, mutex_init(&genpd->lock); genpd->gov = gov; INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); - genpd->in_progress = 0; atomic_set(&genpd->sd_count, 0); genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; genpd->device_count = 0; @@ -1996,10 +1786,10 @@ int genpd_dev_pm_attach(struct device *dev) } pd = of_genpd_get_from_provider(&pd_args); + of_node_put(pd_args.np); if (IS_ERR(pd)) { dev_dbg(dev, "%s() failed to find PM domain: %ld\n", __func__, PTR_ERR(pd)); - of_node_put(dev->of_node); return -EPROBE_DEFER; } @@ -2017,13 +1807,12 @@ int genpd_dev_pm_attach(struct device *dev) if (ret < 0) { dev_err(dev, "failed to add to PM domain %s: %d", pd->name, ret); - of_node_put(dev->of_node); goto out; } dev->pm_domain->detach = genpd_dev_pm_detach; dev->pm_domain->sync = genpd_dev_pm_sync; - ret = pm_genpd_poweron(pd); + ret = genpd_poweron(pd); out: return ret ? -EPROBE_DEFER : 0; diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 85e17bacc..1e937ac5f 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -77,10 +77,8 @@ static bool default_stop_ok(struct device *dev) dev_update_qos_constraint); if (constraint_ns > 0) { - constraint_ns -= td->save_state_latency_ns + - td->stop_latency_ns + - td->start_latency_ns + - td->restore_state_latency_ns; + constraint_ns -= td->suspend_latency_ns + + td->resume_latency_ns; if (constraint_ns == 0) return false; } @@ -162,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) struct gpd_timing_data *td; s64 constraint_ns; - if (!pdd->dev->driver) - continue; - /* * Check if the device is allowed to be off long enough for the * domain to turn off and on (that's how much time it will diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 96a92db83..07c3c4a95 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -9,6 +9,7 @@ #include #include #include +#include #ifdef CONFIG_PM /** @@ -296,11 +297,27 @@ void pm_generic_complete(struct device *dev) if (drv && drv->pm && drv->pm->complete) drv->pm->complete(dev); +} +/** + * pm_complete_with_resume_check - Complete a device power transition. + * @dev: Device to handle. 
+ * + * Complete a device power transition during a system-wide power transition and + * optionally schedule a runtime resume of the device if the system resume in + * progress has been initiated by the platform firmware and the device had its + * power.direct_complete flag set. + */ +void pm_complete_with_resume_check(struct device *dev) +{ + pm_generic_complete(dev); /* - * Let runtime PM try to suspend devices that haven't been in use before - * going into the system-wide sleep state we're resuming from. + * If the device had been runtime-suspended before the system went into + * the sleep state it is going out of and it has never been resumed till + * now, resume it in case the firmware powered it up. */ - pm_request_idle(dev); + if (dev->power.direct_complete && pm_resume_via_firmware()) + pm_request_resume(dev); } +EXPORT_SYMBOL_GPL(pm_complete_with_resume_check); #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c deleted file mode 100644 index 7ae7cd990..000000000 --- a/drivers/base/power/opp.c +++ /dev/null @@ -1,1603 +0,0 @@ -/* - * Generic OPP Interface - * - * Copyright (C) 2009-2010 Texas Instruments Incorporated. - * Nishanth Menon - * Romit Dasgupta - * Kevin Hilman - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Internal data structure organization with the OPP layer library is as - * follows: - * dev_opp_list (root) - * |- device 1 (represents voltage domain 1) - * | |- opp 1 (availability, freq, voltage) - * | |- opp 2 .. - * ... ... - * | `- opp n .. - * |- device 2 (represents the next voltage domain) - * ... - * `- device m (represents mth voltage domain) - * device 1, 2.. are represented by dev_opp structure while each opp - * is represented by the opp structure. - */ - -/** - * struct dev_pm_opp - Generic OPP description structure - * @node: opp list node. The nodes are maintained throughout the lifetime - * of boot. It is expected only an optimal set of OPPs are - * added to the library by the SoC framework. - * RCU usage: opp list is traversed with RCU locks. node - * modification is possible realtime, hence the modifications - * are protected by the dev_opp_list_lock for integrity. - * IMPORTANT: the opp nodes should be maintained in increasing - * order. - * @dynamic: not-created from static DT entries. - * @available: true/false - marks if this OPP is available or not - * @turbo: true if turbo (boost) OPP - * @rate: Frequency in hertz - * @u_volt: Target voltage in microvolts corresponding to this OPP - * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP - * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP - * @u_amp: Maximum current drawn by the device in microamperes - * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's - * frequency from any other OPP's frequency. - * @dev_opp: points back to the device_opp struct this opp belongs to - * @rcu_head: RCU callback head used for deferred freeing - * @np: OPP's device node. - * - * This structure stores the OPP information for a given device.
- */ -struct dev_pm_opp { - struct list_head node; - - bool available; - bool dynamic; - bool turbo; - unsigned long rate; - - unsigned long u_volt; - unsigned long u_volt_min; - unsigned long u_volt_max; - unsigned long u_amp; - unsigned long clock_latency_ns; - - struct device_opp *dev_opp; - struct rcu_head rcu_head; - - struct device_node *np; -}; - -/** - * struct device_list_opp - devices managed by 'struct device_opp' - * @node: list node - * @dev: device to which the struct object belongs - * @rcu_head: RCU callback head used for deferred freeing - * - * This is an internal data structure maintaining the list of devices that are - * managed by 'struct device_opp'. - */ -struct device_list_opp { - struct list_head node; - const struct device *dev; - struct rcu_head rcu_head; -}; - -/** - * struct device_opp - Device opp structure - * @node: list node - contains the devices with OPPs that - * have been registered. Nodes once added are not modified in this - * list. - * RCU usage: nodes are not modified in the list of device_opp, - * however addition is possible and is secured by dev_opp_list_lock - * @srcu_head: notifier head to notify the OPP availability changes. - * @rcu_head: RCU callback head used for deferred freeing - * @dev_list: list of devices that share these OPPs - * @opp_list: list of opps - * @np: struct device_node pointer for opp's DT node. - * @shared_opp: OPP is shared between multiple devices. - * - * This is an internal data structure maintaining the link to opps attached to - * a device. This structure is not meant to be shared to users as it is - * meant for book keeping and private to OPP library. - * - * Because the opp structures can be used from both rcu and srcu readers, we - * need to wait for the grace period of both of them before freeing any - * resources. And so we have used kfree_rcu() from within call_srcu() handlers. - */ -struct device_opp { - struct list_head node; - - struct srcu_notifier_head srcu_head; - struct rcu_head rcu_head; - struct list_head dev_list; - struct list_head opp_list; - - struct device_node *np; - unsigned long clock_latency_ns_max; - bool shared_opp; - struct dev_pm_opp *suspend_opp; -}; - -/* - * The root of the list of all devices. All device_opp structures branch off - * from here, with each device_opp containing the list of opp it supports in - * various states of availability. - */ -static LIST_HEAD(dev_opp_list); -/* Lock to allow exclusive modification to the device and opp lists */ -static DEFINE_MUTEX(dev_opp_list_lock); - -#define opp_rcu_lockdep_assert() \ -do { \ - RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ - !lockdep_is_held(&dev_opp_list_lock), \ - "Missing rcu_read_lock() or " \ - "dev_opp_list_lock protection"); \ -} while (0) - -static struct device_list_opp *_find_list_dev(const struct device *dev, - struct device_opp *dev_opp) -{ - struct device_list_opp *list_dev; - - list_for_each_entry(list_dev, &dev_opp->dev_list, node) - if (list_dev->dev == dev) - return list_dev; - - return NULL; -} - -static struct device_opp *_managed_opp(const struct device_node *np) -{ - struct device_opp *dev_opp; - - list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) { - if (dev_opp->np == np) { - /* - * Multiple devices can point to the same OPP table and - * so will have same node-pointer, np. - * - * But the OPPs will be considered as shared only if the - * OPP table contains a "opp-shared" property. - */ - return dev_opp->shared_opp ? 
dev_opp : NULL; - } - } - - return NULL; -} - -/** - * _find_device_opp() - find device_opp struct using device pointer - * @dev: device pointer used to lookup device OPPs - * - * Search list of device OPPs for one containing matching device. Does a RCU - * reader operation to grab the pointer needed. - * - * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or - * -EINVAL based on type of error. - * - * Locking: This function must be called under rcu_read_lock(). device_opp - * is a RCU protected pointer. This means that device_opp is valid as long - * as we are under RCU lock. - */ -static struct device_opp *_find_device_opp(struct device *dev) -{ - struct device_opp *dev_opp; - - if (IS_ERR_OR_NULL(dev)) { - pr_err("%s: Invalid parameters\n", __func__); - return ERR_PTR(-EINVAL); - } - - list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) - if (_find_list_dev(dev, dev_opp)) - return dev_opp; - - return ERR_PTR(-ENODEV); -} - -/** - * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp - * @opp: opp for which voltage has to be returned for - * - * Return: voltage in micro volt corresponding to the opp, else - * return 0 - * - * Locking: This function must be called under rcu_read_lock(). opp is a rcu - * protected pointer. This means that opp which could have been fetched by - * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are - * under RCU lock. The pointer returned by the opp_find_freq family must be - * used in the same section as the usage of this function with the pointer - * prior to unlocking with rcu_read_unlock() to maintain the integrity of the - * pointer. - */ -unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) -{ - struct dev_pm_opp *tmp_opp; - unsigned long v = 0; - - opp_rcu_lockdep_assert(); - - tmp_opp = rcu_dereference(opp); - if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) - pr_err("%s: Invalid parameters\n", __func__); - else - v = tmp_opp->u_volt; - - return v; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); - -/** - * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp - * @opp: opp for which frequency has to be returned for - * - * Return: frequency in hertz corresponding to the opp, else - * return 0 - * - * Locking: This function must be called under rcu_read_lock(). opp is a rcu - * protected pointer. This means that opp which could have been fetched by - * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are - * under RCU lock. The pointer returned by the opp_find_freq family must be - * used in the same section as the usage of this function with the pointer - * prior to unlocking with rcu_read_unlock() to maintain the integrity of the - * pointer. - */ -unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) -{ - struct dev_pm_opp *tmp_opp; - unsigned long f = 0; - - opp_rcu_lockdep_assert(); - - tmp_opp = rcu_dereference(opp); - if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) - pr_err("%s: Invalid parameters\n", __func__); - else - f = tmp_opp->rate; - - return f; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); - -/** - * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not - * @opp: opp for which turbo mode is being verified - * - * Turbo OPPs are not for normal use, and can be enabled (under certain - * conditions) for short duration of times to finish high throughput work - * quickly. Running on them for longer times may overheat the chip. - * - * Return: true if opp is turbo opp, else false. 
- * - * Locking: This function must be called under rcu_read_lock(). opp is a rcu - * protected pointer. This means that opp which could have been fetched by - * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are - * under RCU lock. The pointer returned by the opp_find_freq family must be - * used in the same section as the usage of this function with the pointer - * prior to unlocking with rcu_read_unlock() to maintain the integrity of the - * pointer. - */ -bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) -{ - struct dev_pm_opp *tmp_opp; - - opp_rcu_lockdep_assert(); - - tmp_opp = rcu_dereference(opp); - if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { - pr_err("%s: Invalid parameters\n", __func__); - return false; - } - - return tmp_opp->turbo; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); - -/** - * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds - * @dev: device for which we do this operation - * - * Return: This function returns the max clock latency in nanoseconds. - * - * Locking: This function takes rcu_read_lock(). - */ -unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) -{ - struct device_opp *dev_opp; - unsigned long clock_latency_ns; - - rcu_read_lock(); - - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) - clock_latency_ns = 0; - else - clock_latency_ns = dev_opp->clock_latency_ns_max; - - rcu_read_unlock(); - return clock_latency_ns; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); - -/** - * dev_pm_opp_get_suspend_opp() - Get suspend opp - * @dev: device for which we do this operation - * - * Return: This function returns pointer to the suspend opp if it is - * defined and available, otherwise it returns NULL. - * - * Locking: This function must be called under rcu_read_lock(). opp is a rcu - * protected pointer. The reason for the same is that the opp pointer which is - * returned will remain valid for use with opp_get_{voltage, freq} only while - * under the locked area. The pointer returned must be used prior to unlocking - * with rcu_read_unlock() to maintain the integrity of the pointer. - */ -struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) -{ - struct device_opp *dev_opp; - - opp_rcu_lockdep_assert(); - - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp) || !dev_opp->suspend_opp || - !dev_opp->suspend_opp->available) - return NULL; - - return dev_opp->suspend_opp; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp); - -/** - * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list - * @dev: device for which we do this operation - * - * Return: This function returns the number of available opps if there are any, - * else returns 0 if none or the corresponding error value. - * - * Locking: This function takes rcu_read_lock(). 
- */ -int dev_pm_opp_get_opp_count(struct device *dev) -{ - struct device_opp *dev_opp; - struct dev_pm_opp *temp_opp; - int count = 0; - - rcu_read_lock(); - - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) { - count = PTR_ERR(dev_opp); - dev_err(dev, "%s: device OPP not found (%d)\n", - __func__, count); - goto out_unlock; - } - - list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { - if (temp_opp->available) - count++; - } - -out_unlock: - rcu_read_unlock(); - return count; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); - -/** - * dev_pm_opp_find_freq_exact() - search for an exact frequency - * @dev: device for which we do this operation - * @freq: frequency to search for - * @available: true/false - match for available opp - * - * Return: Searches for exact match in the opp list and returns pointer to the - * matching opp if found, else returns ERR_PTR in case of error and should - * be handled using IS_ERR. Error return values can be: - * EINVAL: for bad pointer - * ERANGE: no match found for search - * ENODEV: if device not found in list of registered devices - * - * Note: available is a modifier for the search. if available=true, then the - * match is for exact matching frequency and is available in the stored OPP - * table. if false, the match is for exact frequency which is not available. - * - * This provides a mechanism to enable an opp which is not available currently - * or the opposite as well. - * - * Locking: This function must be called under rcu_read_lock(). opp is a rcu - * protected pointer. The reason for the same is that the opp pointer which is - * returned will remain valid for use with opp_get_{voltage, freq} only while - * under the locked area. The pointer returned must be used prior to unlocking - * with rcu_read_unlock() to maintain the integrity of the pointer. - */ -struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, - unsigned long freq, - bool available) -{ - struct device_opp *dev_opp; - struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); - - opp_rcu_lockdep_assert(); - - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) { - int r = PTR_ERR(dev_opp); - dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); - return ERR_PTR(r); - } - - list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { - if (temp_opp->available == available && - temp_opp->rate == freq) { - opp = temp_opp; - break; - } - } - - return opp; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); - -/** - * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq - * @dev: device for which we do this operation - * @freq: Start frequency - * - * Search for the matching ceil *available* OPP from a starting freq - * for a device. - * - * Return: matching *opp and refreshes *freq accordingly, else returns - * ERR_PTR in case of error and should be handled using IS_ERR. Error return - * values can be: - * EINVAL: for bad pointer - * ERANGE: no match found for search - * ENODEV: if device not found in list of registered devices - * - * Locking: This function must be called under rcu_read_lock(). opp is a rcu - * protected pointer. The reason for the same is that the opp pointer which is - * returned will remain valid for use with opp_get_{voltage, freq} only while - * under the locked area. The pointer returned must be used prior to unlocking - * with rcu_read_unlock() to maintain the integrity of the pointer.
- */ -struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, - unsigned long *freq) -{ - struct device_opp *dev_opp; - struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); - - opp_rcu_lockdep_assert(); - - if (!dev || !freq) { - dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); - return ERR_PTR(-EINVAL); - } - - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) - return ERR_CAST(dev_opp); - - list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { - if (temp_opp->available && temp_opp->rate >= *freq) { - opp = temp_opp; - *freq = opp->rate; - break; - } - } - - return opp; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); - -/** - * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq - * @dev: device for which we do this operation - * @freq: Start frequency - * - * Search for the matching floor *available* OPP from a starting freq - * for a device. - * - * Return: matching *opp and refreshes *freq accordingly, else returns - * ERR_PTR in case of error and should be handled using IS_ERR. Error return - * values can be: - * EINVAL: for bad pointer - * ERANGE: no match found for search - * ENODEV: if device not found in list of registered devices - * - * Locking: This function must be called under rcu_read_lock(). opp is a rcu - * protected pointer. The reason for the same is that the opp pointer which is - * returned will remain valid for use with opp_get_{voltage, freq} only while - * under the locked area. The pointer returned must be used prior to unlocking - * with rcu_read_unlock() to maintain the integrity of the pointer. - */ -struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, - unsigned long *freq) -{ - struct device_opp *dev_opp; - struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); - - opp_rcu_lockdep_assert(); - - if (!dev || !freq) { - dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); - return ERR_PTR(-EINVAL); - } - - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) - return ERR_CAST(dev_opp); - - list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { - if (temp_opp->available) { - /* go to the next node, before choosing prev */ - if (temp_opp->rate > *freq) - break; - else - opp = temp_opp; - } - } - if (!IS_ERR(opp)) - *freq = opp->rate; - - return opp; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); - -/* List-dev Helpers */ -static void _kfree_list_dev_rcu(struct rcu_head *head) -{ - struct device_list_opp *list_dev; - - list_dev = container_of(head, struct device_list_opp, rcu_head); - kfree_rcu(list_dev, rcu_head); -} - -static void _remove_list_dev(struct device_list_opp *list_dev, - struct device_opp *dev_opp) -{ - list_del(&list_dev->node); - call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head, - _kfree_list_dev_rcu); -} - -static struct device_list_opp *_add_list_dev(const struct device *dev, - struct device_opp *dev_opp) -{ - struct device_list_opp *list_dev; - - list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL); - if (!list_dev) - return NULL; - - /* Initialize list-dev */ - list_dev->dev = dev; - list_add_rcu(&list_dev->node, &dev_opp->dev_list); - - return list_dev; -} - -/** - * _add_device_opp() - Find device OPP table or allocate a new one - * @dev: device for which we do this operation - * - * It tries to find an existing table first, if it couldn't find one, it - * allocates a new OPP table and returns that. - * - * Return: valid device_opp pointer if success, else NULL. 
- */ -static struct device_opp *_add_device_opp(struct device *dev) -{ - struct device_opp *dev_opp; - struct device_list_opp *list_dev; - - /* Check for existing list for 'dev' first */ - dev_opp = _find_device_opp(dev); - if (!IS_ERR(dev_opp)) - return dev_opp; - - /* - * Allocate a new device OPP table. In the infrequent case where a new - * device is needed to be added, we pay this penalty. - */ - dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL); - if (!dev_opp) - return NULL; - - INIT_LIST_HEAD(&dev_opp->dev_list); - - list_dev = _add_list_dev(dev, dev_opp); - if (!list_dev) { - kfree(dev_opp); - return NULL; - } - - srcu_init_notifier_head(&dev_opp->srcu_head); - INIT_LIST_HEAD(&dev_opp->opp_list); - - /* Secure the device list modification */ - list_add_rcu(&dev_opp->node, &dev_opp_list); - return dev_opp; -} - -/** - * _kfree_device_rcu() - Free device_opp RCU handler - * @head: RCU head - */ -static void _kfree_device_rcu(struct rcu_head *head) -{ - struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head); - - kfree_rcu(device_opp, rcu_head); -} - -/** - * _remove_device_opp() - Removes a device OPP table - * @dev_opp: device OPP table to be removed. - * - * Removes/frees device OPP table if it doesn't contain any OPPs. - */ -static void _remove_device_opp(struct device_opp *dev_opp) -{ - struct device_list_opp *list_dev; - - if (!list_empty(&dev_opp->opp_list)) - return; - - list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp, - node); - - _remove_list_dev(list_dev, dev_opp); - - /* dev_list must be empty now */ - WARN_ON(!list_empty(&dev_opp->dev_list)); - - list_del_rcu(&dev_opp->node); - call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, - _kfree_device_rcu); -} - -/** - * _kfree_opp_rcu() - Free OPP RCU handler - * @head: RCU head - */ -static void _kfree_opp_rcu(struct rcu_head *head) -{ - struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head); - - kfree_rcu(opp, rcu_head); -} - -/** - * _opp_remove() - Remove an OPP from a table definition - * @dev_opp: points back to the device_opp struct this opp belongs to - * @opp: pointer to the OPP to remove - * @notify: OPP_EVENT_REMOVE notification should be sent or not - * - * This function removes an opp definition from the opp list. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * It is assumed that the caller holds required mutex for an RCU updater - * strategy. - */ -static void _opp_remove(struct device_opp *dev_opp, - struct dev_pm_opp *opp, bool notify) -{ - /* - * Notify the changes in the availability of the operable - * frequency/voltage list. - */ - if (notify) - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); - list_del_rcu(&opp->node); - call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); - - _remove_device_opp(dev_opp); -} - -/** - * dev_pm_opp_remove() - Remove an OPP from OPP list - * @dev: device for which we do this operation - * @freq: OPP to remove with matching 'freq' - * - * This function removes an opp from the opp list. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Hence this function internally uses RCU updater strategy with mutex locks - * to keep the integrity of the internal data structures. Callers should ensure - * that this function is *NOT* called under RCU protection or in contexts where - * mutex cannot be locked.
- */ -void dev_pm_opp_remove(struct device *dev, unsigned long freq) -{ - struct dev_pm_opp *opp; - struct device_opp *dev_opp; - bool found = false; - - /* Hold our list modification lock here */ - mutex_lock(&dev_opp_list_lock); - - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) - goto unlock; - - list_for_each_entry(opp, &dev_opp->opp_list, node) { - if (opp->rate == freq) { - found = true; - break; - } - } - - if (!found) { - dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", - __func__, freq); - goto unlock; - } - - _opp_remove(dev_opp, opp, true); -unlock: - mutex_unlock(&dev_opp_list_lock); -} -EXPORT_SYMBOL_GPL(dev_pm_opp_remove); - -static struct dev_pm_opp *_allocate_opp(struct device *dev, - struct device_opp **dev_opp) -{ - struct dev_pm_opp *opp; - - /* allocate new OPP node */ - opp = kzalloc(sizeof(*opp), GFP_KERNEL); - if (!opp) - return NULL; - - INIT_LIST_HEAD(&opp->node); - - *dev_opp = _add_device_opp(dev); - if (!*dev_opp) { - kfree(opp); - return NULL; - } - - return opp; -} - -static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, - struct device_opp *dev_opp) -{ - struct dev_pm_opp *opp; - struct list_head *head = &dev_opp->opp_list; - - /* - * Insert new OPP in order of increasing frequency and discard if - * already present. - * - * Need to use &dev_opp->opp_list in the condition part of the 'for' - * loop, don't replace it with head otherwise it will become an infinite - * loop. - */ - list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { - if (new_opp->rate > opp->rate) { - head = &opp->node; - continue; - } - - if (new_opp->rate < opp->rate) - break; - - /* Duplicate OPPs */ - dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", - __func__, opp->rate, opp->u_volt, opp->available, - new_opp->rate, new_opp->u_volt, new_opp->available); - - return opp->available && new_opp->u_volt == opp->u_volt ? - 0 : -EEXIST; - } - - new_opp->dev_opp = dev_opp; - list_add_rcu(&new_opp->node, head); - - return 0; -} - -/** - * _opp_add_dynamic() - Allocate a dynamic OPP. - * @dev: device for which we do this operation - * @freq: Frequency in Hz for this OPP - * @u_volt: Voltage in uVolts for this OPP - * @dynamic: Dynamically added OPPs. - * - * This function adds an opp definition to the opp list and returns status. - * The opp is made available by default and it can be controlled using - * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove. - * - * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and - * freed by of_free_opp_table. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Hence this function internally uses RCU updater strategy with mutex locks - * to keep the integrity of the internal data structures. Callers should ensure - * that this function is *NOT* called under RCU protection or in contexts where - * mutex cannot be locked. 
- * - * Return: - * 0 On success OR - * Duplicate OPPs (both freq and volt are same) and opp->available - * -EEXIST Freq are same and volt are different OR - * Duplicate OPPs (both freq and volt are same) and !opp->available - * -ENOMEM Memory allocation failure - */ -static int _opp_add_dynamic(struct device *dev, unsigned long freq, - long u_volt, bool dynamic) -{ - struct device_opp *dev_opp; - struct dev_pm_opp *new_opp; - int ret; - - /* Hold our list modification lock here */ - mutex_lock(&dev_opp_list_lock); - - new_opp = _allocate_opp(dev, &dev_opp); - if (!new_opp) { - ret = -ENOMEM; - goto unlock; - } - - /* populate the opp table */ - new_opp->rate = freq; - new_opp->u_volt = u_volt; - new_opp->available = true; - new_opp->dynamic = dynamic; - - ret = _opp_add(dev, new_opp, dev_opp); - if (ret) - goto free_opp; - - mutex_unlock(&dev_opp_list_lock); - - /* - * Notify the changes in the availability of the operable - * frequency/voltage list. - */ - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp); - return 0; - -free_opp: - _opp_remove(dev_opp, new_opp, false); -unlock: - mutex_unlock(&dev_opp_list_lock); - return ret; -} - -/* TODO: Support multiple regulators */ -static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) -{ - u32 microvolt[3] = {0}; - int count, ret; - - /* Missing property isn't a problem, but an invalid entry is */ - if (!of_find_property(opp->np, "opp-microvolt", NULL)) - return 0; - - count = of_property_count_u32_elems(opp->np, "opp-microvolt"); - if (count < 0) { - dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", - __func__, count); - return count; - } - - /* There can be one or three elements here */ - if (count != 1 && count != 3) { - dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", - __func__, count); - return -EINVAL; - } - - ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt, - count); - if (ret) { - dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__, - ret); - return -EINVAL; - } - - opp->u_volt = microvolt[0]; - opp->u_volt_min = microvolt[1]; - opp->u_volt_max = microvolt[2]; - - return 0; -} - -/** - * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) - * @dev: device for which we do this operation - * @np: device node - * - * This function adds an opp definition to the opp list and returns status. The - * opp can be controlled using dev_pm_opp_enable/disable functions and may be - * removed by dev_pm_opp_remove. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Hence this function internally uses RCU updater strategy with mutex locks - * to keep the integrity of the internal data structures. Callers should ensure - * that this function is *NOT* called under RCU protection or in contexts where - * mutex cannot be locked. 
- * - * Return: - * 0 On success OR - * Duplicate OPPs (both freq and volt are same) and opp->available - * -EEXIST Freq are same and volt are different OR - * Duplicate OPPs (both freq and volt are same) and !opp->available - * -ENOMEM Memory allocation failure - * -EINVAL Failed parsing the OPP node - */ -static int _opp_add_static_v2(struct device *dev, struct device_node *np) -{ - struct device_opp *dev_opp; - struct dev_pm_opp *new_opp; - u64 rate; - u32 val; - int ret; - - /* Hold our list modification lock here */ - mutex_lock(&dev_opp_list_lock); - - new_opp = _allocate_opp(dev, &dev_opp); - if (!new_opp) { - ret = -ENOMEM; - goto unlock; - } - - ret = of_property_read_u64(np, "opp-hz", &rate); - if (ret < 0) { - dev_err(dev, "%s: opp-hz not found\n", __func__); - goto free_opp; - } - - /* - * Rate is defined as an unsigned long in clk API, and so casting - * explicitly to its type. Must be fixed once rate is 64 bit - * guaranteed in clk API. - */ - new_opp->rate = (unsigned long)rate; - new_opp->turbo = of_property_read_bool(np, "turbo-mode"); - - new_opp->np = np; - new_opp->dynamic = false; - new_opp->available = true; - - if (!of_property_read_u32(np, "clock-latency-ns", &val)) - new_opp->clock_latency_ns = val; - - ret = opp_get_microvolt(new_opp, dev); - if (ret) - goto free_opp; - - if (!of_property_read_u32(new_opp->np, "opp-microamp", &val)) - new_opp->u_amp = val; - - ret = _opp_add(dev, new_opp, dev_opp); - if (ret) - goto free_opp; - - /* OPP to select on device suspend */ - if (of_property_read_bool(np, "opp-suspend")) { - if (dev_opp->suspend_opp) - dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n", - __func__, dev_opp->suspend_opp->rate, - new_opp->rate); - else - dev_opp->suspend_opp = new_opp; - } - - if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max) - dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns; - - mutex_unlock(&dev_opp_list_lock); - - pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", - __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt, - new_opp->u_volt_min, new_opp->u_volt_max, - new_opp->clock_latency_ns); - - /* - * Notify the changes in the availability of the operable - * frequency/voltage list. - */ - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp); - return 0; - -free_opp: - _opp_remove(dev_opp, new_opp, false); -unlock: - mutex_unlock(&dev_opp_list_lock); - return ret; -} - -/** - * dev_pm_opp_add() - Add an OPP table from a table definition - * @dev: device for which we do this operation - * @freq: Frequency in Hz for this OPP - * @u_volt: Voltage in uVolts for this OPP - * - * This function adds an opp definition to the opp list and returns status. - * The opp is made available by default and it can be controlled using - * dev_pm_opp_enable/disable functions. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Hence this function internally uses RCU updater strategy with mutex locks - * to keep the integrity of the internal data structures. Callers should ensure - * that this function is *NOT* called under RCU protection or in contexts where - * mutex cannot be locked.
- * - * Return: - * 0 On success OR - * Duplicate OPPs (both freq and volt are same) and opp->available - * -EEXIST Freq are same and volt are different OR - * Duplicate OPPs (both freq and volt are same) and !opp->available - * -ENOMEM Memory allocation failure - */ -int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) -{ - return _opp_add_dynamic(dev, freq, u_volt, true); -} -EXPORT_SYMBOL_GPL(dev_pm_opp_add); - -/** - * _opp_set_availability() - helper to set the availability of an opp - * @dev: device for which we do this operation - * @freq: OPP frequency to modify availability - * @availability_req: availability status requested for this opp - * - * Set the availability of an OPP with an RCU operation, opp_{enable,disable} - * share a common logic which is isolated here. - * - * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modification was done OR modification was - * successful. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Hence this function internally uses RCU updater strategy with mutex locks to - * keep the integrity of the internal data structures. Callers should ensure - * that this function is *NOT* called under RCU protection or in contexts where - * mutex locking or synchronize_rcu() blocking calls cannot be used. - */ -static int _opp_set_availability(struct device *dev, unsigned long freq, - bool availability_req) -{ - struct device_opp *dev_opp; - struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); - int r = 0; - - /* keep the node allocated */ - new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL); - if (!new_opp) - return -ENOMEM; - - mutex_lock(&dev_opp_list_lock); - - /* Find the device_opp */ - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) { - r = PTR_ERR(dev_opp); - dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); - goto unlock; - } - - /* Do we have the frequency? */ - list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) { - if (tmp_opp->rate == freq) { - opp = tmp_opp; - break; - } - } - if (IS_ERR(opp)) { - r = PTR_ERR(opp); - goto unlock; - } - - /* Is update really needed? */ - if (opp->available == availability_req) - goto unlock; - /* copy the old data over */ - *new_opp = *opp; - - /* plug in new node */ - new_opp->available = availability_req; - - list_replace_rcu(&opp->node, &new_opp->node); - mutex_unlock(&dev_opp_list_lock); - call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); - - /* Notify the change of the OPP availability */ - if (availability_req) - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE, - new_opp); - else - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE, - new_opp); - - return 0; - -unlock: - mutex_unlock(&dev_opp_list_lock); - kfree(new_opp); - return r; -} - -/** - * dev_pm_opp_enable() - Enable a specific OPP - * @dev: device for which we do this operation - * @freq: OPP frequency to enable - * - * Enables a provided opp. If the operation is valid, this returns 0, else the - * corresponding error value. It is meant to be used for users an OPP available - * after being temporarily made unavailable with dev_pm_opp_disable. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Hence this function indirectly uses RCU and mutex locks to keep the - * integrity of the internal data structures. 
Callers should ensure that - * this function is *NOT* called under RCU protection or in contexts where - * mutex locking or synchronize_rcu() blocking calls cannot be used. - * - * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modification was done OR modification was - * successful. - */ -int dev_pm_opp_enable(struct device *dev, unsigned long freq) -{ - return _opp_set_availability(dev, freq, true); -} -EXPORT_SYMBOL_GPL(dev_pm_opp_enable); - -/** - * dev_pm_opp_disable() - Disable a specific OPP - * @dev: device for which we do this operation - * @freq: OPP frequency to disable - * - * Disables a provided opp. If the operation is valid, this returns - * 0, else the corresponding error value. It is meant to be a temporary - * control by users to make this OPP not available until the circumstances are - * right to make it available again (with a call to dev_pm_opp_enable). - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Hence this function indirectly uses RCU and mutex locks to keep the - * integrity of the internal data structures. Callers should ensure that - * this function is *NOT* called under RCU protection or in contexts where - * mutex locking or synchronize_rcu() blocking calls cannot be used. - * - * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modification was done OR modification was - * successful. - */ -int dev_pm_opp_disable(struct device *dev, unsigned long freq) -{ - return _opp_set_availability(dev, freq, false); -} -EXPORT_SYMBOL_GPL(dev_pm_opp_disable); - -/** - * dev_pm_opp_get_notifier() - find notifier_head of the device with opp - * @dev: device pointer used to lookup device OPPs. - * - * Return: pointer to notifier head if found, otherwise -ENODEV or - * -EINVAL based on type of error casted as pointer. value must be checked - * with IS_ERR to determine valid pointer or error result. - * - * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU - * protected pointer. The reason for the same is that the opp pointer which is - * returned will remain valid for use with opp_get_{voltage, freq} only while - * under the locked area. The pointer returned must be used prior to unlocking - * with rcu_read_unlock() to maintain the integrity of the pointer. - */ -struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) -{ - struct device_opp *dev_opp = _find_device_opp(dev); - - if (IS_ERR(dev_opp)) - return ERR_CAST(dev_opp); /* matching type */ - - return &dev_opp->srcu_head; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); - -#ifdef CONFIG_OF -/** - * of_free_opp_table() - Free OPP table entries created from static DT entries - * @dev: device pointer used to lookup device OPPs. - * - * Free OPPs created using static entries present in DT. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Hence this function indirectly uses RCU updater strategy with mutex locks - * to keep the integrity of the internal data structures. Callers should ensure - * that this function is *NOT* called under RCU protection or in contexts where - * mutex cannot be locked. 
- */ -void of_free_opp_table(struct device *dev) -{ - struct device_opp *dev_opp; - struct dev_pm_opp *opp, *tmp; - - /* Hold our list modification lock here */ - mutex_lock(&dev_opp_list_lock); - - /* Check for existing list for 'dev' */ - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) { - int error = PTR_ERR(dev_opp); - - if (error != -ENODEV) - WARN(1, "%s: dev_opp: %d\n", - IS_ERR_OR_NULL(dev) ? - "Invalid device" : dev_name(dev), - error); - goto unlock; - } - - /* Find if dev_opp manages a single device */ - if (list_is_singular(&dev_opp->dev_list)) { - /* Free static OPPs */ - list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { - if (!opp->dynamic) - _opp_remove(dev_opp, opp, true); - } - } else { - _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp); - } - -unlock: - mutex_unlock(&dev_opp_list_lock); -} -EXPORT_SYMBOL_GPL(of_free_opp_table); - -void of_cpumask_free_opp_table(cpumask_var_t cpumask) -{ - struct device *cpu_dev; - int cpu; - - WARN_ON(cpumask_empty(cpumask)); - - for_each_cpu(cpu, cpumask) { - cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) { - pr_err("%s: failed to get cpu%d device\n", __func__, - cpu); - continue; - } - - of_free_opp_table(cpu_dev); - } -} -EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table); - -/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */ -static struct device_node * -_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop) -{ - struct device_node *opp_np; - - opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value)); - if (!opp_np) { - dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n", - __func__, prop->name); - return ERR_PTR(-EINVAL); - } - - return opp_np; -} - -/* Returns opp descriptor node for a device. Caller must do of_node_put() */ -static struct device_node *_of_get_opp_desc_node(struct device *dev) -{ - const struct property *prop; - - prop = of_find_property(dev->of_node, "operating-points-v2", NULL); - if (!prop) - return ERR_PTR(-ENODEV); - if (!prop->value) - return ERR_PTR(-ENODATA); - - /* - * TODO: Support for multiple OPP tables. - * - * There should be only ONE phandle present in "operating-points-v2" - * property. 
- */ - if (prop->length != sizeof(__be32)) { - dev_err(dev, "%s: Invalid opp desc phandle\n", __func__); - return ERR_PTR(-EINVAL); - } - - return _of_get_opp_desc_node_from_prop(dev, prop); -} - -/* Initializes OPP tables based on new bindings */ -static int _of_init_opp_table_v2(struct device *dev, - const struct property *prop) -{ - struct device_node *opp_np, *np; - struct device_opp *dev_opp; - int ret = 0, count = 0; - - if (!prop->value) - return -ENODATA; - - /* Get opp node */ - opp_np = _of_get_opp_desc_node_from_prop(dev, prop); - if (IS_ERR(opp_np)) - return PTR_ERR(opp_np); - - dev_opp = _managed_opp(opp_np); - if (dev_opp) { - /* OPPs are already managed */ - if (!_add_list_dev(dev, dev_opp)) - ret = -ENOMEM; - goto put_opp_np; - } - - /* We have opp-list node now, iterate over it and add OPPs */ - for_each_available_child_of_node(opp_np, np) { - count++; - - ret = _opp_add_static_v2(dev, np); - if (ret) { - dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, - ret); - goto free_table; - } - } - - /* There should be one of more OPP defined */ - if (WARN_ON(!count)) { - ret = -ENOENT; - goto put_opp_np; - } - - dev_opp = _find_device_opp(dev); - if (WARN_ON(IS_ERR(dev_opp))) { - ret = PTR_ERR(dev_opp); - goto free_table; - } - - dev_opp->np = opp_np; - dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared"); - - of_node_put(opp_np); - return 0; - -free_table: - of_free_opp_table(dev); -put_opp_np: - of_node_put(opp_np); - - return ret; -} - -/* Initializes OPP tables based on old-deprecated bindings */ -static int _of_init_opp_table_v1(struct device *dev) -{ - const struct property *prop; - const __be32 *val; - int nr; - - prop = of_find_property(dev->of_node, "operating-points", NULL); - if (!prop) - return -ENODEV; - if (!prop->value) - return -ENODATA; - - /* - * Each OPP is a set of tuples consisting of frequency and - * voltage like . - */ - nr = prop->length / sizeof(u32); - if (nr % 2) { - dev_err(dev, "%s: Invalid OPP list\n", __func__); - return -EINVAL; - } - - val = prop->value; - while (nr) { - unsigned long freq = be32_to_cpup(val++) * 1000; - unsigned long volt = be32_to_cpup(val++); - - if (_opp_add_dynamic(dev, freq, volt, false)) - dev_warn(dev, "%s: Failed to add OPP %ld\n", - __func__, freq); - nr -= 2; - } - - return 0; -} - -/** - * of_init_opp_table() - Initialize opp table from device tree - * @dev: device pointer used to lookup device OPPs. - * - * Register the initial OPP table with the OPP library for given device. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * Hence this function indirectly uses RCU updater strategy with mutex locks - * to keep the integrity of the internal data structures. Callers should ensure - * that this function is *NOT* called under RCU protection or in contexts where - * mutex cannot be locked. - * - * Return: - * 0 On success OR - * Duplicate OPPs (both freq and volt are same) and opp->available - * -EEXIST Freq are same and volt are different OR - * Duplicate OPPs (both freq and volt are same) and !opp->available - * -ENOMEM Memory allocation failure - * -ENODEV when 'operating-points' property is not found or is invalid data - * in device node. - * -ENODATA when empty 'operating-points' property is found - * -EINVAL when invalid entries are found in opp-v2 table - */ -int of_init_opp_table(struct device *dev) -{ - const struct property *prop; - - /* - * OPPs have two version of bindings now. The older one is deprecated, - * try for the new binding first. 
- */ - prop = of_find_property(dev->of_node, "operating-points-v2", NULL); - if (!prop) { - /* - * Try old-deprecated bindings for backward compatibility with - * older dtbs. - */ - return _of_init_opp_table_v1(dev); - } - - return _of_init_opp_table_v2(dev, prop); -} -EXPORT_SYMBOL_GPL(of_init_opp_table); - -int of_cpumask_init_opp_table(cpumask_var_t cpumask) -{ - struct device *cpu_dev; - int cpu, ret = 0; - - WARN_ON(cpumask_empty(cpumask)); - - for_each_cpu(cpu, cpumask) { - cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) { - pr_err("%s: failed to get cpu%d device\n", __func__, - cpu); - continue; - } - - ret = of_init_opp_table(cpu_dev); - if (ret) { - pr_err("%s: couldn't find opp table for cpu:%d, %d\n", - __func__, cpu, ret); - - /* Free all other OPPs */ - of_cpumask_free_opp_table(cpumask); - break; - } - } - - return ret; -} -EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table); - -/* Required only for V1 bindings, as v2 can manage it from DT itself */ -int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) -{ - struct device_list_opp *list_dev; - struct device_opp *dev_opp; - struct device *dev; - int cpu, ret = 0; - - rcu_read_lock(); - - dev_opp = _find_device_opp(cpu_dev); - if (IS_ERR(dev_opp)) { - ret = -EINVAL; - goto out_rcu_read_unlock; - } - - for_each_cpu(cpu, cpumask) { - if (cpu == cpu_dev->id) - continue; - - dev = get_cpu_device(cpu); - if (!dev) { - dev_err(cpu_dev, "%s: failed to get cpu%d device\n", - __func__, cpu); - continue; - } - - list_dev = _add_list_dev(dev, dev_opp); - if (!list_dev) { - dev_err(dev, "%s: failed to add list-dev for cpu%d device\n", - __func__, cpu); - continue; - } - } -out_rcu_read_unlock: - rcu_read_unlock(); - - return 0; -} -EXPORT_SYMBOL_GPL(set_cpus_sharing_opps); - -/* - * Works only for OPP v2 bindings. - * - * cpumask should be already set to mask of cpu_dev->id. - * Returns -ENOENT if operating-points-v2 bindings aren't supported. - */ -int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) -{ - struct device_node *np, *tmp_np; - struct device *tcpu_dev; - int cpu, ret = 0; - - /* Get OPP descriptor node */ - np = _of_get_opp_desc_node(cpu_dev); - if (IS_ERR(np)) { - dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__, - PTR_ERR(np)); - return -ENOENT; - } - - /* OPPs are shared ? 
*/ - if (!of_property_read_bool(np, "opp-shared")) - goto put_cpu_node; - - for_each_possible_cpu(cpu) { - if (cpu == cpu_dev->id) - continue; - - tcpu_dev = get_cpu_device(cpu); - if (!tcpu_dev) { - dev_err(cpu_dev, "%s: failed to get cpu%d device\n", - __func__, cpu); - ret = -ENODEV; - goto put_cpu_node; - } - - /* Get OPP descriptor node */ - tmp_np = _of_get_opp_desc_node(tcpu_dev); - if (IS_ERR(tmp_np)) { - dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n", - __func__, PTR_ERR(tmp_np)); - ret = PTR_ERR(tmp_np); - goto put_cpu_node; - } - - /* CPUs are sharing opp node */ - if (np == tmp_np) - cpumask_set_cpu(cpu, cpumask); - - of_node_put(tmp_np); - } - -put_cpu_node: - of_node_put(np); - return ret; -} -EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps); -#endif diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile new file mode 100644 index 000000000..33c1e18c4 --- /dev/null +++ b/drivers/base/power/opp/Makefile @@ -0,0 +1,2 @@ +ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG +obj-y += core.o cpu.o diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c new file mode 100644 index 000000000..b8e76f750 --- /dev/null +++ b/drivers/base/power/opp/core.c @@ -0,0 +1,1316 @@ +/* + * Generic OPP Interface + * + * Copyright (C) 2009-2010 Texas Instruments Incorporated. + * Nishanth Menon + * Romit Dasgupta + * Kevin Hilman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#include "opp.h" + +/* + * The root of the list of all devices. All device_opp structures branch off + * from here, with each device_opp containing the list of opp it supports in + * various states of availability. + */ +static LIST_HEAD(dev_opp_list); +/* Lock to allow exclusive modification to the device and opp lists */ +DEFINE_MUTEX(dev_opp_list_lock); + +#define opp_rcu_lockdep_assert() \ +do { \ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&dev_opp_list_lock), \ + "Missing rcu_read_lock() or " \ + "dev_opp_list_lock protection"); \ +} while (0) + +static struct device_list_opp *_find_list_dev(const struct device *dev, + struct device_opp *dev_opp) +{ + struct device_list_opp *list_dev; + + list_for_each_entry(list_dev, &dev_opp->dev_list, node) + if (list_dev->dev == dev) + return list_dev; + + return NULL; +} + +static struct device_opp *_managed_opp(const struct device_node *np) +{ + struct device_opp *dev_opp; + + list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) { + if (dev_opp->np == np) { + /* + * Multiple devices can point to the same OPP table and + * so will have same node-pointer, np. + * + * But the OPPs will be considered as shared only if the + * OPP table contains a "opp-shared" property. + */ + return dev_opp->shared_opp ? dev_opp : NULL; + } + } + + return NULL; +} + +/** + * _find_device_opp() - find device_opp struct using device pointer + * @dev: device pointer used to lookup device OPPs + * + * Search list of device OPPs for one containing matching device. Does a RCU + * reader operation to grab the pointer needed. + * + * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or + * -EINVAL based on type of error. + * + * Locking: For readers, this function must be called under rcu_read_lock(). 
+ * device_opp is a RCU protected pointer, which means that device_opp is valid + * as long as we are under RCU lock. + * + * For Writers, this function must be called with dev_opp_list_lock held. + */ +struct device_opp *_find_device_opp(struct device *dev) +{ + struct device_opp *dev_opp; + + opp_rcu_lockdep_assert(); + + if (IS_ERR_OR_NULL(dev)) { + pr_err("%s: Invalid parameters\n", __func__); + return ERR_PTR(-EINVAL); + } + + list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) + if (_find_list_dev(dev, dev_opp)) + return dev_opp; + + return ERR_PTR(-ENODEV); +} + +/** + * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp + * @opp: opp for which voltage has to be returned for + * + * Return: voltage in micro volt corresponding to the opp, else + * return 0 + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. This means that opp which could have been fetched by + * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are + * under RCU lock. The pointer returned by the opp_find_freq family must be + * used in the same section as the usage of this function with the pointer + * prior to unlocking with rcu_read_unlock() to maintain the integrity of the + * pointer. + */ +unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) +{ + struct dev_pm_opp *tmp_opp; + unsigned long v = 0; + + opp_rcu_lockdep_assert(); + + tmp_opp = rcu_dereference(opp); + if (IS_ERR_OR_NULL(tmp_opp)) + pr_err("%s: Invalid parameters\n", __func__); + else + v = tmp_opp->u_volt; + + return v; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); + +/** + * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp + * @opp: opp for which frequency has to be returned for + * + * Return: frequency in hertz corresponding to the opp, else + * return 0 + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. This means that opp which could have been fetched by + * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are + * under RCU lock. The pointer returned by the opp_find_freq family must be + * used in the same section as the usage of this function with the pointer + * prior to unlocking with rcu_read_unlock() to maintain the integrity of the + * pointer. + */ +unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) +{ + struct dev_pm_opp *tmp_opp; + unsigned long f = 0; + + opp_rcu_lockdep_assert(); + + tmp_opp = rcu_dereference(opp); + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) + pr_err("%s: Invalid parameters\n", __func__); + else + f = tmp_opp->rate; + + return f; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); + +/** + * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not + * @opp: opp for which turbo mode is being verified + * + * Turbo OPPs are not for normal use, and can be enabled (under certain + * conditions) for short duration of times to finish high throughput work + * quickly. Running on them for longer times may overheat the chip. + * + * Return: true if opp is turbo opp, else false. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. This means that opp which could have been fetched by + * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are + * under RCU lock. 
The pointer returned by the opp_find_freq family must be + * used in the same section as the usage of this function with the pointer + * prior to unlocking with rcu_read_unlock() to maintain the integrity of the + * pointer. + */ +bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) +{ + struct dev_pm_opp *tmp_opp; + + opp_rcu_lockdep_assert(); + + tmp_opp = rcu_dereference(opp); + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { + pr_err("%s: Invalid parameters\n", __func__); + return false; + } + + return tmp_opp->turbo; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); + +/** + * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds + * @dev: device for which we do this operation + * + * Return: This function returns the max clock latency in nanoseconds. + * + * Locking: This function takes rcu_read_lock(). + */ +unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) +{ + struct device_opp *dev_opp; + unsigned long clock_latency_ns; + + rcu_read_lock(); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) + clock_latency_ns = 0; + else + clock_latency_ns = dev_opp->clock_latency_ns_max; + + rcu_read_unlock(); + return clock_latency_ns; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); + +/** + * dev_pm_opp_get_suspend_opp() - Get suspend opp + * @dev: device for which we do this operation + * + * Return: This function returns pointer to the suspend opp if it is + * defined and available, otherwise it returns NULL. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) +{ + struct device_opp *dev_opp; + + opp_rcu_lockdep_assert(); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp) || !dev_opp->suspend_opp || + !dev_opp->suspend_opp->available) + return NULL; + + return dev_opp->suspend_opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp); + +/** + * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list + * @dev: device for which we do this operation + * + * Return: This function returns the number of available opps if there are any, + * else returns 0 if none or the corresponding error value. + * + * Locking: This function takes rcu_read_lock(). + */ +int dev_pm_opp_get_opp_count(struct device *dev) +{ + struct device_opp *dev_opp; + struct dev_pm_opp *temp_opp; + int count = 0; + + rcu_read_lock(); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) { + count = PTR_ERR(dev_opp); + dev_err(dev, "%s: device OPP not found (%d)\n", + __func__, count); + goto out_unlock; + } + + list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + if (temp_opp->available) + count++; + } + +out_unlock: + rcu_read_unlock(); + return count; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); + +/** + * dev_pm_opp_find_freq_exact() - search for an exact frequency + * @dev: device for which we do this operation + * @freq: frequency to search for + * @available: true/false - match for available opp + * + * Return: Searches for exact match in the opp list and returns pointer to the + * matching opp if found, else returns ERR_PTR in case of error and should + * be handled using IS_ERR. 
Error return values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * Note: available is a modifier for the search. if available=true, then the + * match is for exact matching frequency and is available in the stored OPP + * table. if false, the match is for exact frequency which is not available. + * + * This provides a mechanism to enable an opp which is not available currently + * or the opposite as well. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, + unsigned long freq, + bool available) +{ + struct device_opp *dev_opp; + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + + opp_rcu_lockdep_assert(); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) { + int r = PTR_ERR(dev_opp); + dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); + return ERR_PTR(r); + } + + list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + if (temp_opp->available == available && + temp_opp->rate == freq) { + opp = temp_opp; + break; + } + } + + return opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); + +/** + * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq + * @dev: device for which we do this operation + * @freq: Start frequency + * + * Search for the matching ceil *available* OPP from a starting freq + * for a device. + * + * Return: matching *opp and refreshes *freq accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. Error return + * values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, + unsigned long *freq) +{ + struct device_opp *dev_opp; + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + + opp_rcu_lockdep_assert(); + + if (!dev || !freq) { + dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); + return ERR_PTR(-EINVAL); + } + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) + return ERR_CAST(dev_opp); + + list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + if (temp_opp->available && temp_opp->rate >= *freq) { + opp = temp_opp; + *freq = opp->rate; + break; + } + } + + return opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); + +/** + * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq + * @dev: device for which we do this operation + * @freq: Start frequency + * + * Search for the matching floor *available* OPP from a starting freq + * for a device. + * + * Return: matching *opp and refreshes *freq accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. 
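/*
 * Editorial sketch, not part of this patch: a minimal consumer of the RCU
 * contract described above. The OPP pointer returned by the
 * dev_pm_opp_find_freq_* helpers may only be dereferenced (e.g. via
 * dev_pm_opp_get_voltage()) inside the same rcu_read_lock() section.
 * The function name is hypothetical.
 */
static int example_pick_opp(struct device *dev, unsigned long *freq_hz,
			    unsigned long *volt_uv)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();

	/* Rounds *freq_hz up to the nearest available OPP */
	opp = dev_pm_opp_find_freq_ceil(dev, freq_hz);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}

	*volt_uv = dev_pm_opp_get_voltage(opp);

	/* 'opp' must not be used past this point */
	rcu_read_unlock();

	return 0;
}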
Error return + * values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, + unsigned long *freq) +{ + struct device_opp *dev_opp; + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + + opp_rcu_lockdep_assert(); + + if (!dev || !freq) { + dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); + return ERR_PTR(-EINVAL); + } + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) + return ERR_CAST(dev_opp); + + list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + if (temp_opp->available) { + /* go to the next node, before choosing prev */ + if (temp_opp->rate > *freq) + break; + else + opp = temp_opp; + } + } + if (!IS_ERR(opp)) + *freq = opp->rate; + + return opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); + +/* List-dev Helpers */ +static void _kfree_list_dev_rcu(struct rcu_head *head) +{ + struct device_list_opp *list_dev; + + list_dev = container_of(head, struct device_list_opp, rcu_head); + kfree_rcu(list_dev, rcu_head); +} + +static void _remove_list_dev(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + list_del(&list_dev->node); + call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head, + _kfree_list_dev_rcu); +} + +struct device_list_opp *_add_list_dev(const struct device *dev, + struct device_opp *dev_opp) +{ + struct device_list_opp *list_dev; + + list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL); + if (!list_dev) + return NULL; + + /* Initialize list-dev */ + list_dev->dev = dev; + list_add_rcu(&list_dev->node, &dev_opp->dev_list); + + return list_dev; +} + +/** + * _add_device_opp() - Find device OPP table or allocate a new one + * @dev: device for which we do this operation + * + * It tries to find an existing table first, if it couldn't find one, it + * allocates a new OPP table and returns that. + * + * Return: valid device_opp pointer if success, else NULL. + */ +static struct device_opp *_add_device_opp(struct device *dev) +{ + struct device_opp *dev_opp; + struct device_list_opp *list_dev; + + /* Check for existing list for 'dev' first */ + dev_opp = _find_device_opp(dev); + if (!IS_ERR(dev_opp)) + return dev_opp; + + /* + * Allocate a new device OPP table. In the infrequent case where a new + * device is needed to be added, we pay this penalty. 
+ */
+	dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
+	if (!dev_opp)
+		return NULL;
+
+	INIT_LIST_HEAD(&dev_opp->dev_list);
+
+	list_dev = _add_list_dev(dev, dev_opp);
+	if (!list_dev) {
+		kfree(dev_opp);
+		return NULL;
+	}
+
+	srcu_init_notifier_head(&dev_opp->srcu_head);
+	INIT_LIST_HEAD(&dev_opp->opp_list);
+
+	/* Secure the device list modification */
+	list_add_rcu(&dev_opp->node, &dev_opp_list);
+	return dev_opp;
+}
+
+/**
+ * _kfree_device_rcu() - Free device_opp RCU handler
+ * @head: RCU head
+ */
+static void _kfree_device_rcu(struct rcu_head *head)
+{
+	struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+
+	kfree_rcu(device_opp, rcu_head);
+}
+
+/**
+ * _remove_device_opp() - Removes a device OPP table
+ * @dev_opp: device OPP table to be removed.
+ *
+ * Removes/frees device OPP table if it doesn't contain any OPPs.
+ */
+static void _remove_device_opp(struct device_opp *dev_opp)
+{
+	struct device_list_opp *list_dev;
+
+	if (!list_empty(&dev_opp->opp_list))
+		return;
+
+	list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
+				    node);
+
+	_remove_list_dev(list_dev, dev_opp);
+
+	/* dev_list must be empty now */
+	WARN_ON(!list_empty(&dev_opp->dev_list));
+
+	list_del_rcu(&dev_opp->node);
+	call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
+		  _kfree_device_rcu);
+}
+
+/**
+ * _kfree_opp_rcu() - Free OPP RCU handler
+ * @head: RCU head
+ */
+static void _kfree_opp_rcu(struct rcu_head *head)
+{
+	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
+
+	kfree_rcu(opp, rcu_head);
+}
+
+/**
+ * _opp_remove() - Remove an OPP from a table definition
+ * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp: pointer to the OPP to remove
+ * @notify: OPP_EVENT_REMOVE notification should be sent or not
+ *
+ * This function removes an opp definition from the opp list.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * It is assumed that the caller holds required mutex for an RCU updater
+ * strategy.
+ */
+static void _opp_remove(struct device_opp *dev_opp,
+			struct dev_pm_opp *opp, bool notify)
+{
+	/*
+	 * Notify the changes in the availability of the operable
+	 * frequency/voltage list.
+	 */
+	if (notify)
+		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+	list_del_rcu(&opp->node);
+	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+
+	_remove_device_opp(dev_opp);
+}
+
+/**
+ * dev_pm_opp_remove() - Remove an OPP from OPP list
+ * @dev: device for which we do this operation
+ * @freq: OPP to remove with matching 'freq'
+ *
+ * This function removes an opp from the opp list.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */ +void dev_pm_opp_remove(struct device *dev, unsigned long freq) +{ + struct dev_pm_opp *opp; + struct device_opp *dev_opp; + bool found = false; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) + goto unlock; + + list_for_each_entry(opp, &dev_opp->opp_list, node) { + if (opp->rate == freq) { + found = true; + break; + } + } + + if (!found) { + dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", + __func__, freq); + goto unlock; + } + + _opp_remove(dev_opp, opp, true); +unlock: + mutex_unlock(&dev_opp_list_lock); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_remove); + +static struct dev_pm_opp *_allocate_opp(struct device *dev, + struct device_opp **dev_opp) +{ + struct dev_pm_opp *opp; + + /* allocate new OPP node */ + opp = kzalloc(sizeof(*opp), GFP_KERNEL); + if (!opp) + return NULL; + + INIT_LIST_HEAD(&opp->node); + + *dev_opp = _add_device_opp(dev); + if (!*dev_opp) { + kfree(opp); + return NULL; + } + + return opp; +} + +static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, + struct device_opp *dev_opp) +{ + struct dev_pm_opp *opp; + struct list_head *head = &dev_opp->opp_list; + + /* + * Insert new OPP in order of increasing frequency and discard if + * already present. + * + * Need to use &dev_opp->opp_list in the condition part of the 'for' + * loop, don't replace it with head otherwise it will become an infinite + * loop. + */ + list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { + if (new_opp->rate > opp->rate) { + head = &opp->node; + continue; + } + + if (new_opp->rate < opp->rate) + break; + + /* Duplicate OPPs */ + dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", + __func__, opp->rate, opp->u_volt, opp->available, + new_opp->rate, new_opp->u_volt, new_opp->available); + + return opp->available && new_opp->u_volt == opp->u_volt ? + 0 : -EEXIST; + } + + new_opp->dev_opp = dev_opp; + list_add_rcu(&new_opp->node, head); + + return 0; +} + +/** + * _opp_add_v1() - Allocate a OPP based on v1 bindings. + * @dev: device for which we do this operation + * @freq: Frequency in Hz for this OPP + * @u_volt: Voltage in uVolts for this OPP + * @dynamic: Dynamically added OPPs. + * + * This function adds an opp definition to the opp list and returns status. + * The opp is made available by default and it can be controlled using + * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove. + * + * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table + * and freed by dev_pm_opp_of_remove_table. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. 
+ * + * Return: + * 0 On success OR + * Duplicate OPPs (both freq and volt are same) and opp->available + * -EEXIST Freq are same and volt are different OR + * Duplicate OPPs (both freq and volt are same) and !opp->available + * -ENOMEM Memory allocation failure + */ +static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, + bool dynamic) +{ + struct device_opp *dev_opp; + struct dev_pm_opp *new_opp; + int ret; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + new_opp = _allocate_opp(dev, &dev_opp); + if (!new_opp) { + ret = -ENOMEM; + goto unlock; + } + + /* populate the opp table */ + new_opp->rate = freq; + new_opp->u_volt = u_volt; + new_opp->available = true; + new_opp->dynamic = dynamic; + + ret = _opp_add(dev, new_opp, dev_opp); + if (ret) + goto free_opp; + + mutex_unlock(&dev_opp_list_lock); + + /* + * Notify the changes in the availability of the operable + * frequency/voltage list. + */ + srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp); + return 0; + +free_opp: + _opp_remove(dev_opp, new_opp, false); +unlock: + mutex_unlock(&dev_opp_list_lock); + return ret; +} + +/* TODO: Support multiple regulators */ +static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev) +{ + u32 microvolt[3] = {0}; + u32 val; + int count, ret; + + /* Missing property isn't a problem, but an invalid entry is */ + if (!of_find_property(opp->np, "opp-microvolt", NULL)) + return 0; + + count = of_property_count_u32_elems(opp->np, "opp-microvolt"); + if (count < 0) { + dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", + __func__, count); + return count; + } + + /* There can be one or three elements here */ + if (count != 1 && count != 3) { + dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", + __func__, count); + return -EINVAL; + } + + ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt, + count); + if (ret) { + dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__, + ret); + return -EINVAL; + } + + opp->u_volt = microvolt[0]; + opp->u_volt_min = microvolt[1]; + opp->u_volt_max = microvolt[2]; + + if (!of_property_read_u32(opp->np, "opp-microamp", &val)) + opp->u_amp = val; + + return 0; +} + +/** + * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) + * @dev: device for which we do this operation + * @np: device node + * + * This function adds an opp definition to the opp list and returns status. The + * opp can be controlled using dev_pm_opp_enable/disable functions and may be + * removed by dev_pm_opp_remove. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. 
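/*
 * Editorial illustration, not part of this patch: a sketch of the 'v2'
 * per-OPP device-tree node that _opp_add_static_v2() parses. The property
 * names match the code; the node name and all values are invented.
 *
 *	opp@1000000000 {
 *		opp-hz = /bits/ 64 <1000000000>;
 *		opp-microvolt = <975000 970000 985000>; (target min max)
 *		opp-microamp = <70000>;
 *		clock-latency-ns = <300000>;
 *		opp-suspend;
 *	};
 *
 * "opp-microvolt" may carry either a single target value or the triplet
 * shown here; opp_parse_supplies() rejects any other element count.
 */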
+ * + * Return: + * 0 On success OR + * Duplicate OPPs (both freq and volt are same) and opp->available + * -EEXIST Freq are same and volt are different OR + * Duplicate OPPs (both freq and volt are same) and !opp->available + * -ENOMEM Memory allocation failure + * -EINVAL Failed parsing the OPP node + */ +static int _opp_add_static_v2(struct device *dev, struct device_node *np) +{ + struct device_opp *dev_opp; + struct dev_pm_opp *new_opp; + u64 rate; + u32 val; + int ret; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + new_opp = _allocate_opp(dev, &dev_opp); + if (!new_opp) { + ret = -ENOMEM; + goto unlock; + } + + ret = of_property_read_u64(np, "opp-hz", &rate); + if (ret < 0) { + dev_err(dev, "%s: opp-hz not found\n", __func__); + goto free_opp; + } + + /* + * Rate is defined as an unsigned long in clk API, and so casting + * explicitly to its type. Must be fixed once rate is 64 bit + * guaranteed in clk API. + */ + new_opp->rate = (unsigned long)rate; + new_opp->turbo = of_property_read_bool(np, "turbo-mode"); + + new_opp->np = np; + new_opp->dynamic = false; + new_opp->available = true; + + if (!of_property_read_u32(np, "clock-latency-ns", &val)) + new_opp->clock_latency_ns = val; + + ret = opp_parse_supplies(new_opp, dev); + if (ret) + goto free_opp; + + ret = _opp_add(dev, new_opp, dev_opp); + if (ret) + goto free_opp; + + /* OPP to select on device suspend */ + if (of_property_read_bool(np, "opp-suspend")) { + if (dev_opp->suspend_opp) + dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n", + __func__, dev_opp->suspend_opp->rate, + new_opp->rate); + else + dev_opp->suspend_opp = new_opp; + } + + if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max) + dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns; + + mutex_unlock(&dev_opp_list_lock); + + pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", + __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt, + new_opp->u_volt_min, new_opp->u_volt_max, + new_opp->clock_latency_ns); + + /* + * Notify the changes in the availability of the operable + * frequency/voltage list. + */ + srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp); + return 0; + +free_opp: + _opp_remove(dev_opp, new_opp, false); +unlock: + mutex_unlock(&dev_opp_list_lock); + return ret; +} + +/** + * dev_pm_opp_add() - Add an OPP table from a table definitions + * @dev: device for which we do this operation + * @freq: Frequency in Hz for this OPP + * @u_volt: Voltage in uVolts for this OPP + * + * This function adds an opp definition to the opp list and returns status. + * The opp is made available by default and it can be controlled using + * dev_pm_opp_enable/disable functions. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. 
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq are same and volt are different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ */
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+{
+	return _opp_add_v1(dev, freq, u_volt, true);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
+
+/**
+ * _opp_set_availability() - helper to set the availability of an opp
+ * @dev: device for which we do this operation
+ * @freq: OPP frequency to modify availability
+ * @availability_req: availability status requested for this opp
+ *
+ * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
+ * share a common logic which is isolated here.
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modification was done OR modification was
+ * successful.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks to
+ * keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+static int _opp_set_availability(struct device *dev, unsigned long freq,
+				 bool availability_req)
+{
+	struct device_opp *dev_opp;
+	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+	int r = 0;
+
+	/* keep the node allocated */
+	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
+	if (!new_opp)
+		return -ENOMEM;
+
+	mutex_lock(&dev_opp_list_lock);
+
+	/* Find the device_opp */
+	dev_opp = _find_device_opp(dev);
+	if (IS_ERR(dev_opp)) {
+		r = PTR_ERR(dev_opp);
+		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+		goto unlock;
+	}
+
+	/* Do we have the frequency? */
+	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+		if (tmp_opp->rate == freq) {
+			opp = tmp_opp;
+			break;
+		}
+	}
+	if (IS_ERR(opp)) {
+		r = PTR_ERR(opp);
+		goto unlock;
+	}
+
+	/* Is update really needed? */
+	if (opp->available == availability_req)
+		goto unlock;
+	/* copy the old data over */
+	*new_opp = *opp;
+
+	/* plug in new node */
+	new_opp->available = availability_req;
+
+	list_replace_rcu(&opp->node, &new_opp->node);
+	mutex_unlock(&dev_opp_list_lock);
+	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+
+	/* Notify the change of the OPP availability */
+	if (availability_req)
+		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
+					 new_opp);
+	else
+		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
+					 new_opp);
+
+	return 0;
+
+unlock:
+	mutex_unlock(&dev_opp_list_lock);
+	kfree(new_opp);
+	return r;
+}
+
+/**
+ * dev_pm_opp_enable() - Enable a specific OPP
+ * @dev: device for which we do this operation
+ * @freq: OPP frequency to enable
+ *
+ * Enables a provided opp. If the operation is valid, this returns 0, else the
+ * corresponding error value. It is meant to be used by users to make an OPP
+ * available after being temporarily made unavailable with dev_pm_opp_disable.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU and mutex locks to keep the
+ * integrity of the internal data structures.
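/*
 * Editorial sketch, not part of this patch: registering a small dynamic OPP
 * table with dev_pm_opp_add() and parking one entry with the availability
 * helpers documented here. Function name and values are invented.
 */
static int example_register_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 500000000, 950000);	/* 500 MHz, 0.95 V */
	if (ret)
		return ret;

	ret = dev_pm_opp_add(dev, 1000000000, 1100000);	/* 1 GHz, 1.10 V */
	if (ret)
		return ret;

	/* Keep the 1 GHz point registered but unusable for now */
	return dev_pm_opp_disable(dev, 1000000000);
}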
Callers should ensure that + * this function is *NOT* called under RCU protection or in contexts where + * mutex locking or synchronize_rcu() blocking calls cannot be used. + * + * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the + * copy operation, returns 0 if no modification was done OR modification was + * successful. + */ +int dev_pm_opp_enable(struct device *dev, unsigned long freq) +{ + return _opp_set_availability(dev, freq, true); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_enable); + +/** + * dev_pm_opp_disable() - Disable a specific OPP + * @dev: device for which we do this operation + * @freq: OPP frequency to disable + * + * Disables a provided opp. If the operation is valid, this returns + * 0, else the corresponding error value. It is meant to be a temporary + * control by users to make this OPP not available until the circumstances are + * right to make it available again (with a call to dev_pm_opp_enable). + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function indirectly uses RCU and mutex locks to keep the + * integrity of the internal data structures. Callers should ensure that + * this function is *NOT* called under RCU protection or in contexts where + * mutex locking or synchronize_rcu() blocking calls cannot be used. + * + * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the + * copy operation, returns 0 if no modification was done OR modification was + * successful. + */ +int dev_pm_opp_disable(struct device *dev, unsigned long freq) +{ + return _opp_set_availability(dev, freq, false); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_disable); + +/** + * dev_pm_opp_get_notifier() - find notifier_head of the device with opp + * @dev: device pointer used to lookup device OPPs. + * + * Return: pointer to notifier head if found, otherwise -ENODEV or + * -EINVAL based on type of error casted as pointer. value must be checked + * with IS_ERR to determine valid pointer or error result. + * + * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) +{ + struct device_opp *dev_opp = _find_device_opp(dev); + + if (IS_ERR(dev_opp)) + return ERR_CAST(dev_opp); /* matching type */ + + return &dev_opp->srcu_head; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); + +#ifdef CONFIG_OF +/** + * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT + * entries + * @dev: device pointer used to lookup device OPPs. + * + * Free OPPs created using static entries present in DT. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function indirectly uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. 
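/*
 * Editorial sketch, not part of this patch: subscribing to OPP availability
 * events (OPP_EVENT_ADD/REMOVE/ENABLE/DISABLE) through the notifier head
 * returned by dev_pm_opp_get_notifier() above. Names prefixed "example_"
 * are invented.
 */
static int example_opp_notify(struct notifier_block *nb, unsigned long event,
			      void *data)
{
	/* 'data' is the struct dev_pm_opp * the event refers to */
	pr_debug("OPP event %lu\n", event);
	return NOTIFY_OK;
}

static struct notifier_block example_opp_nb = {
	.notifier_call = example_opp_notify,
};

static int example_watch_opps(struct device *dev)
{
	struct srcu_notifier_head *nh;

	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	rcu_read_unlock();
	if (IS_ERR(nh))
		return PTR_ERR(nh);

	return srcu_notifier_chain_register(nh, &example_opp_nb);
}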
+ */
+void dev_pm_opp_of_remove_table(struct device *dev)
+{
+	struct device_opp *dev_opp;
+	struct dev_pm_opp *opp, *tmp;
+
+	/* Hold our list modification lock here */
+	mutex_lock(&dev_opp_list_lock);
+
+	/* Check for existing list for 'dev' */
+	dev_opp = _find_device_opp(dev);
+	if (IS_ERR(dev_opp)) {
+		int error = PTR_ERR(dev_opp);
+
+		if (error != -ENODEV)
+			WARN(1, "%s: dev_opp: %d\n",
+			     IS_ERR_OR_NULL(dev) ?
+					"Invalid device" : dev_name(dev),
+			     error);
+		goto unlock;
+	}
+
+	/* Find if dev_opp manages a single device */
+	if (list_is_singular(&dev_opp->dev_list)) {
+		/* Free static OPPs */
+		list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
+			if (!opp->dynamic)
+				_opp_remove(dev_opp, opp, true);
+		}
+	} else {
+		_remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
+	}
+
+unlock:
+	mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *_of_get_opp_desc_node(struct device *dev)
+{
+	/*
+	 * TODO: Support for multiple OPP tables.
+	 *
+	 * There should be only ONE phandle present in "operating-points-v2"
+	 * property.
+	 */
+
+	return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
+{
+	struct device_node *np;
+	struct device_opp *dev_opp;
+	int ret = 0, count = 0;
+
+	mutex_lock(&dev_opp_list_lock);
+
+	dev_opp = _managed_opp(opp_np);
+	if (dev_opp) {
+		/* OPPs are already managed */
+		if (!_add_list_dev(dev, dev_opp))
+			ret = -ENOMEM;
+		mutex_unlock(&dev_opp_list_lock);
+		return ret;
+	}
+	mutex_unlock(&dev_opp_list_lock);
+
+	/* We have opp-list node now, iterate over it and add OPPs */
+	for_each_available_child_of_node(opp_np, np) {
+		count++;
+
+		ret = _opp_add_static_v2(dev, np);
+		if (ret) {
+			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+				ret);
+			goto free_table;
+		}
+	}
+
+	/* There should be one or more OPPs defined */
+	if (WARN_ON(!count))
+		return -ENOENT;
+
+	mutex_lock(&dev_opp_list_lock);
+
+	dev_opp = _find_device_opp(dev);
+	if (WARN_ON(IS_ERR(dev_opp))) {
+		ret = PTR_ERR(dev_opp);
+		mutex_unlock(&dev_opp_list_lock);
+		goto free_table;
+	}
+
+	dev_opp->np = opp_np;
+	dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+
+	mutex_unlock(&dev_opp_list_lock);
+
+	return 0;
+
+free_table:
+	dev_pm_opp_of_remove_table(dev);
+
+	return ret;
+}
+
+/* Initializes OPP tables based on old-deprecated bindings */
+static int _of_add_opp_table_v1(struct device *dev)
+{
+	const struct property *prop;
+	const __be32 *val;
+	int nr;
+
+	prop = of_find_property(dev->of_node, "operating-points", NULL);
+	if (!prop)
+		return -ENODEV;
+	if (!prop->value)
+		return -ENODATA;
+
+	/*
+	 * Each OPP is a set of tuples consisting of frequency and
+	 * voltage like <freq-kHz vol-uV>.
+	 */
+	nr = prop->length / sizeof(u32);
+	if (nr % 2) {
+		dev_err(dev, "%s: Invalid OPP list\n", __func__);
+		return -EINVAL;
+	}
+
+	val = prop->value;
+	while (nr) {
+		unsigned long freq = be32_to_cpup(val++) * 1000;
+		unsigned long volt = be32_to_cpup(val++);
+
+		if (_opp_add_v1(dev, freq, volt, false))
+			dev_warn(dev, "%s: Failed to add OPP %lu\n",
+				 __func__, freq);
+		nr -= 2;
+	}
+
+	return 0;
+}
+
+/**
+ * dev_pm_opp_of_add_table() - Initialize opp table from device tree
+ * @dev: device pointer used to lookup device OPPs.
+ *
+ * Register the initial OPP table with the OPP library for given device.
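/*
 * Editorial illustration, not part of this patch: the two device-tree forms
 * this function accepts. The deprecated v1 binding inlines <kHz uV> tuples,
 * while v2 references a separate OPP table node ("cpu_opp_table" is an
 * invented label):
 *
 *	operating-points = <998400 1075000>, <1094400 1175000>;
 *
 *	operating-points-v2 = <&cpu_opp_table>;
 *
 * Note the unit change: v1 frequencies are in kHz (hence the "* 1000" in
 * _of_add_opp_table_v1() above), while v2 "opp-hz" values are in Hz.
 */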
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq are same and volt are different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -ENODEV when 'operating-points' property is not found or is invalid data
+ * in device node.
+ * -ENODATA when empty 'operating-points' property is found
+ * -EINVAL when invalid entries are found in opp-v2 table
+ */
+int dev_pm_opp_of_add_table(struct device *dev)
+{
+	struct device_node *opp_np;
+	int ret;
+
+	/*
+	 * OPPs have two versions of bindings now. The older one is deprecated,
+	 * try for the new binding first.
+	 */
+	opp_np = _of_get_opp_desc_node(dev);
+	if (!opp_np) {
+		/*
+		 * Try old-deprecated bindings for backward compatibility with
+		 * older dtbs.
+		 */
+		return _of_add_opp_table_v1(dev);
+	}
+
+	ret = _of_add_opp_table_v2(dev, opp_np);
+	of_node_put(opp_np);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
+#endif
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
new file mode 100644
index 000000000..7b445e88a
--- /dev/null
+++ b/drivers/base/power/opp/cpu.c
@@ -0,0 +1,270 @@
+/*
+ * Generic OPP helper interface for CPU device
+ *
+ * Copyright (C) 2009-2014 Texas Instruments Incorporated.
+ *	Nishanth Menon
+ *	Romit Dasgupta
+ *	Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "opp.h"
+
+#ifdef CONFIG_CPU_FREQ
+
+/**
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
+ * @dev: device for which we do this operation
+ * @table: Cpufreq table returned back to caller
+ *
+ * Generate a cpufreq table for a provided device - this assumes that the
+ * opp list is already initialized and ready for usage.
+ *
+ * This function allocates required memory for the cpufreq table. It is
+ * expected that the caller does the required maintenance such as freeing
+ * the table as required.
+ *
+ * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
+ * if no memory available for the operation (table is not populated), returns 0
+ * if successful and table is populated.
+ *
+ * WARNING: It is important for the callers to ensure refreshing their copy of
+ * the table if any of the mentioned functions have been invoked in the interim.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Since we just use the regular accessor functions to access the internal data
+ * structures, we use RCU read lock inside this function. As a result, users of
+ * this function DO NOT need to use explicit locks for invoking.
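/*
 * Editorial sketch, not part of this patch: typical probe-time use of
 * dev_pm_opp_of_add_table() (defined above) with matching cleanup on
 * driver removal. The platform driver callbacks are invented; error
 * handling is reduced to the essentials.
 */
static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = dev_pm_opp_of_add_table(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "no usable OPP table (%d)\n", ret);
		return ret;
	}

	/* ... clk/regulator setup, cpufreq/devfreq registration ... */

	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	dev_pm_opp_of_remove_table(&pdev->dev);
	return 0;
}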
+ */
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+				  struct cpufreq_frequency_table **table)
+{
+	struct dev_pm_opp *opp;
+	struct cpufreq_frequency_table *freq_table = NULL;
+	int i, max_opps, ret = 0;
+	unsigned long rate;
+
+	rcu_read_lock();
+
+	max_opps = dev_pm_opp_get_opp_count(dev);
+	if (max_opps <= 0) {
+		ret = max_opps ? max_opps : -ENODATA;
+		goto out;
+	}
+
+	freq_table = kcalloc(max_opps + 1, sizeof(*freq_table), GFP_ATOMIC);
+	if (!freq_table) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0, rate = 0; i < max_opps; i++, rate++) {
+		/* find next rate */
+		opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+		if (IS_ERR(opp)) {
+			ret = PTR_ERR(opp);
+			goto out;
+		}
+		freq_table[i].driver_data = i;
+		freq_table[i].frequency = rate / 1000;
+
+		/* Is this a boost/turbo OPP? */
+		if (dev_pm_opp_is_turbo(opp))
+			freq_table[i].flags = CPUFREQ_BOOST_FREQ;
+	}
+
+	freq_table[i].driver_data = i;
+	freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+	*table = &freq_table[0];
+
+out:
+	rcu_read_unlock();
+	if (ret)
+		kfree(freq_table);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
+
+/**
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
+ * @dev:	device for which we do this operation
+ * @table:	table to free
+ *
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table()
+ */
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+				   struct cpufreq_frequency_table **table)
+{
+	if (!table)
+		return;
+
+	kfree(*table);
+	*table = NULL;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
+#endif	/* CONFIG_CPU_FREQ */
+
+/* Required only for V1 bindings, as v2 can manage it from DT itself */
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+	struct device_list_opp *list_dev;
+	struct device_opp *dev_opp;
+	struct device *dev;
+	int cpu, ret = 0;
+
+	mutex_lock(&dev_opp_list_lock);
+
+	dev_opp = _find_device_opp(cpu_dev);
+	if (IS_ERR(dev_opp)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	for_each_cpu(cpu, cpumask) {
+		if (cpu == cpu_dev->id)
+			continue;
+
+		dev = get_cpu_device(cpu);
+		if (!dev) {
+			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+				__func__, cpu);
+			continue;
+		}
+
+		list_dev = _add_list_dev(dev, dev_opp);
+		if (!list_dev) {
+			dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+				__func__, cpu);
+			continue;
+		}
+	}
+unlock:
+	mutex_unlock(&dev_opp_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
+
+#ifdef CONFIG_OF
+void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+{
+	struct device *cpu_dev;
+	int cpu;
+
+	WARN_ON(cpumask_empty(cpumask));
+
+	for_each_cpu(cpu, cpumask) {
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_err("%s: failed to get cpu%d device\n", __func__,
+			       cpu);
+			continue;
+		}
+
+		dev_pm_opp_of_remove_table(cpu_dev);
+	}
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
+
+int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
+{
+	struct device *cpu_dev;
+	int cpu, ret = 0;
+
+	WARN_ON(cpumask_empty(cpumask));
+
+	for_each_cpu(cpu, cpumask) {
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_err("%s: failed to get cpu%d device\n", __func__,
+			       cpu);
+			continue;
+		}
+
+		ret = dev_pm_opp_of_add_table(cpu_dev);
+		if (ret) {
+			pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+			       __func__, cpu, ret);
+
+			/* Free all other OPPs */
+			dev_pm_opp_of_cpumask_remove_table(cpumask);
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
+
+/*
+ * Works only for OPP v2 bindings.
+ *
+ * cpumask should be already set to mask of cpu_dev->id.
+ * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ */
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+	struct device_node *np, *tmp_np;
+	struct device *tcpu_dev;
+	int cpu, ret = 0;
+
+	/* Get OPP descriptor node */
+	np = _of_get_opp_desc_node(cpu_dev);
+	if (!np) {
+		dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
+		return -ENOENT;
+	}
+
+	/* Are OPPs shared? */
+	if (!of_property_read_bool(np, "opp-shared"))
+		goto put_cpu_node;
+
+	for_each_possible_cpu(cpu) {
+		if (cpu == cpu_dev->id)
+			continue;
+
+		tcpu_dev = get_cpu_device(cpu);
+		if (!tcpu_dev) {
+			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+				__func__, cpu);
+			ret = -ENODEV;
+			goto put_cpu_node;
+		}
+
+		/* Get OPP descriptor node */
+		tmp_np = _of_get_opp_desc_node(tcpu_dev);
+		if (!tmp_np) {
+			dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
+				__func__);
+			ret = -ENOENT;
+			goto put_cpu_node;
+		}
+
+		/* CPUs are sharing opp node */
+		if (np == tmp_np)
+			cpumask_set_cpu(cpu, cpumask);
+
+		of_node_put(tmp_np);
+	}
+
+put_cpu_node:
+	of_node_put(np);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
+#endif
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
new file mode 100644
index 000000000..7366b2aa8
--- /dev/null
+++ b/drivers/base/power/opp/opp.h
@@ -0,0 +1,146 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ *	Nishanth Menon
+ *	Romit Dasgupta
+ *	Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DRIVER_OPP_H__
+#define __DRIVER_OPP_H__
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pm_opp.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+
+/* Lock to allow exclusive modification to the device and opp lists */
+extern struct mutex dev_opp_list_lock;
+
+/*
+ * Internal data structure organization with the OPP layer library is as
+ * follows:
+ * dev_opp_list (root)
+ *	|- device 1 (represents voltage domain 1)
+ *	|	|- opp 1 (availability, freq, voltage)
+ *	|	|- opp 2 ..
+ *	...	...
+ *	|	`- opp n ..
+ *	|- device 2 (represents the next voltage domain)
+ *	...
+ *	`- device m (represents mth voltage domain)
+ * device 1, 2.. are represented by dev_opp structure while each opp
+ * is represented by the opp structure.
+ */
+
+/**
+ * struct dev_pm_opp - Generic OPP description structure
+ * @node:	opp list node. The nodes are maintained throughout the lifetime
+ *		of the system after boot. It is expected that only an optimal
+ *		set of OPPs is added to the library by the SoC framework.
+ *		RCU usage: opp list is traversed with RCU locks. node
+ *		modification is possible at runtime, hence the modifications
+ *		are protected by the dev_opp_list_lock for integrity.
+ *		IMPORTANT: the opp nodes should be maintained in increasing
+ *		order.
+ * @dynamic:	true if the OPP was not created from static DT entries but
+ *		added dynamically at runtime.
+ * @available:	true/false - marks whether this OPP is available or not
+ * @turbo:	true if turbo (boost) OPP
+ * @rate:	Frequency in hertz
+ * @u_volt:	Target voltage in microvolts corresponding to this OPP
+ * @u_volt_min:	Minimum voltage in microvolts corresponding to this OPP
+ * @u_volt_max:	Maximum voltage in microvolts corresponding to this OPP
+ * @u_amp:	Maximum current drawn by the device in microamperes
+ * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
+ *		frequency from any other OPP's frequency.
+ * @dev_opp:	points back to the device_opp struct this opp belongs to
+ * @rcu_head:	RCU callback head used for deferred freeing
+ * @np:	OPP's device node.
+ *
+ * This structure stores the OPP information for a given device.
+ */
+struct dev_pm_opp {
+	struct list_head node;
+
+	bool available;
+	bool dynamic;
+	bool turbo;
+	unsigned long rate;
+
+	unsigned long u_volt;
+	unsigned long u_volt_min;
+	unsigned long u_volt_max;
+	unsigned long u_amp;
+	unsigned long clock_latency_ns;
+
+	struct device_opp *dev_opp;
+	struct rcu_head rcu_head;
+
+	struct device_node *np;
+};
+
+/**
+ * struct device_list_opp - devices managed by 'struct device_opp'
+ * @node:	list node
+ * @dev:	device to which the struct object belongs
+ * @rcu_head:	RCU callback head used for deferred freeing
+ *
+ * This is an internal data structure maintaining the list of devices that are
+ * managed by 'struct device_opp'.
+ */
+struct device_list_opp {
+	struct list_head node;
+	const struct device *dev;
+	struct rcu_head rcu_head;
+};
+
+/**
+ * struct device_opp - Device opp structure
+ * @node:	list node - contains the devices with OPPs that
+ *		have been registered. Nodes once added are not modified in this
+ *		list.
+ *		RCU usage: nodes are not modified in the list of device_opp,
+ *		however addition is possible and is secured by dev_opp_list_lock
+ * @srcu_head:	notifier head to notify the OPP availability changes.
+ * @rcu_head:	RCU callback head used for deferred freeing
+ * @dev_list:	list of devices that share these OPPs
+ * @opp_list:	list of opps
+ * @np:	struct device_node pointer for opp's DT node.
+ * @clock_latency_ns_max: maximum clock latency (in nanoseconds) of the OPPs
+ * @shared_opp: OPP is shared between multiple devices.
+ * @suspend_opp: pointer to the OPP to be used during device suspend.
+ *
+ * This is an internal data structure maintaining the link to opps attached to
+ * a device. This structure is not meant to be shared with users as it is
+ * meant for bookkeeping and private to the OPP library.
+ *
+ * Because the opp structures can be used from both RCU and SRCU readers, we
+ * need to wait for the grace period of both of them before freeing any
+ * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
+ */ +struct device_opp { + struct list_head node; + + struct srcu_notifier_head srcu_head; + struct rcu_head rcu_head; + struct list_head dev_list; + struct list_head opp_list; + + struct device_node *np; + unsigned long clock_latency_ns_max; + bool shared_opp; + struct dev_pm_opp *suspend_opp; +}; + +/* Routines internal to opp core */ +struct device_opp *_find_device_opp(struct device *dev); +struct device_list_opp *_add_list_dev(const struct device *dev, + struct device_opp *dev_opp); +struct device_node *_of_get_opp_desc_node(struct device *dev); + +#endif /* __DRIVER_OPP_H__ */ diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index eb6e67451..0d77cd6fd 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq) struct wake_irq *wirq; int err; + if (irq < 0) + return -EINVAL; + wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); if (!wirq) return -ENOMEM; @@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) struct wake_irq *wirq; int err; + if (irq < 0) + return -EINVAL; + wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); if (!wirq) return -ENOMEM; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 51f15bc15..a1e0b9ab8 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -25,6 +25,9 @@ */ bool events_check_enabled __read_mostly; +/* First wakeup IRQ seen by the kernel in the last cycle. */ +unsigned int pm_wakeup_irq __read_mostly; + /* If set and the system is suspending, terminate the suspend. */ static bool pm_abort_suspend __read_mostly; @@ -91,7 +94,7 @@ struct wakeup_source *wakeup_source_create(const char *name) if (!ws) return NULL; - wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL); + wakeup_source_prepare(ws, name ? kstrdup_const(name, GFP_KERNEL) : NULL); return ws; } EXPORT_SYMBOL_GPL(wakeup_source_create); @@ -154,7 +157,7 @@ void wakeup_source_destroy(struct wakeup_source *ws) wakeup_source_drop(ws); wakeup_source_record(ws); - kfree(ws->name); + kfree_const(ws->name); kfree(ws); } EXPORT_SYMBOL_GPL(wakeup_source_destroy); @@ -868,6 +871,15 @@ EXPORT_SYMBOL_GPL(pm_system_wakeup); void pm_wakeup_clear(void) { pm_abort_suspend = false; + pm_wakeup_irq = 0; +} + +void pm_system_irq_wakeup(unsigned int irq_number) +{ + if (pm_wakeup_irq == 0) { + pm_wakeup_irq = irq_number; + pm_system_wakeup(); + } } /** -- cgit v1.2.3-54-g00ecf
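A minimal sketch of how a platform driver might consume the table-registration
interface in the patch above. The foo_* names are hypothetical, error handling
is reduced to the essentials, and the driver boilerplate is omitted:

	#include <linux/platform_device.h>
	#include <linux/pm_opp.h>
	#include <linux/rcupdate.h>

	/* Hypothetical probe: not part of the patch above. */
	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int count, ret;

		/*
		 * Tries the "operating-points-v2" binding first and falls
		 * back to the deprecated "operating-points" (v1) property.
		 */
		ret = dev_pm_opp_of_add_table(dev);
		if (ret)
			return ret;

		/* Hold the RCU read lock around OPP accessors to be safe. */
		rcu_read_lock();
		count = dev_pm_opp_get_opp_count(dev);
		rcu_read_unlock();

		dev_info(dev, "%d OPPs registered\n", count);
		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		/* Drops the static OPPs that probe added from DT. */
		dev_pm_opp_of_remove_table(&pdev->dev);
		return 0;
	}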
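The RCU scheme documented in opp.h works out, for a reader, to the pattern
below: take the RCU read lock, look up an OPP, extract what is needed, and
drop the lock before the OPP pointer is used again. foo_next_voltage is a
hypothetical helper, not something the patch defines:

	#include <linux/err.h>
	#include <linux/pm_opp.h>
	#include <linux/rcupdate.h>

	/* Returns the voltage of the lowest OPP at or above *freq, or 0. */
	static unsigned long foo_next_voltage(struct device *dev,
					      unsigned long *freq)
	{
		struct dev_pm_opp *opp;
		unsigned long u_volt = 0;

		rcu_read_lock();
		/* On success, *freq is updated to the matched OPP's rate. */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);
		if (!IS_ERR(opp))
			u_volt = dev_pm_opp_get_voltage(opp);
		rcu_read_unlock();	/* opp must not be used past this */

		return u_volt;
	}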
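A sketch of the intended consumer of dev_pm_opp_init_cpufreq_table(): a
cpufreq driver's init callback. foo_cpufreq_init is hypothetical, and this
assumes the CPU's OPP table was registered earlier (e.g. in a probe as above):

	#include <linux/cpu.h>
	#include <linux/cpufreq.h>
	#include <linux/pm_opp.h>

	static int foo_cpufreq_init(struct cpufreq_policy *policy)
	{
		struct device *cpu_dev = get_cpu_device(policy->cpu);
		struct cpufreq_frequency_table *freq_table;
		int ret;

		if (!cpu_dev)
			return -ENODEV;

		/* Builds a CPUFREQ_TABLE_END-terminated table from OPPs. */
		ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
		if (ret)
			return ret;

		ret = cpufreq_table_validate_and_show(policy, freq_table);
		if (ret)
			dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);

		return ret;
	}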
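The two sharing interfaces in cpu.c complement each other: with v2 bindings
the "opp-shared" DT flag can be read back, while with v1 bindings the driver
has to tell the OPP core about the topology itself. A hedged sketch, where
foo_setup_shared_opps and the all-CPUs fallback are assumptions of this
example rather than anything the patch mandates:

	#include <linux/cpumask.h>
	#include <linux/pm_opp.h>
	#include <linux/slab.h>

	static int foo_setup_shared_opps(struct device *cpu_dev)
	{
		cpumask_var_t cpus;
		int ret;

		if (!zalloc_cpumask_var(&cpus, GFP_KERNEL))
			return -ENOMEM;

		/* The helper expects the mask to start with cpu_dev's id. */
		cpumask_set_cpu(cpu_dev->id, cpus);

		/* v2 bindings: sharing is described by "opp-shared" in DT. */
		ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, cpus);
		if (ret == -ENOENT) {
			/*
			 * v1 bindings carry no sharing information; assume
			 * here (illustratively) that all CPUs share a table.
			 */
			cpumask_setall(cpus);
			ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpus);
		}

		free_cpumask_var(cpus);
		return ret;
	}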
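Finally, the wakeirq.c hunks add an early sanity check for negative IRQ
numbers. A sketch of the call site they protect; foo_setup_wakeirq is a
hypothetical probe fragment:

	#include <linux/platform_device.h>
	#include <linux/pm_wakeirq.h>
	#include <linux/pm_wakeup.h>

	static int foo_setup_wakeirq(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		/*
		 * With the check added above, a negative value (such as a
		 * platform_get_irq() failure passed through unchecked) now
		 * fails fast with -EINVAL instead of being registered as a
		 * bogus wake IRQ.
		 */
		if (irq < 0)
			return irq;

		device_init_wakeup(&pdev->dev, true);
		return dev_pm_set_dedicated_wake_irq(&pdev->dev, irq);
	}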