author    | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-09-08 01:01:14 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-09-08 01:01:14 -0300
commit    | e5fd91f1ef340da553f7a79da9540c3db711c937
tree      | b11842027dc6641da63f4bcc524f8678263304a3 /drivers/cpufreq/cpufreq.c
parent    | 2a9b0348e685a63d97486f6749622b61e9e3292f
Linux-libre 4.2-gnu
Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r-- | drivers/cpufreq/cpufreq.c | 626
1 file changed, 353 insertions(+), 273 deletions(-)
```diff
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d99bdf166..7a3c30c43 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -25,17 +25,68 @@
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/tick.h>
 #include <trace/events/power.h>
 
-/* Macros to iterate over lists */
-/* Iterate over online CPUs policies */
 static LIST_HEAD(cpufreq_policy_list);
-#define for_each_policy(__policy) \
+
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+	return cpumask_empty(policy->cpus);
+}
+
+static bool suitable_policy(struct cpufreq_policy *policy, bool active)
+{
+	return active == !policy_is_inactive(policy);
+}
+
+/* Finds Next Acive/Inactive policy */
+static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
+					  bool active)
+{
+	do {
+		policy = list_next_entry(policy, policy_list);
+
+		/* No more policies in the list */
+		if (&policy->policy_list == &cpufreq_policy_list)
+			return NULL;
+	} while (!suitable_policy(policy, active));
+
+	return policy;
+}
+
+static struct cpufreq_policy *first_policy(bool active)
+{
+	struct cpufreq_policy *policy;
+
+	/* No policies in the list */
+	if (list_empty(&cpufreq_policy_list))
+		return NULL;
+
+	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
+				  policy_list);
+
+	if (!suitable_policy(policy, active))
+		policy = next_policy(policy, active);
+
+	return policy;
+}
+
+/* Macros to iterate over CPU policies */
+#define for_each_suitable_policy(__policy, __active)	\
+	for (__policy = first_policy(__active);		\
+	     __policy;					\
+	     __policy = next_policy(__policy, __active))
+
+#define for_each_active_policy(__policy)		\
+	for_each_suitable_policy(__policy, true)
+#define for_each_inactive_policy(__policy)		\
+	for_each_suitable_policy(__policy, false)
+
+#define for_each_policy(__policy)			\
 	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
 
 /* Iterate over governors */
@@ -50,13 +101,9 @@ static LIST_HEAD(cpufreq_governor_list);
  */
 static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
-static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
 static DEFINE_RWLOCK(cpufreq_driver_lock);
 DEFINE_MUTEX(cpufreq_governor_lock);
 
-/* This one keeps track of the previously set governor of a removed CPU */
-static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-
 /* Flag to suspend/resume CPUFreq governors */
 static bool cpufreq_suspended;
 
@@ -122,6 +169,15 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 }
 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
 
+struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+	return policy && !policy_is_inactive(policy) ?
+		policy->freq_table : NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
+
 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
 	u64 idle_time;
@@ -179,7 +235,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
 	policy->cpuinfo.transition_latency = transition_latency;
 
 	/*
-	 * The driver only supports the SMP configuartion where all processors
+	 * The driver only supports the SMP configuration where all processors
 	 * share the clock and voltage and clock.
 	 */
 	cpumask_setall(policy->cpus);
@@ -188,10 +244,18 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
 
-unsigned int cpufreq_generic_get(unsigned int cpu)
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
+	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
+}
+
+unsigned int cpufreq_generic_get(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+
 	if (!policy || IS_ERR(policy->clk)) {
 		pr_err("%s: No %s associated to cpu: %d\n",
 		       __func__, policy ? "clk" : "policy", cpu);
@@ -202,18 +266,29 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
 
-/* Only for cpufreq core internal use */
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
-{
-	return per_cpu(cpufreq_cpu_data, cpu);
-}
-
+/**
+ * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
+ *
+ * @cpu: cpu to find policy for.
+ *
+ * This returns policy for 'cpu', returns NULL if it doesn't exist.
+ * It also increments the kobject reference count to mark it busy and so would
+ * require a corresponding call to cpufreq_cpu_put() to decrement it back.
+ * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
+ * freed as that depends on the kobj count.
+ *
+ * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
+ * valid policy is found. This is done to make sure the driver doesn't get
+ * unregistered while the policy is being used.
+ *
+ * Return: A valid policy on success, otherwise NULL on failure.
+ */
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = NULL;
 	unsigned long flags;
 
-	if (cpu >= nr_cpu_ids)
+	if (WARN_ON(cpu >= nr_cpu_ids))
 		return NULL;
 
 	if (!down_read_trylock(&cpufreq_rwsem))
@@ -224,7 +299,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 
 	if (cpufreq_driver) {
 		/* get the CPU */
-		policy = per_cpu(cpufreq_cpu_data, cpu);
+		policy = cpufreq_cpu_get_raw(cpu);
 		if (policy)
 			kobject_get(&policy->kobj);
 	}
@@ -238,6 +313,16 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
 
+/**
+ * cpufreq_cpu_put: Decrements the usage count of a policy
+ *
+ * @policy: policy earlier returned by cpufreq_cpu_get().
+ *
+ * This decrements the kobject reference count incremented earlier by calling
+ * cpufreq_cpu_get().
+ *
+ * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
+ */
 void cpufreq_cpu_put(struct cpufreq_policy *policy)
 {
 	kobject_put(&policy->kobj);
@@ -799,11 +884,18 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 
 	down_write(&policy->rwsem);
 
+	/* Updating inactive policies is invalid, so avoid doing that. */
+	if (unlikely(policy_is_inactive(policy))) {
+		ret = -EBUSY;
+		goto unlock_policy_rwsem;
+	}
+
 	if (fattr->store)
 		ret = fattr->store(policy, buf, count);
 	else
 		ret = -EIO;
 
+unlock_policy_rwsem:
 	up_write(&policy->rwsem);
 
 	up_read(&cpufreq_rwsem);
@@ -874,28 +966,67 @@ void cpufreq_sysfs_remove_file(const struct attribute *attr)
 }
 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
 
-/* symlink affected CPUs */
+static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
+{
+	struct device *cpu_dev;
+
+	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
+
+	if (!policy)
+		return 0;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (WARN_ON(!cpu_dev))
+		return 0;
+
+	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
+}
+
+static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
+{
+	struct device *cpu_dev;
+
+	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
+
+	cpu_dev = get_cpu_device(cpu);
+	if (WARN_ON(!cpu_dev))
+		return;
+
+	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+}
+
+/* Add/remove symlinks for all related CPUs */
 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
 {
 	unsigned int j;
 	int ret = 0;
 
-	for_each_cpu(j, policy->cpus) {
-		struct device *cpu_dev;
-
-		if (j == policy->cpu)
+	/* Some related CPUs might not be present (physically hotplugged) */
+	for_each_cpu(j, policy->real_cpus) {
+		if (j == policy->kobj_cpu)
 			continue;
 
-		pr_debug("Adding link for CPU: %u\n", j);
-		cpu_dev = get_cpu_device(j);
-		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
-					"cpufreq");
+		ret = add_cpu_dev_symlink(policy, j);
 		if (ret)
 			break;
 	}
+
 	return ret;
 }
 
+static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
+{
+	unsigned int j;
+
+	/* Some related CPUs might not be present (physically hotplugged) */
+	for_each_cpu(j, policy->real_cpus) {
+		if (j == policy->kobj_cpu)
+			continue;
+
+		remove_cpu_dev_symlink(policy, j);
+	}
+}
+
 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
 				     struct device *dev)
 {
@@ -938,7 +1069,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
 	memcpy(&new_policy, policy, sizeof(*policy));
 
 	/* Update governor of new_policy to the governor used before hotplug */
-	gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+	gov = find_governor(policy->last_governor);
 	if (gov)
 		pr_debug("Restoring governor %s for cpu %d\n",
 				policy->governor->name, policy->cpu);
@@ -964,7 +1095,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 				  unsigned int cpu, struct device *dev)
 {
 	int ret = 0;
-	unsigned long flags;
+
+	/* Has this CPU been taken care of already? */
+	if (cpumask_test_cpu(cpu, policy->cpus))
+		return 0;
 
 	if (has_target()) {
 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
@@ -975,13 +1109,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 	}
 
 	down_write(&policy->rwsem);
-
-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-
 	cpumask_set_cpu(cpu, policy->cpus);
-	per_cpu(cpufreq_cpu_data, cpu) = policy;
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
 	up_write(&policy->rwsem);
 
 	if (has_target()) {
@@ -995,7 +1123,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 		}
 	}
 
-	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+	return 0;
 }
 
 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
@@ -1004,20 +1132,26 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 	unsigned long flags;
 
 	read_lock_irqsave(&cpufreq_driver_lock, flags);
-
-	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
-
+	policy = per_cpu(cpufreq_cpu_data, cpu);
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	if (policy)
+	if (likely(policy)) {
+		/* Policy should be inactive here */
+		WARN_ON(!policy_is_inactive(policy));
+
+		down_write(&policy->rwsem);
+		policy->cpu = cpu;
 		policy->governor = NULL;
+		up_write(&policy->rwsem);
+	}
 
 	return policy;
 }
 
-static struct cpufreq_policy *cpufreq_policy_alloc(void)
+static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
 {
 	struct cpufreq_policy *policy;
+	int ret;
 
 	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
 	if (!policy)
@@ -1029,6 +1163,16 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
 		goto err_free_cpumask;
 
+	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
+		goto err_free_rcpumask;
+
+	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
+				   "cpufreq");
+	if (ret) {
+		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+		goto err_free_real_cpus;
+	}
+
 	INIT_LIST_HEAD(&policy->policy_list);
 	init_rwsem(&policy->rwsem);
 	spin_lock_init(&policy->transition_lock);
@@ -1036,8 +1180,17 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update);
 
+	policy->cpu = dev->id;
+
+	/* Set this once on allocation */
+	policy->kobj_cpu = dev->id;
+
 	return policy;
 
+err_free_real_cpus:
+	free_cpumask_var(policy->real_cpus);
+err_free_rcpumask:
+	free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
 	free_cpumask_var(policy->cpus);
 err_free_policy:
@@ -1046,18 +1199,20 @@ err_free_policy:
 	return NULL;
 }
 
-static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
 {
 	struct kobject *kobj;
 	struct completion *cmp;
 
-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-			CPUFREQ_REMOVE_POLICY, policy);
+	if (notify)
+		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+					     CPUFREQ_REMOVE_POLICY, policy);
 
-	down_read(&policy->rwsem);
+	down_write(&policy->rwsem);
+	cpufreq_remove_dev_symlink(policy);
 	kobj = &policy->kobj;
 	cmp = &policy->kobj_unregister;
-	up_read(&policy->rwsem);
+	up_write(&policy->rwsem);
 	kobject_put(kobj);
 
 	/*
@@ -1070,68 +1225,68 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
 	pr_debug("wait complete\n");
 }
 
-static void cpufreq_policy_free(struct cpufreq_policy *policy)
-{
-	free_cpumask_var(policy->related_cpus);
-	free_cpumask_var(policy->cpus);
-	kfree(policy);
-}
-
-static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
-			     struct device *cpu_dev)
+static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
 {
-	int ret;
-
-	if (WARN_ON(cpu == policy->cpu))
-		return 0;
+	unsigned long flags;
+	int cpu;
 
-	/* Move kobject to the new policy->cpu */
-	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
-	if (ret) {
-		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
-		return ret;
-	}
+	/* Remove policy from list */
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
+	list_del(&policy->policy_list);
 
-	down_write(&policy->rwsem);
-	policy->cpu = cpu;
-	up_write(&policy->rwsem);
+	for_each_cpu(cpu, policy->related_cpus)
+		per_cpu(cpufreq_cpu_data, cpu) = NULL;
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	return 0;
+	cpufreq_policy_put_kobj(policy, notify);
+	free_cpumask_var(policy->real_cpus);
+	free_cpumask_var(policy->related_cpus);
+	free_cpumask_var(policy->cpus);
+	kfree(policy);
 }
 
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int j, cpu = dev->id;
 	int ret = -ENOMEM;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
-	bool recover_policy = cpufreq_suspended;
-
-	if (cpu_is_offline(cpu))
-		return 0;
+	bool recover_policy = !sif;
 
 	pr_debug("adding CPU %u\n", cpu);
 
-	/* check whether a different CPU already registered this
-	 * CPU because it is in the same boat. */
-	policy = cpufreq_cpu_get_raw(cpu);
-	if (unlikely(policy))
-		return 0;
+	if (cpu_is_offline(cpu)) {
+		/*
+		 * Only possible if we are here from the subsys_interface add
+		 * callback. A hotplug notifier will follow and we will handle
+		 * it as CPU online then. For now, just create the sysfs link,
+		 * unless there is no policy or the link is already present.
+		 */
+		policy = per_cpu(cpufreq_cpu_data, cpu);
+		return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+			? add_cpu_dev_symlink(policy, cpu) : 0;
+	}
 
 	if (!down_read_trylock(&cpufreq_rwsem))
 		return 0;
 
-	/* Check if this cpu was hot-unplugged earlier and has siblings */
-	read_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_policy(policy) {
-		if (cpumask_test_cpu(cpu, policy->related_cpus)) {
-			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-			ret = cpufreq_add_policy_cpu(policy, cpu, dev);
-			up_read(&cpufreq_rwsem);
-			return ret;
-		}
+	/* Check if this CPU already has a policy to manage it */
+	policy = per_cpu(cpufreq_cpu_data, cpu);
+	if (policy && !policy_is_inactive(policy)) {
+		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+		ret = cpufreq_add_policy_cpu(policy, cpu, dev);
+		up_read(&cpufreq_rwsem);
+		return ret;
 	}
-	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	/*
 	 * Restore the saved policy when doing light-weight init and fall back
@@ -1140,22 +1295,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
 	if (!policy) {
 		recover_policy = false;
-		policy = cpufreq_policy_alloc();
+		policy = cpufreq_policy_alloc(dev);
 		if (!policy)
 			goto nomem_out;
 	}
 
-	/*
-	 * In the resume path, since we restore a saved policy, the assignment
-	 * to policy->cpu is like an update of the existing policy, rather than
-	 * the creation of a brand new one. So we need to perform this update
-	 * by invoking update_policy_cpu().
-	 */
-	if (recover_policy && cpu != policy->cpu)
-		WARN_ON(update_policy_cpu(policy, cpu, dev));
-	else
-		policy->cpu = cpu;
-
 	cpumask_copy(policy->cpus, cpumask_of(cpu));
 
 	/* call driver. From then on the cpufreq must be able
@@ -1172,6 +1316,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	/* related cpus should atleast have policy->cpus */
 	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
+	/* Remember which CPUs have been present at the policy creation time. */
+	if (!recover_policy)
+		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+
 	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
@@ -1182,21 +1330,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 		policy->user_policy.min = policy->min;
 		policy->user_policy.max = policy->max;
 
-		/* prepare interface data */
-		ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
-					   &dev->kobj, "cpufreq");
-		if (ret) {
-			pr_err("%s: failed to init policy->kobj: %d\n",
-			       __func__, ret);
-			goto err_init_policy_kobj;
-		}
+		write_lock_irqsave(&cpufreq_driver_lock, flags);
+		for_each_cpu(j, policy->related_cpus)
+			per_cpu(cpufreq_cpu_data, j) = policy;
+		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	}
 
-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu(j, policy->cpus)
-		per_cpu(cpufreq_cpu_data, j) = policy;
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
 		policy->cur = cpufreq_driver->get(policy->cpu);
 		if (!policy->cur) {
@@ -1254,11 +1393,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 			goto err_out_unregister;
 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				CPUFREQ_CREATE_POLICY, policy);
-	}
 
-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-	list_add(&policy->policy_list, &cpufreq_policy_list);
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+		write_lock_irqsave(&cpufreq_driver_lock, flags);
+		list_add(&policy->policy_list, &cpufreq_policy_list);
+		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	}
 
 	cpufreq_init_policy(policy);
 
@@ -1282,68 +1421,27 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
 err_out_unregister:
 err_get_freq:
-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu(j, policy->cpus)
-		per_cpu(cpufreq_cpu_data, j) = NULL;
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-	if (!recover_policy) {
-		kobject_put(&policy->kobj);
-		wait_for_completion(&policy->kobj_unregister);
-	}
-err_init_policy_kobj:
 	up_write(&policy->rwsem);
 
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
-	if (recover_policy) {
-		/* Do not leave stale fallback data behind. */
-		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
-		cpufreq_policy_put_kobj(policy);
-	}
-	cpufreq_policy_free(policy);
-
+	cpufreq_policy_free(policy, recover_policy);
nomem_out:
 	up_read(&cpufreq_rwsem);
 
 	return ret;
 }
 
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+static int __cpufreq_remove_dev_prepare(struct device *dev)
 {
-	return __cpufreq_add_dev(dev, sif);
-}
-
-static int __cpufreq_remove_dev_prepare(struct device *dev,
-					struct subsys_interface *sif)
-{
-	unsigned int cpu = dev->id, cpus;
-	int ret;
-	unsigned long flags;
+	unsigned int cpu = dev->id;
+	int ret = 0;
 	struct cpufreq_policy *policy;
 
 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-
-	policy = per_cpu(cpufreq_cpu_data, cpu);
-
-	/* Save the policy somewhere when doing a light-weight tear-down */
-	if (cpufreq_suspended)
-		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
-
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
+	policy = cpufreq_cpu_get_raw(cpu);
 	if (!policy) {
 		pr_debug("%s: No cpu_data found\n", __func__);
 		return -EINVAL;
@@ -1351,112 +1449,70 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 
 	if (has_target()) {
 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-		if (ret) {
+		if (ret)
 			pr_err("%s: Failed to stop governor\n", __func__);
-			return ret;
-		}
-
-		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
-			policy->governor->name, CPUFREQ_NAME_LEN);
 	}
 
-	down_read(&policy->rwsem);
-	cpus = cpumask_weight(policy->cpus);
-	up_read(&policy->rwsem);
+	down_write(&policy->rwsem);
+	cpumask_clear_cpu(cpu, policy->cpus);
 
-	if (cpu != policy->cpu) {
-		sysfs_remove_link(&dev->kobj, "cpufreq");
-	} else if (cpus > 1) {
+	if (policy_is_inactive(policy)) {
+		if (has_target())
+			strncpy(policy->last_governor, policy->governor->name,
+				CPUFREQ_NAME_LEN);
+	} else if (cpu == policy->cpu) {
 		/* Nominate new CPU */
-		int new_cpu = cpumask_any_but(policy->cpus, cpu);
-		struct device *cpu_dev = get_cpu_device(new_cpu);
+		policy->cpu = cpumask_any(policy->cpus);
+	}
+	up_write(&policy->rwsem);
 
-		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
-		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
-		if (ret) {
-			if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
-					      "cpufreq"))
-				pr_err("%s: Failed to restore kobj link to cpu:%d\n",
-				       __func__, cpu_dev->id);
-			return ret;
-		}
+	/* Start governor again for active policy */
+	if (!policy_is_inactive(policy)) {
+		if (has_target()) {
+			ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+			if (!ret)
+				ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 
-		if (!cpufreq_suspended)
-			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
-				 __func__, new_cpu, cpu);
+			if (ret)
+				pr_err("%s: Failed to start governor\n", __func__);
+		}
 	} else if (cpufreq_driver->stop_cpu) {
 		cpufreq_driver->stop_cpu(policy);
 	}
 
-	return 0;
+	return ret;
 }
 
-static int __cpufreq_remove_dev_finish(struct device *dev,
-				       struct subsys_interface *sif)
+static int __cpufreq_remove_dev_finish(struct device *dev)
 {
-	unsigned int cpu = dev->id, cpus;
+	unsigned int cpu = dev->id;
 	int ret;
-	unsigned long flags;
-	struct cpufreq_policy *policy;
-
-	write_lock_irqsave(&cpufreq_driver_lock, flags);
-	policy = per_cpu(cpufreq_cpu_data, cpu);
-	per_cpu(cpufreq_cpu_data, cpu) = NULL;
-	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	if (!policy) {
 		pr_debug("%s: No cpu_data found\n", __func__);
 		return -EINVAL;
 	}
 
-	down_write(&policy->rwsem);
-	cpus = cpumask_weight(policy->cpus);
-
-	if (cpus > 1)
-		cpumask_clear_cpu(cpu, policy->cpus);
-	up_write(&policy->rwsem);
+	/* Only proceed for inactive policies */
+	if (!policy_is_inactive(policy))
+		return 0;
 
 	/* If cpu is last user of policy, free policy */
-	if (cpus == 1) {
-		if (has_target()) {
-			ret = __cpufreq_governor(policy,
-					CPUFREQ_GOV_POLICY_EXIT);
-			if (ret) {
-				pr_err("%s: Failed to exit governor\n",
-				       __func__);
-				return ret;
-			}
-		}
-
-		if (!cpufreq_suspended)
-			cpufreq_policy_put_kobj(policy);
-
-		/*
-		 * Perform the ->exit() even during light-weight tear-down,
-		 * since this is a core component, and is essential for the
-		 * subsequent light-weight ->init() to succeed.
-		 */
-		if (cpufreq_driver->exit)
-			cpufreq_driver->exit(policy);
-
-		/* Remove policy from list of active policies */
-		write_lock_irqsave(&cpufreq_driver_lock, flags);
-		list_del(&policy->policy_list);
-		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-		if (!cpufreq_suspended)
-			cpufreq_policy_free(policy);
-	} else if (has_target()) {
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
-		if (!ret)
-			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
-
-		if (ret) {
-			pr_err("%s: Failed to start governor\n", __func__);
-			return ret;
-		}
+	if (has_target()) {
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		if (ret)
+			pr_err("%s: Failed to exit governor\n", __func__);
 	}
 
+	/*
	 * Perform the ->exit() even during light-weight tear-down,
	 * since this is a core component, and is essential for the
	 * subsequent light-weight ->init() to succeed.
	 */
+	if (cpufreq_driver->exit)
+		cpufreq_driver->exit(policy);
+
 	return 0;
 }
 
@@ -1468,17 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id;
-	int ret;
+	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
-	if (cpu_is_offline(cpu))
+	if (!policy)
 		return 0;
 
-	ret = __cpufreq_remove_dev_prepare(dev, sif);
+	if (cpu_online(cpu)) {
+		__cpufreq_remove_dev_prepare(dev);
+		__cpufreq_remove_dev_finish(dev);
+	}
 
-	if (!ret)
-		ret = __cpufreq_remove_dev_finish(dev, sif);
+	cpumask_clear_cpu(cpu, policy->real_cpus);
 
-	return ret;
+	if (cpumask_empty(policy->real_cpus)) {
+		cpufreq_policy_free(policy, true);
+		return 0;
+	}
+
+	if (cpu != policy->kobj_cpu) {
+		remove_cpu_dev_symlink(policy, cpu);
+	} else {
+		/*
		 * The CPU owning the policy object is going away. Move it to
		 * another suitable CPU.
		 */
+		unsigned int new_cpu = cpumask_first(policy->real_cpus);
+		struct device *new_dev = get_cpu_device(new_cpu);
+
+		dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
+
+		sysfs_remove_link(&new_dev->kobj, "cpufreq");
+		policy->kobj_cpu = new_cpu;
+		WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
+	}
+
+	return 0;
 }
 
 static void handle_update(struct work_struct *work)
@@ -1568,6 +1648,10 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
 
 	ret_freq = cpufreq_driver->get(policy->cpu);
 
+	/* Updating inactive policies is invalid, so avoid doing that. */
+	if (unlikely(policy_is_inactive(policy)))
+		return ret_freq;
+
 	if (ret_freq && policy->cur &&
 		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
 		/* verify no discrepancy between actual and
@@ -1657,7 +1741,7 @@ void cpufreq_suspend(void)
 
 	pr_debug("%s: Suspending Governors\n", __func__);
 
-	for_each_policy(policy) {
+	for_each_active_policy(policy) {
 		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
 			pr_err("%s: Failed to stop governor for policy: %p\n",
 				__func__, policy);
@@ -1691,7 +1775,7 @@ void cpufreq_resume(void)
 
 	pr_debug("%s: Resuming Governors\n", __func__);
 
-	for_each_policy(policy) {
+	for_each_active_policy(policy) {
 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
 			pr_err("%s: Failed to resume driver: %p\n", __func__,
 				policy);
@@ -1892,7 +1976,7 @@ static int __target_index(struct cpufreq_policy *policy,
 		 * Failed after setting to intermediate freq? Driver should have
 		 * reverted back to initial frequency and so should we. Check
 		 * here for intermediate_freq instead of get_intermediate, in
-		 * case we have't switched to intermediate freq at all.
+		 * case we haven't switched to intermediate freq at all.
 		 */
 		if (unlikely(retval && intermediate_freq)) {
 			freqs.old = intermediate_freq;
@@ -1964,14 +2048,6 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	}
 
 out:
-#ifdef CONFIG_SCHED_BFS
-	if (likely(retval != -EINVAL)) {
-		if (target_freq == policy->max)
-			cpu_nonscaling(policy->cpu);
-		else
-			cpu_scaling(policy->cpu);
-	}
-#endif
 	return retval;
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -2101,7 +2177,8 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
 
 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
-	int cpu;
+	struct cpufreq_policy *policy;
+	unsigned long flags;
 
 	if (!governor)
 		return;
@@ -2109,12 +2186,15 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 	if (cpufreq_disabled())
 		return;
 
-	for_each_present_cpu(cpu) {
-		if (cpu_online(cpu))
-			continue;
-		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
-			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
+	/* clear last_governor for all inactive policies */
+	read_lock_irqsave(&cpufreq_driver_lock, flags);
+	for_each_inactive_policy(policy) {
+		if (!strcmp(policy->last_governor, governor->name)) {
+			policy->governor = NULL;
+			strcpy(policy->last_governor, "\0");
+		}
 	}
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	mutex_lock(&cpufreq_governor_mutex);
 	list_del(&governor->governor_list);
@@ -2313,19 +2393,19 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 	if (dev) {
 		switch (action & ~CPU_TASKS_FROZEN) {
 		case CPU_ONLINE:
-			__cpufreq_add_dev(dev, NULL);
+			cpufreq_add_dev(dev, NULL);
 			break;
 
 		case CPU_DOWN_PREPARE:
-			__cpufreq_remove_dev_prepare(dev, NULL);
+			__cpufreq_remove_dev_prepare(dev);
 			break;
 
 		case CPU_POST_DEAD:
-			__cpufreq_remove_dev_finish(dev, NULL);
+			__cpufreq_remove_dev_finish(dev);
 			break;
 
 		case CPU_DOWN_FAILED:
-			__cpufreq_add_dev(dev, NULL);
+			cpufreq_add_dev(dev, NULL);
 			break;
 		}
 	}
@@ -2345,7 +2425,7 @@ static int cpufreq_boost_set_sw(int state)
 	struct cpufreq_policy *policy;
 	int ret = -EINVAL;
 
-	for_each_policy(policy) {
+	for_each_active_policy(policy) {
 		freq_table = cpufreq_frequency_get_table(policy->cpu);
 		if (freq_table) {
 			ret = cpufreq_frequency_table_cpuinfo(policy,
```
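The centerpiece of this update is the split between active policies (at least one CPU set in `policy->cpus`) and inactive ones (all managed CPUs offline), with the new `for_each_active_policy()` / `for_each_inactive_policy()` macros walking `cpufreq_policy_list` and skipping entries of the wrong kind. Below is a minimal userspace model of that skip-unsuitable-entries walk; it is a sketch only, with a plain `has_cpus` flag and `next` pointer standing in for `cpumask_empty(policy->cpus)` and the kernel's `list_next_entry()` linkage, and all names in it are hypothetical.

```c
/* Userspace model of the active/inactive policy iteration (not kernel code). */
#include <stdio.h>
#include <stdbool.h>

struct policy {
	const char *name;
	bool has_cpus;		/* stands in for !cpumask_empty(policy->cpus) */
	struct policy *next;	/* stands in for the policy_list linkage */
};

static bool policy_is_inactive(const struct policy *p)
{
	return !p->has_cpus;
}

/* Advance to the next entry whose active/inactive state matches. */
static struct policy *next_suitable(struct policy *p, bool active)
{
	while (p && active == policy_is_inactive(p))
		p = p->next;
	return p;
}

#define for_each_suitable_policy(p, head, active)		\
	for ((p) = next_suitable((head), (active)); (p);	\
	     (p) = next_suitable((p)->next, (active)))

int main(void)
{
	struct policy p2 = { "policy2", true, NULL };
	struct policy p1 = { "policy1", false, &p2 };	/* all CPUs offline */
	struct policy p0 = { "policy0", true, &p1 };
	struct policy *p;

	/* Suspend-style loop: only active policies get governor callbacks. */
	for_each_suitable_policy(p, &p0, true)
		printf("stopping governor for %s\n", p->name);
	return 0;
}
```

Run against the three-policy list above it visits only policy0 and policy2, which mirrors why `cpufreq_suspend()`, `cpufreq_resume()` and `cpufreq_boost_set_sw()` switched from `for_each_policy()` to `for_each_active_policy()` in this patch.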
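The new kerneldoc for `cpufreq_cpu_get()` / `cpufreq_cpu_put()` spells out the pairing rule: every successful get pins both the policy kobject and `cpufreq_rwsem` until the matching put. A sketch of the usual caller pattern follows, assuming only what that kerneldoc states; kernel context is assumed and the helper name `example_read_cur_freq` is hypothetical.

```c
#include <linux/cpufreq.h>

static unsigned int example_read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur;

	/* NULL means no driver registered, no policy, or the trylock failed. */
	if (!policy)
		return 0;

	/* Safe to dereference: the kobj ref and cpufreq_rwsem are held. */
	cur = policy->cur;

	/* Drops the kobject reference and the read-lock taken by _get(). */
	cpufreq_cpu_put(policy);
	return cur;
}
```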
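The patch also spreads per-policy bookkeeping across three cpumasks: `policy->cpus` (CPUs currently managed, cleared in `__cpufreq_remove_dev_prepare()`), `policy->related_cpus` (every CPU that ever shared the policy), and the new `policy->real_cpus` (CPUs whose devices still exist; the policy is freed only once this empties in `cpufreq_remove_dev()`). A toy model of those lifetimes, using an `unsigned int` bit per CPU as a stand-in for `cpumask_var_t`:

```c
/* Userspace model of the cpus/related_cpus/real_cpus lifetimes (a sketch). */
#include <stdio.h>

struct masks {
	unsigned int cpus;	   /* online CPUs currently managed */
	unsigned int related_cpus; /* all CPUs that ever shared the policy */
	unsigned int real_cpus;	   /* CPUs whose devices are still present */
};

static void show(const char *when, const struct masks *m)
{
	printf("%-16s cpus=%#x related=%#x real=%#x inactive=%d\n",
	       when, m->cpus, m->related_cpus, m->real_cpus, m->cpus == 0);
}

int main(void)
{
	struct masks m = { 0xf, 0xf, 0xf };	/* policy created for CPUs 0-3 */

	show("created", &m);

	m.cpus &= ~(1u << 2);	/* CPU2 offline: cleared from cpus only */
	show("cpu2 offline", &m);

	m.cpus = 0;		/* last CPU offline: policy goes inactive */
	show("all offline", &m);

	m.real_cpus = 0;	/* devices unregistered: policy may be freed */
	show("devices removed", &m);
	return 0;
}
```

The inactive state in the middle two steps is exactly what lets the policy, its kobject, and `last_governor` survive suspend/resume and hotplug instead of being torn down and rebuilt each time.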