Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/base.h                    |   2
-rw-r--r--  drivers/base/component.c               | 304
-rw-r--r--  drivers/base/core.c                    |   5
-rw-r--r--  drivers/base/cpu.c                     |  10
-rw-r--r--  drivers/base/dd.c                      |  81
-rw-r--r--  drivers/base/devtmpfs.c                |  12
-rw-r--r--  drivers/base/dma-mapping.c             |   7
-rw-r--r--  drivers/base/firmware_class.c          |   8
-rw-r--r--  drivers/base/memory.c                  |  35
-rw-r--r--  drivers/base/platform-msi.c            | 258
-rw-r--r--  drivers/base/platform.c                |  46
-rw-r--r--  drivers/base/power/clock_ops.c         |   6
-rw-r--r--  drivers/base/power/common.c            |  26
-rw-r--r--  drivers/base/power/domain.c            |  64
-rw-r--r--  drivers/base/power/main.c              |  52
-rw-r--r--  drivers/base/power/opp/Makefile        |   1
-rw-r--r--  drivers/base/power/opp/core.c          | 336
-rw-r--r--  drivers/base/power/opp/cpu.c           |   3
-rw-r--r--  drivers/base/power/opp/debugfs.c       | 219
-rw-r--r--  drivers/base/power/opp/opp.h           |  53
-rw-r--r--  drivers/base/power/power.h             |   5
-rw-r--r--  drivers/base/power/runtime.c           |  50
-rw-r--r--  drivers/base/property.c                | 495
-rw-r--r--  drivers/base/regmap/regcache-flat.c    |   2
-rw-r--r--  drivers/base/regmap/regcache-lzo.c     |   6
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c  |  18
-rw-r--r--  drivers/base/regmap/regcache.c         |  41
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c   |  69
-rw-r--r--  drivers/base/regmap/regmap-irq.c       | 113
-rw-r--r--  drivers/base/regmap/regmap-mmio.c      |  50
-rw-r--r--  drivers/base/regmap/regmap.c           | 118
31 files changed, 2002 insertions, 493 deletions
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 1782f3aa3..e05db388b 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -131,6 +131,8 @@ extern void device_remove_groups(struct device *dev,
extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
+extern void device_block_probing(void);
+extern void device_unblock_probing(void);
/* /sys/devices directory */
extern struct kset *devices_kset;
diff --git a/drivers/base/component.c b/drivers/base/component.c
index f748430bb..04a1582e8 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -18,18 +18,24 @@
#include <linux/mutex.h>
#include <linux/slab.h>
+struct component;
+
+struct component_match_array {
+ void *data;
+ int (*compare)(struct device *, void *);
+ void (*release)(struct device *, void *);
+ struct component *component;
+ bool duplicate;
+};
+
struct component_match {
size_t alloc;
size_t num;
- struct {
- void *data;
- int (*fn)(struct device *, void *);
- } compare[0];
+ struct component_match_array *compare;
};
struct master {
struct list_head node;
- struct list_head components;
bool bound;
const struct component_master_ops *ops;
@@ -39,7 +45,6 @@ struct master {
struct component {
struct list_head node;
- struct list_head master_node;
struct master *master;
bool bound;
@@ -63,48 +68,21 @@ static struct master *__master_find(struct device *dev,
return NULL;
}
-/* Attach an unattached component to a master. */
-static void component_attach_master(struct master *master, struct component *c)
-{
- c->master = master;
-
- list_add_tail(&c->master_node, &master->components);
-}
-
-/* Detach a component from a master. */
-static void component_detach_master(struct master *master, struct component *c)
-{
- list_del(&c->master_node);
-
- c->master = NULL;
-}
-
-/*
- * Add a component to a master, finding the component via the compare
- * function and compare data. This is safe to call for duplicate matches
- * and will not result in the same component being added multiple times.
- */
-int component_master_add_child(struct master *master,
+static struct component *find_component(struct master *master,
int (*compare)(struct device *, void *), void *compare_data)
{
struct component *c;
- int ret = -ENXIO;
list_for_each_entry(c, &component_list, node) {
if (c->master && c->master != master)
continue;
- if (compare(c->dev, compare_data)) {
- if (!c->master)
- component_attach_master(master, c);
- ret = 0;
- break;
- }
+ if (compare(c->dev, compare_data))
+ return c;
}
- return ret;
+ return NULL;
}
-EXPORT_SYMBOL_GPL(component_master_add_child);
static int find_components(struct master *master)
{
@@ -112,39 +90,44 @@ static int find_components(struct master *master)
size_t i;
int ret = 0;
- if (!match) {
- /*
- * Search the list of components, looking for components that
- * belong to this master, and attach them to the master.
- */
- return master->ops->add_components(master->dev, master);
- }
-
/*
* Scan the array of match functions and attach
* any components which are found to this master.
*/
for (i = 0; i < match->num; i++) {
- ret = component_master_add_child(master,
- match->compare[i].fn,
- match->compare[i].data);
- if (ret)
+ struct component_match_array *mc = &match->compare[i];
+ struct component *c;
+
+ dev_dbg(master->dev, "Looking for component %zu\n", i);
+
+ if (match->compare[i].component)
+ continue;
+
+ c = find_component(master, mc->compare, mc->data);
+ if (!c) {
+ ret = -ENXIO;
break;
+ }
+
+ dev_dbg(master->dev, "found component %s, duplicate %u\n", dev_name(c->dev), !!c->master);
+
+ /* Attach this component to the master */
+ match->compare[i].duplicate = !!c->master;
+ match->compare[i].component = c;
+ c->master = master;
}
return ret;
}
-/* Detach all attached components from this master */
-static void master_remove_components(struct master *master)
+/* Detach component from associated master */
+static void remove_component(struct master *master, struct component *c)
{
- while (!list_empty(&master->components)) {
- struct component *c = list_first_entry(&master->components,
- struct component, master_node);
-
- WARN_ON(c->master != master);
+ size_t i;
- component_detach_master(master, c);
- }
+ /* Detach the component from this master. */
+ for (i = 0; i < master->match->num; i++)
+ if (master->match->compare[i].component == c)
+ master->match->compare[i].component = NULL;
}
/*
@@ -159,44 +142,32 @@ static int try_to_bring_up_master(struct master *master,
{
int ret;
- if (master->bound)
- return 0;
+ dev_dbg(master->dev, "trying to bring up master\n");
- /*
- * Search the list of components, looking for components that
- * belong to this master, and attach them to the master.
- */
if (find_components(master)) {
- /* Failed to find all components */
- ret = 0;
- goto out;
+ dev_dbg(master->dev, "master has incomplete components\n");
+ return 0;
}
if (component && component->master != master) {
- ret = 0;
- goto out;
+ dev_dbg(master->dev, "master is not for this component (%s)\n",
+ dev_name(component->dev));
+ return 0;
}
- if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
/* Found all components */
ret = master->ops->bind(master->dev);
if (ret < 0) {
devres_release_group(master->dev, NULL);
dev_info(master->dev, "master bind failed: %d\n", ret);
- goto out;
+ return ret;
}
master->bound = true;
return 1;
-
-out:
- master_remove_components(master);
-
- return ret;
}
static int try_to_bring_up_masters(struct component *component)
@@ -205,9 +176,11 @@ static int try_to_bring_up_masters(struct component *component)
int ret = 0;
list_for_each_entry(m, &masters, node) {
- ret = try_to_bring_up_master(m, component);
- if (ret != 0)
- break;
+ if (!m->bound) {
+ ret = try_to_bring_up_master(m, component);
+ if (ret != 0)
+ break;
+ }
}
return ret;
@@ -220,45 +193,59 @@ static void take_down_master(struct master *master)
devres_release_group(master->dev, NULL);
master->bound = false;
}
+}
+
+static void component_match_release(struct device *master,
+ struct component_match *match)
+{
+ unsigned int i;
+
+ for (i = 0; i < match->num; i++) {
+ struct component_match_array *mc = &match->compare[i];
+
+ if (mc->release)
+ mc->release(master, mc->data);
+ }
- master_remove_components(master);
+ kfree(match->compare);
}
-static size_t component_match_size(size_t num)
+static void devm_component_match_release(struct device *dev, void *res)
{
- return offsetof(struct component_match, compare[num]);
+ component_match_release(dev, res);
}
-static struct component_match *component_match_realloc(struct device *dev,
+static int component_match_realloc(struct device *dev,
struct component_match *match, size_t num)
{
- struct component_match *new;
+ struct component_match_array *new;
- if (match && match->alloc == num)
- return match;
+ if (match->alloc == num)
+ return 0;
- new = devm_kmalloc(dev, component_match_size(num), GFP_KERNEL);
+ new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
if (!new)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- if (match) {
- memcpy(new, match, component_match_size(min(match->num, num)));
- devm_kfree(dev, match);
- } else {
- new->num = 0;
+ if (match->compare) {
+ memcpy(new, match->compare, sizeof(*new) *
+ min(match->num, num));
+ kfree(match->compare);
}
+ match->compare = new;
+ match->alloc = num;
- new->alloc = num;
-
- return new;
+ return 0;
}
/*
- * Add a component to be matched.
+ * Add a component to be matched, with a release function.
*
* The match array is first created or extended if necessary.
*/
-void component_match_add(struct device *dev, struct component_match **matchptr,
+void component_match_add_release(struct device *master,
+ struct component_match **matchptr,
+ void (*release)(struct device *, void *),
int (*compare)(struct device *, void *), void *compare_data)
{
struct component_match *match = *matchptr;
@@ -266,22 +253,55 @@ void component_match_add(struct device *dev, struct component_match **matchptr,
if (IS_ERR(match))
return;
- if (!match || match->num == match->alloc) {
- size_t new_size = match ? match->alloc + 16 : 15;
+ if (!match) {
+ match = devres_alloc(devm_component_match_release,
+ sizeof(*match), GFP_KERNEL);
+ if (!match) {
+ *matchptr = ERR_PTR(-ENOMEM);
+ return;
+ }
- match = component_match_realloc(dev, match, new_size);
+ devres_add(master, match);
*matchptr = match;
+ }
+
+ if (match->num == match->alloc) {
+ size_t new_size = match ? match->alloc + 16 : 15;
+ int ret;
- if (IS_ERR(match))
+ ret = component_match_realloc(master, match, new_size);
+ if (ret) {
+ *matchptr = ERR_PTR(ret);
return;
+ }
}
- match->compare[match->num].fn = compare;
+ match->compare[match->num].compare = compare;
+ match->compare[match->num].release = release;
match->compare[match->num].data = compare_data;
+ match->compare[match->num].component = NULL;
match->num++;
}
-EXPORT_SYMBOL(component_match_add);
+EXPORT_SYMBOL(component_match_add_release);
+
+static void free_master(struct master *master)
+{
+ struct component_match *match = master->match;
+ int i;
+
+ list_del(&master->node);
+
+ if (match) {
+ for (i = 0; i < match->num; i++) {
+ struct component *c = match->compare[i].component;
+ if (c)
+ c->master = NULL;
+ }
+ }
+
+ kfree(master);
+}
int component_master_add_with_match(struct device *dev,
const struct component_master_ops *ops,
@@ -290,15 +310,10 @@ int component_master_add_with_match(struct device *dev,
struct master *master;
int ret;
- if (ops->add_components && match)
- return -EINVAL;
-
- if (match) {
- /* Reallocate the match array for its true size */
- match = component_match_realloc(dev, match, match->num);
- if (IS_ERR(match))
- return PTR_ERR(match);
- }
+ /* Reallocate the match array for its true size */
+ ret = component_match_realloc(dev, match, match->num);
+ if (ret)
+ return ret;
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
@@ -307,7 +322,6 @@ int component_master_add_with_match(struct device *dev,
master->dev = dev;
master->ops = ops;
master->match = match;
- INIT_LIST_HEAD(&master->components);
/* Add to the list of available masters. */
mutex_lock(&component_mutex);
@@ -315,24 +329,15 @@ int component_master_add_with_match(struct device *dev,
ret = try_to_bring_up_master(master, NULL);
- if (ret < 0) {
- /* Delete off the list if we weren't successful */
- list_del(&master->node);
- kfree(master);
- }
+ if (ret < 0)
+ free_master(master);
+
mutex_unlock(&component_mutex);
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(component_master_add_with_match);
-int component_master_add(struct device *dev,
- const struct component_master_ops *ops)
-{
- return component_master_add_with_match(dev, ops, NULL);
-}
-EXPORT_SYMBOL_GPL(component_master_add);
-
void component_master_del(struct device *dev,
const struct component_master_ops *ops)
{
@@ -342,9 +347,7 @@ void component_master_del(struct device *dev,
master = __master_find(dev, ops);
if (master) {
take_down_master(master);
-
- list_del(&master->node);
- kfree(master);
+ free_master(master);
}
mutex_unlock(&component_mutex);
}
@@ -366,6 +369,7 @@ void component_unbind_all(struct device *master_dev, void *data)
{
struct master *master;
struct component *c;
+ size_t i;
WARN_ON(!mutex_is_locked(&component_mutex));
@@ -373,8 +377,12 @@ void component_unbind_all(struct device *master_dev, void *data)
if (!master)
return;
- list_for_each_entry_reverse(c, &master->components, master_node)
- component_unbind(c, master, data);
+ /* Unbind components in reverse order */
+ for (i = master->match->num; i--; )
+ if (!master->match->compare[i].duplicate) {
+ c = master->match->compare[i].component;
+ component_unbind(c, master, data);
+ }
}
EXPORT_SYMBOL_GPL(component_unbind_all);
@@ -434,6 +442,7 @@ int component_bind_all(struct device *master_dev, void *data)
{
struct master *master;
struct component *c;
+ size_t i;
int ret = 0;
WARN_ON(!mutex_is_locked(&component_mutex));
@@ -442,16 +451,21 @@ int component_bind_all(struct device *master_dev, void *data)
if (!master)
return -EINVAL;
- list_for_each_entry(c, &master->components, master_node) {
- ret = component_bind(c, master, data);
- if (ret)
- break;
- }
+ /* Bind components in match order */
+ for (i = 0; i < master->match->num; i++)
+ if (!master->match->compare[i].duplicate) {
+ c = master->match->compare[i].component;
+ ret = component_bind(c, master, data);
+ if (ret)
+ break;
+ }
if (ret != 0) {
- list_for_each_entry_continue_reverse(c, &master->components,
- master_node)
- component_unbind(c, master, data);
+ for (; i--; )
+ if (!master->match->compare[i].duplicate) {
+ c = master->match->compare[i].component;
+ component_unbind(c, master, data);
+ }
}
return ret;
@@ -477,6 +491,8 @@ int component_add(struct device *dev, const struct component_ops *ops)
ret = try_to_bring_up_masters(component);
if (ret < 0) {
+ if (component->master)
+ remove_component(component->master, component);
list_del(&component->node);
kfree(component);
@@ -499,8 +515,10 @@ void component_del(struct device *dev, const struct component_ops *ops)
break;
}
- if (component && component->master)
+ if (component && component->master) {
take_down_master(component->master);
+ remove_component(component->master, component);
+ }
mutex_unlock(&component_mutex);
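
A minimal sketch of how an aggregate ("master") driver might use the reworked
match API above; the demo_* driver, its compare helper and the child-node
layout are hypothetical:

#include <linux/component.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Match a registered component by its device_node. */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* Drop the of_node reference taken when the match entry was added. */
static void release_of(struct device *dev, void *data)
{
	of_node_put(data);
}

static int demo_master_bind(struct device *dev)
{
	/* Binds every matched component in match order. */
	return component_bind_all(dev, NULL);
}

static void demo_master_unbind(struct device *dev)
{
	component_unbind_all(dev, NULL);
}

static const struct component_master_ops demo_master_ops = {
	.bind	= demo_master_bind,
	.unbind	= demo_master_unbind,
};

static int demo_master_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device_node *child;

	for_each_available_child_of_node(pdev->dev.of_node, child)
		component_match_add_release(&pdev->dev, &match, release_of,
					    compare_of, of_node_get(child));

	if (IS_ERR_OR_NULL(match))
		return match ? PTR_ERR(match) : -ENODEV;

	return component_master_add_with_match(&pdev->dev, &demo_master_ops,
					       match);
}

The release callback is what the new *_add_release() variant is for: each
of_node_get() reference taken for a match entry is dropped by release_of()
when the devres-managed match array is torn down.
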
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b7d56c5ea..0a8bdade5 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2261,7 +2261,10 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
if (fwnode_is_primary(fn))
fn = fn->secondary;
- fwnode->secondary = fn;
+ if (fn) {
+ WARN_ON(fwnode->secondary);
+ fwnode->secondary = fn;
+ }
dev->fwnode = fwnode;
} else {
dev->fwnode = fwnode_is_primary(dev->fwnode) ?
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 91bbb1959..691eeea2f 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -200,7 +200,7 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
struct cpu_attr {
struct device_attribute attr;
- const struct cpumask *const * const map;
+ const struct cpumask *const map;
};
static ssize_t show_cpus_attr(struct device *dev,
@@ -209,7 +209,7 @@ static ssize_t show_cpus_attr(struct device *dev,
{
struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
- return cpumap_print_to_pagebuf(true, buf, *ca->map);
+ return cpumap_print_to_pagebuf(true, buf, ca->map);
}
#define _CPU_ATTR(name, map) \
@@ -217,9 +217,9 @@ static ssize_t show_cpus_attr(struct device *dev,
/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
- _CPU_ATTR(online, &cpu_online_mask),
- _CPU_ATTR(possible, &cpu_possible_mask),
- _CPU_ATTR(present, &cpu_present_mask),
+ _CPU_ATTR(online, &__cpu_online_mask),
+ _CPU_ATTR(possible, &__cpu_possible_mask),
+ _CPU_ATTR(present, &__cpu_present_mask),
};
/*
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a641cf3cc..c4da2df62 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -55,6 +55,13 @@ static struct workqueue_struct *deferred_wq;
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/*
 * In some cases, like suspend to RAM or hibernation, it might be reasonable
 * to prohibit probing of devices as it could be unsafe.
 * Once defer_all_probes is true, all driver probes will be forcibly deferred.
+ */
+static bool defer_all_probes;
+
+/*
* deferred_probe_work_func() - Retry probing devices in the active list.
*/
static void deferred_probe_work_func(struct work_struct *work)
@@ -172,6 +179,30 @@ static void driver_deferred_probe_trigger(void)
}
/**
+ * device_block_probing() - Block/defer device's probes
+ *
+ * It will disable probing of devices and defer their probes instead.
+ */
+void device_block_probing(void)
+{
+ defer_all_probes = true;
+ /* sync with probes to avoid races. */
+ wait_for_device_probe();
+}
+
+/**
+ * device_unblock_probing() - Unblock/enable device's probes
+ *
+ * It will restore normal behavior and trigger re-probing of deferred
+ * devices.
+ */
+void device_unblock_probing(void)
+{
+ defer_all_probes = false;
+ driver_deferred_probe_trigger();
+}
+
+/**
* deferred_probe_initcall() - Enable probing of deferred devices
*
* We don't want to get in the way when the bulk of drivers are getting probed.
@@ -192,9 +223,23 @@ static int deferred_probe_initcall(void)
}
late_initcall(deferred_probe_initcall);
+/**
+ * device_is_bound() - Check if device is bound to a driver
+ * @dev: device to check
+ *
+ * Returns true if passed device has already finished probing successfully
+ * against a driver.
+ *
+ * This function must be called with the device lock held.
+ */
+bool device_is_bound(struct device *dev)
+{
+ return dev->p && klist_node_attached(&dev->p->knode_driver);
+}
+
static void driver_bound(struct device *dev)
{
- if (klist_node_attached(&dev->p->knode_driver)) {
+ if (device_is_bound(dev)) {
printk(KERN_WARNING "%s: device %s already bound\n",
__func__, kobject_name(&dev->kobj));
return;
@@ -205,6 +250,8 @@ static void driver_bound(struct device *dev)
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+ device_pm_check_callbacks(dev);
+
/*
* Make sure the device is no longer in one of the deferred lists and
* kick off retrying all pending devices
@@ -268,6 +315,9 @@ int device_bind_driver(struct device *dev)
ret = driver_sysfs_add(dev);
if (!ret)
driver_bound(dev);
+ else if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);
@@ -277,9 +327,20 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
static int really_probe(struct device *dev, struct device_driver *drv)
{
- int ret = 0;
+ int ret = -EPROBE_DEFER;
int local_trigger_count = atomic_read(&deferred_trigger_count);
+ if (defer_all_probes) {
+ /*
+ * Value of defer_all_probes can be set only by
+ * device_defer_all_probes_enable() which, in turn, will call
+ * wait_for_device_probe() right after that to avoid any races.
+ */
+ dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
+ driver_deferred_probe_add(dev);
+ return ret;
+ }
+
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
@@ -290,7 +351,7 @@ static int really_probe(struct device *dev, struct device_driver *drv)
/* If using pinctrl, bind pins now before probing */
ret = pinctrl_bind_pins(dev);
if (ret)
- goto probe_failed;
+ goto pinctrl_bind_failed;
if (driver_sysfs_add(dev)) {
printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
@@ -334,12 +395,17 @@ static int really_probe(struct device *dev, struct device_driver *drv)
goto done;
probe_failed:
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+pinctrl_bind_failed:
devres_release_all(dev);
driver_sysfs_remove(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
+ pm_runtime_reinit(dev);
switch (ret) {
case -EPROBE_DEFER:
@@ -393,6 +459,10 @@ int driver_probe_done(void)
*/
void wait_for_device_probe(void)
{
+ /* wait for the deferred probe workqueue to finish */
+ if (driver_deferred_probe_enable)
+ flush_workqueue(deferred_wq);
+
/* wait for the known devices to complete their probing */
wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
async_synchronize_full();
@@ -547,7 +617,7 @@ static int __device_attach(struct device *dev, bool allow_async)
device_lock(dev);
if (dev->driver) {
- if (klist_node_attached(&dev->p->knode_driver)) {
+ if (device_is_bound(dev)) {
ret = 1;
goto out_unlock;
}
@@ -695,13 +765,14 @@ static void __device_release_driver(struct device *dev)
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
+ pm_runtime_reinit(dev);
klist_remove(&dev->p->knode_driver);
+ device_pm_check_callbacks(dev);
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_UNBOUND_DRIVER,
dev);
-
}
}
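
The new BUS_NOTIFY_DRIVER_NOT_BOUND notification (sent from really_probe() and
device_bind_driver() above) lets a bus notifier undo work it did at
BUS_NOTIFY_BIND_DRIVER time when the probe attempt fails, as pm_clk_notify()
does further down. A hypothetical notifier sketch, assuming it is registered
on the platform bus:

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>

static int demo_bus_notify(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		dev_dbg(dev, "about to probe: acquire per-device resources\n");
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:	/* probe failed */
	case BUS_NOTIFY_UNBOUND_DRIVER:		/* driver detached */
		dev_dbg(dev, "release the resources acquired above\n");
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_bus_notify,
};

/* Registered once with bus_register_notifier(&platform_bus_type, &demo_nb). */
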
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 68f03141e..44a74cf13 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -215,9 +215,9 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
newattrs.ia_uid = uid;
newattrs.ia_gid = gid;
newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
- mutex_lock(&d_inode(dentry)->i_mutex);
+ inode_lock(d_inode(dentry));
notify_change(dentry, &newattrs, NULL);
- mutex_unlock(&d_inode(dentry)->i_mutex);
+ inode_unlock(d_inode(dentry));
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
@@ -244,7 +244,7 @@ static int dev_rmdir(const char *name)
err = -ENOENT;
}
dput(dentry);
- mutex_unlock(&d_inode(parent.dentry)->i_mutex);
+ inode_unlock(d_inode(parent.dentry));
path_put(&parent);
return err;
}
@@ -321,9 +321,9 @@ static int handle_remove(const char *nodename, struct device *dev)
newattrs.ia_mode = stat.mode & ~0777;
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
- mutex_lock(&d_inode(dentry)->i_mutex);
+ inode_lock(d_inode(dentry));
notify_change(dentry, &newattrs, NULL);
- mutex_unlock(&d_inode(dentry)->i_mutex);
+ inode_unlock(d_inode(dentry));
err = vfs_unlink(d_inode(parent.dentry), dentry, NULL);
if (!err || err == -ENOENT)
deleted = 1;
@@ -332,7 +332,7 @@ static int handle_remove(const char *nodename, struct device *dev)
err = -ENOENT;
}
dput(dentry);
- mutex_unlock(&d_inode(parent.dentry)->i_mutex);
+ inode_unlock(d_inode(parent.dentry));
path_put(&parent);
if (deleted && strchr(nodename, '/'))
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index d95c5971c..d799662f1 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -12,7 +12,6 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <asm-generic/dma-coherent.h>
/*
* Managed DMA API
@@ -167,7 +166,7 @@ void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
}
EXPORT_SYMBOL(dmam_free_noncoherent);
-#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
static void dmam_coherent_decl_release(struct device *dev, void *res)
{
@@ -247,7 +246,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int ret = -ENXIO;
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
@@ -264,7 +263,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
user_count << PAGE_SHIFT,
vma->vm_page_prot);
}
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
return ret;
}
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d6e69e89f..8a4d5b1c3 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -1118,15 +1118,17 @@ static int
_request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device, unsigned int opt_flags)
{
- struct firmware *fw;
+ struct firmware *fw = NULL;
long timeout;
int ret;
if (!firmware_p)
return -EINVAL;
- if (!name || name[0] == '\0')
- return -EINVAL;
+ if (!name || name[0] == '\0') {
+ ret = -EINVAL;
+ goto out;
+ }
ret = _request_firmware_prepare(&fw, name, device);
if (ret <= 0) /* error or already assigned */
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 25425d3f2..213456c2b 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -450,8 +450,7 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u64 phys_addr;
- int nid;
- int i, ret;
+ int nid, ret;
unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
ret = kstrtoull(buf, 0, &phys_addr);
@@ -461,15 +460,12 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
return -EINVAL;
- for (i = 0; i < sections_per_block; i++) {
- nid = memory_add_physaddr_to_nid(phys_addr);
- ret = add_memory(nid, phys_addr,
- PAGES_PER_SECTION << PAGE_SHIFT);
- if (ret)
- goto out;
+ nid = memory_add_physaddr_to_nid(phys_addr);
+ ret = add_memory(nid, phys_addr,
+ MIN_MEMORY_BLOCK_SIZE * sections_per_block);
- phys_addr += MIN_MEMORY_BLOCK_SIZE;
- }
+ if (ret)
+ goto out;
ret = count;
out:
@@ -618,7 +614,6 @@ static int init_memory_block(struct memory_block **memory,
base_memory_block_id(scn_nr) * sections_per_block;
mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
mem->state = state;
- mem->section_count++;
start_pfn = section_nr_to_pfn(mem->start_section_nr);
mem->phys_device = arch_get_memory_phys_device(start_pfn);
@@ -652,6 +647,13 @@ static int add_memory_block(int base_section_nr)
return 0;
}
+static bool is_zone_device_section(struct mem_section *ms)
+{
+ struct page *page;
+
+ page = sparse_decode_mem_map(ms->section_mem_map, __section_nr(ms));
+ return is_zone_device_page(page);
+}
/*
* need an interface for the VM to add new memory regions,
@@ -662,6 +664,9 @@ int register_new_memory(int nid, struct mem_section *section)
int ret = 0;
struct memory_block *mem;
+ if (is_zone_device_section(section))
+ return 0;
+
mutex_lock(&mem_sysfs_mutex);
mem = find_memory_block(section);
@@ -672,6 +677,7 @@ int register_new_memory(int nid, struct mem_section *section)
ret = init_memory_block(&mem, section, MEM_OFFLINE);
if (ret)
goto out;
+ mem->section_count++;
}
if (mem->section_count == sections_per_block)
@@ -692,11 +698,14 @@ unregister_memory(struct memory_block *memory)
device_unregister(&memory->dev);
}
-static int remove_memory_block(unsigned long node_id,
+static int remove_memory_section(unsigned long node_id,
struct mem_section *section, int phys_device)
{
struct memory_block *mem;
+ if (is_zone_device_section(section))
+ return 0;
+
mutex_lock(&mem_sysfs_mutex);
mem = find_memory_block(section);
unregister_mem_sect_under_nodes(mem, __section_nr(section));
@@ -716,7 +725,7 @@ int unregister_memory_section(struct mem_section *section)
if (!present_section(section))
return -EINVAL;
- return remove_memory_block(0, section, 0);
+ return remove_memory_section(0, section, 0);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 5df4575b5..279e53989 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -24,13 +24,17 @@
#include <linux/msi.h>
#include <linux/slab.h>
-#define DEV_ID_SHIFT 24
+#define DEV_ID_SHIFT 21
+#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT))
/*
* Internal data structure containing a (made up, but unique) devid
* and the callback to write the MSI message.
*/
struct platform_msi_priv_data {
+ struct device *dev;
+ void *host_data;
+ msi_alloc_info_t arg;
irq_write_msi_msg_t write_msg;
int devid;
};
@@ -110,39 +114,49 @@ static void platform_msi_update_chip_ops(struct msi_domain_info *info)
chip->irq_write_msi_msg = platform_msi_write_msg;
}
-static void platform_msi_free_descs(struct device *dev)
+static void platform_msi_free_descs(struct device *dev, int base, int nvec)
{
struct msi_desc *desc, *tmp;
list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
- list_del(&desc->list);
- free_msi_entry(desc);
+ if (desc->platform.msi_index >= base &&
+ desc->platform.msi_index < (base + nvec)) {
+ list_del(&desc->list);
+ free_msi_entry(desc);
+ }
}
}
-static int platform_msi_alloc_descs(struct device *dev, int nvec,
- struct platform_msi_priv_data *data)
+static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq,
+ int nvec,
+ struct platform_msi_priv_data *data)
{
- int i;
+ struct msi_desc *desc;
+ int i, base = 0;
- for (i = 0; i < nvec; i++) {
- struct msi_desc *desc;
+ if (!list_empty(dev_to_msi_list(dev))) {
+ desc = list_last_entry(dev_to_msi_list(dev),
+ struct msi_desc, list);
+ base = desc->platform.msi_index + 1;
+ }
+ for (i = 0; i < nvec; i++) {
desc = alloc_msi_entry(dev);
if (!desc)
break;
desc->platform.msi_priv_data = data;
- desc->platform.msi_index = i;
+ desc->platform.msi_index = base + i;
desc->nvec_used = 1;
+ desc->irq = virq ? virq + i : 0;
list_add_tail(&desc->list, dev_to_msi_list(dev));
}
if (i != nvec) {
/* Clean up the mess */
- platform_msi_free_descs(dev);
+ platform_msi_free_descs(dev, base, nvec);
return -ENOMEM;
}
@@ -150,6 +164,13 @@ static int platform_msi_alloc_descs(struct device *dev, int nvec,
return 0;
}
+static int platform_msi_alloc_descs(struct device *dev, int nvec,
+ struct platform_msi_priv_data *data)
+
+{
+ return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data);
+}
+
/**
* platform_msi_create_irq_domain - Create a platform MSI interrupt domain
* @fwnode: Optional fwnode of the interrupt controller
@@ -180,56 +201,75 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
return domain;
}
-/**
- * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
- * @dev: The device for which to allocate interrupts
- * @nvec: The number of interrupts to allocate
- * @write_msi_msg: Callback to write an interrupt message for @dev
- *
- * Returns:
- * Zero for success, or an error code in case of failure
- */
-int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
+static struct platform_msi_priv_data *
+platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
{
- struct platform_msi_priv_data *priv_data;
- int err;
-
+ struct platform_msi_priv_data *datap;
/*
* Limit the number of interrupts to 2048 per device. Should we
* need to bump this up, DEV_ID_SHIFT should be adjusted
* accordingly (which would impact the max number of MSI
* capable devices).
*/
- if (!dev->msi_domain || !write_msi_msg || !nvec ||
- nvec > (1 << (32 - DEV_ID_SHIFT)))
- return -EINVAL;
+ if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
+ return ERR_PTR(-EINVAL);
if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
dev_err(dev, "Incompatible msi_domain, giving up\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
/* Already had a helping of MSI? Greed... */
if (!list_empty(dev_to_msi_list(dev)))
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
+
+ datap = kzalloc(sizeof(*datap), GFP_KERNEL);
+ if (!datap)
+ return ERR_PTR(-ENOMEM);
+
+ datap->devid = ida_simple_get(&platform_msi_devid_ida,
+ 0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
+ if (datap->devid < 0) {
+ int err = datap->devid;
+ kfree(datap);
+ return ERR_PTR(err);
+ }
- priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
- if (!priv_data)
- return -ENOMEM;
+ datap->write_msg = write_msi_msg;
+ datap->dev = dev;
- priv_data->devid = ida_simple_get(&platform_msi_devid_ida,
- 0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
- if (priv_data->devid < 0) {
- err = priv_data->devid;
- goto out_free_data;
- }
+ return datap;
+}
+
+static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
+{
+ ida_simple_remove(&platform_msi_devid_ida, data->devid);
+ kfree(data);
+}
+
+/**
+ * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
+ * @dev: The device for which to allocate interrupts
+ * @nvec: The number of interrupts to allocate
+ * @write_msi_msg: Callback to write an interrupt message for @dev
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
+{
+ struct platform_msi_priv_data *priv_data;
+ int err;
- priv_data->write_msg = write_msi_msg;
+ priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
+ if (IS_ERR(priv_data))
+ return PTR_ERR(priv_data);
err = platform_msi_alloc_descs(dev, nvec, priv_data);
if (err)
- goto out_free_id;
+ goto out_free_priv_data;
err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
if (err)
@@ -238,14 +278,13 @@ int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
return 0;
out_free_desc:
- platform_msi_free_descs(dev);
-out_free_id:
- ida_simple_remove(&platform_msi_devid_ida, priv_data->devid);
-out_free_data:
- kfree(priv_data);
+ platform_msi_free_descs(dev, 0, nvec);
+out_free_priv_data:
+ platform_msi_free_priv_data(priv_data);
return err;
}
+EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
/**
* platform_msi_domain_free_irqs - Free MSI interrupts for @dev
@@ -253,18 +292,127 @@ out_free_data:
*/
void platform_msi_domain_free_irqs(struct device *dev)
{
- struct msi_desc *desc;
+ if (!list_empty(dev_to_msi_list(dev))) {
+ struct msi_desc *desc;
+
+ desc = first_msi_entry(dev);
+ platform_msi_free_priv_data(desc->platform.msi_priv_data);
+ }
+
+ msi_domain_free_irqs(dev->msi_domain, dev);
+ platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
+}
+EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
+
+/**
+ * platform_msi_get_host_data - Query the private data associated with
+ * a platform-msi domain
+ * @domain: The platform-msi domain
+ *
+ * Returns the private data provided when calling
+ * platform_msi_create_device_domain.
+ */
+void *platform_msi_get_host_data(struct irq_domain *domain)
+{
+ struct platform_msi_priv_data *data = domain->host_data;
+ return data->host_data;
+}
+
+/**
+ * platform_msi_create_device_domain - Create a platform-msi domain
+ *
+ * @dev: The device generating the MSIs
+ * @nvec: The number of MSIs that need to be allocated
+ * @write_msi_msg: Callback to write an interrupt message for @dev
+ * @ops: The hierarchy domain operations to use
+ * @host_data: Private data associated to this domain
+ *
+ * Returns an irqdomain for @nvec interrupts
+ */
+struct irq_domain *
+platform_msi_create_device_domain(struct device *dev,
+ unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct platform_msi_priv_data *data;
+ struct irq_domain *domain;
+ int err;
+
+ data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
+ if (IS_ERR(data))
+ return NULL;
+
+ data->host_data = host_data;
+ domain = irq_domain_create_hierarchy(dev->msi_domain, 0, nvec,
+ of_node_to_fwnode(dev->of_node),
+ ops, data);
+ if (!domain)
+ goto free_priv;
- desc = first_msi_entry(dev);
- if (desc) {
- struct platform_msi_priv_data *data;
+ err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
+ if (err)
+ goto free_domain;
+
+ return domain;
- data = desc->platform.msi_priv_data;
+free_domain:
+ irq_domain_remove(domain);
+free_priv:
+ platform_msi_free_priv_data(data);
+ return NULL;
+}
+
+/**
+ * platform_msi_domain_free - Free interrupts associated with a platform-msi
+ * domain
+ *
+ * @domain: The platform-msi domain
+ * @virq: The base irq from which to perform the free operation
+ * @nvec: How many interrupts to free from @virq
+ */
+void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nvec)
+{
+ struct platform_msi_priv_data *data = domain->host_data;
+ struct msi_desc *desc;
+ for_each_msi_entry(desc, data->dev) {
+ if (WARN_ON(!desc->irq || desc->nvec_used != 1))
+ return;
+ if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
+ continue;
- ida_simple_remove(&platform_msi_devid_ida, data->devid);
- kfree(data);
+ irq_domain_free_irqs_common(domain, desc->irq, 1);
}
+}
- msi_domain_free_irqs(dev->msi_domain, dev);
- platform_msi_free_descs(dev);
+/**
+ * platform_msi_domain_alloc - Allocate interrupts associated with
+ * a platform-msi domain
+ *
+ * @domain: The platform-msi domain
+ * @virq: The base irq from which to perform the allocate operation
+ * @nr_irqs: How many interrupts to allocate from @virq
+ *
+ * Return 0 on success, or an error code on failure. Must be called
+ * with irq_domain_mutex held (which can only be done as part of a
+ * top-level interrupt allocation).
+ */
+int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct platform_msi_priv_data *data = domain->host_data;
+ int err;
+
+ err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data);
+ if (err)
+ return err;
+
+ err = msi_domain_populate_irqs(domain->parent, data->dev,
+ virq, nr_irqs, &data->arg);
+ if (err)
+ platform_msi_domain_free(domain, virq, nr_irqs);
+
+ return err;
}
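
A rough sketch of how a wire-to-MSI bridge driver might consume the
device-domain API added above; the demo_* names, the bridge state and the
vector count are placeholders:

#include <linux/device.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

/* Hypothetical bridge state, passed in as host_data. */
struct demo_bridge {
	void __iomem *base;
};

static void demo_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* Program the doorbell address/data for this vector. */
}

static int demo_domain_alloc(struct irq_domain *domain, unsigned int virq,
			     unsigned int nr_irqs, void *arg)
{
	struct demo_bridge *bridge = platform_msi_get_host_data(domain);
	int err;

	if (!bridge)
		return -EINVAL;

	/* Allocates the MSI descriptors and the parent interrupts. */
	err = platform_msi_domain_alloc(domain, virq, nr_irqs);
	if (err)
		return err;

	/* Bridge-specific hwirq translation would be set up here. */
	return 0;
}

static void demo_domain_free(struct irq_domain *domain, unsigned int virq,
			     unsigned int nr_irqs)
{
	platform_msi_domain_free(domain, virq, nr_irqs);
}

static const struct irq_domain_ops demo_domain_ops = {
	.alloc	= demo_domain_alloc,
	.free	= demo_domain_free,
};

static int demo_bridge_init(struct device *dev, struct demo_bridge *bridge)
{
	struct irq_domain *d;

	d = platform_msi_create_device_domain(dev, 64, demo_write_msg,
					      &demo_domain_ops, bridge);
	return d ? 0 : -ENOMEM;
}
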
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 176b59f5b..f437afa17 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -26,6 +26,7 @@
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
+#include <linux/property.h>
#include "base.h"
#include "power/power.h"
@@ -117,6 +118,26 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
EXPORT_SYMBOL_GPL(platform_get_irq);
/**
+ * platform_irq_count - Count the number of IRQs a platform device uses
+ * @dev: platform device
+ *
+ * Return: Number of IRQs a platform device uses or EPROBE_DEFER
+ */
+int platform_irq_count(struct platform_device *dev)
+{
+ int ret, nr = 0;
+
+ while ((ret = platform_get_irq(dev, nr)) >= 0)
+ nr++;
+
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(platform_irq_count);
+
+/**
* platform_get_resource_byname - get a resource for a device by name
* @dev: platform device
* @type: resource type
@@ -299,6 +320,22 @@ int platform_device_add_data(struct platform_device *pdev, const void *data,
EXPORT_SYMBOL_GPL(platform_device_add_data);
/**
+ * platform_device_add_properties - add built-in properties to a platform device
+ * @pdev: platform device to add properties to
+ * @pset: properties to add
+ *
+ * The function will take a deep copy of the properties in @pset and attach
+ * the copy to the platform device. The memory associated with properties
+ * will be freed when the platform device is released.
+ */
+int platform_device_add_properties(struct platform_device *pdev,
+ const struct property_set *pset)
+{
+ return device_add_property_set(&pdev->dev, pset);
+}
+EXPORT_SYMBOL_GPL(platform_device_add_properties);
+
+/**
* platform_device_add - add a platform device to device hierarchy
* @pdev: platform device we're adding
*
@@ -409,6 +446,8 @@ void platform_device_del(struct platform_device *pdev)
if (r->parent)
release_resource(r);
}
+
+ device_remove_property_set(&pdev->dev);
}
}
EXPORT_SYMBOL_GPL(platform_device_del);
@@ -487,6 +526,12 @@ struct platform_device *platform_device_register_full(
if (ret)
goto err;
+ if (pdevinfo->pset) {
+ ret = platform_device_add_properties(pdev, pdevinfo->pset);
+ if (ret)
+ goto err;
+ }
+
ret = platform_device_add(pdev);
if (ret) {
err:
@@ -557,7 +602,6 @@ static void platform_drv_shutdown(struct device *_dev)
if (drv->shutdown)
drv->shutdown(dev);
- dev_pm_domain_detach(_dev, true);
}
/**
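
A short, hypothetical probe sketch using the new platform_irq_count() helper;
note that it can return -EPROBE_DEFER when an interrupt mapping is not ready
yet:

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	int nr, i, irq;

	nr = platform_irq_count(pdev);
	if (nr < 0)
		return nr;		/* may be -EPROBE_DEFER */

	for (i = 0; i < nr; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			return irq;
		/* request_irq(irq, ...) for each vector here */
	}

	return 0;
}

Built-in properties attached with platform_device_add_properties() (or via
pdevinfo->pset) are removed again by the device_remove_property_set() call now
made from platform_device_del(), so no explicit cleanup is needed.
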
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 60ee5591e..272a52eba 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -15,6 +15,7 @@
#include <linux/clkdev.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_PM_CLK
@@ -348,7 +349,7 @@ static int pm_clk_notify(struct notifier_block *nb,
if (error)
break;
- dev->pm_domain = clknb->pm_domain;
+ dev_pm_domain_set(dev, clknb->pm_domain);
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
pm_clk_add(dev, *con_id);
@@ -361,7 +362,7 @@ static int pm_clk_notify(struct notifier_block *nb,
if (dev->pm_domain != clknb->pm_domain)
break;
- dev->pm_domain = NULL;
+ dev_pm_domain_set(dev, NULL);
pm_clk_destroy(dev);
break;
}
@@ -473,6 +474,7 @@ static int pm_clk_notify(struct notifier_block *nb,
enable_clock(dev, NULL);
}
break;
+ case BUS_NOTIFY_DRIVER_NOT_BOUND:
case BUS_NOTIFY_UNBOUND_DRIVER:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index f32b802b9..f6a9ad52c 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -14,6 +14,8 @@
#include <linux/acpi.h>
#include <linux/pm_domain.h>
+#include "power.h"
+
/**
* dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
* @dev: Device to handle.
@@ -112,7 +114,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
/**
* dev_pm_domain_detach - Detach a device from its PM domain.
- * @dev: Device to attach.
+ * @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
*
* This functions will reverse the actions from dev_pm_domain_attach() and thus
@@ -128,3 +130,25 @@ void dev_pm_domain_detach(struct device *dev, bool power_off)
dev->pm_domain->detach(dev, power_off);
}
EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
+
+/**
+ * dev_pm_domain_set - Set PM domain of a device.
+ * @dev: Device whose PM domain is to be set.
+ * @pd: PM domain to be set, or NULL.
+ *
+ * Sets the PM domain the device belongs to. The PM domain of a device needs
+ * to be set before its probe finishes (i.e. before it is bound to a driver).
+ *
+ * This function must be called with the device lock held.
+ */
+void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd)
+{
+ if (dev->pm_domain == pd)
+ return;
+
+ WARN(pd && device_is_bound(dev),
+ "PM domains can only be changed for unbound devices\n");
+ dev->pm_domain = pd;
+ device_pm_check_callbacks(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_set);
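
A minimal sketch of a PM-domain provider using dev_pm_domain_set(); the
demo_* callbacks are placeholders, and the caller is assumed to hold the
device lock and to run before the device is bound, as required above:

#include <linux/device.h>
#include <linux/pm_domain.h>

static int demo_domain_runtime_suspend(struct device *dev)
{
	/* Cut power/clocks for the whole demo domain here. */
	return 0;
}

static int demo_domain_runtime_resume(struct device *dev)
{
	return 0;
}

static struct dev_pm_domain demo_pm_domain = {
	.ops = {
		.runtime_suspend = demo_domain_runtime_suspend,
		.runtime_resume  = demo_domain_runtime_resume,
	},
};

static void demo_attach_pm_domain(struct device *dev)
{
	dev_pm_domain_set(dev, &demo_pm_domain);
}

static void demo_detach_pm_domain(struct device *dev)
{
	dev_pm_domain_set(dev, NULL);
}
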
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 65f50eccd..301b785f9 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -20,6 +20,8 @@
#include <linux/suspend.h>
#include <linux/export.h>
+#include "power.h"
+
#define GENPD_RETRY_MAX_MS 250 /* Approximate */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
@@ -160,7 +162,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
/**
* genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
- * @genpd: PM domait to power off.
+ * @genpd: PM domain to power off.
*
* Queue up the execution of genpd_poweroff() unless it's already been done
* before.
@@ -170,16 +172,15 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
queue_work(pm_wq, &genpd->power_off_work);
}
-static int genpd_poweron(struct generic_pm_domain *genpd);
-
/**
- * __genpd_poweron - Restore power to a given PM domain and its masters.
+ * genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
+ * @depth: nesting count for lockdep.
*
* Restore power to @genpd and all of its masters so that it is possible to
* resume a device belonging to it.
*/
-static int __genpd_poweron(struct generic_pm_domain *genpd)
+static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
{
struct gpd_link *link;
int ret = 0;
@@ -194,11 +195,16 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
* with it.
*/
list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_inc(link->master);
+ struct generic_pm_domain *master = link->master;
+
+ genpd_sd_counter_inc(master);
+
+ mutex_lock_nested(&master->lock, depth + 1);
+ ret = genpd_poweron(master, depth + 1);
+ mutex_unlock(&master->lock);
- ret = genpd_poweron(link->master);
if (ret) {
- genpd_sd_counter_dec(link->master);
+ genpd_sd_counter_dec(master);
goto err;
}
}
@@ -221,20 +227,6 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
return ret;
}
-/**
- * genpd_poweron - Restore power to a given PM domain and its masters.
- * @genpd: PM domain to power up.
- */
-static int genpd_poweron(struct generic_pm_domain *genpd)
-{
- int ret;
-
- mutex_lock(&genpd->lock);
- ret = __genpd_poweron(genpd);
- mutex_unlock(&genpd->lock);
- return ret;
-}
-
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
@@ -482,7 +474,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
}
mutex_lock(&genpd->lock);
- ret = __genpd_poweron(genpd);
+ ret = genpd_poweron(genpd, 0);
mutex_unlock(&genpd->lock);
if (ret)
@@ -1188,10 +1180,11 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
}
dev->power.subsys_data->domain_data = &gpd_data->base;
- dev->pm_domain = &genpd->domain;
spin_unlock_irq(&dev->power.lock);
+ dev_pm_domain_set(dev, &genpd->domain);
+
return gpd_data;
err_free:
@@ -1205,9 +1198,10 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
static void genpd_free_dev_data(struct device *dev,
struct generic_pm_domain_data *gpd_data)
{
+ dev_pm_domain_set(dev, NULL);
+
spin_lock_irq(&dev->power.lock);
- dev->pm_domain = NULL;
dev->power.subsys_data->domain_data = NULL;
spin_unlock_irq(&dev->power.lock);
@@ -1263,6 +1257,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
return ret;
}
+EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
/**
* pm_genpd_remove_device - Remove a device from an I/O PM domain.
@@ -1313,6 +1308,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
return ret;
}
+EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
@@ -1333,8 +1329,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
if (!link)
return -ENOMEM;
- mutex_lock(&genpd->lock);
- mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+ mutex_lock(&subdomain->lock);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
if (genpd->status == GPD_STATE_POWER_OFF
&& subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1357,8 +1353,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
genpd_sd_counter_inc(genpd);
out:
- mutex_unlock(&subdomain->lock);
mutex_unlock(&genpd->lock);
+ mutex_unlock(&subdomain->lock);
if (ret)
kfree(link);
return ret;
@@ -1379,7 +1375,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;
- mutex_lock(&genpd->lock);
+ mutex_lock(&subdomain->lock);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
@@ -1392,22 +1389,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (link->slave != subdomain)
continue;
- mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
-
list_del(&link->master_node);
list_del(&link->slave_node);
kfree(link);
if (subdomain->status != GPD_STATE_POWER_OFF)
genpd_sd_counter_dec(genpd);
- mutex_unlock(&subdomain->lock);
-
ret = 0;
break;
}
out:
mutex_unlock(&genpd->lock);
+ mutex_unlock(&subdomain->lock);
return ret;
}
@@ -1812,8 +1806,10 @@ int genpd_dev_pm_attach(struct device *dev)
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
- ret = genpd_poweron(pd);
+ mutex_lock(&pd->lock);
+ ret = genpd_poweron(pd, 0);
+ mutex_unlock(&pd->lock);
out:
return ret ? -EPROBE_DEFER : 0;
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 1710c26ba..6e7c3ccea 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -125,6 +125,7 @@ void device_pm_add(struct device *dev)
{
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+ device_pm_check_callbacks(dev);
mutex_lock(&dpm_list_mtx);
if (dev->parent && dev->parent->power.is_prepared)
dev_warn(dev, "parent %s should not be sleeping\n",
@@ -147,6 +148,7 @@ void device_pm_remove(struct device *dev)
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
pm_runtime_remove(dev);
+ device_pm_check_callbacks(dev);
}
/**
@@ -963,6 +965,9 @@ void dpm_complete(pm_message_t state)
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
+
+ /* Allow device probing and trigger re-probing of deferred devices */
+ device_unblock_probing();
trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
@@ -1569,6 +1574,11 @@ static int device_prepare(struct device *dev, pm_message_t state)
dev->power.wakeup_path = device_may_wakeup(dev);
+ if (dev->power.no_pm_callbacks) {
+ ret = 1; /* Let device go direct_complete */
+ goto unlock;
+ }
+
if (dev->pm_domain) {
info = "preparing power domain ";
callback = dev->pm_domain->ops.prepare;
@@ -1591,6 +1601,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (callback)
ret = callback(dev);
+unlock:
device_unlock(dev);
if (ret < 0) {
@@ -1624,6 +1635,20 @@ int dpm_prepare(pm_message_t state)
trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
might_sleep();
+ /*
+ * Give a chance for the known devices to complete their probes, before
+ * disabling probing of devices. This sync point is important at least
+ * at boot time and during hibernation restore.
+ */
+ wait_for_device_probe();
+ /*
+ * It is unsafe if device probing happens during suspend or hibernation;
+ * system behavior would be unpredictable in that case. So prohibit
+ * device probing here and defer probes instead. The normal behavior
+ * will be restored in dpm_complete().
+ */
+ device_block_probing();
+
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.next);
@@ -1719,3 +1744,30 @@ void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
+
+static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
+{
+ if (!ops)
+ return true;
+
+ return !ops->prepare &&
+ !ops->suspend &&
+ !ops->suspend_late &&
+ !ops->suspend_noirq &&
+ !ops->resume_noirq &&
+ !ops->resume_early &&
+ !ops->resume &&
+ !ops->complete;
+}
+
+void device_pm_check_callbacks(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ dev->power.no_pm_callbacks =
+ (!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
+ (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
+ (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
+ (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
+ (!dev->driver || pm_ops_is_empty(dev->driver->pm));
+ spin_unlock_irq(&dev->power.lock);
+}
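
Note that pm_ops_is_empty() only inspects the system-sleep callbacks, so a
dev_pm_ops that provides nothing but runtime-PM handlers still counts as
empty for this check. A hypothetical driver-side illustration, assuming the
device's bus, class, type and PM domain contribute no sleep callbacks either:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int demo_runtime_suspend(struct device *dev)
{
	/* Gate clocks, drop power, etc. */
	return 0;
}

static int demo_runtime_resume(struct device *dev)
{
	return 0;
}

/*
 * Only runtime-PM callbacks are set; every pointer inspected by
 * pm_ops_is_empty() stays NULL, so power.no_pm_callbacks can be set and
 * the device may take the direct_complete shortcut over system suspend.
 */
static const struct dev_pm_ops demo_pm_ops = {
	SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};
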
diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile
index 33c1e18c4..19837ef04 100644
--- a/drivers/base/power/opp/Makefile
+++ b/drivers/base/power/opp/Makefile
@@ -1,2 +1,3 @@
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
obj-y += core.o cpu.o
+obj-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index b8e76f750..cf351d3da 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -463,6 +463,7 @@ static void _kfree_list_dev_rcu(struct rcu_head *head)
static void _remove_list_dev(struct device_list_opp *list_dev,
struct device_opp *dev_opp)
{
+ opp_debug_unregister(list_dev, dev_opp);
list_del(&list_dev->node);
call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
_kfree_list_dev_rcu);
@@ -472,6 +473,7 @@ struct device_list_opp *_add_list_dev(const struct device *dev,
struct device_opp *dev_opp)
{
struct device_list_opp *list_dev;
+ int ret;
list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
if (!list_dev)
@@ -481,6 +483,12 @@ struct device_list_opp *_add_list_dev(const struct device *dev,
list_dev->dev = dev;
list_add_rcu(&list_dev->node, &dev_opp->dev_list);
+ /* Create debugfs entries for the dev_opp */
+ ret = opp_debug_register(list_dev, dev_opp);
+ if (ret)
+ dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
+ __func__, ret);
+
return list_dev;
}
@@ -551,6 +559,12 @@ static void _remove_device_opp(struct device_opp *dev_opp)
if (!list_empty(&dev_opp->opp_list))
return;
+ if (dev_opp->supported_hw)
+ return;
+
+ if (dev_opp->prop_name)
+ return;
+
list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
node);
@@ -596,6 +610,7 @@ static void _opp_remove(struct device_opp *dev_opp,
*/
if (notify)
srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+ opp_debug_remove_one(opp);
list_del_rcu(&opp->node);
call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
@@ -673,6 +688,7 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
{
struct dev_pm_opp *opp;
struct list_head *head = &dev_opp->opp_list;
+ int ret;
/*
* Insert new OPP in order of increasing frequency and discard if
@@ -703,6 +719,11 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
new_opp->dev_opp = dev_opp;
list_add_rcu(&new_opp->node, head);
+ ret = opp_debug_create_one(new_opp, dev_opp);
+ if (ret)
+ dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
+ __func__, ret);
+
return 0;
}
@@ -776,35 +797,49 @@ unlock:
}
/* TODO: Support multiple regulators */
-static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
+static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+ struct device_opp *dev_opp)
{
u32 microvolt[3] = {0};
u32 val;
int count, ret;
+ struct property *prop = NULL;
+ char name[NAME_MAX];
+
+ /* Search for "opp-microvolt-<name>" */
+ if (dev_opp->prop_name) {
+ snprintf(name, sizeof(name), "opp-microvolt-%s",
+ dev_opp->prop_name);
+ prop = of_find_property(opp->np, name, NULL);
+ }
- /* Missing property isn't a problem, but an invalid entry is */
- if (!of_find_property(opp->np, "opp-microvolt", NULL))
- return 0;
+ if (!prop) {
+ /* Search for "opp-microvolt" */
+ sprintf(name, "opp-microvolt");
+ prop = of_find_property(opp->np, name, NULL);
- count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+ /* Missing property isn't a problem, but an invalid entry is */
+ if (!prop)
+ return 0;
+ }
+
+ count = of_property_count_u32_elems(opp->np, name);
if (count < 0) {
- dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
- __func__, count);
+ dev_err(dev, "%s: Invalid %s property (%d)\n",
+ __func__, name, count);
return count;
}
/* There can be one or three elements here */
if (count != 1 && count != 3) {
- dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
- __func__, count);
+ dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
+ __func__, name, count);
return -EINVAL;
}
- ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
- count);
+ ret = of_property_read_u32_array(opp->np, name, microvolt, count);
if (ret) {
- dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
- ret);
+ dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
return -EINVAL;
}
@@ -812,13 +847,272 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
opp->u_volt_min = microvolt[1];
opp->u_volt_max = microvolt[2];
- if (!of_property_read_u32(opp->np, "opp-microamp", &val))
+ /* Search for "opp-microamp-<name>" */
+ prop = NULL;
+ if (dev_opp->prop_name) {
+ snprintf(name, sizeof(name), "opp-microamp-%s",
+ dev_opp->prop_name);
+ prop = of_find_property(opp->np, name, NULL);
+ }
+
+ if (!prop) {
+ /* Search for "opp-microamp" */
+ sprintf(name, "opp-microamp");
+ prop = of_find_property(opp->np, name, NULL);
+ }
+
+ if (prop && !of_property_read_u32(opp->np, name, &val))
opp->u_amp = val;
return 0;
}
/**
+ * dev_pm_opp_set_supported_hw() - Set supported platforms
+ * @dev: Device for which supported-hw has to be set.
+ * @versions: Array of hierarchy of versions to match.
+ * @count: Number of elements in the array.
+ *
+ * This is required only for the V2 bindings, and it enables a platform to
+ * specify the hierarchy of versions it supports. The OPP layer will then enable
+ * only those OPPs that are available for those versions, based on each OPP's
+ * 'opp-supported-hw' property.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
+ unsigned int count)
+{
+ struct device_opp *dev_opp;
+ int ret = 0;
+
+ /* Hold our list modification lock here */
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = _add_device_opp(dev);
+ if (!dev_opp) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating dev_opp */
+ WARN_ON(!list_empty(&dev_opp->opp_list));
+
+ /* Do we already have a version hierarchy associated with dev_opp? */
+ if (dev_opp->supported_hw) {
+ dev_err(dev, "%s: Already have supported hardware list\n",
+ __func__);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ dev_opp->supported_hw = kmemdup(versions, count * sizeof(*versions),
+ GFP_KERNEL);
+ if (!dev_opp->supported_hw) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev_opp->supported_hw_count = count;
+ mutex_unlock(&dev_opp_list_lock);
+ return 0;
+
+err:
+ _remove_device_opp(dev_opp);
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
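For context, a minimal usage sketch (hypothetical driver, function name and version values; not part of this patch): a platform driver reads its hardware revision from fuses and hands the version hierarchy to the OPP core before parsing the table, releasing it again with dev_pm_opp_put_supported_hw() on the error path.

/* Hedged sketch: each array element is a bitmask matched against the
 * corresponding cell of an OPP's 'opp-supported-hw' property.
 */
static int foo_hw_init(struct device *cpu_dev)
{
	u32 versions[] = { BIT(2), BIT(0) };	/* e.g. { cuts, substrate } */
	int ret;

	ret = dev_pm_opp_set_supported_hw(cpu_dev, versions,
					  ARRAY_SIZE(versions));
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (ret)
		dev_pm_opp_put_supported_hw(cpu_dev);

	return ret;
}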
+
+/**
+ * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
+ * @dev: Device for which supported-hw was set earlier.
+ *
+ * This is required only for the V2 bindings, and is called for a matching
+ * dev_pm_opp_set_supported_hw(). Until this is called, the device_opp structure
+ * will not be freed.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_supported_hw(struct device *dev)
+{
+ struct device_opp *dev_opp;
+
+ /* Hold our list modification lock here */
+ mutex_lock(&dev_opp_list_lock);
+
+ /* Check for existing list for 'dev' first */
+ dev_opp = _find_device_opp(dev);
+ if (IS_ERR(dev_opp)) {
+ dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating dev_opp */
+ WARN_ON(!list_empty(&dev_opp->opp_list));
+
+ if (!dev_opp->supported_hw) {
+ dev_err(dev, "%s: Doesn't have supported hardware list\n",
+ __func__);
+ goto unlock;
+ }
+
+ kfree(dev_opp->supported_hw);
+ dev_opp->supported_hw = NULL;
+ dev_opp->supported_hw_count = 0;
+
+ /* Try freeing device_opp if this was the last blocking resource */
+ _remove_device_opp(dev_opp);
+
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
+
+/**
+ * dev_pm_opp_set_prop_name() - Set prop-extn name
+ * @dev: Device for which the regulator has to be set.
+ * @name: name to postfix to properties.
+ *
+ * This is required only for the V2 bindings, and it enables a platform to
+ * specify the extn to be used for certain property names. The properties to
+ * which the extension applies are opp-microvolt and opp-microamp. The OPP core
+ * will postfix these property names with -<name> while looking them up.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+ struct device_opp *dev_opp;
+ int ret = 0;
+
+ /* Hold our list modification lock here */
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = _add_device_opp(dev);
+ if (!dev_opp) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating dev_opp */
+ WARN_ON(!list_empty(&dev_opp->opp_list));
+
+ /* Do we already have a prop-name associated with dev_opp? */
+ if (dev_opp->prop_name) {
+ dev_err(dev, "%s: Already have prop-name %s\n", __func__,
+ dev_opp->prop_name);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ dev_opp->prop_name = kstrdup(name, GFP_KERNEL);
+ if (!dev_opp->prop_name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mutex_unlock(&dev_opp_list_lock);
+ return 0;
+
+err:
+ _remove_device_opp(dev_opp);
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
+
+/**
+ * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
+ * @dev: Device for which the prop-name was set earlier.
+ *
+ * This is required only for the V2 bindings, and is called for a matching
+ * dev_pm_opp_set_prop_name(). Until this is called, the device_opp structure
+ * will not be freed.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_prop_name(struct device *dev)
+{
+ struct device_opp *dev_opp;
+
+ /* Hold our list modification lock here */
+ mutex_lock(&dev_opp_list_lock);
+
+ /* Check for existing list for 'dev' first */
+ dev_opp = _find_device_opp(dev);
+ if (IS_ERR(dev_opp)) {
+ dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating dev_opp */
+ WARN_ON(!list_empty(&dev_opp->opp_list));
+
+ if (!dev_opp->prop_name) {
+ dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
+ goto unlock;
+ }
+
+ kfree(dev_opp->prop_name);
+ dev_opp->prop_name = NULL;
+
+ /* Try freeing device_opp if this was the last blocking resource */
+ _remove_device_opp(dev_opp);
+
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
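Likewise for the prop-name extension, a hedged sketch (the "speedo0" name and the driver function are invented for illustration): once the name is set, opp_parse_supplies() above looks for "opp-microvolt-speedo0" and "opp-microamp-speedo0" first and only then falls back to the plain property names.

/* Hedged sketch: pick the per-chip property variant before the table is
 * parsed, and drop it again if parsing fails.
 */
static int foo_prop_init(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_set_prop_name(dev, "speedo0");
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_prop_name(dev);

	return ret;
}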
+
+static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
+ struct device_node *np)
+{
+ unsigned int count = dev_opp->supported_hw_count;
+ u32 version;
+ int ret;
+
+ if (!dev_opp->supported_hw)
+ return true;
+
+ while (count--) {
+ ret = of_property_read_u32_index(np, "opp-supported-hw", count,
+ &version);
+ if (ret) {
+ dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
+ __func__, count, ret);
+ return false;
+ }
+
+ /* Both of these are bitwise masks of the versions */
+ if (!(version & dev_opp->supported_hw[count]))
+ return false;
+ }
+
+ return true;
+}
+
+/**
* _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
* @dev: device for which we do this operation
* @np: device node
@@ -864,6 +1158,12 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
goto free_opp;
}
+ /* Check if the OPP supports hardware's hierarchy of versions or not */
+ if (!_opp_is_supported(dev, dev_opp, np)) {
+ dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+ goto free_opp;
+ }
+
/*
* Rate is defined as an unsigned long in clk API, and so casting
* explicitly to its type. Must be fixed once rate is 64 bit
@@ -879,7 +1179,7 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
if (!of_property_read_u32(np, "clock-latency-ns", &val))
new_opp->clock_latency_ns = val;
- ret = opp_parse_supplies(new_opp, dev);
+ ret = opp_parse_supplies(new_opp, dev, dev_opp);
if (ret)
goto free_opp;
@@ -889,12 +1189,14 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
/* OPP to select on device suspend */
if (of_property_read_bool(np, "opp-suspend")) {
- if (dev_opp->suspend_opp)
+ if (dev_opp->suspend_opp) {
dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
__func__, dev_opp->suspend_opp->rate,
new_opp->rate);
- else
+ } else {
+ new_opp->suspend = true;
dev_opp->suspend_opp = new_opp;
+ }
}
if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index 7b445e88a..9f0c15570 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -214,7 +214,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
/*
* Works only for OPP v2 bindings.
*
- * cpumask should be already set to mask of cpu_dev->id.
* Returns -ENOENT if operating-points-v2 bindings aren't supported.
*/
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
@@ -230,6 +229,8 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask
return -ENOENT;
}
+ cpumask_set_cpu(cpu_dev->id, cpumask);
+
/* OPPs are shared ? */
if (!of_property_read_bool(np, "opp-shared"))
goto put_cpu_node;
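A hedged caller-side sketch of the behavioural change above (hypothetical cpufreq init path): since the helper now seeds the mask with cpu_dev->id itself, the caller no longer has to pre-set it.

/* Sketch: build policy->cpus from the v2 'opp-shared' information. */
static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret == -ENOENT) {
		/* v1 bindings: assume the policy CPU stands alone */
		cpumask_set_cpu(policy->cpu, policy->cpus);
		return 0;
	}

	return ret;
}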
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c
new file mode 100644
index 000000000..ddfe4773e
--- /dev/null
+++ b/drivers/base/power/opp/debugfs.c
@@ -0,0 +1,219 @@
+/*
+ * Generic OPP debugfs interface
+ *
+ * Copyright (C) 2015-2016 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/limits.h>
+
+#include "opp.h"
+
+static struct dentry *rootdir;
+
+static void opp_set_dev_name(const struct device *dev, char *name)
+{
+ if (dev->parent)
+ snprintf(name, NAME_MAX, "%s-%s", dev_name(dev->parent),
+ dev_name(dev));
+ else
+ snprintf(name, NAME_MAX, "%s", dev_name(dev));
+}
+
+void opp_debug_remove_one(struct dev_pm_opp *opp)
+{
+ debugfs_remove_recursive(opp->dentry);
+}
+
+int opp_debug_create_one(struct dev_pm_opp *opp, struct device_opp *dev_opp)
+{
+ struct dentry *pdentry = dev_opp->dentry;
+ struct dentry *d;
+ char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */
+
+ /* Rate is unique to each OPP, use it to give opp-name */
+ snprintf(name, sizeof(name), "opp:%lu", opp->rate);
+
+ /* Create per-opp directory */
+ d = debugfs_create_dir(name, pdentry);
+ if (!d)
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available))
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic))
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo))
+ return -ENOMEM;
+
+ if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp))
+ return -ENOMEM;
+
+ if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
+ &opp->clock_latency_ns))
+ return -ENOMEM;
+
+ opp->dentry = d;
+ return 0;
+}
+
+static int device_opp_debug_create_dir(struct device_list_opp *list_dev,
+ struct device_opp *dev_opp)
+{
+ const struct device *dev = list_dev->dev;
+ struct dentry *d;
+
+ opp_set_dev_name(dev, dev_opp->dentry_name);
+
+ /* Create device specific directory */
+ d = debugfs_create_dir(dev_opp->dentry_name, rootdir);
+ if (!d) {
+ dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
+ return -ENOMEM;
+ }
+
+ list_dev->dentry = d;
+ dev_opp->dentry = d;
+
+ return 0;
+}
+
+static int device_opp_debug_create_link(struct device_list_opp *list_dev,
+ struct device_opp *dev_opp)
+{
+ const struct device *dev = list_dev->dev;
+ char name[NAME_MAX];
+ struct dentry *d;
+
+ opp_set_dev_name(list_dev->dev, name);
+
+ /* Create device specific directory link */
+ d = debugfs_create_symlink(name, rootdir, dev_opp->dentry_name);
+ if (!d) {
+ dev_err(dev, "%s: Failed to create link\n", __func__);
+ return -ENOMEM;
+ }
+
+ list_dev->dentry = d;
+
+ return 0;
+}
+
+/**
+ * opp_debug_register - add a device opp node to the debugfs 'opp' directory
+ * @list_dev: list-dev pointer for device
+ * @dev_opp: the device-opp being added
+ *
+ * Dynamically adds a device-specific directory to the debugfs 'opp' directory.
+ * If the device-opp is shared with other devices, links will be created for
+ * all devices except the first.
+ *
+ * Return: 0 on success, otherwise negative error.
+ */
+int opp_debug_register(struct device_list_opp *list_dev,
+ struct device_opp *dev_opp)
+{
+ if (!rootdir) {
+ pr_debug("%s: Uninitialized rootdir\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dev_opp->dentry)
+ return device_opp_debug_create_link(list_dev, dev_opp);
+
+ return device_opp_debug_create_dir(list_dev, dev_opp);
+}
+
+static void opp_migrate_dentry(struct device_list_opp *list_dev,
+ struct device_opp *dev_opp)
+{
+ struct device_list_opp *new_dev;
+ const struct device *dev;
+ struct dentry *dentry;
+
+ /* Look for next list-dev */
+ list_for_each_entry(new_dev, &dev_opp->dev_list, node)
+ if (new_dev != list_dev)
+ break;
+
+ /* new_dev is guaranteed to be valid here */
+ dev = new_dev->dev;
+ debugfs_remove_recursive(new_dev->dentry);
+
+ opp_set_dev_name(dev, dev_opp->dentry_name);
+
+ dentry = debugfs_rename(rootdir, list_dev->dentry, rootdir,
+ dev_opp->dentry_name);
+ if (!dentry) {
+ dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
+ __func__, dev_name(list_dev->dev), dev_name(dev));
+ return;
+ }
+
+ new_dev->dentry = dentry;
+ dev_opp->dentry = dentry;
+}
+
+/**
+ * opp_debug_unregister - remove a device opp node from debugfs opp directory
+ * @list_dev: list-dev pointer for device
+ * @dev_opp: the device-opp being removed
+ *
+ * Dynamically removes the device-specific directory from the debugfs 'opp' directory.
+ */
+void opp_debug_unregister(struct device_list_opp *list_dev,
+ struct device_opp *dev_opp)
+{
+ if (list_dev->dentry == dev_opp->dentry) {
+ /* Move the real dentry object under another device */
+ if (!list_is_singular(&dev_opp->dev_list)) {
+ opp_migrate_dentry(list_dev, dev_opp);
+ goto out;
+ }
+ dev_opp->dentry = NULL;
+ }
+
+ debugfs_remove_recursive(list_dev->dentry);
+
+out:
+ list_dev->dentry = NULL;
+}
+
+static int __init opp_debug_init(void)
+{
+ /* Create /sys/kernel/debug/opp directory */
+ rootdir = debugfs_create_dir("opp", NULL);
+ if (!rootdir) {
+ pr_err("%s: Failed to create root directory\n", __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+core_initcall(opp_debug_init);
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index 7366b2aa8..690638ef3 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/limits.h>
#include <linux/pm_opp.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
@@ -50,9 +51,10 @@ extern struct mutex dev_opp_list_lock;
* are protected by the dev_opp_list_lock for integrity.
* IMPORTANT: the opp nodes should be maintained in increasing
* order.
- * @dynamic: not-created from static DT entries.
* @available: true/false - marks if this OPP as available or not
+ * @dynamic: not-created from static DT entries.
* @turbo: true if turbo (boost) OPP
+ * @suspend: true if suspend OPP
* @rate: Frequency in hertz
* @u_volt: Target voltage in microvolts corresponding to this OPP
* @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
@@ -63,6 +65,7 @@ extern struct mutex dev_opp_list_lock;
* @dev_opp: points back to the device_opp struct this opp belongs to
* @rcu_head: RCU callback head used for deferred freeing
* @np: OPP's device node.
+ * @dentry: debugfs dentry pointer (per opp)
*
* This structure stores the OPP information for a given device.
*/
@@ -72,6 +75,7 @@ struct dev_pm_opp {
bool available;
bool dynamic;
bool turbo;
+ bool suspend;
unsigned long rate;
unsigned long u_volt;
@@ -84,6 +88,10 @@ struct dev_pm_opp {
struct rcu_head rcu_head;
struct device_node *np;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+#endif
};
/**
@@ -91,6 +99,7 @@ struct dev_pm_opp {
* @node: list node
* @dev: device to which the struct object belongs
* @rcu_head: RCU callback head used for deferred freeing
+ * @dentry: debugfs dentry pointer (per device)
*
* This is an internal data structure maintaining the list of devices that are
* managed by 'struct device_opp'.
@@ -99,6 +108,10 @@ struct device_list_opp {
struct list_head node;
const struct device *dev;
struct rcu_head rcu_head;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+#endif
};
/**
@@ -113,7 +126,14 @@ struct device_list_opp {
* @dev_list: list of devices that share these OPPs
* @opp_list: list of opps
* @np: struct device_node pointer for opp's DT node.
+ * @clock_latency_ns_max: Max clock latency in nanoseconds.
* @shared_opp: OPP is shared between multiple devices.
+ * @suspend_opp: Pointer to OPP to be used during device suspend.
+ * @supported_hw: Array of version number to support.
+ * @supported_hw_count: Number of elements in supported_hw array.
+ * @prop_name: A name to postfix to many DT properties, while parsing them.
+ * @dentry: debugfs dentry pointer of the real device directory (not links).
+ * @dentry_name: Name of the real dentry.
*
* This is an internal data structure maintaining the link to opps attached to
* a device. This structure is not meant to be shared to users as it is
@@ -135,6 +155,15 @@ struct device_opp {
unsigned long clock_latency_ns_max;
bool shared_opp;
struct dev_pm_opp *suspend_opp;
+
+ unsigned int *supported_hw;
+ unsigned int supported_hw_count;
+ const char *prop_name;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+ char dentry_name[NAME_MAX];
+#endif
};
/* Routines internal to opp core */
@@ -143,4 +172,26 @@ struct device_list_opp *_add_list_dev(const struct device *dev,
struct device_opp *dev_opp);
struct device_node *_of_get_opp_desc_node(struct device *dev);
+#ifdef CONFIG_DEBUG_FS
+void opp_debug_remove_one(struct dev_pm_opp *opp);
+int opp_debug_create_one(struct dev_pm_opp *opp, struct device_opp *dev_opp);
+int opp_debug_register(struct device_list_opp *list_dev,
+ struct device_opp *dev_opp);
+void opp_debug_unregister(struct device_list_opp *list_dev,
+ struct device_opp *dev_opp);
+#else
+static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
+
+static inline int opp_debug_create_one(struct dev_pm_opp *opp,
+ struct device_opp *dev_opp)
+{ return 0; }
+static inline int opp_debug_register(struct device_list_opp *list_dev,
+ struct device_opp *dev_opp)
+{ return 0; }
+
+static inline void opp_debug_unregister(struct device_list_opp *list_dev,
+ struct device_opp *dev_opp)
+{ }
+#endif /* CONFIG_DEBUG_FS */
+
#endif /* __DRIVER_OPP_H__ */
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 998fa6b23..50e30e7b0 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -18,6 +18,7 @@ static inline void pm_runtime_early_init(struct device *dev)
}
extern void pm_runtime_init(struct device *dev);
+extern void pm_runtime_reinit(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
struct wake_irq {
@@ -84,6 +85,7 @@ static inline void pm_runtime_early_init(struct device *dev)
}
static inline void pm_runtime_init(struct device *dev) {}
+static inline void pm_runtime_reinit(struct device *dev) {}
static inline void pm_runtime_remove(struct device *dev) {}
static inline int dpm_sysfs_add(struct device *dev) { return 0; }
@@ -123,6 +125,7 @@ extern void device_pm_remove(struct device *);
extern void device_pm_move_before(struct device *, struct device *);
extern void device_pm_move_after(struct device *, struct device *);
extern void device_pm_move_last(struct device *);
+extern void device_pm_check_callbacks(struct device *dev);
#else /* !CONFIG_PM_SLEEP */
@@ -141,6 +144,8 @@ static inline void device_pm_move_after(struct device *deva,
struct device *devb) {}
static inline void device_pm_move_last(struct device *dev) {}
+static inline void device_pm_check_callbacks(struct device *dev) {}
+
#endif /* !CONFIG_PM_SLEEP */
static inline void device_pm_init(struct device *dev)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index e1a10a03d..4c7055009 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -966,6 +966,30 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
+ * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
+ * @dev: Device to handle.
+ *
+ * Return -EINVAL if runtime PM is disabled for the device.
+ *
+ * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
+ * and the runtime PM usage counter is nonzero, increment the counter and
+ * return 1. Otherwise return 0 without changing the counter.
+ */
+int pm_runtime_get_if_in_use(struct device *dev)
+{
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ retval = dev->power.disable_depth > 0 ? -EINVAL :
+ dev->power.runtime_status == RPM_ACTIVE
+ && atomic_inc_not_zero(&dev->power.usage_count);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
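A minimal caller-side sketch (driver and hardware helper are hypothetical): the new helper suits hot paths that only want to touch the hardware when it is already powered and in use, without forcing a resume.

/* Sketch: skip the work unless the device is RPM_ACTIVE with users. */
static void foo_opportunistic_sync(struct device *dev)
{
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;		/* suspended, idle, or runtime PM disabled */

	foo_hw_sync(dev);	/* hypothetical register access */

	pm_runtime_put(dev);
}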
+
+/**
* __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
* @status: New runtime PM status of the device.
@@ -1390,18 +1414,32 @@ void pm_runtime_init(struct device *dev)
}
/**
+ * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
+ * @dev: Device object to re-initialize.
+ */
+void pm_runtime_reinit(struct device *dev)
+{
+ if (!pm_runtime_enabled(dev)) {
+ if (dev->power.runtime_status == RPM_ACTIVE)
+ pm_runtime_set_suspended(dev);
+ if (dev->power.irq_safe) {
+ spin_lock_irq(&dev->power.lock);
+ dev->power.irq_safe = 0;
+ spin_unlock_irq(&dev->power.lock);
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
+ }
+ }
+}
+
+/**
* pm_runtime_remove - Prepare for removing a device from device hierarchy.
* @dev: Device object being removed from device hierarchy.
*/
void pm_runtime_remove(struct device *dev)
{
__pm_runtime_disable(dev, false);
-
- /* Change the status back to 'suspended' to match the initial status. */
- if (dev->power.runtime_status == RPM_ACTIVE)
- pm_runtime_set_suspended(dev);
- if (dev->power.irq_safe && dev->parent)
- pm_runtime_put(dev->parent);
+ pm_runtime_reinit(dev);
}
/**
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 1325ff225..a163f2c59 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -19,32 +19,14 @@
#include <linux/etherdevice.h>
#include <linux/phy.h>
-/**
- * device_add_property_set - Add a collection of properties to a device object.
- * @dev: Device to add properties to.
- * @pset: Collection of properties to add.
- *
- * Associate a collection of device properties represented by @pset with @dev
- * as its secondary firmware node.
- */
-void device_add_property_set(struct device *dev, struct property_set *pset)
-{
- if (!pset)
- return;
-
- pset->fwnode.type = FWNODE_PDATA;
- set_secondary_fwnode(dev, &pset->fwnode);
-}
-EXPORT_SYMBOL_GPL(device_add_property_set);
-
-static inline bool is_pset(struct fwnode_handle *fwnode)
+static inline bool is_pset_node(struct fwnode_handle *fwnode)
{
return fwnode && fwnode->type == FWNODE_PDATA;
}
-static inline struct property_set *to_pset(struct fwnode_handle *fwnode)
+static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
{
- return is_pset(fwnode) ?
+ return is_pset_node(fwnode) ?
container_of(fwnode, struct property_set, fwnode) : NULL;
}
@@ -63,45 +45,135 @@ static struct property_entry *pset_prop_get(struct property_set *pset,
return NULL;
}
-static int pset_prop_read_array(struct property_set *pset, const char *name,
- enum dev_prop_type type, void *val, size_t nval)
+static void *pset_prop_find(struct property_set *pset, const char *propname,
+ size_t length)
{
struct property_entry *prop;
- unsigned int item_size;
+ void *pointer;
- prop = pset_prop_get(pset, name);
+ prop = pset_prop_get(pset, propname);
if (!prop)
- return -ENODATA;
+ return ERR_PTR(-EINVAL);
+ if (prop->is_array)
+ pointer = prop->pointer.raw_data;
+ else
+ pointer = &prop->value.raw_data;
+ if (!pointer)
+ return ERR_PTR(-ENODATA);
+ if (length > prop->length)
+ return ERR_PTR(-EOVERFLOW);
+ return pointer;
+}
+
+static int pset_prop_read_u8_array(struct property_set *pset,
+ const char *propname,
+ u8 *values, size_t nval)
+{
+ void *pointer;
+ size_t length = nval * sizeof(*values);
+
+ pointer = pset_prop_find(pset, propname, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(values, pointer, length);
+ return 0;
+}
+
+static int pset_prop_read_u16_array(struct property_set *pset,
+ const char *propname,
+ u16 *values, size_t nval)
+{
+ void *pointer;
+ size_t length = nval * sizeof(*values);
+
+ pointer = pset_prop_find(pset, propname, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(values, pointer, length);
+ return 0;
+}
+
+static int pset_prop_read_u32_array(struct property_set *pset,
+ const char *propname,
+ u32 *values, size_t nval)
+{
+ void *pointer;
+ size_t length = nval * sizeof(*values);
+
+ pointer = pset_prop_find(pset, propname, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(values, pointer, length);
+ return 0;
+}
+
+static int pset_prop_read_u64_array(struct property_set *pset,
+ const char *propname,
+ u64 *values, size_t nval)
+{
+ void *pointer;
+ size_t length = nval * sizeof(*values);
+
+ pointer = pset_prop_find(pset, propname, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(values, pointer, length);
+ return 0;
+}
+
+static int pset_prop_count_elems_of_size(struct property_set *pset,
+ const char *propname, size_t length)
+{
+ struct property_entry *prop;
+
+ prop = pset_prop_get(pset, propname);
+ if (!prop)
+ return -EINVAL;
+
+ return prop->length / length;
+}
+
+static int pset_prop_read_string_array(struct property_set *pset,
+ const char *propname,
+ const char **strings, size_t nval)
+{
+ void *pointer;
+ size_t length = nval * sizeof(*strings);
+
+ pointer = pset_prop_find(pset, propname, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(strings, pointer, length);
+ return 0;
+}
+
+static int pset_prop_read_string(struct property_set *pset,
+ const char *propname, const char **strings)
+{
+ struct property_entry *prop;
+ const char **pointer;
- if (prop->type != type)
- return -EPROTO;
-
- if (!val)
- return prop->nval;
-
- if (prop->nval < nval)
- return -EOVERFLOW;
-
- switch (type) {
- case DEV_PROP_U8:
- item_size = sizeof(u8);
- break;
- case DEV_PROP_U16:
- item_size = sizeof(u16);
- break;
- case DEV_PROP_U32:
- item_size = sizeof(u32);
- break;
- case DEV_PROP_U64:
- item_size = sizeof(u64);
- break;
- case DEV_PROP_STRING:
- item_size = sizeof(const char *);
- break;
- default:
+ prop = pset_prop_get(pset, propname);
+ if (!prop)
return -EINVAL;
+ if (!prop->is_string)
+ return -EILSEQ;
+ if (prop->is_array) {
+ pointer = prop->pointer.str;
+ if (!pointer)
+ return -ENODATA;
+ } else {
+ pointer = &prop->value.str;
+ if (*pointer && strnlen(*pointer, prop->length) >= prop->length)
+ return -EILSEQ;
}
- memcpy(val, prop->value.raw_data, nval * item_size);
+
+ *strings = *pointer;
return 0;
}
@@ -124,6 +196,18 @@ bool device_property_present(struct device *dev, const char *propname)
}
EXPORT_SYMBOL_GPL(device_property_present);
+static bool __fwnode_property_present(struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ if (is_of_node(fwnode))
+ return of_property_read_bool(to_of_node(fwnode), propname);
+ else if (is_acpi_node(fwnode))
+ return !acpi_node_prop_get(fwnode, propname, NULL);
+ else if (is_pset_node(fwnode))
+ return !!pset_prop_get(to_pset_node(fwnode), propname);
+ return false;
+}
+
/**
* fwnode_property_present - check if a property of a firmware node is present
* @fwnode: Firmware node whose property to check
@@ -131,12 +215,12 @@ EXPORT_SYMBOL_GPL(device_property_present);
*/
bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
{
- if (is_of_node(fwnode))
- return of_property_read_bool(to_of_node(fwnode), propname);
- else if (is_acpi_node(fwnode))
- return !acpi_node_prop_get(fwnode, propname, NULL);
+ bool ret;
- return !!pset_prop_get(to_pset(fwnode), propname);
+ ret = __fwnode_property_present(fwnode, propname);
+ if (ret == false && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
+ ret = __fwnode_property_present(fwnode->secondary, propname);
+ return ret;
}
EXPORT_SYMBOL_GPL(fwnode_property_present);
@@ -309,25 +393,40 @@ int device_property_match_string(struct device *dev, const char *propname,
}
EXPORT_SYMBOL_GPL(device_property_match_string);
-#define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \
- (val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \
+#define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \
+ (val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \
: of_property_count_elems_of_size((node), (propname), sizeof(type))
-#define FWNODE_PROP_READ_ARRAY(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \
-({ \
- int _ret_; \
- if (is_of_node(_fwnode_)) \
- _ret_ = OF_DEV_PROP_READ_ARRAY(to_of_node(_fwnode_), _propname_, \
- _type_, _val_, _nval_); \
- else if (is_acpi_node(_fwnode_)) \
- _ret_ = acpi_node_prop_read(_fwnode_, _propname_, _proptype_, \
- _val_, _nval_); \
- else if (is_pset(_fwnode_)) \
- _ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \
- _proptype_, _val_, _nval_); \
- else \
- _ret_ = -ENXIO; \
- _ret_; \
+#define PSET_PROP_READ_ARRAY(node, propname, type, val, nval) \
+ (val) ? pset_prop_read_##type##_array((node), (propname), (val), (nval)) \
+ : pset_prop_count_elems_of_size((node), (propname), sizeof(type))
+
+#define FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \
+({ \
+ int _ret_; \
+ if (is_of_node(_fwnode_)) \
+ _ret_ = OF_DEV_PROP_READ_ARRAY(to_of_node(_fwnode_), _propname_, \
+ _type_, _val_, _nval_); \
+ else if (is_acpi_node(_fwnode_)) \
+ _ret_ = acpi_node_prop_read(_fwnode_, _propname_, _proptype_, \
+ _val_, _nval_); \
+ else if (is_pset_node(_fwnode_)) \
+ _ret_ = PSET_PROP_READ_ARRAY(to_pset_node(_fwnode_), _propname_, \
+ _type_, _val_, _nval_); \
+ else \
+ _ret_ = -ENXIO; \
+ _ret_; \
+})
+
+#define FWNODE_PROP_READ_ARRAY(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \
+({ \
+ int _ret_; \
+ _ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \
+ _val_, _nval_); \
+ if (_ret_ == -EINVAL && _fwnode_ && !IS_ERR_OR_NULL(_fwnode_->secondary)) \
+ _ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \
+ _proptype_, _val_, _nval_); \
+ _ret_; \
})
/**
@@ -434,6 +533,41 @@ int fwnode_property_read_u64_array(struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array);
+static int __fwnode_property_read_string_array(struct fwnode_handle *fwnode,
+ const char *propname,
+ const char **val, size_t nval)
+{
+ if (is_of_node(fwnode))
+ return val ?
+ of_property_read_string_array(to_of_node(fwnode),
+ propname, val, nval) :
+ of_property_count_strings(to_of_node(fwnode), propname);
+ else if (is_acpi_node(fwnode))
+ return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
+ val, nval);
+ else if (is_pset_node(fwnode))
+ return val ?
+ pset_prop_read_string_array(to_pset_node(fwnode),
+ propname, val, nval) :
+ pset_prop_count_elems_of_size(to_pset_node(fwnode),
+ propname,
+ sizeof(const char *));
+ return -ENXIO;
+}
+
+static int __fwnode_property_read_string(struct fwnode_handle *fwnode,
+ const char *propname, const char **val)
+{
+ if (is_of_node(fwnode))
+ return of_property_read_string(to_of_node(fwnode), propname, val);
+ else if (is_acpi_node(fwnode))
+ return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
+ val, 1);
+ else if (is_pset_node(fwnode))
+ return pset_prop_read_string(to_pset_node(fwnode), propname, val);
+ return -ENXIO;
+}
+
/**
* fwnode_property_read_string_array - return string array property of a node
* @fwnode: Firmware node to get the property of
@@ -456,18 +590,13 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
const char *propname, const char **val,
size_t nval)
{
- if (is_of_node(fwnode))
- return val ?
- of_property_read_string_array(to_of_node(fwnode),
- propname, val, nval) :
- of_property_count_strings(to_of_node(fwnode), propname);
- else if (is_acpi_node(fwnode))
- return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
- val, nval);
- else if (is_pset(fwnode))
- return pset_prop_read_array(to_pset(fwnode), propname,
- DEV_PROP_STRING, val, nval);
- return -ENXIO;
+ int ret;
+
+ ret = __fwnode_property_read_string_array(fwnode, propname, val, nval);
+ if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
+ ret = __fwnode_property_read_string_array(fwnode->secondary,
+ propname, val, nval);
+ return ret;
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
@@ -489,14 +618,13 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
int fwnode_property_read_string(struct fwnode_handle *fwnode,
const char *propname, const char **val)
{
- if (is_of_node(fwnode))
- return of_property_read_string(to_of_node(fwnode), propname, val);
- else if (is_acpi_node(fwnode))
- return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
- val, 1);
+ int ret;
- return pset_prop_read_array(to_pset(fwnode), propname,
- DEV_PROP_STRING, val, 1);
+ ret = __fwnode_property_read_string(fwnode, propname, val);
+ if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
+ ret = __fwnode_property_read_string(fwnode->secondary,
+ propname, val);
+ return ret;
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string);
@@ -525,6 +653,9 @@ int fwnode_property_match_string(struct fwnode_handle *fwnode,
if (nval < 0)
return nval;
+ if (nval == 0)
+ return -ENODATA;
+
values = kcalloc(nval, sizeof(*values), GFP_KERNEL);
if (!values)
return -ENOMEM;
@@ -547,6 +678,182 @@ out:
EXPORT_SYMBOL_GPL(fwnode_property_match_string);
/**
+ * pset_free_set - releases memory allocated for copied property set
+ * @pset: Property set to release
+ *
+ * The function takes a previously copied property set and releases all the
+ * memory allocated to it.
+ */
+static void pset_free_set(struct property_set *pset)
+{
+ const struct property_entry *prop;
+ size_t i, nval;
+
+ if (!pset)
+ return;
+
+ for (prop = pset->properties; prop->name; prop++) {
+ if (prop->is_array) {
+ if (prop->is_string && prop->pointer.str) {
+ nval = prop->length / sizeof(const char *);
+ for (i = 0; i < nval; i++)
+ kfree(prop->pointer.str[i]);
+ }
+ kfree(prop->pointer.raw_data);
+ } else if (prop->is_string) {
+ kfree(prop->value.str);
+ }
+ kfree(prop->name);
+ }
+
+ kfree(pset->properties);
+ kfree(pset);
+}
+
+static int pset_copy_entry(struct property_entry *dst,
+ const struct property_entry *src)
+{
+ const char **d, **s;
+ size_t i, nval;
+
+ dst->name = kstrdup(src->name, GFP_KERNEL);
+ if (!dst->name)
+ return -ENOMEM;
+
+ if (src->is_array) {
+ if (!src->length)
+ return -ENODATA;
+
+ if (src->is_string) {
+ nval = src->length / sizeof(const char *);
+ dst->pointer.str = kcalloc(nval, sizeof(const char *),
+ GFP_KERNEL);
+ if (!dst->pointer.str)
+ return -ENOMEM;
+
+ d = dst->pointer.str;
+ s = src->pointer.str;
+ for (i = 0; i < nval; i++) {
+ d[i] = kstrdup(s[i], GFP_KERNEL);
+ if (!d[i] && s[i])
+ return -ENOMEM;
+ }
+ } else {
+ dst->pointer.raw_data = kmemdup(src->pointer.raw_data,
+ src->length, GFP_KERNEL);
+ if (!dst->pointer.raw_data)
+ return -ENOMEM;
+ }
+ } else if (src->is_string) {
+ dst->value.str = kstrdup(src->value.str, GFP_KERNEL);
+ if (!dst->value.str && src->value.str)
+ return -ENOMEM;
+ } else {
+ dst->value.raw_data = src->value.raw_data;
+ }
+
+ dst->length = src->length;
+ dst->is_array = src->is_array;
+ dst->is_string = src->is_string;
+
+ return 0;
+}
+
+/**
+ * pset_copy_set - copies property set
+ * @pset: Property set to copy
+ *
+ * This function takes a deep copy of the given property set and returns
+ * a pointer to the copy. Call pset_free_set() to free the resources
+ * allocated in this function.
+ *
+ * Return: Pointer to the new property set or error pointer.
+ */
+static struct property_set *pset_copy_set(const struct property_set *pset)
+{
+ const struct property_entry *entry;
+ struct property_set *p;
+ size_t i, n = 0;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ while (pset->properties[n].name)
+ n++;
+
+ p->properties = kcalloc(n + 1, sizeof(*entry), GFP_KERNEL);
+ if (!p->properties) {
+ kfree(p);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < n; i++) {
+ int ret = pset_copy_entry(&p->properties[i],
+ &pset->properties[i]);
+ if (ret) {
+ pset_free_set(p);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return p;
+}
+
+/**
+ * device_remove_property_set - Remove properties from a device object.
+ * @dev: Device whose properties to remove.
+ *
+ * The function removes properties previously associated with the device's
+ * secondary firmware node by device_add_property_set(). Memory allocated
+ * to the properties will also be released.
+ */
+void device_remove_property_set(struct device *dev)
+{
+ struct fwnode_handle *fwnode;
+
+ fwnode = dev_fwnode(dev);
+ if (!fwnode)
+ return;
+ /*
+ * Pick either primary or secondary node depending which one holds
+ * the pset. If there is no real firmware node (ACPI/DT) primary
+ * will hold the pset.
+ */
+ if (!is_pset_node(fwnode))
+ fwnode = fwnode->secondary;
+ if (!IS_ERR(fwnode) && is_pset_node(fwnode))
+ pset_free_set(to_pset_node(fwnode));
+ set_secondary_fwnode(dev, NULL);
+}
+EXPORT_SYMBOL_GPL(device_remove_property_set);
+
+/**
+ * device_add_property_set - Add a collection of properties to a device object.
+ * @dev: Device to add properties to.
+ * @pset: Collection of properties to add.
+ *
+ * Associate a collection of device properties represented by @pset with @dev
+ * as its secondary firmware node. The function takes a copy of @pset.
+ */
+int device_add_property_set(struct device *dev, const struct property_set *pset)
+{
+ struct property_set *p;
+
+ if (!pset)
+ return -EINVAL;
+
+ p = pset_copy_set(pset);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ p->fwnode.type = FWNODE_PDATA;
+ set_secondary_fwnode(dev, &p->fwnode);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_add_property_set);
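A hedged build-time sketch of a property set (device, names and values are made up), using only the struct fields that appear in this patch; the terminating empty entry is what pset_copy_set() uses to find the end of the array.

/* Sketch: a copied-in secondary fwnode carrying two properties. */
static struct property_entry foo_props[] = {
	{
		.name = "linux,code",
		.length = sizeof(u32),
		.value.raw_data = 116,			/* e.g. KEY_POWER */
	},
	{
		.name = "label",
		.length = sizeof("power"),
		.is_string = true,
		.value.str = "power",
	},
	{ }	/* sentinel: name == NULL terminates the set */
};

static struct property_set foo_pset = {
	.properties = foo_props,
};

static int foo_attach_props(struct device *dev)
{
	return device_add_property_set(dev, &foo_pset);	/* takes a deep copy */
}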
+
+/**
* device_get_next_child_node - Return the next child node handle for a device
* @dev: Device to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 0246f44de..686c9e0b9 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -21,7 +21,7 @@ static int regcache_flat_init(struct regmap *map)
int i;
unsigned int *cache;
- map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1),
+ map->cache = kcalloc(map->max_register + 1, sizeof(unsigned int),
GFP_KERNEL);
if (!map->cache)
return -ENOMEM;
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 736e0d378..6f77d7319 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -139,7 +139,7 @@ static int regcache_lzo_init(struct regmap *map)
ret = 0;
blkcount = regcache_lzo_block_count(map);
- map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
+ map->cache = kcalloc(blkcount, sizeof(*lzo_blocks),
GFP_KERNEL);
if (!map->cache)
return -ENOMEM;
@@ -152,8 +152,8 @@ static int regcache_lzo_init(struct regmap *map)
* that register.
*/
bmp_size = map->num_reg_defaults_raw;
- sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
- GFP_KERNEL);
+ sync_bmp = kmalloc_array(BITS_TO_LONGS(bmp_size), sizeof(long),
+ GFP_KERNEL);
if (!sync_bmp) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 56486d92c..aa56af87d 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -361,13 +361,14 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
rbnode->base_reg = reg;
}
- rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
- GFP_KERNEL);
+ rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
+ GFP_KERNEL);
if (!rbnode->block)
goto err_free;
- rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) *
- sizeof(*rbnode->cache_present), GFP_KERNEL);
+ rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
+ sizeof(*rbnode->cache_present),
+ GFP_KERNEL);
if (!rbnode->cache_present)
goto err_free_block;
@@ -413,8 +414,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
max = reg + max_dist;
/* look for an adjacent register to the one we are about to add */
- for (node = rb_first(&rbtree_ctx->root); node;
- node = rb_next(node)) {
+ node = rbtree_ctx->root.rb_node;
+ while (node) {
rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
node);
@@ -425,6 +426,11 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
new_base_reg = min(reg, base_reg);
new_top_reg = max(reg, top_reg);
} else {
+ if (max < base_reg)
+ node = node->rb_left;
+ else
+ node = node->rb_right;
+
continue;
}
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 4c0780298..348be3a35 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -100,15 +100,25 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
int i;
void *tmp_buf;
- for (i = 0; i < config->num_reg_defaults; i++)
- if (config->reg_defaults[i].reg % map->reg_stride)
- return -EINVAL;
-
if (map->cache_type == REGCACHE_NONE) {
+ if (config->reg_defaults || config->num_reg_defaults_raw)
+ dev_warn(map->dev,
+ "No cache used with register defaults set!\n");
+
map->cache_bypass = true;
return 0;
}
+ if (config->reg_defaults && !config->num_reg_defaults) {
+ dev_err(map->dev,
+ "Register defaults are set without the number!\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < config->num_reg_defaults; i++)
+ if (config->reg_defaults[i].reg % map->reg_stride)
+ return -EINVAL;
+
for (i = 0; i < ARRAY_SIZE(cache_types); i++)
if (cache_types[i]->type == map->cache_type)
break;
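To illustrate the stricter checking above, a hedged regmap_config sketch (hypothetical register map): reg_defaults must be paired with num_reg_defaults, the default registers must respect reg_stride, and supplying defaults with REGCACHE_NONE now only triggers a warning.

/* Sketch: defaults paired with their count and a real cache type. */
static const struct reg_default foo_reg_defaults[] = {
	{ .reg = 0x00, .def = 0x0001 },
	{ .reg = 0x04, .def = 0x00ff },
};

static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 16,
	.reg_stride	= 4,
	.max_register	= 0x3c,
	.cache_type	= REGCACHE_RBTREE,
	.reg_defaults	= foo_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
};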
@@ -138,8 +148,6 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
* a copy of it.
*/
if (config->reg_defaults) {
- if (!map->num_reg_defaults)
- return -EINVAL;
tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
sizeof(struct reg_default), GFP_KERNEL);
if (!tmp_buf)
@@ -535,19 +543,30 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
switch (map->cache_word_size) {
case 1: {
u8 *cache = base;
+
cache[idx] = val;
break;
}
case 2: {
u16 *cache = base;
+
cache[idx] = val;
break;
}
case 4: {
u32 *cache = base;
+
+ cache[idx] = val;
+ break;
+ }
+#ifdef CONFIG_64BIT
+ case 8: {
+ u64 *cache = base;
+
cache[idx] = val;
break;
}
+#endif
default:
BUG();
}
@@ -568,16 +587,26 @@ unsigned int regcache_get_val(struct regmap *map, const void *base,
switch (map->cache_word_size) {
case 1: {
const u8 *cache = base;
+
return cache[idx];
}
case 2: {
const u16 *cache = base;
+
return cache[idx];
}
case 4: {
const u32 *cache = base;
+
+ return cache[idx];
+ }
+#ifdef CONFIG_64BIT
+ case 8: {
+ const u64 *cache = base;
+
return cache[idx];
}
+#endif
default:
BUG();
}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 3f0a7e262..1ee3d4086 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -397,72 +397,39 @@ static const struct file_operations regmap_reg_ranges_fops = {
.llseek = default_llseek,
};
-static ssize_t regmap_access_read_file(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos)
+static int regmap_access_show(struct seq_file *s, void *ignored)
{
- int reg_len, tot_len;
- size_t buf_pos = 0;
- loff_t p = 0;
- ssize_t ret;
- int i;
- struct regmap *map = file->private_data;
- char *buf;
-
- if (*ppos < 0 || !count)
- return -EINVAL;
+ struct regmap *map = s->private;
+ int i, reg_len;
- buf = kmalloc(count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* Calculate the length of a fixed format */
reg_len = regmap_calc_reg_len(map->max_register);
- tot_len = reg_len + 10; /* ': R W V P\n' */
for (i = 0; i <= map->max_register; i += map->reg_stride) {
/* Ignore registers which are neither readable nor writable */
if (!regmap_readable(map, i) && !regmap_writeable(map, i))
continue;
- /* If we're in the region the user is trying to read */
- if (p >= *ppos) {
- /* ...but not beyond it */
- if (buf_pos + tot_len + 1 >= count)
- break;
-
- /* Format the register */
- snprintf(buf + buf_pos, count - buf_pos,
- "%.*x: %c %c %c %c\n",
- reg_len, i,
- regmap_readable(map, i) ? 'y' : 'n',
- regmap_writeable(map, i) ? 'y' : 'n',
- regmap_volatile(map, i) ? 'y' : 'n',
- regmap_precious(map, i) ? 'y' : 'n');
-
- buf_pos += tot_len;
- }
- p += tot_len;
- }
-
- ret = buf_pos;
-
- if (copy_to_user(user_buf, buf, buf_pos)) {
- ret = -EFAULT;
- goto out;
+ /* Format the register */
+ seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
+ regmap_readable(map, i) ? 'y' : 'n',
+ regmap_writeable(map, i) ? 'y' : 'n',
+ regmap_volatile(map, i) ? 'y' : 'n',
+ regmap_precious(map, i) ? 'y' : 'n');
}
- *ppos += buf_pos;
+ return 0;
+}
-out:
- kfree(buf);
- return ret;
+static int access_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, regmap_access_show, inode->i_private);
}
static const struct file_operations regmap_access_fops = {
- .open = simple_open,
- .read = regmap_access_read_file,
- .llseek = default_llseek,
+ .open = access_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static ssize_t regmap_cache_only_write_file(struct file *file,
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 8d16db533..9b0d20241 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -39,8 +39,11 @@ struct regmap_irq_chip_data {
unsigned int *mask_buf;
unsigned int *mask_buf_def;
unsigned int *wake_buf;
+ unsigned int *type_buf;
+ unsigned int *type_buf_def;
unsigned int irq_reg_stride;
+ unsigned int type_reg_stride;
};
static inline const
@@ -144,6 +147,22 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
}
}
+ for (i = 0; i < d->chip->num_type_reg; i++) {
+ if (!d->type_buf_def[i])
+ continue;
+ reg = d->chip->type_base +
+ (i * map->reg_stride * d->type_reg_stride);
+ if (d->chip->type_invert)
+ ret = regmap_update_bits(d->map, reg,
+ d->type_buf_def[i], ~d->type_buf[i]);
+ else
+ ret = regmap_update_bits(d->map, reg,
+ d->type_buf_def[i], d->type_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev, "Failed to sync type in %x\n",
+ reg);
+ }
+
if (d->chip->runtime_pm)
pm_runtime_put(map->dev);
@@ -178,6 +197,38 @@ static void regmap_irq_disable(struct irq_data *data)
d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}
+static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = d->map;
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+ int reg = irq_data->type_reg_offset / map->reg_stride;
+
+ if (!(irq_data->type_rising_mask | irq_data->type_falling_mask))
+ return 0;
+
+ d->type_buf[reg] &= ~(irq_data->type_falling_mask |
+ irq_data->type_rising_mask);
+ switch (type) {
+ case IRQ_TYPE_EDGE_FALLING:
+ d->type_buf[reg] |= irq_data->type_falling_mask;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ d->type_buf[reg] |= irq_data->type_rising_mask;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ d->type_buf[reg] |= (irq_data->type_falling_mask |
+ irq_data->type_rising_mask);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
@@ -204,6 +255,7 @@ static const struct irq_chip regmap_irq_chip = {
.irq_bus_sync_unlock = regmap_irq_sync_unlock,
.irq_disable = regmap_irq_disable,
.irq_enable = regmap_irq_enable,
+ .irq_set_type = regmap_irq_set_type,
.irq_set_wake = regmap_irq_set_wake,
};
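A hedged describing-side sketch (register layout and masks invented): a chip opting into edge-type configuration fills in the new per-IRQ type_* masks and the chip-level type register description that regmap_irq_set_type() and the sync path above consume.

/* Sketch: one interrupt whose edge polarity lives in a type register. */
static const struct regmap_irq foo_irqs[] = {
	[0] = {
		.reg_offset		= 0,
		.mask			= BIT(0),
		.type_reg_offset	= 0,
		.type_rising_mask	= BIT(0),
		.type_falling_mask	= BIT(1),
	},
};

static const struct regmap_irq_chip foo_irq_chip = {
	.name		= "foo",
	.status_base	= 0x10,
	.mask_base	= 0x14,
	.ack_base	= 0x18,
	.type_base	= 0x1c,	/* new: first edge-type register */
	.num_type_reg	= 1,	/* new: number of type registers */
	.num_regs	= 1,
	.irqs		= foo_irqs,
	.num_irqs	= ARRAY_SIZE(foo_irqs),
};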
@@ -386,28 +438,40 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (!d)
return -ENOMEM;
- d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
GFP_KERNEL);
if (!d->status_buf)
goto err_alloc;
- d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
GFP_KERNEL);
if (!d->mask_buf)
goto err_alloc;
- d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
GFP_KERNEL);
if (!d->mask_buf_def)
goto err_alloc;
if (chip->wake_base) {
- d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
GFP_KERNEL);
if (!d->wake_buf)
goto err_alloc;
}
+ if (chip->num_type_reg) {
+ d->type_buf_def = kcalloc(chip->num_type_reg,
+ sizeof(unsigned int), GFP_KERNEL);
+ if (!d->type_buf_def)
+ goto err_alloc;
+
+ d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!d->type_buf)
+ goto err_alloc;
+ }
+
d->irq_chip = regmap_irq_chip;
d->irq_chip.name = chip->name;
d->irq = irq;
@@ -420,10 +484,16 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
else
d->irq_reg_stride = 1;
+ if (chip->type_reg_stride)
+ d->type_reg_stride = chip->type_reg_stride;
+ else
+ d->type_reg_stride = 1;
+
if (!map->use_single_read && map->reg_stride == 1 &&
d->irq_reg_stride == 1) {
- d->status_reg_buf = kmalloc(map->format.val_bytes *
- chip->num_regs, GFP_KERNEL);
+ d->status_reg_buf = kmalloc_array(chip->num_regs,
+ map->format.val_bytes,
+ GFP_KERNEL);
if (!d->status_reg_buf)
goto err_alloc;
}
@@ -511,6 +581,33 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
}
}
+ if (chip->num_type_reg) {
+ for (i = 0; i < chip->num_irqs; i++) {
+ reg = chip->irqs[i].type_reg_offset / map->reg_stride;
+ d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
+ chip->irqs[i].type_falling_mask;
+ }
+ for (i = 0; i < chip->num_type_reg; ++i) {
+ if (!d->type_buf_def[i])
+ continue;
+
+ reg = chip->type_base +
+ (i * map->reg_stride * d->type_reg_stride);
+ if (chip->type_invert)
+ ret = regmap_update_bits(map, reg,
+ d->type_buf_def[i], 0xFF);
+ else
+ ret = regmap_update_bits(map, reg,
+ d->type_buf_def[i], 0x0);
+ if (ret != 0) {
+ dev_err(map->dev,
+ "Failed to set type in 0x%x: %x\n",
+ reg, ret);
+ goto err_alloc;
+ }
+ }
+ }
+
if (irq_base)
d->domain = irq_domain_add_legacy(map->dev->of_node,
chip->num_irqs, irq_base, 0,
@@ -541,6 +638,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
err_domain:
/* Should really dispose of the domain but... */
err_alloc:
+ kfree(d->type_buf);
+ kfree(d->type_buf_def);
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
@@ -564,6 +663,8 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
free_irq(irq, d);
irq_domain_remove(d->domain);
+ kfree(d->type_buf);
+ kfree(d->type_buf_def);
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 426a57e41..eea51569f 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -61,6 +61,33 @@ static int regmap_mmio_regbits_check(size_t reg_bits)
}
}
+static int regmap_mmio_get_min_stride(size_t val_bits)
+{
+ int min_stride;
+
+ switch (val_bits) {
+ case 8:
+ /* The core treats 0 as 1 */
+ min_stride = 0;
+ return 0;
+ case 16:
+ min_stride = 2;
+ break;
+ case 32:
+ min_stride = 4;
+ break;
+#ifdef CONFIG_64BIT
+ case 64:
+ min_stride = 8;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ return min_stride;
+}
+
static inline void regmap_mmio_count_check(size_t count, u32 offset)
{
BUG_ON(count <= offset);
@@ -231,26 +258,9 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
if (config->pad_bits)
return ERR_PTR(-EINVAL);
- switch (config->val_bits) {
- case 8:
- /* The core treats 0 as 1 */
- min_stride = 0;
- break;
- case 16:
- min_stride = 2;
- break;
- case 32:
- min_stride = 4;
- break;
-#ifdef CONFIG_64BIT
- case 64:
- min_stride = 8;
- break;
-#endif
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
+ min_stride = regmap_mmio_get_min_stride(config->val_bits);
+ if (min_stride < 0)
+ return ERR_PTR(min_stride);
if (config->reg_stride < min_stride)
return ERR_PTR(-EINVAL);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 4ac63c0e5..ee54e841d 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -245,6 +245,28 @@ static void regmap_format_32_native(void *buf, unsigned int val,
*(u32 *)buf = val << shift;
}
+#ifdef CONFIG_64BIT
+static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
+{
+ __be64 *b = buf;
+
+ b[0] = cpu_to_be64((u64)val << shift);
+}
+
+static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
+{
+ __le64 *b = buf;
+
+ b[0] = cpu_to_le64((u64)val << shift);
+}
+
+static void regmap_format_64_native(void *buf, unsigned int val,
+ unsigned int shift)
+{
+ *(u64 *)buf = (u64)val << shift;
+}
+#endif
+
static void regmap_parse_inplace_noop(void *buf)
{
}
@@ -332,6 +354,41 @@ static unsigned int regmap_parse_32_native(const void *buf)
return *(u32 *)buf;
}
+#ifdef CONFIG_64BIT
+static unsigned int regmap_parse_64_be(const void *buf)
+{
+ const __be64 *b = buf;
+
+ return be64_to_cpu(b[0]);
+}
+
+static unsigned int regmap_parse_64_le(const void *buf)
+{
+ const __le64 *b = buf;
+
+ return le64_to_cpu(b[0]);
+}
+
+static void regmap_parse_64_be_inplace(void *buf)
+{
+ __be64 *b = buf;
+
+ b[0] = be64_to_cpu(b[0]);
+}
+
+static void regmap_parse_64_le_inplace(void *buf)
+{
+ __le64 *b = buf;
+
+ b[0] = le64_to_cpu(b[0]);
+}
+
+static unsigned int regmap_parse_64_native(const void *buf)
+{
+ return *(u64 *)buf;
+}
+#endif
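For completeness, a hedged configuration sketch (values invented): with these formatters a 64-bit capable bus, such as regmap-mmio on a 64BIT kernel per the hunk above, can describe 64-bit wide registers and values directly.

#ifdef CONFIG_64BIT
/* Sketch: a register map with 64-bit values, 8-byte aligned. */
static const struct regmap_config foo64_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 64,
	.reg_stride	= 8,
	.max_register	= 0xff8,
};
#endif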
+
static void regmap_lock_mutex(void *__map)
{
struct regmap *map = __map;
@@ -712,6 +769,21 @@ struct regmap *__regmap_init(struct device *dev,
}
break;
+#ifdef CONFIG_64BIT
+ case 64:
+ switch (reg_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_reg = regmap_format_64_be;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_reg = regmap_format_64_native;
+ break;
+ default:
+ goto err_map;
+ }
+ break;
+#endif
+
default:
goto err_map;
}
@@ -771,6 +843,28 @@ struct regmap *__regmap_init(struct device *dev,
goto err_map;
}
break;
+#ifdef CONFIG_64BIT
+ case 64:
+ switch (val_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_val = regmap_format_64_be;
+ map->format.parse_val = regmap_parse_64_be;
+ map->format.parse_inplace = regmap_parse_64_be_inplace;
+ break;
+ case REGMAP_ENDIAN_LITTLE:
+ map->format.format_val = regmap_format_64_le;
+ map->format.parse_val = regmap_parse_64_le;
+ map->format.parse_inplace = regmap_parse_64_le_inplace;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_val = regmap_format_64_native;
+ map->format.parse_val = regmap_parse_64_native;
+ break;
+ default:
+ goto err_map;
+ }
+ break;
+#endif
}
if (map->format.format_write) {
@@ -1513,7 +1607,7 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
int ret;
- if (reg % map->reg_stride)
+ if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
map->lock(map->lock_arg);
@@ -1540,7 +1634,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
int ret;
- if (reg % map->reg_stride)
+ if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
map->lock(map->lock_arg);
@@ -1714,7 +1808,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (map->bus && !map->format.parse_inplace)
return -EINVAL;
- if (reg % map->reg_stride)
+ if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
/*
@@ -1983,7 +2077,7 @@ static int _regmap_multi_reg_write(struct regmap *map,
int reg = regs[i].reg;
if (!map->writeable_reg(map->dev, reg))
return -EINVAL;
- if (reg % map->reg_stride)
+ if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
}
@@ -2133,7 +2227,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
if (val_len % map->format.val_bytes)
return -EINVAL;
- if (reg % map->reg_stride)
+ if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
map->lock(map->lock_arg);
@@ -2260,7 +2354,7 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
int ret;
- if (reg % map->reg_stride)
+ if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
map->lock(map->lock_arg);
@@ -2296,7 +2390,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
- if (reg % map->reg_stride)
+ if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
if (val_count == 0)
return -EINVAL;
@@ -2414,7 +2508,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_bytes = map->format.val_bytes;
bool vol = regmap_volatile_range(map, reg, val_count);
- if (reg % map->reg_stride)
+ if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
@@ -2488,11 +2582,19 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
* we assume that the values are native
* endian.
*/
+#ifdef CONFIG_64BIT
+ u64 *u64 = val;
+#endif
u32 *u32 = val;
u16 *u16 = val;
u8 *u8 = val;
switch (map->format.val_bytes) {
+#ifdef CONFIG_64BIT
+ case 8:
+ u64[i] = ival;
+ break;
+#endif
case 4:
u32[i] = ival;
break;