author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-12-15 14:52:16 -0300
---|---|---
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-12-15 14:52:16 -0300
commit | 8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be (patch) |
tree | e9891aa6c295060d065adffd610c4f49ecf884f3 | /drivers/nvmem
parent | a71852147516bc1cb5b0b3cbd13639bfd4022dc8 (diff) |
Linux-libre 4.3.2-gnu
Diffstat (limited to 'drivers/nvmem')
-rw-r--r-- | drivers/nvmem/Kconfig | 39
-rw-r--r-- | drivers/nvmem/Makefile | 12
-rw-r--r-- | drivers/nvmem/core.c | 1083
-rw-r--r-- | drivers/nvmem/qfprom.c | 85
-rw-r--r-- | drivers/nvmem/sunxi_sid.c | 180
5 files changed, 1399 insertions, 0 deletions
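For orientation before the patch body: the commit adds the nvmem core (core.c) plus two providers (Qualcomm QFPROM and Allwinner SID). Below is a minimal sketch of a cell-level consumer of the API this patch introduces; the driver name, compatible string and the cell name "calibration" are hypothetical, and only the nvmem consumer calls (devm_nvmem_cell_get(), nvmem_cell_read()) come from core.c itself.

```c
/*
 * Hypothetical consumer of the nvmem cell API added in this patch.
 * Only the nvmem-consumer calls are taken from core.c below; the
 * driver, compatible string and cell name are placeholders.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int example_probe(struct platform_device *pdev)
{
	struct nvmem_cell *cell;
	size_t len;
	u8 *data;

	/* Resolved through the "nvmem-cells"/"nvmem-cell-names" DT
	 * properties parsed by of_nvmem_cell_get(), or through the
	 * global cell list on non-DT platforms. */
	cell = devm_nvmem_cell_get(&pdev->dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	/* nvmem_cell_read() returns a kzalloc'ed buffer that the
	 * consumer must kfree() when done. */
	data = nvmem_cell_read(cell, &len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	dev_info(&pdev->dev, "read %zu bytes from nvmem cell\n", len);
	kfree(data);

	return 0;
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example" },	/* placeholder compatible */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, example_of_match);

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "nvmem-consumer-example",
		.of_match_table = example_of_match,
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL v2");
```

The devm_ variant registers a devres action that drops the cell reference (nvmem_cell_put()) automatically on driver detach; the non-devm nvmem_cell_get()/nvmem_cell_put() pair in core.c can be used where explicit lifetime control is preferred.
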
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig new file mode 100644 index 000000000..8db297821 --- /dev/null +++ b/drivers/nvmem/Kconfig @@ -0,0 +1,39 @@ +menuconfig NVMEM + tristate "NVMEM Support" + select REGMAP + help + Support for NVMEM(Non Volatile Memory) devices like EEPROM, EFUSES... + + This framework is designed to provide a generic interface to NVMEM + from both the Linux Kernel and the userspace. + + This driver can also be built as a module. If so, the module + will be called nvmem_core. + + If unsure, say no. + +if NVMEM + +config QCOM_QFPROM + tristate "QCOM QFPROM Support" + depends on ARCH_QCOM || COMPILE_TEST + select REGMAP_MMIO + help + Say y here to enable QFPROM support. The QFPROM provides access + functions for QFPROM data to rest of the drivers via nvmem interface. + + This driver can also be built as a module. If so, the module + will be called nvmem_qfprom. + +config NVMEM_SUNXI_SID + tristate "Allwinner SoCs SID support" + depends on ARCH_SUNXI + select REGMAP_MMIO + help + This is a driver for the 'security ID' available on various Allwinner + devices. + + This driver can also be built as a module. If so, the module + will be called nvmem_sunxi_sid. + +endif diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile new file mode 100644 index 000000000..4328b930a --- /dev/null +++ b/drivers/nvmem/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for nvmem drivers. +# + +obj-$(CONFIG_NVMEM) += nvmem_core.o +nvmem_core-y := core.o + +# Devices +obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o +nvmem_qfprom-y := qfprom.o +obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o +nvmem_sunxi_sid-y := sunxi_sid.o diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c new file mode 100644 index 000000000..6fd4e5a5e --- /dev/null +++ b/drivers/nvmem/core.c @@ -0,0 +1,1083 @@ +/* + * nvmem framework core. + * + * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/export.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +struct nvmem_device { + const char *name; + struct regmap *regmap; + struct module *owner; + struct device dev; + int stride; + int word_size; + int ncells; + int id; + int users; + size_t size; + bool read_only; +}; + +struct nvmem_cell { + const char *name; + int offset; + int bytes; + int bit_offset; + int nbits; + struct nvmem_device *nvmem; + struct list_head node; +}; + +static DEFINE_MUTEX(nvmem_mutex); +static DEFINE_IDA(nvmem_ida); + +static LIST_HEAD(nvmem_cells); +static DEFINE_MUTEX(nvmem_cells_mutex); + +#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) + +static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t pos, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvmem_device *nvmem = to_nvmem_device(dev); + int rc; + + /* Stop the user from reading */ + if (pos >= nvmem->size) + return 0; + + if (pos + count > nvmem->size) + count = nvmem->size - pos; + + count = round_down(count, nvmem->word_size); + + rc = regmap_raw_read(nvmem->regmap, pos, buf, count); + + if (IS_ERR_VALUE(rc)) + return rc; + + return count; +} + +static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t pos, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvmem_device *nvmem = to_nvmem_device(dev); + int rc; + + /* Stop the user from writing */ + if (pos >= nvmem->size) + return 0; + + if (pos + count > nvmem->size) + count = nvmem->size - pos; + + count = round_down(count, nvmem->word_size); + + rc = regmap_raw_write(nvmem->regmap, pos, buf, count); + + if (IS_ERR_VALUE(rc)) + return rc; + + return count; +} + +/* default read/write permissions */ +static struct bin_attribute bin_attr_rw_nvmem = { + .attr = { + .name = "nvmem", + .mode = S_IWUSR | S_IRUGO, + }, + .read = bin_attr_nvmem_read, + .write = bin_attr_nvmem_write, +}; + +static struct bin_attribute *nvmem_bin_rw_attributes[] = { + &bin_attr_rw_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_rw_group = { + .bin_attrs = nvmem_bin_rw_attributes, +}; + +static const struct attribute_group *nvmem_rw_dev_groups[] = { + &nvmem_bin_rw_group, + NULL, +}; + +/* read only permission */ +static struct bin_attribute bin_attr_ro_nvmem = { + .attr = { + .name = "nvmem", + .mode = S_IRUGO, + }, + .read = bin_attr_nvmem_read, +}; + +static struct bin_attribute *nvmem_bin_ro_attributes[] = { + &bin_attr_ro_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_ro_group = { + .bin_attrs = nvmem_bin_ro_attributes, +}; + +static const struct attribute_group *nvmem_ro_dev_groups[] = { + &nvmem_bin_ro_group, + NULL, +}; + +static void nvmem_release(struct device *dev) +{ + struct nvmem_device *nvmem = to_nvmem_device(dev); + + ida_simple_remove(&nvmem_ida, nvmem->id); + kfree(nvmem); +} + +static const struct device_type nvmem_provider_type = { + .release = nvmem_release, +}; + +static struct bus_type nvmem_bus_type = { + .name = "nvmem", +}; + +static int of_nvmem_match(struct device *dev, void *nvmem_np) +{ + return dev->of_node == nvmem_np; +} + +static struct nvmem_device 
*of_nvmem_find(struct device_node *nvmem_np) +{ + struct device *d; + + if (!nvmem_np) + return NULL; + + d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match); + + if (!d) + return NULL; + + return to_nvmem_device(d); +} + +static struct nvmem_cell *nvmem_find_cell(const char *cell_id) +{ + struct nvmem_cell *p; + + list_for_each_entry(p, &nvmem_cells, node) + if (p && !strcmp(p->name, cell_id)) + return p; + + return NULL; +} + +static void nvmem_cell_drop(struct nvmem_cell *cell) +{ + mutex_lock(&nvmem_cells_mutex); + list_del(&cell->node); + mutex_unlock(&nvmem_cells_mutex); + kfree(cell); +} + +static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem) +{ + struct nvmem_cell *cell; + struct list_head *p, *n; + + list_for_each_safe(p, n, &nvmem_cells) { + cell = list_entry(p, struct nvmem_cell, node); + if (cell->nvmem == nvmem) + nvmem_cell_drop(cell); + } +} + +static void nvmem_cell_add(struct nvmem_cell *cell) +{ + mutex_lock(&nvmem_cells_mutex); + list_add_tail(&cell->node, &nvmem_cells); + mutex_unlock(&nvmem_cells_mutex); +} + +static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, + const struct nvmem_cell_info *info, + struct nvmem_cell *cell) +{ + cell->nvmem = nvmem; + cell->offset = info->offset; + cell->bytes = info->bytes; + cell->name = info->name; + + cell->bit_offset = info->bit_offset; + cell->nbits = info->nbits; + + if (cell->nbits) + cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, + BITS_PER_BYTE); + + if (!IS_ALIGNED(cell->offset, nvmem->stride)) { + dev_err(&nvmem->dev, + "cell %s unaligned to nvmem stride %d\n", + cell->name, nvmem->stride); + return -EINVAL; + } + + return 0; +} + +static int nvmem_add_cells(struct nvmem_device *nvmem, + const struct nvmem_config *cfg) +{ + struct nvmem_cell **cells; + const struct nvmem_cell_info *info = cfg->cells; + int i, rval; + + cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL); + if (!cells) + return -ENOMEM; + + for (i = 0; i < cfg->ncells; i++) { + cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL); + if (!cells[i]) { + rval = -ENOMEM; + goto err; + } + + rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]); + if (IS_ERR_VALUE(rval)) { + kfree(cells[i]); + goto err; + } + + nvmem_cell_add(cells[i]); + } + + nvmem->ncells = cfg->ncells; + /* remove tmp array */ + kfree(cells); + + return 0; +err: + while (--i) + nvmem_cell_drop(cells[i]); + + return rval; +} + +/** + * nvmem_register() - Register a nvmem device for given nvmem_config. + * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem + * + * @config: nvmem device configuration with which nvmem device is created. + * + * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device + * on success. 
+ */ + +struct nvmem_device *nvmem_register(const struct nvmem_config *config) +{ + struct nvmem_device *nvmem; + struct device_node *np; + struct regmap *rm; + int rval; + + if (!config->dev) + return ERR_PTR(-EINVAL); + + rm = dev_get_regmap(config->dev, NULL); + if (!rm) { + dev_err(config->dev, "Regmap not found\n"); + return ERR_PTR(-EINVAL); + } + + nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL); + if (!nvmem) + return ERR_PTR(-ENOMEM); + + rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL); + if (rval < 0) { + kfree(nvmem); + return ERR_PTR(rval); + } + + nvmem->id = rval; + nvmem->regmap = rm; + nvmem->owner = config->owner; + nvmem->stride = regmap_get_reg_stride(rm); + nvmem->word_size = regmap_get_val_bytes(rm); + nvmem->size = regmap_get_max_register(rm) + nvmem->stride; + nvmem->dev.type = &nvmem_provider_type; + nvmem->dev.bus = &nvmem_bus_type; + nvmem->dev.parent = config->dev; + np = config->dev->of_node; + nvmem->dev.of_node = np; + dev_set_name(&nvmem->dev, "%s%d", + config->name ? : "nvmem", config->id); + + nvmem->read_only = of_property_read_bool(np, "read-only") | + config->read_only; + + nvmem->dev.groups = nvmem->read_only ? nvmem_ro_dev_groups : + nvmem_rw_dev_groups; + + device_initialize(&nvmem->dev); + + dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); + + rval = device_add(&nvmem->dev); + if (rval) { + ida_simple_remove(&nvmem_ida, nvmem->id); + kfree(nvmem); + return ERR_PTR(rval); + } + + if (config->cells) + nvmem_add_cells(nvmem, config); + + return nvmem; +} +EXPORT_SYMBOL_GPL(nvmem_register); + +/** + * nvmem_unregister() - Unregister previously registered nvmem device + * + * @nvmem: Pointer to previously registered nvmem device. + * + * Return: Will be an negative on error or a zero on success. 
+ */ +int nvmem_unregister(struct nvmem_device *nvmem) +{ + mutex_lock(&nvmem_mutex); + if (nvmem->users) { + mutex_unlock(&nvmem_mutex); + return -EBUSY; + } + mutex_unlock(&nvmem_mutex); + + nvmem_device_remove_all_cells(nvmem); + device_del(&nvmem->dev); + + return 0; +} +EXPORT_SYMBOL_GPL(nvmem_unregister); + +static struct nvmem_device *__nvmem_device_get(struct device_node *np, + struct nvmem_cell **cellp, + const char *cell_id) +{ + struct nvmem_device *nvmem = NULL; + + mutex_lock(&nvmem_mutex); + + if (np) { + nvmem = of_nvmem_find(np); + if (!nvmem) { + mutex_unlock(&nvmem_mutex); + return ERR_PTR(-EPROBE_DEFER); + } + } else { + struct nvmem_cell *cell = nvmem_find_cell(cell_id); + + if (cell) { + nvmem = cell->nvmem; + *cellp = cell; + } + + if (!nvmem) { + mutex_unlock(&nvmem_mutex); + return ERR_PTR(-ENOENT); + } + } + + nvmem->users++; + mutex_unlock(&nvmem_mutex); + + if (!try_module_get(nvmem->owner)) { + dev_err(&nvmem->dev, + "could not increase module refcount for cell %s\n", + nvmem->name); + + mutex_lock(&nvmem_mutex); + nvmem->users--; + mutex_unlock(&nvmem_mutex); + + return ERR_PTR(-EINVAL); + } + + return nvmem; +} + +static void __nvmem_device_put(struct nvmem_device *nvmem) +{ + module_put(nvmem->owner); + mutex_lock(&nvmem_mutex); + nvmem->users--; + mutex_unlock(&nvmem_mutex); +} + +static int nvmem_match(struct device *dev, void *data) +{ + return !strcmp(dev_name(dev), data); +} + +static struct nvmem_device *nvmem_find(const char *name) +{ + struct device *d; + + d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match); + + if (!d) + return NULL; + + return to_nvmem_device(d); +} + +#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) +/** + * of_nvmem_device_get() - Get nvmem device from a given id + * + * @dev node: Device tree node that uses the nvmem device + * @id: nvmem name from nvmem-names property. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. + */ +struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id) +{ + + struct device_node *nvmem_np; + int index; + + index = of_property_match_string(np, "nvmem-names", id); + + nvmem_np = of_parse_phandle(np, "nvmem", index); + if (!nvmem_np) + return ERR_PTR(-EINVAL); + + return __nvmem_device_get(nvmem_np, NULL, NULL); +} +EXPORT_SYMBOL_GPL(of_nvmem_device_get); +#endif + +/** + * nvmem_device_get() - Get nvmem device from a given id + * + * @dev : Device that uses the nvmem device + * @id: nvmem name from nvmem-names property. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. + */ +struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name) +{ + if (dev->of_node) { /* try dt first */ + struct nvmem_device *nvmem; + + nvmem = of_nvmem_device_get(dev->of_node, dev_name); + + if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER) + return nvmem; + + } + + return nvmem_find(dev_name); +} +EXPORT_SYMBOL_GPL(nvmem_device_get); + +static int devm_nvmem_device_match(struct device *dev, void *res, void *data) +{ + struct nvmem_device **nvmem = res; + + if (WARN_ON(!nvmem || !*nvmem)) + return 0; + + return *nvmem == data; +} + +static void devm_nvmem_device_release(struct device *dev, void *res) +{ + nvmem_device_put(*(struct nvmem_device **)res); +} + +/** + * devm_nvmem_device_put() - put alredy got nvmem device + * + * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(), + * that needs to be released. 
+ */ +void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem) +{ + int ret; + + ret = devres_release(dev, devm_nvmem_device_release, + devm_nvmem_device_match, nvmem); + + WARN_ON(ret); +} +EXPORT_SYMBOL_GPL(devm_nvmem_device_put); + +/** + * nvmem_device_put() - put alredy got nvmem device + * + * @nvmem: pointer to nvmem device that needs to be released. + */ +void nvmem_device_put(struct nvmem_device *nvmem) +{ + __nvmem_device_put(nvmem); +} +EXPORT_SYMBOL_GPL(nvmem_device_put); + +/** + * devm_nvmem_device_get() - Get nvmem cell of device form a given id + * + * @dev node: Device tree node that uses the nvmem cell + * @id: nvmem name in nvmems property. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell + * on success. The nvmem_cell will be freed by the automatically once the + * device is freed. + */ +struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id) +{ + struct nvmem_device **ptr, *nvmem; + + ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + nvmem = nvmem_device_get(dev, id); + if (!IS_ERR(nvmem)) { + *ptr = nvmem; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return nvmem; +} +EXPORT_SYMBOL_GPL(devm_nvmem_device_get); + +static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id) +{ + struct nvmem_cell *cell = NULL; + struct nvmem_device *nvmem; + + nvmem = __nvmem_device_get(NULL, &cell, cell_id); + if (IS_ERR(nvmem)) + return ERR_CAST(nvmem); + + return cell; +} + +#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) +/** + * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id + * + * @dev node: Device tree node that uses the nvmem cell + * @id: nvmem cell name from nvmem-cell-names property. + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell will be freed by the + * nvmem_cell_put(). 
+ */ +struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, + const char *name) +{ + struct device_node *cell_np, *nvmem_np; + struct nvmem_cell *cell; + struct nvmem_device *nvmem; + const __be32 *addr; + int rval, len, index; + + index = of_property_match_string(np, "nvmem-cell-names", name); + + cell_np = of_parse_phandle(np, "nvmem-cells", index); + if (!cell_np) + return ERR_PTR(-EINVAL); + + nvmem_np = of_get_next_parent(cell_np); + if (!nvmem_np) + return ERR_PTR(-EINVAL); + + nvmem = __nvmem_device_get(nvmem_np, NULL, NULL); + if (IS_ERR(nvmem)) + return ERR_CAST(nvmem); + + addr = of_get_property(cell_np, "reg", &len); + if (!addr || (len < 2 * sizeof(u32))) { + dev_err(&nvmem->dev, "nvmem: invalid reg on %s\n", + cell_np->full_name); + rval = -EINVAL; + goto err_mem; + } + + cell = kzalloc(sizeof(*cell), GFP_KERNEL); + if (!cell) { + rval = -ENOMEM; + goto err_mem; + } + + cell->nvmem = nvmem; + cell->offset = be32_to_cpup(addr++); + cell->bytes = be32_to_cpup(addr); + cell->name = cell_np->name; + + addr = of_get_property(cell_np, "bits", &len); + if (addr && len == (2 * sizeof(u32))) { + cell->bit_offset = be32_to_cpup(addr++); + cell->nbits = be32_to_cpup(addr); + } + + if (cell->nbits) + cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, + BITS_PER_BYTE); + + if (!IS_ALIGNED(cell->offset, nvmem->stride)) { + dev_err(&nvmem->dev, + "cell %s unaligned to nvmem stride %d\n", + cell->name, nvmem->stride); + rval = -EINVAL; + goto err_sanity; + } + + nvmem_cell_add(cell); + + return cell; + +err_sanity: + kfree(cell); + +err_mem: + __nvmem_device_put(nvmem); + + return ERR_PTR(rval); +} +EXPORT_SYMBOL_GPL(of_nvmem_cell_get); +#endif + +/** + * nvmem_cell_get() - Get nvmem cell of device form a given cell name + * + * @dev node: Device tree node that uses the nvmem cell + * @id: nvmem cell name to get. + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell will be freed by the + * nvmem_cell_put(). + */ +struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id) +{ + struct nvmem_cell *cell; + + if (dev->of_node) { /* try dt first */ + cell = of_nvmem_cell_get(dev->of_node, cell_id); + if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER) + return cell; + } + + return nvmem_cell_get_from_list(cell_id); +} +EXPORT_SYMBOL_GPL(nvmem_cell_get); + +static void devm_nvmem_cell_release(struct device *dev, void *res) +{ + nvmem_cell_put(*(struct nvmem_cell **)res); +} + +/** + * devm_nvmem_cell_get() - Get nvmem cell of device form a given id + * + * @dev node: Device tree node that uses the nvmem cell + * @id: nvmem id in nvmem-names property. + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell will be freed by the + * automatically once the device is freed. 
+ */ +struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id) +{ + struct nvmem_cell **ptr, *cell; + + ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + cell = nvmem_cell_get(dev, id); + if (!IS_ERR(cell)) { + *ptr = cell; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return cell; +} +EXPORT_SYMBOL_GPL(devm_nvmem_cell_get); + +static int devm_nvmem_cell_match(struct device *dev, void *res, void *data) +{ + struct nvmem_cell **c = res; + + if (WARN_ON(!c || !*c)) + return 0; + + return *c == data; +} + +/** + * devm_nvmem_cell_put() - Release previously allocated nvmem cell + * from devm_nvmem_cell_get. + * + * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get() + */ +void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell) +{ + int ret; + + ret = devres_release(dev, devm_nvmem_cell_release, + devm_nvmem_cell_match, cell); + + WARN_ON(ret); +} +EXPORT_SYMBOL(devm_nvmem_cell_put); + +/** + * nvmem_cell_put() - Release previously allocated nvmem cell. + * + * @cell: Previously allocated nvmem cell by nvmem_cell_get() + */ +void nvmem_cell_put(struct nvmem_cell *cell) +{ + struct nvmem_device *nvmem = cell->nvmem; + + __nvmem_device_put(nvmem); + nvmem_cell_drop(cell); +} +EXPORT_SYMBOL_GPL(nvmem_cell_put); + +static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, + void *buf) +{ + u8 *p, *b; + int i, bit_offset = cell->bit_offset; + + p = b = buf; + if (bit_offset) { + /* First shift */ + *b++ >>= bit_offset; + + /* setup rest of the bytes if any */ + for (i = 1; i < cell->bytes; i++) { + /* Get bits from next byte and shift them towards msb */ + *p |= *b << (BITS_PER_BYTE - bit_offset); + + p = b; + *b++ >>= bit_offset; + } + + /* result fits in less bytes */ + if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE)) + *p-- = 0; + } + /* clear msb bits if any leftover in the last byte */ + *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0); +} + +static int __nvmem_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell *cell, + void *buf, size_t *len) +{ + int rc; + + rc = regmap_raw_read(nvmem->regmap, cell->offset, buf, cell->bytes); + + if (IS_ERR_VALUE(rc)) + return rc; + + /* shift bits in-place */ + if (cell->bit_offset || cell->nbits) + nvmem_shift_read_buffer_in_place(cell, buf); + + *len = cell->bytes; + + return 0; +} + +/** + * nvmem_cell_read() - Read a given nvmem cell + * + * @cell: nvmem cell to be read. + * @len: pointer to length of cell which will be populated on successful read. + * + * Return: ERR_PTR() on error or a valid pointer to a char * buffer on success. + * The buffer should be freed by the consumer with a kfree(). 
+ */ +void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) +{ + struct nvmem_device *nvmem = cell->nvmem; + u8 *buf; + int rc; + + if (!nvmem || !nvmem->regmap) + return ERR_PTR(-EINVAL); + + buf = kzalloc(cell->bytes, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + rc = __nvmem_cell_read(nvmem, cell, buf, len); + if (IS_ERR_VALUE(rc)) { + kfree(buf); + return ERR_PTR(rc); + } + + return buf; +} +EXPORT_SYMBOL_GPL(nvmem_cell_read); + +static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, + u8 *_buf, int len) +{ + struct nvmem_device *nvmem = cell->nvmem; + int i, rc, nbits, bit_offset = cell->bit_offset; + u8 v, *p, *buf, *b, pbyte, pbits; + + nbits = cell->nbits; + buf = kzalloc(cell->bytes, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + memcpy(buf, _buf, len); + p = b = buf; + + if (bit_offset) { + pbyte = *b; + *b <<= bit_offset; + + /* setup the first byte with lsb bits from nvmem */ + rc = regmap_raw_read(nvmem->regmap, cell->offset, &v, 1); + *b++ |= GENMASK(bit_offset - 1, 0) & v; + + /* setup rest of the byte if any */ + for (i = 1; i < cell->bytes; i++) { + /* Get last byte bits and shift them towards lsb */ + pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset); + pbyte = *b; + p = b; + *b <<= bit_offset; + *b++ |= pbits; + } + } + + /* if it's not end on byte boundary */ + if ((nbits + bit_offset) % BITS_PER_BYTE) { + /* setup the last byte with msb bits from nvmem */ + rc = regmap_raw_read(nvmem->regmap, + cell->offset + cell->bytes - 1, &v, 1); + *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; + + } + + return buf; +} + +/** + * nvmem_cell_write() - Write to a given nvmem cell + * + * @cell: nvmem cell to be written. + * @buf: Buffer to be written. + * @len: length of buffer to be written to nvmem cell. + * + * Return: length of bytes written or negative on failure. + */ +int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) +{ + struct nvmem_device *nvmem = cell->nvmem; + int rc; + + if (!nvmem || !nvmem->regmap || nvmem->read_only || + (cell->bit_offset == 0 && len != cell->bytes)) + return -EINVAL; + + if (cell->bit_offset || cell->nbits) { + buf = nvmem_cell_prepare_write_buffer(cell, buf, len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + } + + rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); + + /* free the tmp buffer */ + if (cell->bit_offset || cell->nbits) + kfree(buf); + + if (IS_ERR_VALUE(rc)) + return rc; + + return len; +} +EXPORT_SYMBOL_GPL(nvmem_cell_write); + +/** + * nvmem_device_cell_read() - Read a given nvmem device and cell + * + * @nvmem: nvmem device to read from. + * @info: nvmem cell info to be read. + * @buf: buffer pointer which will be populated on successful read. + * + * Return: length of successful bytes read on success and negative + * error code on error. + */ +ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf) +{ + struct nvmem_cell cell; + int rc; + ssize_t len; + + if (!nvmem || !nvmem->regmap) + return -EINVAL; + + rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); + if (IS_ERR_VALUE(rc)) + return rc; + + rc = __nvmem_cell_read(nvmem, &cell, buf, &len); + if (IS_ERR_VALUE(rc)) + return rc; + + return len; +} +EXPORT_SYMBOL_GPL(nvmem_device_cell_read); + +/** + * nvmem_device_cell_write() - Write cell to a given nvmem device + * + * @nvmem: nvmem device to be written to. + * @info: nvmem cell info to be written + * @buf: buffer to be written to cell. 
+ * + * Return: length of bytes written or negative error code on failure. + * */ +int nvmem_device_cell_write(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf) +{ + struct nvmem_cell cell; + int rc; + + if (!nvmem || !nvmem->regmap) + return -EINVAL; + + rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); + if (IS_ERR_VALUE(rc)) + return rc; + + return nvmem_cell_write(&cell, buf, cell.bytes); +} +EXPORT_SYMBOL_GPL(nvmem_device_cell_write); + +/** + * nvmem_device_read() - Read from a given nvmem device + * + * @nvmem: nvmem device to read from. + * @offset: offset in nvmem device. + * @bytes: number of bytes to read. + * @buf: buffer pointer which will be populated on successful read. + * + * Return: length of successful bytes read on success and negative + * error code on error. + */ +int nvmem_device_read(struct nvmem_device *nvmem, + unsigned int offset, + size_t bytes, void *buf) +{ + int rc; + + if (!nvmem || !nvmem->regmap) + return -EINVAL; + + rc = regmap_raw_read(nvmem->regmap, offset, buf, bytes); + + if (IS_ERR_VALUE(rc)) + return rc; + + return bytes; +} +EXPORT_SYMBOL_GPL(nvmem_device_read); + +/** + * nvmem_device_write() - Write cell to a given nvmem device + * + * @nvmem: nvmem device to be written to. + * @offset: offset in nvmem device. + * @bytes: number of bytes to write. + * @buf: buffer to be written. + * + * Return: length of bytes written or negative error code on failure. + * */ +int nvmem_device_write(struct nvmem_device *nvmem, + unsigned int offset, + size_t bytes, void *buf) +{ + int rc; + + if (!nvmem || !nvmem->regmap) + return -EINVAL; + + rc = regmap_raw_write(nvmem->regmap, offset, buf, bytes); + + if (IS_ERR_VALUE(rc)) + return rc; + + + return bytes; +} +EXPORT_SYMBOL_GPL(nvmem_device_write); + +static int __init nvmem_init(void) +{ + return bus_register(&nvmem_bus_type); +} + +static void __exit nvmem_exit(void) +{ + bus_unregister(&nvmem_bus_type); +} + +subsys_initcall(nvmem_init); +module_exit(nvmem_exit); + +MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org"); +MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com"); +MODULE_DESCRIPTION("nvmem Driver Core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c new file mode 100644 index 000000000..afb67e7ee --- /dev/null +++ b/drivers/nvmem/qfprom.c @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +static struct regmap_config qfprom_regmap_config = { + .reg_bits = 32, + .val_bits = 8, + .reg_stride = 1, +}; + +static struct nvmem_config econfig = { + .name = "qfprom", + .owner = THIS_MODULE, +}; + +static int qfprom_remove(struct platform_device *pdev) +{ + struct nvmem_device *nvmem = platform_get_drvdata(pdev); + + return nvmem_unregister(nvmem); +} + +static int qfprom_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + struct regmap *regmap; + void __iomem *base; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + qfprom_regmap_config.max_register = resource_size(res) - 1; + + regmap = devm_regmap_init_mmio(dev, base, &qfprom_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev, "regmap init failed\n"); + return PTR_ERR(regmap); + } + econfig.dev = dev; + nvmem = nvmem_register(&econfig); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + platform_set_drvdata(pdev, nvmem); + + return 0; +} + +static const struct of_device_id qfprom_of_match[] = { + { .compatible = "qcom,qfprom",}, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, qfprom_of_match); + +static struct platform_driver qfprom_driver = { + .probe = qfprom_probe, + .remove = qfprom_remove, + .driver = { + .name = "qcom,qfprom", + .of_match_table = qfprom_of_match, + }, +}; +module_platform_driver(qfprom_driver); +MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm QFPROM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c new file mode 100644 index 000000000..cfa3b8506 --- /dev/null +++ b/drivers/nvmem/sunxi_sid.c @@ -0,0 +1,180 @@ +/* + * Allwinner sunXi SoCs Security ID support. + * + * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl> + * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/random.h> + + +static struct nvmem_config econfig = { + .name = "sunxi-sid", + .read_only = true, + .owner = THIS_MODULE, +}; + +struct sunxi_sid { + void __iomem *base; +}; + +/* We read the entire key, due to a 32 bit read alignment requirement. Since we + * want to return the requested byte, this results in somewhat slower code and + * uses 4 times more reads as needed but keeps code simpler. Since the SID is + * only very rarely probed, this is not really an issue. 
+ */ +static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid, + const unsigned int offset) +{ + u32 sid_key; + + sid_key = ioread32be(sid->base + round_down(offset, 4)); + sid_key >>= (offset % 4) * 8; + + return sid_key; /* Only return the last byte */ +} + +static int sunxi_sid_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct sunxi_sid *sid = context; + unsigned int offset = *(u32 *)reg; + u8 *buf = val; + + while (val_size) { + *buf++ = sunxi_sid_read_byte(sid, offset); + val_size--; + offset++; + } + + return 0; +} + +static int sunxi_sid_write(void *context, const void *data, size_t count) +{ + /* Unimplemented, dummy to keep regmap core happy */ + return 0; +} + +static struct regmap_bus sunxi_sid_bus = { + .read = sunxi_sid_read, + .write = sunxi_sid_write, + .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, + .val_format_endian_default = REGMAP_ENDIAN_NATIVE, +}; + +static bool sunxi_sid_writeable_reg(struct device *dev, unsigned int reg) +{ + return false; +} + +static struct regmap_config sunxi_sid_regmap_config = { + .reg_bits = 32, + .val_bits = 8, + .reg_stride = 1, + .writeable_reg = sunxi_sid_writeable_reg, +}; + +static int sunxi_sid_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + struct regmap *regmap; + struct sunxi_sid *sid; + int ret, i, size; + char *randomness; + + sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); + if (!sid) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sid->base = devm_ioremap_resource(dev, res); + if (IS_ERR(sid->base)) + return PTR_ERR(sid->base); + + size = resource_size(res) - 1; + sunxi_sid_regmap_config.max_register = size; + + regmap = devm_regmap_init(dev, &sunxi_sid_bus, sid, + &sunxi_sid_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev, "regmap init failed\n"); + return PTR_ERR(regmap); + } + + econfig.dev = dev; + nvmem = nvmem_register(&econfig); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL); + if (!randomness) { + ret = -EINVAL; + goto err_unreg_nvmem; + } + + for (i = 0; i < size; i++) + randomness[i] = sunxi_sid_read_byte(sid, i); + + add_device_randomness(randomness, size); + kfree(randomness); + + platform_set_drvdata(pdev, nvmem); + + return 0; + +err_unreg_nvmem: + nvmem_unregister(nvmem); + return ret; +} + +static int sunxi_sid_remove(struct platform_device *pdev) +{ + struct nvmem_device *nvmem = platform_get_drvdata(pdev); + + return nvmem_unregister(nvmem); +} + +static const struct of_device_id sunxi_sid_of_match[] = { + { .compatible = "allwinner,sun4i-a10-sid" }, + { .compatible = "allwinner,sun7i-a20-sid" }, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, sunxi_sid_of_match); + +static struct platform_driver sunxi_sid_driver = { + .probe = sunxi_sid_probe, + .remove = sunxi_sid_remove, + .driver = { + .name = "eeprom-sunxi-sid", + .of_match_table = sunxi_sid_of_match, + }, +}; +module_platform_driver(sunxi_sid_driver); + +MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>"); +MODULE_DESCRIPTION("Allwinner sunxi security id driver"); +MODULE_LICENSE("GPL"); |
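
As a closing note, core.c also exposes a device-level consumer path (nvmem_device_get()/nvmem_device_read()) that bypasses cells and reads raw bytes, mirroring what the sysfs "nvmem" binary attribute offers from userspace. A sketch of that path follows; it is assumed to be called from a hypothetical driver's probe, and the lookup name "qfprom0" and the offset are illustrative only (the name assumes the QFPROM provider registered with config->name = "qfprom" and id 0).

```c
/*
 * Hypothetical raw-access consumer using the device-level API from
 * core.c above; the lookup name and offset are illustrative only.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/platform_device.h>

static int example_raw_read(struct platform_device *pdev)
{
	struct nvmem_device *nvmem;
	u8 fuse[4];
	int ret;

	/* Resolved via the "nvmem"/"nvmem-names" DT properties handled by
	 * of_nvmem_device_get(), falling back to a device-name match in
	 * nvmem_find() on non-DT platforms. */
	nvmem = devm_nvmem_device_get(&pdev->dev, "qfprom0");
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/* nvmem_device_read() forwards to regmap_raw_read() and returns
	 * the number of bytes read on success. */
	ret = nvmem_device_read(nvmem, 0x10, sizeof(fuse), fuse);
	if (ret < 0)
		return ret;

	dev_info(&pdev->dev, "fuse word: %*ph\n", ret, fuse);

	return 0;
}
```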