author      André Fabian Silva Delgado <emulatorman@parabola.nu>    2015-09-08 01:01:14 -0300
committer   André Fabian Silva Delgado <emulatorman@parabola.nu>    2015-09-08 01:01:14 -0300
commit      e5fd91f1ef340da553f7a79da9540c3db711c937 (patch)
tree        b11842027dc6641da63f4bcc524f8678263304a3 /drivers/firmware
parent      2a9b0348e685a63d97486f6749622b61e9e3292f (diff)
Linux-libre 4.2-gnu
Diffstat (limited to 'drivers/firmware')
-rw-r--r--  drivers/firmware/Kconfig                      1
-rw-r--r--  drivers/firmware/Makefile                     4
-rw-r--r--  drivers/firmware/broadcom/Kconfig            11
-rw-r--r--  drivers/firmware/broadcom/Makefile            1
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_nvram.c   248
-rw-r--r--  drivers/firmware/dmi-sysfs.c                 17
-rw-r--r--  drivers/firmware/dmi_scan.c                 110
-rw-r--r--  drivers/firmware/efi/Kconfig                  5
-rw-r--r--  drivers/firmware/efi/Makefile                 1
-rw-r--r--  drivers/firmware/efi/efi.c                   91
-rw-r--r--  drivers/firmware/efi/efivars.c               11
-rw-r--r--  drivers/firmware/efi/esrt.c                 471
-rw-r--r--  drivers/firmware/efi/libstub/Makefile         2
-rw-r--r--  drivers/firmware/memmap.c                    24
-rw-r--r--  drivers/firmware/qcom_scm-32.c              503
-rw-r--r--  drivers/firmware/qcom_scm.c                 474
-rw-r--r--  drivers/firmware/qcom_scm.h                  47
17 files changed, 1540 insertions, 481 deletions
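
For orientation only (not part of the commit): a minimal userspace sketch of reading the sysfs files that the esrt.c and dmi_scan.c changes below create. The paths and attribute names are taken from the code in this diff; the attributes are created with mode 0400, so the program is assumed to run as root, and error handling is kept to a perror() per file.

/* Hypothetical userspace reader for the new ESRT/DMI sysfs files. */
#include <stdio.h>

static void print_file(const char *path)
{
	char buf[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	/* Top-level ESRT header fields exported by esrt.c */
	print_file("/sys/firmware/efi/esrt/fw_resource_count");
	print_file("/sys/firmware/efi/esrt/fw_resource_version");

	/* First ESRT entry, if the firmware provides one */
	print_file("/sys/firmware/efi/esrt/entries/entry0/fw_class");
	print_file("/sys/firmware/efi/esrt/entries/entry0/fw_version");

	/* dmi_scan.c additionally exports the raw tables as binary blobs */
	printf("raw tables: /sys/firmware/dmi/tables/smbios_entry_point, "
	       "/sys/firmware/dmi/tables/DMI\n");
	return 0;
}
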
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 6517132e5..99c69a320 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -136,6 +136,7 @@ config QCOM_SCM bool depends on ARM || ARM64 +source "drivers/firmware/broadcom/Kconfig" source "drivers/firmware/google/Kconfig" source "drivers/firmware/efi/Kconfig" diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 3fdd39127..4a4b897f9 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -12,8 +12,10 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o obj-$(CONFIG_QCOM_SCM) += qcom_scm.o -CFLAGS_qcom_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) +obj-$(CONFIG_QCOM_SCM) += qcom_scm-32.o +CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) +obj-y += broadcom/ obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ obj-$(CONFIG_EFI) += efi/ obj-$(CONFIG_UEFI_CPER) += efi/ diff --git a/drivers/firmware/broadcom/Kconfig b/drivers/firmware/broadcom/Kconfig new file mode 100644 index 000000000..6bed11993 --- /dev/null +++ b/drivers/firmware/broadcom/Kconfig @@ -0,0 +1,11 @@ +config BCM47XX_NVRAM + bool "Broadcom NVRAM driver" + depends on BCM47XX || ARCH_BCM_5301X + help + Broadcom home routers contain flash partition called "nvram" with all + important hardware configuration as well as some minor user setup. + NVRAM partition contains a text-like data representing name=value + pairs. + This driver provides an easy way to get value of requested parameter. + It simply reads content of NVRAM and parses it. It doesn't control any + hardware part itself. diff --git a/drivers/firmware/broadcom/Makefile b/drivers/firmware/broadcom/Makefile new file mode 100644 index 000000000..d0e683583 --- /dev/null +++ b/drivers/firmware/broadcom/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_BCM47XX_NVRAM) += bcm47xx_nvram.o diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c new file mode 100644 index 000000000..e41594510 --- /dev/null +++ b/drivers/firmware/broadcom/bcm47xx_nvram.c @@ -0,0 +1,248 @@ +/* + * BCM947xx nvram variable access + * + * Copyright (C) 2005 Broadcom Corporation + * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> + * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/io.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/mtd/mtd.h> +#include <linux/bcm47xx_nvram.h> + +#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */ +#define NVRAM_SPACE 0x10000 +#define NVRAM_MAX_GPIO_ENTRIES 32 +#define NVRAM_MAX_GPIO_VALUE_LEN 30 + +#define FLASH_MIN 0x00020000 /* Minimum flash size */ + +struct nvram_header { + u32 magic; + u32 len; + u32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */ + u32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */ + u32 config_ncdl; /* ncdl values for memc */ +}; + +static char nvram_buf[NVRAM_SPACE]; +static size_t nvram_len; +static const u32 nvram_sizes[] = {0x8000, 0xF000, 0x10000}; + +static u32 find_nvram_size(void __iomem *end) +{ + struct nvram_header __iomem *header; + int i; + + for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) { + header = (struct nvram_header *)(end - nvram_sizes[i]); + if (header->magic == NVRAM_MAGIC) + return nvram_sizes[i]; + } + + return 0; +} + +/* Probe for NVRAM header */ +static int nvram_find_and_copy(void __iomem *iobase, u32 lim) +{ + struct nvram_header __iomem *header; + int i; + u32 off; + u32 *src, *dst; + u32 size; + + if (nvram_len) { + pr_warn("nvram already initialized\n"); + return -EEXIST; + } + + /* TODO: when nvram is on nand flash check for bad blocks first. */ + off = FLASH_MIN; + while (off <= lim) { + /* Windowed flash access */ + size = find_nvram_size(iobase + off); + if (size) { + header = (struct nvram_header *)(iobase + off - size); + goto found; + } + off <<= 1; + } + + /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */ + header = (struct nvram_header *)(iobase + 4096); + if (header->magic == NVRAM_MAGIC) { + size = NVRAM_SPACE; + goto found; + } + + header = (struct nvram_header *)(iobase + 1024); + if (header->magic == NVRAM_MAGIC) { + size = NVRAM_SPACE; + goto found; + } + + pr_err("no nvram found\n"); + return -ENXIO; + +found: + src = (u32 *)header; + dst = (u32 *)nvram_buf; + for (i = 0; i < sizeof(struct nvram_header); i += 4) + *dst++ = __raw_readl(src++); + header = (struct nvram_header *)nvram_buf; + nvram_len = header->len; + if (nvram_len > size) { + pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n"); + nvram_len = size; + } + if (nvram_len >= NVRAM_SPACE) { + pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n", + header->len, NVRAM_SPACE - 1); + nvram_len = NVRAM_SPACE - 1; + } + /* proceed reading data after header */ + for (; i < nvram_len; i += 4) + *dst++ = readl(src++); + nvram_buf[NVRAM_SPACE - 1] = '\0'; + + return 0; +} + +/* + * On bcm47xx we need access to the NVRAM very early, so we can't use mtd + * subsystem to access flash. We can't even use platform device / driver to + * store memory offset. + * To handle this we provide following symbol. It's supposed to be called as + * soon as we get info about flash device, before any NVRAM entry is needed. 
+ */ +int bcm47xx_nvram_init_from_mem(u32 base, u32 lim) +{ + void __iomem *iobase; + int err; + + iobase = ioremap_nocache(base, lim); + if (!iobase) + return -ENOMEM; + + err = nvram_find_and_copy(iobase, lim); + + iounmap(iobase); + + return err; +} + +static int nvram_init(void) +{ +#ifdef CONFIG_MTD + struct mtd_info *mtd; + struct nvram_header header; + size_t bytes_read; + int err; + + mtd = get_mtd_device_nm("nvram"); + if (IS_ERR(mtd)) + return -ENODEV; + + err = mtd_read(mtd, 0, sizeof(header), &bytes_read, (uint8_t *)&header); + if (!err && header.magic == NVRAM_MAGIC && + header.len > sizeof(header)) { + nvram_len = header.len; + if (nvram_len >= NVRAM_SPACE) { + pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n", + header.len, NVRAM_SPACE); + nvram_len = NVRAM_SPACE - 1; + } + + err = mtd_read(mtd, 0, nvram_len, &nvram_len, + (u8 *)nvram_buf); + return err; + } +#endif + + return -ENXIO; +} + +int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len) +{ + char *var, *value, *end, *eq; + int err; + + if (!name) + return -EINVAL; + + if (!nvram_len) { + err = nvram_init(); + if (err) + return err; + } + + /* Look for name=value and return value */ + var = &nvram_buf[sizeof(struct nvram_header)]; + end = nvram_buf + sizeof(nvram_buf); + while (var < end && *var) { + eq = strchr(var, '='); + if (!eq) + break; + value = eq + 1; + if (eq - var == strlen(name) && + strncmp(var, name, eq - var) == 0) + return snprintf(val, val_len, "%s", value); + var = value + strlen(value) + 1; + } + return -ENOENT; +} +EXPORT_SYMBOL(bcm47xx_nvram_getenv); + +int bcm47xx_nvram_gpio_pin(const char *name) +{ + int i, err; + char nvram_var[] = "gpioXX"; + char buf[NVRAM_MAX_GPIO_VALUE_LEN]; + + /* TODO: Optimize it to don't call getenv so many times */ + for (i = 0; i < NVRAM_MAX_GPIO_ENTRIES; i++) { + err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i); + if (err <= 0) + continue; + err = bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf)); + if (err <= 0) + continue; + if (!strcmp(name, buf)) + return i; + } + return -ENOENT; +} +EXPORT_SYMBOL(bcm47xx_nvram_gpio_pin); + +char *bcm47xx_nvram_get_contents(size_t *nvram_size) +{ + int err; + char *nvram; + + if (!nvram_len) { + err = nvram_init(); + if (err) + return NULL; + } + + *nvram_size = nvram_len - sizeof(struct nvram_header); + nvram = vmalloc(*nvram_size); + if (!nvram) + return NULL; + memcpy(nvram, &nvram_buf[sizeof(struct nvram_header)], *nvram_size); + + return nvram; +} +EXPORT_SYMBOL(bcm47xx_nvram_get_contents); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c index e0f1cb3d3..ef76e5eec 100644 --- a/drivers/firmware/dmi-sysfs.c +++ b/drivers/firmware/dmi-sysfs.c @@ -566,7 +566,6 @@ static struct kobj_type dmi_sysfs_entry_ktype = { .default_attrs = dmi_sysfs_entry_attrs, }; -static struct kobject *dmi_kobj; static struct kset *dmi_kset; /* Global count of all instances seen. 
Only for setup */ @@ -648,17 +647,20 @@ static void cleanup_entry_list(void) static int __init dmi_sysfs_init(void) { - int error = -ENOMEM; + int error; int val; - /* Set up our directory */ - dmi_kobj = kobject_create_and_add("dmi", firmware_kobj); - if (!dmi_kobj) + if (!dmi_kobj) { + pr_err("dmi-sysfs: dmi entry is absent.\n"); + error = -ENODATA; goto err; + } dmi_kset = kset_create_and_add("entries", NULL, dmi_kobj); - if (!dmi_kset) + if (!dmi_kset) { + error = -ENOMEM; goto err; + } val = 0; error = dmi_walk(dmi_sysfs_register_handle, &val); @@ -675,7 +677,6 @@ static int __init dmi_sysfs_init(void) err: cleanup_entry_list(); kset_unregister(dmi_kset); - kobject_put(dmi_kobj); return error; } @@ -685,8 +686,6 @@ static void __exit dmi_sysfs_exit(void) pr_debug("dmi-sysfs: unloading.\n"); cleanup_entry_list(); kset_unregister(dmi_kset); - kobject_del(dmi_kobj); - kobject_put(dmi_kobj); } module_init(dmi_sysfs_init); diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index bba843c2b..ac1ce4a73 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -10,6 +10,9 @@ #include <asm/dmi.h> #include <asm/unaligned.h> +struct kobject *dmi_kobj; +EXPORT_SYMBOL_GPL(dmi_kobj); + /* * DMI stands for "Desktop Management Interface". It is part * of and an antecedent to, SMBIOS, which stands for System @@ -20,6 +23,9 @@ static const char dmi_empty_string[] = " "; static u32 dmi_ver __initdata; static u32 dmi_len; static u16 dmi_num; +static u8 smbios_entry_point[32]; +static int smbios_entry_point_size; + /* * Catch too early calls to dmi_check_system(): */ @@ -80,9 +86,9 @@ static const char * __init dmi_string(const struct dmi_header *dm, u8 s) * We have to be cautious here. We have seen BIOSes with DMI pointers * pointing to completely the wrong place for example */ -static void dmi_table(u8 *buf, - void (*decode)(const struct dmi_header *, void *), - void *private_data) +static void dmi_decode_table(u8 *buf, + void (*decode)(const struct dmi_header *, void *), + void *private_data) { u8 *data = buf; int i = 0; @@ -108,6 +114,9 @@ static void dmi_table(u8 *buf, if (data - buf < dmi_len - 1) decode(dm, private_data); + data += 2; + i++; + /* * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0] * For tables behind a 64-bit entry point, we have no item @@ -118,10 +127,11 @@ static void dmi_table(u8 *buf, */ if (!dmi_num && dm->type == DMI_ENTRY_END_OF_TABLE) break; - - data += 2; - i++; } + + /* Trim DMI table length if needed */ + if (dmi_len > data - buf) + dmi_len = data - buf; } static phys_addr_t dmi_base; @@ -130,16 +140,17 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, void *)) { u8 *buf; + u32 orig_dmi_len = dmi_len; - buf = dmi_early_remap(dmi_base, dmi_len); + buf = dmi_early_remap(dmi_base, orig_dmi_len); if (buf == NULL) return -1; - dmi_table(buf, decode, NULL); + dmi_decode_table(buf, decode, NULL); add_device_randomness(buf, dmi_len); - dmi_early_unmap(buf, dmi_len); + dmi_early_unmap(buf, orig_dmi_len); return 0; } @@ -483,17 +494,19 @@ static int __init dmi_present(const u8 *buf) if (memcmp(buf, "_SM_", 4) == 0 && buf[5] < 32 && dmi_checksum(buf, buf[5])) { smbios_ver = get_unaligned_be16(buf + 6); + smbios_entry_point_size = buf[5]; + memcpy(smbios_entry_point, buf, smbios_entry_point_size); /* Some BIOS report weird SMBIOS version, fix that up */ switch (smbios_ver) { case 0x021F: case 0x0221: - pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", + pr_debug("SMBIOS version fixup (2.%d->2.%d)\n", 
smbios_ver & 0xFF, 3); smbios_ver = 0x0203; break; case 0x0233: - pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6); + pr_debug("SMBIOS version fixup (2.%d->2.%d)\n", 51, 6); smbios_ver = 0x0206; break; } @@ -517,6 +530,9 @@ static int __init dmi_present(const u8 *buf) pr_info("SMBIOS %d.%d present.\n", dmi_ver >> 8, dmi_ver & 0xFF); } else { + smbios_entry_point_size = 15; + memcpy(smbios_entry_point, buf, + smbios_entry_point_size); pr_info("Legacy DMI %d.%d present.\n", dmi_ver >> 8, dmi_ver & 0xFF); } @@ -538,11 +554,12 @@ static int __init dmi_smbios3_present(const u8 *buf) { if (memcmp(buf, "_SM3_", 5) == 0 && buf[6] < 32 && dmi_checksum(buf, buf[6])) { - dmi_ver = get_unaligned_be32(buf + 6); - dmi_ver &= 0xFFFFFF; + dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF; dmi_num = 0; /* No longer specified */ dmi_len = get_unaligned_le32(buf + 12); dmi_base = get_unaligned_le64(buf + 16); + smbios_entry_point_size = buf[6]; + memcpy(smbios_entry_point, buf, smbios_entry_point_size); if (dmi_walk_early(dmi_decode) == 0) { pr_info("SMBIOS %d.%d.%d present.\n", @@ -634,6 +651,71 @@ void __init dmi_scan_machine(void) dmi_initialized = 1; } +static ssize_t raw_table_read(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t pos, size_t count) +{ + memcpy(buf, attr->private + pos, count); + return count; +} + +static BIN_ATTR(smbios_entry_point, S_IRUSR, raw_table_read, NULL, 0); +static BIN_ATTR(DMI, S_IRUSR, raw_table_read, NULL, 0); + +static int __init dmi_init(void) +{ + struct kobject *tables_kobj; + u8 *dmi_table; + int ret = -ENOMEM; + + if (!dmi_available) { + ret = -ENODATA; + goto err; + } + + /* + * Set up dmi directory at /sys/firmware/dmi. This entry should stay + * even after farther error, as it can be used by other modules like + * dmi-sysfs. 
+ */ + dmi_kobj = kobject_create_and_add("dmi", firmware_kobj); + if (!dmi_kobj) + goto err; + + tables_kobj = kobject_create_and_add("tables", dmi_kobj); + if (!tables_kobj) + goto err; + + dmi_table = dmi_remap(dmi_base, dmi_len); + if (!dmi_table) + goto err_tables; + + bin_attr_smbios_entry_point.size = smbios_entry_point_size; + bin_attr_smbios_entry_point.private = smbios_entry_point; + ret = sysfs_create_bin_file(tables_kobj, &bin_attr_smbios_entry_point); + if (ret) + goto err_unmap; + + bin_attr_DMI.size = dmi_len; + bin_attr_DMI.private = dmi_table; + ret = sysfs_create_bin_file(tables_kobj, &bin_attr_DMI); + if (!ret) + return 0; + + sysfs_remove_bin_file(tables_kobj, + &bin_attr_smbios_entry_point); + err_unmap: + dmi_unmap(dmi_table); + err_tables: + kobject_del(tables_kobj); + kobject_put(tables_kobj); + err: + pr_err("dmi: Firmware registration failed.\n"); + + return ret; +} +subsys_initcall(dmi_init); + /** * dmi_set_dump_stack_arch_desc - set arch description for dump_stack() * @@ -902,7 +984,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *), if (buf == NULL) return -1; - dmi_table(buf, decode, private_data); + dmi_decode_table(buf, decode, private_data); dmi_unmap(buf); return 0; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 8de4da5c9..54071c148 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -18,6 +18,11 @@ config EFI_VARS Subsequent efibootmgr releases may be found at: <http://github.com/vathpela/efibootmgr> +config EFI_ESRT + bool + depends on EFI && !IA64 + default y + config EFI_VARS_PSTORE tristate "Register efivars backend for pstore" depends on EFI_VARS && PSTORE diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index d8be608a9..6fd3da938 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -3,6 +3,7 @@ # obj-$(CONFIG_EFI) += efi.o vars.o reboot.o obj-$(CONFIG_EFI_VARS) += efivars.o +obj-$(CONFIG_EFI_ESRT) += esrt.o obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o obj-$(CONFIG_UEFI_CPER) += cper.o obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 63226e903..d6144e3b9 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -39,6 +39,7 @@ struct efi __read_mostly efi = { .fw_vendor = EFI_INVALID_TABLE_ADDR, .runtime = EFI_INVALID_TABLE_ADDR, .config_table = EFI_INVALID_TABLE_ADDR, + .esrt = EFI_INVALID_TABLE_ADDR, }; EXPORT_SYMBOL(efi); @@ -69,7 +70,7 @@ static int __init parse_efi_cmdline(char *str) } early_param("efi", parse_efi_cmdline); -static struct kobject *efi_kobj; +struct kobject *efi_kobj; /* * Let's not leave out systab information that snuck into @@ -89,10 +90,15 @@ static ssize_t systab_show(struct kobject *kobj, str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20); if (efi.acpi != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "ACPI=0x%lx\n", efi.acpi); - if (efi.smbios != EFI_INVALID_TABLE_ADDR) - str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); + /* + * If both SMBIOS and SMBIOS3 entry points are implemented, the + * SMBIOS3 entry point shall be preferred, so we list it first to + * let applications stop parsing after the first match. 
+ */ if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3); + if (efi.smbios != EFI_INVALID_TABLE_ADDR) + str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); if (efi.hcdp != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp); if (efi.boot_info != EFI_INVALID_TABLE_ADDR) @@ -235,6 +241,84 @@ err_put: subsys_initcall(efisubsys_init); +/* + * Find the efi memory descriptor for a given physical address. Given a + * physicall address, determine if it exists within an EFI Memory Map entry, + * and if so, populate the supplied memory descriptor with the appropriate + * data. + */ +int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) +{ + struct efi_memory_map *map = efi.memmap; + void *p, *e; + + if (!efi_enabled(EFI_MEMMAP)) { + pr_err_once("EFI_MEMMAP is not enabled.\n"); + return -EINVAL; + } + + if (!map) { + pr_err_once("efi.memmap is not set.\n"); + return -EINVAL; + } + if (!out_md) { + pr_err_once("out_md is null.\n"); + return -EINVAL; + } + if (WARN_ON_ONCE(!map->phys_map)) + return -EINVAL; + if (WARN_ON_ONCE(map->nr_map == 0) || WARN_ON_ONCE(map->desc_size == 0)) + return -EINVAL; + + e = map->phys_map + map->nr_map * map->desc_size; + for (p = map->phys_map; p < e; p += map->desc_size) { + efi_memory_desc_t *md; + u64 size; + u64 end; + + /* + * If a driver calls this after efi_free_boot_services, + * ->map will be NULL, and the target may also not be mapped. + * So just always get our own virtual map on the CPU. + * + */ + md = early_memremap((phys_addr_t)p, sizeof (*md)); + if (!md) { + pr_err_once("early_memremap(%p, %zu) failed.\n", + p, sizeof (*md)); + return -ENOMEM; + } + + if (!(md->attribute & EFI_MEMORY_RUNTIME) && + md->type != EFI_BOOT_SERVICES_DATA && + md->type != EFI_RUNTIME_SERVICES_DATA) { + early_memunmap(md, sizeof (*md)); + continue; + } + + size = md->num_pages << EFI_PAGE_SHIFT; + end = md->phys_addr + size; + if (phys_addr >= md->phys_addr && phys_addr < end) { + memcpy(out_md, md, sizeof(*out_md)); + early_memunmap(md, sizeof (*md)); + return 0; + } + + early_memunmap(md, sizeof (*md)); + } + pr_err_once("requested map not found.\n"); + return -ENOENT; +} + +/* + * Calculate the highest address of an efi memory descriptor. 
+ */ +u64 __init efi_mem_desc_end(efi_memory_desc_t *md) +{ + u64 size = md->num_pages << EFI_PAGE_SHIFT; + u64 end = md->phys_addr + size; + return end; +} /* * We can't ioremap data in EFI boot services RAM, because we've already mapped @@ -277,6 +361,7 @@ static __initdata efi_config_table_type_t common_tables[] = { {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios}, {SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3}, {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga}, + {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt}, {NULL_GUID, NULL, NULL}, }; diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 7b2e0496e..756eca8c4 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -535,7 +535,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, * efivar_create_sysfs_entry - create a new entry in sysfs * @new_var: efivar entry to create * - * Returns 1 on failure, 0 on success + * Returns 0 on success, negative error code on failure */ static int efivar_create_sysfs_entry(struct efivar_entry *new_var) @@ -544,6 +544,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) char *short_name; unsigned long variable_name_size; efi_char16_t *variable_name; + int ret; variable_name = new_var->var.VariableName; variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t); @@ -558,7 +559,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) short_name = kzalloc(short_name_size, GFP_KERNEL); if (!short_name) - return 1; + return -ENOMEM; /* Convert Unicode to normal chars (assume top bits are 0), ala UTF-8 */ @@ -574,11 +575,11 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) new_var->kobj.kset = efivars_kset; - i = kobject_init_and_add(&new_var->kobj, &efivar_ktype, + ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, NULL, "%s", short_name); kfree(short_name); - if (i) - return 1; + if (ret) + return ret; kobject_uevent(&new_var->kobj, KOBJ_ADD); efivar_entry_add(new_var, &efivar_sysfs_list); diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c new file mode 100644 index 000000000..a5b95d61a --- /dev/null +++ b/drivers/firmware/efi/esrt.c @@ -0,0 +1,471 @@ +/* + * esrt.c + * + * This module exports EFI System Resource Table (ESRT) entries into userspace + * through the sysfs file system. The ESRT provides a read-only catalog of + * system components for which the system accepts firmware upgrades via UEFI's + * "Capsule Update" feature. This module allows userland utilities to evaluate + * what firmware updates can be applied to this system, and potentially arrange + * for those updates to occur. + * + * Data is currently found below /sys/firmware/efi/esrt/... + */ +#define pr_fmt(fmt) "esrt: " fmt + +#include <linux/capability.h> +#include <linux/device.h> +#include <linux/efi.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/kobject.h> +#include <linux/list.h> +#include <linux/memblock.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include <asm/io.h> +#include <asm/early_ioremap.h> + +struct efi_system_resource_entry_v1 { + efi_guid_t fw_class; + u32 fw_type; + u32 fw_version; + u32 lowest_supported_fw_version; + u32 capsule_flags; + u32 last_attempt_version; + u32 last_attempt_status; +}; + +/* + * _count and _version are what they seem like. _max is actually just + * accounting info for the firmware when creating the table; it should never + * have been exposed to us. 
To wit, the spec says: + * The maximum number of resource array entries that can be within the + * table without reallocating the table, must not be zero. + * Since there's no guidance about what that means in terms of memory layout, + * it means nothing to us. + */ +struct efi_system_resource_table { + u32 fw_resource_count; + u32 fw_resource_count_max; + u64 fw_resource_version; + u8 entries[]; +}; + +static phys_addr_t esrt_data; +static size_t esrt_data_size; + +static struct efi_system_resource_table *esrt; + +struct esre_entry { + union { + struct efi_system_resource_entry_v1 *esre1; + } esre; + + struct kobject kobj; + struct list_head list; +}; + +/* global list of esre_entry. */ +static LIST_HEAD(entry_list); + +/* entry attribute */ +struct esre_attribute { + struct attribute attr; + ssize_t (*show)(struct esre_entry *entry, char *buf); + ssize_t (*store)(struct esre_entry *entry, + const char *buf, size_t count); +}; + +static struct esre_entry *to_entry(struct kobject *kobj) +{ + return container_of(kobj, struct esre_entry, kobj); +} + +static struct esre_attribute *to_attr(struct attribute *attr) +{ + return container_of(attr, struct esre_attribute, attr); +} + +static ssize_t esre_attr_show(struct kobject *kobj, + struct attribute *_attr, char *buf) +{ + struct esre_entry *entry = to_entry(kobj); + struct esre_attribute *attr = to_attr(_attr); + + /* Don't tell normal users what firmware versions we've got... */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + return attr->show(entry, buf); +} + +static const struct sysfs_ops esre_attr_ops = { + .show = esre_attr_show, +}; + +/* Generic ESRT Entry ("ESRE") support. */ +static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf) +{ + char *str = buf; + + efi_guid_to_str(&entry->esre.esre1->fw_class, str); + str += strlen(str); + str += sprintf(str, "\n"); + + return str - buf; +} + +static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400, + esre_fw_class_show, NULL); + +#define esre_attr_decl(name, size, fmt) \ +static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \ +{ \ + return sprintf(buf, fmt "\n", \ + le##size##_to_cpu(entry->esre.esre1->name)); \ +} \ +\ +static struct esre_attribute esre_##name = __ATTR(name, 0400, \ + esre_##name##_show, NULL) + +esre_attr_decl(fw_type, 32, "%u"); +esre_attr_decl(fw_version, 32, "%u"); +esre_attr_decl(lowest_supported_fw_version, 32, "%u"); +esre_attr_decl(capsule_flags, 32, "0x%x"); +esre_attr_decl(last_attempt_version, 32, "%u"); +esre_attr_decl(last_attempt_status, 32, "%u"); + +static struct attribute *esre1_attrs[] = { + &esre_fw_class.attr, + &esre_fw_type.attr, + &esre_fw_version.attr, + &esre_lowest_supported_fw_version.attr, + &esre_capsule_flags.attr, + &esre_last_attempt_version.attr, + &esre_last_attempt_status.attr, + NULL +}; +static void esre_release(struct kobject *kobj) +{ + struct esre_entry *entry = to_entry(kobj); + + list_del(&entry->list); + kfree(entry); +} + +static struct kobj_type esre1_ktype = { + .release = esre_release, + .sysfs_ops = &esre_attr_ops, + .default_attrs = esre1_attrs, +}; + + +static struct kobject *esrt_kobj; +static struct kset *esrt_kset; + +static int esre_create_sysfs_entry(void *esre, int entry_num) +{ + struct esre_entry *entry; + char name[20]; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + sprintf(name, "entry%d", entry_num); + + entry->kobj.kset = esrt_kset; + + if (esrt->fw_resource_version == 1) { + int rc = 0; + + entry->esre.esre1 = esre; + rc = 
kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL, + "%s", name); + if (rc) { + kfree(entry); + return rc; + } + } + + list_add_tail(&entry->list, &entry_list); + return 0; +} + +/* support for displaying ESRT fields at the top level */ +#define esrt_attr_decl(name, size, fmt) \ +static ssize_t esrt_##name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf)\ +{ \ + return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \ +} \ +\ +static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \ + esrt_##name##_show, NULL) + +esrt_attr_decl(fw_resource_count, 32, "%u"); +esrt_attr_decl(fw_resource_count_max, 32, "%u"); +esrt_attr_decl(fw_resource_version, 64, "%llu"); + +static struct attribute *esrt_attrs[] = { + &esrt_fw_resource_count.attr, + &esrt_fw_resource_count_max.attr, + &esrt_fw_resource_version.attr, + NULL, +}; + +static inline int esrt_table_exists(void) +{ + if (!efi_enabled(EFI_CONFIG_TABLES)) + return 0; + if (efi.esrt == EFI_INVALID_TABLE_ADDR) + return 0; + return 1; +} + +static umode_t esrt_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + if (!esrt_table_exists()) + return 0; + return attr->mode; +} + +static struct attribute_group esrt_attr_group = { + .attrs = esrt_attrs, + .is_visible = esrt_attr_is_visible, +}; + +/* + * remap the table, copy it to kmalloced pages, and unmap it. + */ +void __init efi_esrt_init(void) +{ + void *va; + struct efi_system_resource_table tmpesrt; + struct efi_system_resource_entry_v1 *v1_entries; + size_t size, max, entry_size, entries_size; + efi_memory_desc_t md; + int rc; + phys_addr_t end; + + pr_debug("esrt-init: loading.\n"); + if (!esrt_table_exists()) + return; + + rc = efi_mem_desc_lookup(efi.esrt, &md); + if (rc < 0) { + pr_err("ESRT header is not in the memory map.\n"); + return; + } + + max = efi_mem_desc_end(&md); + if (max < efi.esrt) { + pr_err("EFI memory descriptor is invalid. (esrt: %p max: %p)\n", + (void *)efi.esrt, (void *)max); + return; + } + + size = sizeof(*esrt); + max -= efi.esrt; + + if (max < size) { + pr_err("ESRT header doen't fit on single memory map entry. (size: %zu max: %zu)\n", + size, max); + return; + } + + va = early_memremap(efi.esrt, size); + if (!va) { + pr_err("early_memremap(%p, %zu) failed.\n", (void *)efi.esrt, + size); + return; + } + + memcpy(&tmpesrt, va, sizeof(tmpesrt)); + + if (tmpesrt.fw_resource_version == 1) { + entry_size = sizeof (*v1_entries); + } else { + pr_err("Unsupported ESRT version %lld.\n", + tmpesrt.fw_resource_version); + return; + } + + if (tmpesrt.fw_resource_count > 0 && max - size < entry_size) { + pr_err("ESRT memory map entry can only hold the header. (max: %zu size: %zu)\n", + max - size, entry_size); + goto err_memunmap; + } + + /* + * The format doesn't really give us any boundary to test here, + * so I'm making up 128 as the max number of individually updatable + * components we support. + * 128 should be pretty excessive, but there's still some chance + * somebody will do that someday and we'll need to raise this. + */ + if (tmpesrt.fw_resource_count > 128) { + pr_err("ESRT says fw_resource_count has very large value %d.\n", + tmpesrt.fw_resource_count); + goto err_memunmap; + } + + /* + * We know it can't be larger than N * sizeof() here, and N is limited + * by the previous test to a small number, so there's no overflow. 
+ */ + entries_size = tmpesrt.fw_resource_count * entry_size; + if (max < size + entries_size) { + pr_err("ESRT does not fit on single memory map entry (size: %zu max: %zu)\n", + size, max); + goto err_memunmap; + } + + /* remap it with our (plausible) new pages */ + early_memunmap(va, size); + size += entries_size; + va = early_memremap(efi.esrt, size); + if (!va) { + pr_err("early_memremap(%p, %zu) failed.\n", (void *)efi.esrt, + size); + return; + } + + esrt_data = (phys_addr_t)efi.esrt; + esrt_data_size = size; + + end = esrt_data + size; + pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end); + memblock_reserve(esrt_data, esrt_data_size); + + pr_debug("esrt-init: loaded.\n"); +err_memunmap: + early_memunmap(va, size); +} + +static int __init register_entries(void) +{ + struct efi_system_resource_entry_v1 *v1_entries = (void *)esrt->entries; + int i, rc; + + if (!esrt_table_exists()) + return 0; + + for (i = 0; i < le32_to_cpu(esrt->fw_resource_count); i++) { + void *esre = NULL; + if (esrt->fw_resource_version == 1) { + esre = &v1_entries[i]; + } else { + pr_err("Unsupported ESRT version %lld.\n", + esrt->fw_resource_version); + return -EINVAL; + } + + rc = esre_create_sysfs_entry(esre, i); + if (rc < 0) { + pr_err("ESRT entry creation failed with error %d.\n", + rc); + return rc; + } + } + return 0; +} + +static void cleanup_entry_list(void) +{ + struct esre_entry *entry, *next; + + list_for_each_entry_safe(entry, next, &entry_list, list) { + kobject_put(&entry->kobj); + } +} + +static int __init esrt_sysfs_init(void) +{ + int error; + struct efi_system_resource_table __iomem *ioesrt; + + pr_debug("esrt-sysfs: loading.\n"); + if (!esrt_data || !esrt_data_size) + return -ENOSYS; + + ioesrt = ioremap(esrt_data, esrt_data_size); + if (!ioesrt) { + pr_err("ioremap(%pa, %zu) failed.\n", &esrt_data, + esrt_data_size); + return -ENOMEM; + } + + esrt = kmalloc(esrt_data_size, GFP_KERNEL); + if (!esrt) { + pr_err("kmalloc failed. 
(wanted %zu bytes)\n", esrt_data_size); + iounmap(ioesrt); + return -ENOMEM; + } + + memcpy_fromio(esrt, ioesrt, esrt_data_size); + + esrt_kobj = kobject_create_and_add("esrt", efi_kobj); + if (!esrt_kobj) { + pr_err("Firmware table registration failed.\n"); + error = -ENOMEM; + goto err; + } + + error = sysfs_create_group(esrt_kobj, &esrt_attr_group); + if (error) { + pr_err("Sysfs attribute export failed with error %d.\n", + error); + goto err_remove_esrt; + } + + esrt_kset = kset_create_and_add("entries", NULL, esrt_kobj); + if (!esrt_kset) { + pr_err("kset creation failed.\n"); + error = -ENOMEM; + goto err_remove_group; + } + + error = register_entries(); + if (error) + goto err_cleanup_list; + + memblock_remove(esrt_data, esrt_data_size); + + pr_debug("esrt-sysfs: loaded.\n"); + + return 0; +err_cleanup_list: + cleanup_entry_list(); + kset_unregister(esrt_kset); +err_remove_group: + sysfs_remove_group(esrt_kobj, &esrt_attr_group); +err_remove_esrt: + kobject_put(esrt_kobj); +err: + kfree(esrt); + esrt = NULL; + return error; +} + +static void __exit esrt_sysfs_exit(void) +{ + pr_debug("esrt-sysfs: unloading.\n"); + cleanup_entry_list(); + kset_unregister(esrt_kset); + sysfs_remove_group(esrt_kobj, &esrt_attr_group); + kfree(esrt); + esrt = NULL; + kobject_del(esrt_kobj); + kobject_put(esrt_kobj); +} + +module_init(esrt_sysfs_init); +module_exit(esrt_sysfs_exit); + +MODULE_AUTHOR("Peter Jones <pjones@redhat.com>"); +MODULE_DESCRIPTION("EFI System Resource Table support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 280bc0a63..816dbe9f4 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -24,8 +24,6 @@ KASAN_SANITIZE := n lib-y := efi-stub-helper.o lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o -CFLAGS_fdt.o += -I$(srctree)/scripts/dtc/libfdt/ - # # arm64 puts the stub in the kernel proper, which will unnecessarily retain all # code indefinitely unless it is annotated as __init/__initdata/__initconst etc. diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index cc016c615..5de3ed292 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -144,7 +144,9 @@ static struct kobj_type __refdata memmap_ktype = { * * Common implementation of firmware_map_add() and firmware_map_add_early() * which expects a pre-allocated struct firmware_map_entry. - **/ + * + * Return: 0 always + */ static int firmware_map_add_entry(u64 start, u64 end, const char *type, struct firmware_map_entry *entry) @@ -170,7 +172,7 @@ static int firmware_map_add_entry(u64 start, u64 end, * @entry: removed entry. * * The caller must hold map_entries_lock, and release it properly. - **/ + */ static inline void firmware_map_remove_entry(struct firmware_map_entry *entry) { list_del(&entry->list); @@ -208,7 +210,7 @@ static inline void remove_sysfs_fw_map_entry(struct firmware_map_entry *entry) kobject_put(&entry->kobj); } -/* +/** * firmware_map_find_entry_in_list() - Search memmap entry in a given list. * @start: Start of the memory range. * @end: End of the memory range (exclusive). @@ -236,7 +238,7 @@ firmware_map_find_entry_in_list(u64 start, u64 end, const char *type, return NULL; } -/* +/** * firmware_map_find_entry() - Search memmap entry in map_entries. * @start: Start of the memory range. * @end: End of the memory range (exclusive). 
@@ -254,7 +256,7 @@ firmware_map_find_entry(u64 start, u64 end, const char *type) return firmware_map_find_entry_in_list(start, end, type, &map_entries); } -/* +/** * firmware_map_find_entry_bootmem() - Search memmap entry in map_entries_bootmem. * @start: Start of the memory range. * @end: End of the memory range (exclusive). @@ -283,8 +285,8 @@ firmware_map_find_entry_bootmem(u64 start, u64 end, const char *type) * similar to function firmware_map_add_early(). The only difference is that * it will create the syfs entry dynamically. * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. - **/ + * Return: 0 on success, or -ENOMEM if no memory could be allocated. + */ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type) { struct firmware_map_entry *entry; @@ -325,8 +327,8 @@ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type) * * That function must be called before late_initcall. * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. - **/ + * Return: 0 on success, or -ENOMEM if no memory could be allocated. + */ int __init firmware_map_add_early(u64 start, u64 end, const char *type) { struct firmware_map_entry *entry; @@ -346,8 +348,8 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type) * * removes a firmware mapping entry. * - * Returns 0 on success, or -EINVAL if no entry. - **/ + * Return: 0 on success, or -EINVAL if no entry. + */ int __meminit firmware_map_remove(u64 start, u64 end, const char *type) { struct firmware_map_entry *entry; diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c new file mode 100644 index 000000000..1bd6f9c34 --- /dev/null +++ b/drivers/firmware/qcom_scm-32.c @@ -0,0 +1,503 @@ +/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/qcom_scm.h> + +#include <asm/outercache.h> +#include <asm/cacheflush.h> + +#include "qcom_scm.h" + +#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00 +#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01 +#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08 +#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20 + +#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04 +#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02 +#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10 +#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40 + +struct qcom_scm_entry { + int flag; + void *entry; +}; + +static struct qcom_scm_entry qcom_scm_wb[] = { + { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 }, + { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 }, + { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 }, + { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, +}; + +static DEFINE_MUTEX(qcom_scm_lock); + +/** + * struct qcom_scm_command - one SCM command buffer + * @len: total available memory for command and response + * @buf_offset: start of command buffer + * @resp_hdr_offset: start of response buffer + * @id: command to be executed + * @buf: buffer returned from qcom_scm_get_command_buffer() + * + * An SCM command is laid out in memory as follows: + * + * ------------------- <--- struct qcom_scm_command + * | command header | + * ------------------- <--- qcom_scm_get_command_buffer() + * | command buffer | + * ------------------- <--- struct qcom_scm_response and + * | response header | qcom_scm_command_to_response() + * ------------------- <--- qcom_scm_get_response_buffer() + * | response buffer | + * ------------------- + * + * There can be arbitrary padding between the headers and buffers so + * you should always use the appropriate qcom_scm_get_*_buffer() routines + * to access the buffers in a safe manner. + */ +struct qcom_scm_command { + __le32 len; + __le32 buf_offset; + __le32 resp_hdr_offset; + __le32 id; + __le32 buf[0]; +}; + +/** + * struct qcom_scm_response - one SCM response buffer + * @len: total available memory for response + * @buf_offset: start of response data relative to start of qcom_scm_response + * @is_complete: indicates if the command has finished processing + */ +struct qcom_scm_response { + __le32 len; + __le32 buf_offset; + __le32 is_complete; +}; + +/** + * alloc_qcom_scm_command() - Allocate an SCM command + * @cmd_size: size of the command buffer + * @resp_size: size of the response buffer + * + * Allocate an SCM command, including enough room for the command + * and response headers as well as the command and response buffers. + * + * Returns a valid &qcom_scm_command on success or %NULL if the allocation fails. + */ +static struct qcom_scm_command *alloc_qcom_scm_command(size_t cmd_size, size_t resp_size) +{ + struct qcom_scm_command *cmd; + size_t len = sizeof(*cmd) + sizeof(struct qcom_scm_response) + cmd_size + + resp_size; + u32 offset; + + cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL); + if (cmd) { + cmd->len = cpu_to_le32(len); + offset = offsetof(struct qcom_scm_command, buf); + cmd->buf_offset = cpu_to_le32(offset); + cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size); + } + return cmd; +} + +/** + * free_qcom_scm_command() - Free an SCM command + * @cmd: command to free + * + * Free an SCM command. 
+ */ +static inline void free_qcom_scm_command(struct qcom_scm_command *cmd) +{ + kfree(cmd); +} + +/** + * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response + * @cmd: command + * + * Returns a pointer to a response for a command. + */ +static inline struct qcom_scm_response *qcom_scm_command_to_response( + const struct qcom_scm_command *cmd) +{ + return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset); +} + +/** + * qcom_scm_get_command_buffer() - Get a pointer to a command buffer + * @cmd: command + * + * Returns a pointer to the command buffer of a command. + */ +static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd) +{ + return (void *)cmd->buf; +} + +/** + * qcom_scm_get_response_buffer() - Get a pointer to a response buffer + * @rsp: response + * + * Returns a pointer to a response buffer of a response. + */ +static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp) +{ + return (void *)rsp + le32_to_cpu(rsp->buf_offset); +} + +static int qcom_scm_remap_error(int err) +{ + pr_err("qcom_scm_call failed with error code %d\n", err); + switch (err) { + case QCOM_SCM_ERROR: + return -EIO; + case QCOM_SCM_EINVAL_ADDR: + case QCOM_SCM_EINVAL_ARG: + return -EINVAL; + case QCOM_SCM_EOPNOTSUPP: + return -EOPNOTSUPP; + case QCOM_SCM_ENOMEM: + return -ENOMEM; + } + return -EINVAL; +} + +static u32 smc(u32 cmd_addr) +{ + int context_id; + register u32 r0 asm("r0") = 1; + register u32 r1 asm("r1") = (u32)&context_id; + register u32 r2 asm("r2") = cmd_addr; + do { + asm volatile( + __asmeq("%0", "r0") + __asmeq("%1", "r0") + __asmeq("%2", "r1") + __asmeq("%3", "r2") +#ifdef REQUIRES_SEC + ".arch_extension sec\n" +#endif + "smc #0 @ switch to secure world\n" + : "=r" (r0) + : "r" (r0), "r" (r1), "r" (r2) + : "r3"); + } while (r0 == QCOM_SCM_INTERRUPTED); + + return r0; +} + +static int __qcom_scm_call(const struct qcom_scm_command *cmd) +{ + int ret; + u32 cmd_addr = virt_to_phys(cmd); + + /* + * Flush the command buffer so that the secure world sees + * the correct data. + */ + __cpuc_flush_dcache_area((void *)cmd, cmd->len); + outer_flush_range(cmd_addr, cmd_addr + cmd->len); + + ret = smc(cmd_addr); + if (ret < 0) + ret = qcom_scm_remap_error(ret); + + return ret; +} + +static void qcom_scm_inv_range(unsigned long start, unsigned long end) +{ + u32 cacheline_size, ctr; + + asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr)); + cacheline_size = 4 << ((ctr >> 16) & 0xf); + + start = round_down(start, cacheline_size); + end = round_up(end, cacheline_size); + outer_inv_range(start, end); + while (start < end) { + asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start) + : "memory"); + start += cacheline_size; + } + dsb(); + isb(); +} + +/** + * qcom_scm_call() - Send an SCM command + * @svc_id: service identifier + * @cmd_id: command identifier + * @cmd_buf: command buffer + * @cmd_len: length of the command buffer + * @resp_buf: response buffer + * @resp_len: length of the response buffer + * + * Sends a command to the SCM and waits for the command to finish processing. + * + * A note on cache maintenance: + * Note that any buffers that are expected to be accessed by the secure world + * must be flushed before invoking qcom_scm_call and invalidated in the cache + * immediately after qcom_scm_call returns. Cache maintenance on the command + * and response buffers is taken care of by qcom_scm_call; however, callers are + * responsible for any other cached buffers passed over to the secure world. 
+ */ +static int qcom_scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, + size_t cmd_len, void *resp_buf, size_t resp_len) +{ + int ret; + struct qcom_scm_command *cmd; + struct qcom_scm_response *rsp; + unsigned long start, end; + + cmd = alloc_qcom_scm_command(cmd_len, resp_len); + if (!cmd) + return -ENOMEM; + + cmd->id = cpu_to_le32((svc_id << 10) | cmd_id); + if (cmd_buf) + memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len); + + mutex_lock(&qcom_scm_lock); + ret = __qcom_scm_call(cmd); + mutex_unlock(&qcom_scm_lock); + if (ret) + goto out; + + rsp = qcom_scm_command_to_response(cmd); + start = (unsigned long)rsp; + + do { + qcom_scm_inv_range(start, start + sizeof(*rsp)); + } while (!rsp->is_complete); + + end = (unsigned long)qcom_scm_get_response_buffer(rsp) + resp_len; + qcom_scm_inv_range(start, end); + + if (resp_buf) + memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), resp_len); +out: + free_qcom_scm_command(cmd); + return ret; +} + +#define SCM_CLASS_REGISTER (0x2 << 8) +#define SCM_MASK_IRQS BIT(5) +#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \ + SCM_CLASS_REGISTER | \ + SCM_MASK_IRQS | \ + (n & 0xf)) + +/** + * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument + * @svc_id: service identifier + * @cmd_id: command identifier + * @arg1: first argument + * + * This shall only be used with commands that are guaranteed to be + * uninterruptable, atomic and SMP safe. + */ +static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1) +{ + int context_id; + + register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1); + register u32 r1 asm("r1") = (u32)&context_id; + register u32 r2 asm("r2") = arg1; + + asm volatile( + __asmeq("%0", "r0") + __asmeq("%1", "r0") + __asmeq("%2", "r1") + __asmeq("%3", "r2") +#ifdef REQUIRES_SEC + ".arch_extension sec\n" +#endif + "smc #0 @ switch to secure world\n" + : "=r" (r0) + : "r" (r0), "r" (r1), "r" (r2) + : "r3"); + return r0; +} + +u32 qcom_scm_get_version(void) +{ + int context_id; + static u32 version = -1; + register u32 r0 asm("r0"); + register u32 r1 asm("r1"); + + if (version != -1) + return version; + + mutex_lock(&qcom_scm_lock); + + r0 = 0x1 << 8; + r1 = (u32)&context_id; + do { + asm volatile( + __asmeq("%0", "r0") + __asmeq("%1", "r1") + __asmeq("%2", "r0") + __asmeq("%3", "r1") +#ifdef REQUIRES_SEC + ".arch_extension sec\n" +#endif + "smc #0 @ switch to secure world\n" + : "=r" (r0), "=r" (r1) + : "r" (r0), "r" (r1) + : "r2", "r3"); + } while (r0 == QCOM_SCM_INTERRUPTED); + + version = r1; + mutex_unlock(&qcom_scm_lock); + + return version; +} +EXPORT_SYMBOL(qcom_scm_get_version); + +/* + * Set the cold/warm boot address for one of the CPU cores. + */ +static int qcom_scm_set_boot_addr(u32 addr, int flags) +{ + struct { + __le32 flags; + __le32 addr; + } cmd; + + cmd.addr = cpu_to_le32(addr); + cmd.flags = cpu_to_le32(flags); + return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR, + &cmd, sizeof(cmd), NULL, 0); +} + +/** + * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus + * @entry: Entry point function for the cpus + * @cpus: The cpumask of cpus that will use the entry point + * + * Set the cold boot address of the cpus. Any cpu outside the supported + * range would be removed from the cpu present mask. 
+ */ +int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) +{ + int flags = 0; + int cpu; + int scm_cb_flags[] = { + QCOM_SCM_FLAG_COLDBOOT_CPU0, + QCOM_SCM_FLAG_COLDBOOT_CPU1, + QCOM_SCM_FLAG_COLDBOOT_CPU2, + QCOM_SCM_FLAG_COLDBOOT_CPU3, + }; + + if (!cpus || (cpus && cpumask_empty(cpus))) + return -EINVAL; + + for_each_cpu(cpu, cpus) { + if (cpu < ARRAY_SIZE(scm_cb_flags)) + flags |= scm_cb_flags[cpu]; + else + set_cpu_present(cpu, false); + } + + return qcom_scm_set_boot_addr(virt_to_phys(entry), flags); +} + +/** + * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus + * @entry: Entry point function for the cpus + * @cpus: The cpumask of cpus that will use the entry point + * + * Set the Linux entry point for the SCM to transfer control to when coming + * out of a power down. CPU power down may be executed on cpuidle or hotplug. + */ +int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +{ + int ret; + int flags = 0; + int cpu; + + /* + * Reassign only if we are switching from hotplug entry point + * to cpuidle entry point or vice versa. + */ + for_each_cpu(cpu, cpus) { + if (entry == qcom_scm_wb[cpu].entry) + continue; + flags |= qcom_scm_wb[cpu].flag; + } + + /* No change in entry function */ + if (!flags) + return 0; + + ret = qcom_scm_set_boot_addr(virt_to_phys(entry), flags); + if (!ret) { + for_each_cpu(cpu, cpus) + qcom_scm_wb[cpu].entry = entry; + } + + return ret; +} + +/** + * qcom_scm_cpu_power_down() - Power down the cpu + * @flags - Flags to flush cache + * + * This is an end point to power down cpu. If there was a pending interrupt, + * the control would return from this function, otherwise, the cpu jumps to the + * warm boot entry point set for this cpu upon reset. + */ +void __qcom_scm_cpu_power_down(u32 flags) +{ + qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC, + flags & QCOM_SCM_FLUSH_FLAG_MASK); +} + +int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id) +{ + int ret; + u32 svc_cmd = (svc_id << 10) | cmd_id; + u32 ret_val = 0; + + ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &svc_cmd, + sizeof(svc_cmd), &ret_val, sizeof(ret_val)); + if (ret) + return ret; + + return ret_val; +} + +int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) +{ + if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) + return -ERANGE; + + return qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, + req, req_cnt * sizeof(*req), resp, sizeof(*resp)); +} diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 994b50fd9..45c008d68 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd. * * This program is free software; you can redistribute it and/or modify @@ -16,393 +16,12 @@ * 02110-1301, USA. 
*/ -#include <linux/slab.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/errno.h> -#include <linux/err.h> +#include <linux/cpumask.h> +#include <linux/export.h> +#include <linux/types.h> #include <linux/qcom_scm.h> -#include <asm/outercache.h> -#include <asm/cacheflush.h> - - -#define QCOM_SCM_ENOMEM -5 -#define QCOM_SCM_EOPNOTSUPP -4 -#define QCOM_SCM_EINVAL_ADDR -3 -#define QCOM_SCM_EINVAL_ARG -2 -#define QCOM_SCM_ERROR -1 -#define QCOM_SCM_INTERRUPTED 1 - -#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00 -#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01 -#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08 -#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20 - -#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04 -#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02 -#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10 -#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40 - -struct qcom_scm_entry { - int flag; - void *entry; -}; - -static struct qcom_scm_entry qcom_scm_wb[] = { - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, -}; - -static DEFINE_MUTEX(qcom_scm_lock); - -/** - * struct qcom_scm_command - one SCM command buffer - * @len: total available memory for command and response - * @buf_offset: start of command buffer - * @resp_hdr_offset: start of response buffer - * @id: command to be executed - * @buf: buffer returned from qcom_scm_get_command_buffer() - * - * An SCM command is laid out in memory as follows: - * - * ------------------- <--- struct qcom_scm_command - * | command header | - * ------------------- <--- qcom_scm_get_command_buffer() - * | command buffer | - * ------------------- <--- struct qcom_scm_response and - * | response header | qcom_scm_command_to_response() - * ------------------- <--- qcom_scm_get_response_buffer() - * | response buffer | - * ------------------- - * - * There can be arbitrary padding between the headers and buffers so - * you should always use the appropriate qcom_scm_get_*_buffer() routines - * to access the buffers in a safe manner. - */ -struct qcom_scm_command { - __le32 len; - __le32 buf_offset; - __le32 resp_hdr_offset; - __le32 id; - __le32 buf[0]; -}; - -/** - * struct qcom_scm_response - one SCM response buffer - * @len: total available memory for response - * @buf_offset: start of response data relative to start of qcom_scm_response - * @is_complete: indicates if the command has finished processing - */ -struct qcom_scm_response { - __le32 len; - __le32 buf_offset; - __le32 is_complete; -}; - -/** - * alloc_qcom_scm_command() - Allocate an SCM command - * @cmd_size: size of the command buffer - * @resp_size: size of the response buffer - * - * Allocate an SCM command, including enough room for the command - * and response headers as well as the command and response buffers. - * - * Returns a valid &qcom_scm_command on success or %NULL if the allocation fails. 
- */ -static struct qcom_scm_command *alloc_qcom_scm_command(size_t cmd_size, size_t resp_size) -{ - struct qcom_scm_command *cmd; - size_t len = sizeof(*cmd) + sizeof(struct qcom_scm_response) + cmd_size + - resp_size; - u32 offset; - - cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL); - if (cmd) { - cmd->len = cpu_to_le32(len); - offset = offsetof(struct qcom_scm_command, buf); - cmd->buf_offset = cpu_to_le32(offset); - cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size); - } - return cmd; -} - -/** - * free_qcom_scm_command() - Free an SCM command - * @cmd: command to free - * - * Free an SCM command. - */ -static inline void free_qcom_scm_command(struct qcom_scm_command *cmd) -{ - kfree(cmd); -} - -/** - * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response - * @cmd: command - * - * Returns a pointer to a response for a command. - */ -static inline struct qcom_scm_response *qcom_scm_command_to_response( - const struct qcom_scm_command *cmd) -{ - return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset); -} - -/** - * qcom_scm_get_command_buffer() - Get a pointer to a command buffer - * @cmd: command - * - * Returns a pointer to the command buffer of a command. - */ -static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd) -{ - return (void *)cmd->buf; -} - -/** - * qcom_scm_get_response_buffer() - Get a pointer to a response buffer - * @rsp: response - * - * Returns a pointer to a response buffer of a response. - */ -static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp) -{ - return (void *)rsp + le32_to_cpu(rsp->buf_offset); -} - -static int qcom_scm_remap_error(int err) -{ - pr_err("qcom_scm_call failed with error code %d\n", err); - switch (err) { - case QCOM_SCM_ERROR: - return -EIO; - case QCOM_SCM_EINVAL_ADDR: - case QCOM_SCM_EINVAL_ARG: - return -EINVAL; - case QCOM_SCM_EOPNOTSUPP: - return -EOPNOTSUPP; - case QCOM_SCM_ENOMEM: - return -ENOMEM; - } - return -EINVAL; -} - -static u32 smc(u32 cmd_addr) -{ - int context_id; - register u32 r0 asm("r0") = 1; - register u32 r1 asm("r1") = (u32)&context_id; - register u32 r2 asm("r2") = cmd_addr; - do { - asm volatile( - __asmeq("%0", "r0") - __asmeq("%1", "r0") - __asmeq("%2", "r1") - __asmeq("%3", "r2") -#ifdef REQUIRES_SEC - ".arch_extension sec\n" -#endif - "smc #0 @ switch to secure world\n" - : "=r" (r0) - : "r" (r0), "r" (r1), "r" (r2) - : "r3"); - } while (r0 == QCOM_SCM_INTERRUPTED); - - return r0; -} - -static int __qcom_scm_call(const struct qcom_scm_command *cmd) -{ - int ret; - u32 cmd_addr = virt_to_phys(cmd); - - /* - * Flush the command buffer so that the secure world sees - * the correct data. 
-         */
-        __cpuc_flush_dcache_area((void *)cmd, cmd->len);
-        outer_flush_range(cmd_addr, cmd_addr + cmd->len);
-
-        ret = smc(cmd_addr);
-        if (ret < 0)
-                ret = qcom_scm_remap_error(ret);
-
-        return ret;
-}
-
-static void qcom_scm_inv_range(unsigned long start, unsigned long end)
-{
-        u32 cacheline_size, ctr;
-
-        asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
-        cacheline_size = 4 << ((ctr >> 16) & 0xf);
-
-        start = round_down(start, cacheline_size);
-        end = round_up(end, cacheline_size);
-        outer_inv_range(start, end);
-        while (start < end) {
-                asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
-                     : "memory");
-                start += cacheline_size;
-        }
-        dsb();
-        isb();
-}
-
-/**
- * qcom_scm_call() - Send an SCM command
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @cmd_buf: command buffer
- * @cmd_len: length of the command buffer
- * @resp_buf: response buffer
- * @resp_len: length of the response buffer
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- *
- * A note on cache maintenance:
- * Note that any buffers that are expected to be accessed by the secure world
- * must be flushed before invoking qcom_scm_call and invalidated in the cache
- * immediately after qcom_scm_call returns. Cache maintenance on the command
- * and response buffers is taken care of by qcom_scm_call; however, callers are
- * responsible for any other cached buffers passed over to the secure world.
- */
-static int qcom_scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
-                        size_t cmd_len, void *resp_buf, size_t resp_len)
-{
-        int ret;
-        struct qcom_scm_command *cmd;
-        struct qcom_scm_response *rsp;
-        unsigned long start, end;
-
-        cmd = alloc_qcom_scm_command(cmd_len, resp_len);
-        if (!cmd)
-                return -ENOMEM;
-
-        cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
-        if (cmd_buf)
-                memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);
-
-        mutex_lock(&qcom_scm_lock);
-        ret = __qcom_scm_call(cmd);
-        mutex_unlock(&qcom_scm_lock);
-        if (ret)
-                goto out;
-
-        rsp = qcom_scm_command_to_response(cmd);
-        start = (unsigned long)rsp;
-
-        do {
-                qcom_scm_inv_range(start, start + sizeof(*rsp));
-        } while (!rsp->is_complete);
-
-        end = (unsigned long)qcom_scm_get_response_buffer(rsp) + resp_len;
-        qcom_scm_inv_range(start, end);
-
-        if (resp_buf)
-                memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), resp_len);
-out:
-        free_qcom_scm_command(cmd);
-        return ret;
-}
-
-#define SCM_CLASS_REGISTER      (0x2 << 8)
-#define SCM_MASK_IRQS           BIT(5)
-#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
-                                SCM_CLASS_REGISTER | \
-                                SCM_MASK_IRQS | \
-                                (n & 0xf))
-
-/**
- * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @arg1: first argument
- *
- * This shall only be used with commands that are guaranteed to be
- * uninterruptable, atomic and SMP safe.
- */
-static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
-{
-        int context_id;
-
-        register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
-        register u32 r1 asm("r1") = (u32)&context_id;
-        register u32 r2 asm("r2") = arg1;
-
-        asm volatile(
-                        __asmeq("%0", "r0")
-                        __asmeq("%1", "r0")
-                        __asmeq("%2", "r1")
-                        __asmeq("%3", "r2")
-#ifdef REQUIRES_SEC
-                        ".arch_extension sec\n"
-#endif
-                        "smc    #0      @ switch to secure world\n"
-                : "=r" (r0)
-                : "r" (r0), "r" (r1), "r" (r2)
-                : "r3");
-        return r0;
-}
-
-u32 qcom_scm_get_version(void)
-{
-        int context_id;
-        static u32 version = -1;
-        register u32 r0 asm("r0");
-        register u32 r1 asm("r1");
-
-        if (version != -1)
-                return version;
-
-        mutex_lock(&qcom_scm_lock);
-
-        r0 = 0x1 << 8;
-        r1 = (u32)&context_id;
-        do {
-                asm volatile(
-                        __asmeq("%0", "r0")
-                        __asmeq("%1", "r1")
-                        __asmeq("%2", "r0")
-                        __asmeq("%3", "r1")
-#ifdef REQUIRES_SEC
-                        ".arch_extension sec\n"
-#endif
-                        "smc    #0      @ switch to secure world\n"
-                        : "=r" (r0), "=r" (r1)
-                        : "r" (r0), "r" (r1)
-                        : "r2", "r3");
-        } while (r0 == QCOM_SCM_INTERRUPTED);
-
-        version = r1;
-        mutex_unlock(&qcom_scm_lock);
-
-        return version;
-}
-EXPORT_SYMBOL(qcom_scm_get_version);
-
-#define QCOM_SCM_SVC_BOOT       0x1
-#define QCOM_SCM_BOOT_ADDR      0x1
-/*
- * Set the cold/warm boot address for one of the CPU cores.
- */
-static int qcom_scm_set_boot_addr(u32 addr, int flags)
-{
-        struct {
-                __le32 flags;
-                __le32 addr;
-        } cmd;
-
-        cmd.addr = cpu_to_le32(addr);
-        cmd.flags = cpu_to_le32(flags);
-        return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
-                        &cmd, sizeof(cmd), NULL, 0);
-}
+#include "qcom_scm.h"
 
 /**
  * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
@@ -414,26 +33,7 @@ static int qcom_scm_set_boot_addr(u32 addr, int flags)
  */
 int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
 {
-        int flags = 0;
-        int cpu;
-        int scm_cb_flags[] = {
-                QCOM_SCM_FLAG_COLDBOOT_CPU0,
-                QCOM_SCM_FLAG_COLDBOOT_CPU1,
-                QCOM_SCM_FLAG_COLDBOOT_CPU2,
-                QCOM_SCM_FLAG_COLDBOOT_CPU3,
-        };
-
-        if (!cpus || (cpus && cpumask_empty(cpus)))
-                return -EINVAL;
-
-        for_each_cpu(cpu, cpus) {
-                if (cpu < ARRAY_SIZE(scm_cb_flags))
-                        flags |= scm_cb_flags[cpu];
-                else
-                        set_cpu_present(cpu, false);
-        }
-
-        return qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
+        return __qcom_scm_set_cold_boot_addr(entry, cpus);
 }
 EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
 
@@ -447,37 +47,10 @@ EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
  */
 int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
 {
-        int ret;
-        int flags = 0;
-        int cpu;
-
-        /*
-         * Reassign only if we are switching from hotplug entry point
-         * to cpuidle entry point or vice versa.
-         */
-        for_each_cpu(cpu, cpus) {
-                if (entry == qcom_scm_wb[cpu].entry)
-                        continue;
-                flags |= qcom_scm_wb[cpu].flag;
-        }
-
-        /* No change in entry function */
-        if (!flags)
-                return 0;
-
-        ret = qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
-        if (!ret) {
-                for_each_cpu(cpu, cpus)
-                        qcom_scm_wb[cpu].entry = entry;
-        }
-
-        return ret;
+        return __qcom_scm_set_warm_boot_addr(entry, cpus);
 }
 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
 
-#define QCOM_SCM_CMD_TERMINATE_PC       0x2
-#define QCOM_SCM_FLUSH_FLAG_MASK        0x3
-
 /**
  * qcom_scm_cpu_power_down() - Power down the cpu
  * @flags - Flags to flush cache
@@ -488,7 +61,36 @@ EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
  */
 void qcom_scm_cpu_power_down(u32 flags)
 {
-        qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
-                        flags & QCOM_SCM_FLUSH_FLAG_MASK);
+        __qcom_scm_cpu_power_down(flags);
 }
 EXPORT_SYMBOL(qcom_scm_cpu_power_down);
+
+/**
+ * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
+ *
+ * Return true if HDCP is supported, false if not.
+ */
+bool qcom_scm_hdcp_available(void)
+{
+        int ret;
+
+        ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_HDCP,
+                QCOM_SCM_CMD_HDCP);
+
+        return (ret > 0) ? true : false;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_available);
+
+/**
+ * qcom_scm_hdcp_req() - Send HDCP request.
+ * @req: HDCP request array
+ * @req_cnt: HDCP request array count
+ * @resp: response buffer passed to SCM
+ *
+ * Write HDCP register(s) through SCM.
+ */
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+        return __qcom_scm_hdcp_req(req, req_cnt, resp);
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_req);
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
new file mode 100644
index 000000000..2cce75c08
--- /dev/null
+++ b/drivers/firmware/qcom_scm.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_SCM_INT_H
+#define __QCOM_SCM_INT_H
+
+#define QCOM_SCM_SVC_BOOT               0x1
+#define QCOM_SCM_BOOT_ADDR              0x1
+#define QCOM_SCM_BOOT_ADDR_MC           0x11
+
+#define QCOM_SCM_FLAG_HLOS              0x01
+#define QCOM_SCM_FLAG_COLDBOOT_MC       0x02
+#define QCOM_SCM_FLAG_WARMBOOT_MC       0x04
+extern int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
+extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
+
+#define QCOM_SCM_CMD_TERMINATE_PC       0x2
+#define QCOM_SCM_FLUSH_FLAG_MASK        0x3
+#define QCOM_SCM_CMD_CORE_HOTPLUGGED    0x10
+extern void __qcom_scm_cpu_power_down(u32 flags);
+
+#define QCOM_SCM_SVC_INFO               0x6
+#define QCOM_IS_CALL_AVAIL_CMD          0x1
+extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
+
+#define QCOM_SCM_SVC_HDCP               0x11
+#define QCOM_SCM_CMD_HDCP               0x01
+extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
+                u32 *resp);
+
+/* common error codes */
+#define QCOM_SCM_ENOMEM         -5
+#define QCOM_SCM_EOPNOTSUPP     -4
+#define QCOM_SCM_EINVAL_ADDR    -3
+#define QCOM_SCM_EINVAL_ARG     -2
+#define QCOM_SCM_ERROR          -1
+#define QCOM_SCM_INTERRUPTED    1
+
+#endif
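
The removed kerneldoc above describes how a legacy SCM command and its response share a single allocation: command header, command payload, response header and response payload laid out back to back, with the offsets stored in the command header. A minimal host-side sketch of that offset arithmetic follows; it is an illustration only, not part of this commit, and plain uint32_t stands in for the kernel's __le32 (the struct names scm_command/scm_response are local to the example).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same field order as the removed struct qcom_scm_command / _response. */
struct scm_command {
        uint32_t len;
        uint32_t buf_offset;
        uint32_t resp_hdr_offset;
        uint32_t id;
        uint32_t buf[];         /* command payload starts here */
};

struct scm_response {
        uint32_t len;
        uint32_t buf_offset;
        uint32_t is_complete;
};

int main(void)
{
        size_t cmd_size = 8, resp_size = 4;     /* example payload sizes */

        /* Mirrors alloc_qcom_scm_command(): one allocation holds both
         * headers plus both payloads, back to back. */
        size_t len = sizeof(struct scm_command) +
                     sizeof(struct scm_response) + cmd_size + resp_size;
        size_t buf_offset = offsetof(struct scm_command, buf);
        size_t resp_hdr_offset = buf_offset + cmd_size;

        printf("len=%zu buf_offset=%zu resp_hdr_offset=%zu\n",
               len, buf_offset, resp_hdr_offset);
        return 0;
}

With cmd_size = 8 and resp_size = 4 this prints len=40 buf_offset=16 resp_hdr_offset=24: a 16-byte command header, the 8-byte command payload at offset 16, the 12-byte response header at offset 24, and the 4-byte response payload at the end, matching the layout diagram in the removed comment.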
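After this split, callers only see the exported wrappers (qcom_scm_set_cold_boot_addr(), qcom_scm_set_warm_boot_addr(), qcom_scm_cpu_power_down() and the HDCP helpers), while the SMC calling convention lives behind the __qcom_scm_*() functions declared in the new qcom_scm.h. The sketch below shows how a platform SMP setup path might use the public API; the function qcom_smp_prepare() and the secondary_startup_arm entry symbol are assumptions for illustration, not code from this commit.

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/qcom_scm.h>

extern void secondary_startup_arm(void);        /* assumed secondary entry point */

/* Hypothetical platform hook, for illustration only. */
static int qcom_smp_prepare(void)
{
        int ret;

        /*
         * Point the cold-boot vector of every possible CPU at the kernel's
         * secondary entry point; the per-CPU coldboot flag handling removed
         * above now lives in the __qcom_scm_set_cold_boot_addr() backend.
         */
        ret = qcom_scm_set_cold_boot_addr(secondary_startup_arm,
                                          cpu_possible_mask);
        if (ret)
                return ret;

        /* Optional: probe whether the firmware exposes the HDCP service. */
        if (!qcom_scm_hdcp_available())
                pr_info("qcom_scm: HDCP service not available\n");

        return 0;
}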