author    André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-12-15 14:52:16 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-12-15 14:52:16 -0300
commit    8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be (patch)
tree      e9891aa6c295060d065adffd610c4f49ecf884f3 /arch/x86/platform
parent    a71852147516bc1cb5b0b3cbd13639bfd4022dc8 (diff)
Linux-libre 4.3.2-gnu
Diffstat (limited to 'arch/x86/platform')
-rw-r--r--  arch/x86/platform/Makefile          |   1
-rw-r--r--  arch/x86/platform/atom/Makefile     |   3
-rw-r--r--  arch/x86/platform/atom/pmc_atom.c   | 460
-rw-r--r--  arch/x86/platform/efi/efi.c         |   4
-rw-r--r--  arch/x86/platform/intel/Makefile    |   1
-rw-r--r--  arch/x86/platform/intel/iosf_mbi.c  | 327
-rw-r--r--  arch/x86/platform/uv/uv_irq.c       |   2
-rw-r--r--  arch/x86/platform/uv/uv_nmi.c       |   6
-rw-r--r--  arch/x86/platform/uv/uv_time.c      |  37
9 files changed, 810 insertions, 31 deletions
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index f1a6c8e86..184842ef3 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -5,6 +5,7 @@ obj-y += efi/
obj-y += geode/
obj-y += goldfish/
obj-y += iris/
+obj-y += intel/
obj-y += intel-mid/
obj-y += intel-quark/
obj-y += olpc/
diff --git a/arch/x86/platform/atom/Makefile b/arch/x86/platform/atom/Makefile
index 0a3a40cbc..40983f5b0 100644
--- a/arch/x86/platform/atom/Makefile
+++ b/arch/x86/platform/atom/Makefile
@@ -1 +1,2 @@
-obj-$(CONFIG_PUNIT_ATOM_DEBUG) += punit_atom_debug.o
+obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
+obj-$(CONFIG_PUNIT_ATOM_DEBUG) += punit_atom_debug.o
diff --git a/arch/x86/platform/atom/pmc_atom.c b/arch/x86/platform/atom/pmc_atom.c
new file mode 100644
index 000000000..964ff4fc6
--- /dev/null
+++ b/arch/x86/platform/atom/pmc_atom.c
@@ -0,0 +1,460 @@
+/*
+ * Intel Atom SOC Power Management Controller Driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/io.h>
+
+#include <asm/pmc_atom.h>
+
+struct pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+struct pmc_reg_map {
+ const struct pmc_bit_map *d3_sts_0;
+ const struct pmc_bit_map *d3_sts_1;
+ const struct pmc_bit_map *func_dis;
+ const struct pmc_bit_map *func_dis_2;
+ const struct pmc_bit_map *pss;
+};
+
+struct pmc_dev {
+ u32 base_addr;
+ void __iomem *regmap;
+ const struct pmc_reg_map *map;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
+ bool init;
+};
+
+static struct pmc_dev pmc_device;
+static u32 acpi_base_addr;
+
+static const struct pmc_bit_map d3_sts_0_map[] = {
+ {"LPSS1_F0_DMA", BIT_LPSS1_F0_DMA},
+ {"LPSS1_F1_PWM1", BIT_LPSS1_F1_PWM1},
+ {"LPSS1_F2_PWM2", BIT_LPSS1_F2_PWM2},
+ {"LPSS1_F3_HSUART1", BIT_LPSS1_F3_HSUART1},
+ {"LPSS1_F4_HSUART2", BIT_LPSS1_F4_HSUART2},
+ {"LPSS1_F5_SPI", BIT_LPSS1_F5_SPI},
+ {"LPSS1_F6_Reserved", BIT_LPSS1_F6_XXX},
+ {"LPSS1_F7_Reserved", BIT_LPSS1_F7_XXX},
+ {"SCC_EMMC", BIT_SCC_EMMC},
+ {"SCC_SDIO", BIT_SCC_SDIO},
+ {"SCC_SDCARD", BIT_SCC_SDCARD},
+ {"SCC_MIPI", BIT_SCC_MIPI},
+ {"HDA", BIT_HDA},
+ {"LPE", BIT_LPE},
+ {"OTG", BIT_OTG},
+ {"USH", BIT_USH},
+ {"GBE", BIT_GBE},
+ {"SATA", BIT_SATA},
+ {"USB_EHCI", BIT_USB_EHCI},
+ {"SEC", BIT_SEC},
+ {"PCIE_PORT0", BIT_PCIE_PORT0},
+ {"PCIE_PORT1", BIT_PCIE_PORT1},
+ {"PCIE_PORT2", BIT_PCIE_PORT2},
+ {"PCIE_PORT3", BIT_PCIE_PORT3},
+ {"LPSS2_F0_DMA", BIT_LPSS2_F0_DMA},
+ {"LPSS2_F1_I2C1", BIT_LPSS2_F1_I2C1},
+ {"LPSS2_F2_I2C2", BIT_LPSS2_F2_I2C2},
+ {"LPSS2_F3_I2C3", BIT_LPSS2_F3_I2C3},
+ {"LPSS2_F3_I2C4", BIT_LPSS2_F4_I2C4},
+ {"LPSS2_F5_I2C5", BIT_LPSS2_F5_I2C5},
+ {"LPSS2_F6_I2C6", BIT_LPSS2_F6_I2C6},
+ {"LPSS2_F7_I2C7", BIT_LPSS2_F7_I2C7},
+ {},
+};
+
+static struct pmc_bit_map byt_d3_sts_1_map[] = {
+ {"SMB", BIT_SMB},
+ {"OTG_SS_PHY", BIT_OTG_SS_PHY},
+ {"USH_SS_PHY", BIT_USH_SS_PHY},
+ {"DFX", BIT_DFX},
+ {},
+};
+
+static struct pmc_bit_map cht_d3_sts_1_map[] = {
+ {"SMB", BIT_SMB},
+ {"GMM", BIT_STS_GMM},
+ {"ISH", BIT_STS_ISH},
+ {},
+};
+
+static struct pmc_bit_map cht_func_dis_2_map[] = {
+ {"SMB", BIT_SMB},
+ {"GMM", BIT_FD_GMM},
+ {"ISH", BIT_FD_ISH},
+ {},
+};
+
+static const struct pmc_bit_map byt_pss_map[] = {
+ {"GBE", PMC_PSS_BIT_GBE},
+ {"SATA", PMC_PSS_BIT_SATA},
+ {"HDA", PMC_PSS_BIT_HDA},
+ {"SEC", PMC_PSS_BIT_SEC},
+ {"PCIE", PMC_PSS_BIT_PCIE},
+ {"LPSS", PMC_PSS_BIT_LPSS},
+ {"LPE", PMC_PSS_BIT_LPE},
+ {"DFX", PMC_PSS_BIT_DFX},
+ {"USH_CTRL", PMC_PSS_BIT_USH_CTRL},
+ {"USH_SUS", PMC_PSS_BIT_USH_SUS},
+ {"USH_VCCS", PMC_PSS_BIT_USH_VCCS},
+ {"USH_VCCA", PMC_PSS_BIT_USH_VCCA},
+ {"OTG_CTRL", PMC_PSS_BIT_OTG_CTRL},
+ {"OTG_VCCS", PMC_PSS_BIT_OTG_VCCS},
+ {"OTG_VCCA_CLK", PMC_PSS_BIT_OTG_VCCA_CLK},
+ {"OTG_VCCA", PMC_PSS_BIT_OTG_VCCA},
+ {"USB", PMC_PSS_BIT_USB},
+ {"USB_SUS", PMC_PSS_BIT_USB_SUS},
+ {},
+};
+
+static const struct pmc_bit_map cht_pss_map[] = {
+ {"SATA", PMC_PSS_BIT_SATA},
+ {"HDA", PMC_PSS_BIT_HDA},
+ {"SEC", PMC_PSS_BIT_SEC},
+ {"PCIE", PMC_PSS_BIT_PCIE},
+ {"LPSS", PMC_PSS_BIT_LPSS},
+ {"LPE", PMC_PSS_BIT_LPE},
+ {"UFS", PMC_PSS_BIT_CHT_UFS},
+ {"UXD", PMC_PSS_BIT_CHT_UXD},
+ {"UXD_FD", PMC_PSS_BIT_CHT_UXD_FD},
+ {"UX_ENG", PMC_PSS_BIT_CHT_UX_ENG},
+ {"USB_SUS", PMC_PSS_BIT_CHT_USB_SUS},
+ {"GMM", PMC_PSS_BIT_CHT_GMM},
+ {"ISH", PMC_PSS_BIT_CHT_ISH},
+ {"DFX_MASTER", PMC_PSS_BIT_CHT_DFX_MASTER},
+ {"DFX_CLUSTER1", PMC_PSS_BIT_CHT_DFX_CLUSTER1},
+ {"DFX_CLUSTER2", PMC_PSS_BIT_CHT_DFX_CLUSTER2},
+ {"DFX_CLUSTER3", PMC_PSS_BIT_CHT_DFX_CLUSTER3},
+ {"DFX_CLUSTER4", PMC_PSS_BIT_CHT_DFX_CLUSTER4},
+ {"DFX_CLUSTER5", PMC_PSS_BIT_CHT_DFX_CLUSTER5},
+ {},
+};
+
+static const struct pmc_reg_map byt_reg_map = {
+ .d3_sts_0 = d3_sts_0_map,
+ .d3_sts_1 = byt_d3_sts_1_map,
+ .func_dis = d3_sts_0_map,
+ .func_dis_2 = byt_d3_sts_1_map,
+ .pss = byt_pss_map,
+};
+
+static const struct pmc_reg_map cht_reg_map = {
+ .d3_sts_0 = d3_sts_0_map,
+ .d3_sts_1 = cht_d3_sts_1_map,
+ .func_dis = d3_sts_0_map,
+ .func_dis_2 = cht_func_dis_2_map,
+ .pss = cht_pss_map,
+};
+
+static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset)
+{
+ return readl(pmc->regmap + reg_offset);
+}
+
+static inline void pmc_reg_write(struct pmc_dev *pmc, int reg_offset, u32 val)
+{
+ writel(val, pmc->regmap + reg_offset);
+}
+
+int pmc_atom_read(int offset, u32 *value)
+{
+ struct pmc_dev *pmc = &pmc_device;
+
+ if (!pmc->init)
+ return -ENODEV;
+
+ *value = pmc_reg_read(pmc, offset);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmc_atom_read);
+
+int pmc_atom_write(int offset, u32 value)
+{
+ struct pmc_dev *pmc = &pmc_device;
+
+ if (!pmc->init)
+ return -ENODEV;
+
+ pmc_reg_write(pmc, offset, value);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmc_atom_write);
+
+static void pmc_power_off(void)
+{
+ u16 pm1_cnt_port;
+ u32 pm1_cnt_value;
+
+ pr_info("Preparing to enter system sleep state S5\n");
+
+ pm1_cnt_port = acpi_base_addr + PM1_CNT;
+
+ pm1_cnt_value = inl(pm1_cnt_port);
+ pm1_cnt_value &= SLEEP_TYPE_MASK;
+ pm1_cnt_value |= SLEEP_TYPE_S5;
+ pm1_cnt_value |= SLEEP_ENABLE;
+
+ outl(pm1_cnt_value, pm1_cnt_port);
+}
+
+static void pmc_hw_reg_setup(struct pmc_dev *pmc)
+{
+ /*
+ * Disable PMC S0IX_WAKE_EN events coming from:
+ * - LPC clock run
+ * - GPIO_SUS ored dedicated IRQs
+ * - GPIO_SCORE ored dedicated IRQs
+ * - GPIO_SUS shared IRQ
+ * - GPIO_SCORE shared IRQ
+ */
+ pmc_reg_write(pmc, PMC_S0IX_WAKE_EN, (u32)PMC_WAKE_EN_SETTING);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void pmc_dev_state_print(struct seq_file *s, int reg_index,
+ u32 sts, const struct pmc_bit_map *sts_map,
+ u32 fd, const struct pmc_bit_map *fd_map)
+{
+ int offset = PMC_REG_BIT_WIDTH * reg_index;
+ int index;
+
+ for (index = 0; sts_map[index].name; index++) {
+ seq_printf(s, "Dev: %-2d - %-32s\tState: %s [%s]\n",
+ offset + index, sts_map[index].name,
+ fd_map[index].bit_mask & fd ? "Disabled" : "Enabled ",
+ sts_map[index].bit_mask & sts ? "D3" : "D0");
+ }
+}
+
+static int pmc_dev_state_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmc = s->private;
+ const struct pmc_reg_map *m = pmc->map;
+ u32 func_dis, func_dis_2;
+ u32 d3_sts_0, d3_sts_1;
+
+ func_dis = pmc_reg_read(pmc, PMC_FUNC_DIS);
+ func_dis_2 = pmc_reg_read(pmc, PMC_FUNC_DIS_2);
+ d3_sts_0 = pmc_reg_read(pmc, PMC_D3_STS_0);
+ d3_sts_1 = pmc_reg_read(pmc, PMC_D3_STS_1);
+
+ /* Low part */
+ pmc_dev_state_print(s, 0, d3_sts_0, m->d3_sts_0, func_dis, m->func_dis);
+
+ /* High part */
+ pmc_dev_state_print(s, 1, d3_sts_1, m->d3_sts_1, func_dis_2, m->func_dis_2);
+
+ return 0;
+}
+
+static int pmc_dev_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_dev_state_show, inode->i_private);
+}
+
+static const struct file_operations pmc_dev_state_ops = {
+ .open = pmc_dev_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pmc_pss_state_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmc = s->private;
+ const struct pmc_bit_map *map = pmc->map->pss;
+ u32 pss = pmc_reg_read(pmc, PMC_PSS);
+ int index;
+
+ for (index = 0; map[index].name; index++) {
+ seq_printf(s, "Island: %-2d - %-32s\tState: %s\n",
+ index, map[index].name,
+ map[index].bit_mask & pss ? "Off" : "On");
+ }
+ return 0;
+}
+
+static int pmc_pss_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_pss_state_show, inode->i_private);
+}
+
+static const struct file_operations pmc_pss_state_ops = {
+ .open = pmc_pss_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pmc_sleep_tmr_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmc = s->private;
+ u64 s0ir_tmr, s0i1_tmr, s0i2_tmr, s0i3_tmr, s0_tmr;
+
+ s0ir_tmr = (u64)pmc_reg_read(pmc, PMC_S0IR_TMR) << PMC_TMR_SHIFT;
+ s0i1_tmr = (u64)pmc_reg_read(pmc, PMC_S0I1_TMR) << PMC_TMR_SHIFT;
+ s0i2_tmr = (u64)pmc_reg_read(pmc, PMC_S0I2_TMR) << PMC_TMR_SHIFT;
+ s0i3_tmr = (u64)pmc_reg_read(pmc, PMC_S0I3_TMR) << PMC_TMR_SHIFT;
+ s0_tmr = (u64)pmc_reg_read(pmc, PMC_S0_TMR) << PMC_TMR_SHIFT;
+
+ seq_printf(s, "S0IR Residency:\t%lldus\n", s0ir_tmr);
+ seq_printf(s, "S0I1 Residency:\t%lldus\n", s0i1_tmr);
+ seq_printf(s, "S0I2 Residency:\t%lldus\n", s0i2_tmr);
+ seq_printf(s, "S0I3 Residency:\t%lldus\n", s0i3_tmr);
+ seq_printf(s, "S0 Residency:\t%lldus\n", s0_tmr);
+ return 0;
+}
+
+static int pmc_sleep_tmr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_sleep_tmr_show, inode->i_private);
+}
+
+static const struct file_operations pmc_sleep_tmr_ops = {
+ .open = pmc_sleep_tmr_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void pmc_dbgfs_unregister(struct pmc_dev *pmc)
+{
+ debugfs_remove_recursive(pmc->dbgfs_dir);
+}
+
+static int pmc_dbgfs_register(struct pmc_dev *pmc)
+{
+ struct dentry *dir, *f;
+
+ dir = debugfs_create_dir("pmc_atom", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ pmc->dbgfs_dir = dir;
+
+ f = debugfs_create_file("dev_state", S_IFREG | S_IRUGO,
+ dir, pmc, &pmc_dev_state_ops);
+ if (!f)
+ goto err;
+
+ f = debugfs_create_file("pss_state", S_IFREG | S_IRUGO,
+ dir, pmc, &pmc_pss_state_ops);
+ if (!f)
+ goto err;
+
+ f = debugfs_create_file("sleep_state", S_IFREG | S_IRUGO,
+ dir, pmc, &pmc_sleep_tmr_ops);
+ if (!f)
+ goto err;
+
+ return 0;
+err:
+ pmc_dbgfs_unregister(pmc);
+ return -ENODEV;
+}
+#else
+static int pmc_dbgfs_register(struct pmc_dev *pmc)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct pmc_dev *pmc = &pmc_device;
+ const struct pmc_reg_map *map = (struct pmc_reg_map *)ent->driver_data;
+ int ret;
+
+ /* Obtain ACPI base address */
+ pci_read_config_dword(pdev, ACPI_BASE_ADDR_OFFSET, &acpi_base_addr);
+ acpi_base_addr &= ACPI_BASE_ADDR_MASK;
+
+ /* Install power off function */
+ if (acpi_base_addr != 0 && pm_power_off == NULL)
+ pm_power_off = pmc_power_off;
+
+ pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr);
+ pmc->base_addr &= PMC_BASE_ADDR_MASK;
+
+ pmc->regmap = ioremap_nocache(pmc->base_addr, PMC_MMIO_REG_LEN);
+ if (!pmc->regmap) {
+ dev_err(&pdev->dev, "error: ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ pmc->map = map;
+
+ /* PMC hardware registers setup */
+ pmc_hw_reg_setup(pmc);
+
+ ret = pmc_dbgfs_register(pmc);
+ if (ret)
+ dev_warn(&pdev->dev, "debugfs register failed\n");
+
+ pmc->init = true;
+ return ret;
+}
+
+/*
+ * Data for PCI driver interface
+ *
+ * used by pci_match_id() call below.
+ */
+static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_VLV_PMC), (kernel_ulong_t)&byt_reg_map },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CHT_PMC), (kernel_ulong_t)&cht_reg_map },
+ { 0, },
+};
+
+static int __init pmc_atom_init(void)
+{
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *ent;
+
+ /* We look for our device - the PCU PMC.
+ * We assume that there is at most one such device.
+ *
+ * We can't use the plain pci_driver mechanism,
+ * as the device is really a multiple-function device;
+ * the main driver that binds to the pci_device is lpc_ich,
+ * so we have to find & bind to the device this way.
+ */
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pmc_pci_ids, pdev);
+ if (ent)
+ return pmc_setup_dev(pdev, ent);
+ }
+ /* Device not found. */
+ return -ENODEV;
+}
+
+device_initcall(pmc_atom_init);
+
+/*
+MODULE_AUTHOR("Aubrey Li <aubrey.li@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Atom SOC Power Management Controller Interface");
+MODULE_LICENSE("GPL v2");
+*/
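
The new pmc_atom.c exports pmc_atom_read() and pmc_atom_write() for use by other kernel code. A minimal sketch of a consumer follows; it is not part of this diff and assumes only the accessors above and the PMC_S0IX_WAKE_EN offset that pmc_hw_reg_setup() already takes from <asm/pmc_atom.h>:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/pmc_atom.h>

/* Sketch only: read back the wake-enable register programmed by
 * pmc_hw_reg_setup(). pmc_atom_read() returns -ENODEV until the PMC
 * device has been located and mapped by pmc_setup_dev().
 */
static int example_dump_wake_en(void)
{
	u32 val;
	int ret;

	ret = pmc_atom_read(PMC_S0IX_WAKE_EN, &val);
	if (ret)
		return ret;

	pr_info("S0IX_WAKE_EN = 0x%08x\n", val);
	return 0;
}
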
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index c6835bfad..6a28ded74 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -650,7 +650,7 @@ static void __init get_systab_virt_addr(efi_memory_desc_t *md)
static void __init save_runtime_map(void)
{
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
efi_memory_desc_t *md;
void *tmp, *p, *q = NULL;
int count = 0;
@@ -813,7 +813,7 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
static void __init kexec_enter_virtual_mode(void)
{
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
efi_memory_desc_t *md;
void *p;
diff --git a/arch/x86/platform/intel/Makefile b/arch/x86/platform/intel/Makefile
new file mode 100644
index 000000000..b878032fb
--- /dev/null
+++ b/arch/x86/platform/intel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
new file mode 100644
index 000000000..edf2c54bf
--- /dev/null
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -0,0 +1,327 @@
+/*
+ * IOSF-SB MailBox Interface Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ * The IOSF-SB is a fabric bus available on Atom-based SoCs that uses a
+ * mailbox interface (MBI) to communicate with multiple devices. This
+ * driver implements access to this interface for those platforms that can
+ * enumerate the device using PCI.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/capability.h>
+
+#include <asm/iosf_mbi.h>
+
+#define PCI_DEVICE_ID_BAYTRAIL 0x0F00
+#define PCI_DEVICE_ID_BRASWELL 0x2280
+#define PCI_DEVICE_ID_QUARK_X1000 0x0958
+#define PCI_DEVICE_ID_TANGIER 0x1170
+
+static struct pci_dev *mbi_pdev;
+static DEFINE_SPINLOCK(iosf_mbi_lock);
+
+static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
+{
+ return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE;
+}
+
+static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
+ mcrx);
+ if (result < 0)
+ goto fail_read;
+ }
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto fail_read;
+
+ result = pci_read_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto fail_read;
+
+ return 0;
+
+fail_read:
+ dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+ return result;
+}
+
+static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto fail_write;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
+ mcrx);
+ if (result < 0)
+ goto fail_write;
+ }
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto fail_write;
+
+ return 0;
+
+fail_write:
+ dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+ return result;
+}
+
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_read);
+
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_write);
+
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+{
+ u32 mcr, mcrx;
+ u32 value;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+
+ /* Read current mdr value */
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr & MBI_RD_MASK, &value);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+ return ret;
+ }
+
+ /* Apply mask */
+ value &= ~mask;
+ mdr &= mask;
+ value |= mdr;
+
+ /* Write back */
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr | MBI_WR_MASK, value);
+
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_modify);
+
+bool iosf_mbi_available(void)
+{
+ /* The MBI isn't hot-pluggable. No remove routine is provided */
+ return mbi_pdev;
+}
+EXPORT_SYMBOL(iosf_mbi_available);
+
+#ifdef CONFIG_IOSF_MBI_DEBUG
+static u32 dbg_mdr;
+static u32 dbg_mcr;
+static u32 dbg_mcrx;
+
+static int mcr_get(void *data, u64 *val)
+{
+ *val = *(u32 *)data;
+ return 0;
+}
+
+static int mcr_set(void *data, u64 val)
+{
+ u8 command = ((u32)val & 0xFF000000) >> 24,
+ port = ((u32)val & 0x00FF0000) >> 16,
+ offset = ((u32)val & 0x0000FF00) >> 8;
+ int err;
+
+ *(u32 *)data = val;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EACCES;
+
+ if (command & 1u)
+ err = iosf_mbi_write(port,
+ command,
+ dbg_mcrx | offset,
+ dbg_mdr);
+ else
+ err = iosf_mbi_read(port,
+ command,
+ dbg_mcrx | offset,
+ &dbg_mdr);
+
+ return err;
+}
+DEFINE_SIMPLE_ATTRIBUTE(iosf_mcr_fops, mcr_get, mcr_set, "%llx\n");
+
+static struct dentry *iosf_dbg;
+
+static void iosf_sideband_debug_init(void)
+{
+ struct dentry *d;
+
+ iosf_dbg = debugfs_create_dir("iosf_sb", NULL);
+ if (IS_ERR_OR_NULL(iosf_dbg))
+ return;
+
+ /* mdr */
+ d = debugfs_create_x32("mdr", 0660, iosf_dbg, &dbg_mdr);
+ if (!d)
+ goto cleanup;
+
+ /* mcrx */
+ d = debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx);
+ if (!d)
+ goto cleanup;
+
+ /* mcr - initiates mailbox transaction */
+ d = debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops);
+ if (!d)
+ goto cleanup;
+
+ return;
+
+cleanup:
+ debugfs_remove_recursive(d);
+}
+
+static void iosf_debugfs_init(void)
+{
+ iosf_sideband_debug_init();
+}
+
+static void iosf_debugfs_remove(void)
+{
+ debugfs_remove_recursive(iosf_dbg);
+}
+#else
+static inline void iosf_debugfs_init(void) { }
+static inline void iosf_debugfs_remove(void) { }
+#endif /* CONFIG_IOSF_MBI_DEBUG */
+
+static int iosf_mbi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *unused)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ mbi_pdev = pci_dev_get(pdev);
+ return 0;
+}
+
+static const struct pci_device_id iosf_mbi_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BAYTRAIL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRASWELL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_QUARK_X1000) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_TANGIER) },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
+
+static struct pci_driver iosf_mbi_pci_driver = {
+ .name = "iosf_mbi_pci",
+ .probe = iosf_mbi_probe,
+ .id_table = iosf_mbi_pci_ids,
+};
+
+static int __init iosf_mbi_init(void)
+{
+ iosf_debugfs_init();
+
+ return pci_register_driver(&iosf_mbi_pci_driver);
+}
+
+static void __exit iosf_mbi_exit(void)
+{
+ iosf_debugfs_remove();
+
+ pci_unregister_driver(&iosf_mbi_pci_driver);
+ pci_dev_put(mbi_pdev);
+ mbi_pdev = NULL;
+}
+
+module_init(iosf_mbi_init);
+module_exit(iosf_mbi_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("IOSF Mailbox Interface accessor");
+MODULE_LICENSE("GPL v2");
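
iosf_mbi.c above exports iosf_mbi_available(), iosf_mbi_read(), iosf_mbi_write() and iosf_mbi_modify(), each taking a port, an opcode and a register offset. A minimal sketch of a caller follows; it is not part of this diff, BT_MBI_UNIT_PMC and the MBI_REG_READ/MBI_REG_WRITE opcodes are assumed to come from <asm/iosf_mbi.h>, and EXAMPLE_REG is a hypothetical register offset:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/iosf_mbi.h>

#define EXAMPLE_REG	0x3c	/* hypothetical sideband register offset */

/* Sketch only: read-modify-write one bit of a sideband register.
 * iosf_mbi_available() stays false until iosf_mbi_probe() has bound
 * to one of the PCI IDs listed above.
 */
static int example_set_bit0(void)
{
	u32 val;
	int ret;

	if (!iosf_mbi_available())
		return -ENODEV;

	ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, EXAMPLE_REG, &val);
	if (ret)
		return ret;

	val |= 0x1;
	return iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, EXAMPLE_REG, val);
}

The same read-modify-write can also be expressed as a single iosf_mbi_modify() call, which holds iosf_mbi_lock across both config-space accesses as shown in the driver above.
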
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 8570abe68..e1c24631a 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -89,7 +89,7 @@ static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
return -EINVAL;
chip_data = kmalloc_node(sizeof(*chip_data), GFP_KERNEL,
- irq_data->node);
+ irq_data_get_node(irq_data));
if (!chip_data)
return -ENOMEM;
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 020c101c2..5c9f63fa6 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -492,7 +492,7 @@ static void uv_nmi_touch_watchdogs(void)
touch_nmi_watchdog();
}
-#if defined(CONFIG_KEXEC)
+#if defined(CONFIG_KEXEC_CORE)
static atomic_t uv_nmi_kexec_failed;
static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
@@ -519,13 +519,13 @@ static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
uv_nmi_sync_exit(0);
}
-#else /* !CONFIG_KEXEC */
+#else /* !CONFIG_KEXEC_CORE */
static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
if (master)
pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
}
-#endif /* !CONFIG_KEXEC */
+#endif /* !CONFIG_KEXEC_CORE */
#ifdef CONFIG_KGDB
#ifdef CONFIG_KGDB_KDB
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index a244237f3..2b158a9fa 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -32,8 +32,7 @@
static cycle_t uv_read_rtc(struct clocksource *cs);
static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
-static void uv_rtc_timer_setup(enum clock_event_mode,
- struct clock_event_device *);
+static int uv_rtc_shutdown(struct clock_event_device *evt);
static struct clocksource clocksource_uv = {
.name = RTC_NAME,
@@ -44,14 +43,14 @@ static struct clocksource clocksource_uv = {
};
static struct clock_event_device clock_event_device_uv = {
- .name = RTC_NAME,
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .shift = 20,
- .rating = 400,
- .irq = -1,
- .set_next_event = uv_rtc_next_event,
- .set_mode = uv_rtc_timer_setup,
- .event_handler = NULL,
+ .name = RTC_NAME,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 20,
+ .rating = 400,
+ .irq = -1,
+ .set_next_event = uv_rtc_next_event,
+ .set_state_shutdown = uv_rtc_shutdown,
+ .event_handler = NULL,
};
static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
@@ -321,24 +320,14 @@ static int uv_rtc_next_event(unsigned long delta,
}
/*
- * Setup the RTC timer in oneshot mode
+ * Shutdown the RTC timer
*/
-static void uv_rtc_timer_setup(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int uv_rtc_shutdown(struct clock_event_device *evt)
{
int ced_cpu = cpumask_first(evt->cpumask);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_RESUME:
- /* Nothing to do here yet */
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- uv_rtc_unset_timer(ced_cpu, 1);
- break;
- }
+ uv_rtc_unset_timer(ced_cpu, 1);
+ return 0;
}
static void uv_rtc_interrupt(void)