From d0b2f91bede3bd5e3d24dd6803e56eee959c1797 Mon Sep 17 00:00:00 2001 From: André Fabian Silva Delgado Date: Thu, 20 Oct 2016 00:10:27 -0300 Subject: Linux-libre 4.8.2-gnu --- arch/powerpc/platforms/pseries/cmm.c | 2 +- arch/powerpc/platforms/pseries/dlpar.c | 56 +++++- arch/powerpc/platforms/pseries/eeh_pseries.c | 2 +- arch/powerpc/platforms/pseries/event_sources.c | 53 ++---- arch/powerpc/platforms/pseries/firmware.c | 48 ++++- arch/powerpc/platforms/pseries/hotplug-cpu.c | 13 -- arch/powerpc/platforms/pseries/hotplug-memory.c | 210 +++++++++++--------- arch/powerpc/platforms/pseries/io_event_irq.c | 2 +- arch/powerpc/platforms/pseries/iommu.c | 59 +----- arch/powerpc/platforms/pseries/kexec.c | 23 +-- arch/powerpc/platforms/pseries/lpar.c | 41 ++-- arch/powerpc/platforms/pseries/nvram.c | 2 - arch/powerpc/platforms/pseries/pci.c | 4 + arch/powerpc/platforms/pseries/pci_dlpar.c | 7 +- arch/powerpc/platforms/pseries/power.c | 2 + arch/powerpc/platforms/pseries/pseries.h | 21 +- arch/powerpc/platforms/pseries/pseries_energy.c | 8 +- arch/powerpc/platforms/pseries/ras.c | 39 ++++ arch/powerpc/platforms/pseries/setup.c | 243 ++++++------------------ arch/powerpc/platforms/pseries/smp.c | 31 +-- 20 files changed, 375 insertions(+), 491 deletions(-) (limited to 'arch/powerpc/platforms/pseries') diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index fc44ad047..66e722746 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -574,7 +574,7 @@ static int cmm_mem_going_offline(void *arg) cmm_dbg("Failed to allocate memory for list " "management. Memory hotplug " "failed.\n"); - return ENOMEM; + return -ENOMEM; } memcpy(npa, pa_curr, PAGE_SIZE); if (pa_curr == cmm_page_list) diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 2b93ae8d5..4748124fa 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -27,6 +27,15 @@ #include #include +struct workqueue_struct *pseries_hp_wq; + +struct pseries_hp_work { + struct work_struct work; + struct pseries_hp_errorlog *errlog; + struct completion *hp_completion; + int *rc; +}; + struct cc_workarea { __be32 drc_index; __be32 zero; @@ -368,10 +377,51 @@ static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) return rc; } +void pseries_hp_work_fn(struct work_struct *work) +{ + struct pseries_hp_work *hp_work = + container_of(work, struct pseries_hp_work, work); + + if (hp_work->rc) + *(hp_work->rc) = handle_dlpar_errorlog(hp_work->errlog); + else + handle_dlpar_errorlog(hp_work->errlog); + + if (hp_work->hp_completion) + complete(hp_work->hp_completion); + + kfree(hp_work->errlog); + kfree((void *)work); +} + +void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog, + struct completion *hotplug_done, int *rc) +{ + struct pseries_hp_work *work; + struct pseries_hp_errorlog *hp_errlog_copy; + + hp_errlog_copy = kmalloc(sizeof(struct pseries_hp_errorlog), + GFP_KERNEL); + memcpy(hp_errlog_copy, hp_errlog, sizeof(struct pseries_hp_errorlog)); + + work = kmalloc(sizeof(struct pseries_hp_work), GFP_KERNEL); + if (work) { + INIT_WORK((struct work_struct *)work, pseries_hp_work_fn); + work->errlog = hp_errlog_copy; + work->hp_completion = hotplug_done; + work->rc = rc; + queue_work(pseries_hp_wq, (struct work_struct *)work); + } else { + *rc = -ENOMEM; + complete(hotplug_done); + } +} + static ssize_t dlpar_store(struct class *class, struct class_attribute *attr, const char *buf, 
size_t count) { struct pseries_hp_errorlog *hp_elog; + struct completion hotplug_done; const char *arg; int rc; @@ -439,7 +489,9 @@ static ssize_t dlpar_store(struct class *class, struct class_attribute *attr, goto dlpar_store_out; } - rc = handle_dlpar_errorlog(hp_elog); + init_completion(&hotplug_done); + queue_hotplug_event(hp_elog, &hotplug_done, &rc); + wait_for_completion(&hotplug_done); dlpar_store_out: kfree(hp_elog); @@ -450,6 +502,8 @@ static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store); static int __init pseries_dlpar_init(void) { + pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", + WQ_UNBOUND, 1); return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); } machine_device_initcall(pseries, pseries_dlpar_init); diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 3998e0f9a..1c428f06b 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -2,7 +2,7 @@ * The file intends to implement the platform dependent EEH operations on pseries. * Actually, the pseries platform is built based on RTAS heavily. That means the * pseries platform dependent EEH operations will be built on RTAS calls. The functions - * are devired from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has + * are derived from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has * been done. * * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011. diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c index 18380e8f6..a6ddca833 100644 --- a/arch/powerpc/platforms/pseries/event_sources.c +++ b/arch/powerpc/platforms/pseries/event_sources.c @@ -26,48 +26,21 @@ void request_event_sources_irqs(struct device_node *np, { int i, index, count = 0; struct of_phandle_args oirq; - const u32 *opicprop; - unsigned int opicplen; unsigned int virqs[16]; - /* Check for obsolete "open-pic-interrupt" property. 
If present, then - * map those interrupts using the default interrupt host and default - * trigger - */ - opicprop = of_get_property(np, "open-pic-interrupt", &opicplen); - if (opicprop) { - opicplen /= sizeof(u32); - for (i = 0; i < opicplen; i++) { - if (count > 15) - break; - virqs[count] = irq_create_mapping(NULL, *(opicprop++)); - if (virqs[count] == NO_IRQ) { - pr_err("event-sources: Unable to allocate " - "interrupt number for %s\n", - np->full_name); - WARN_ON(1); - } - else - count++; - - } - } - /* Else use normal interrupt tree parsing */ - else { - /* First try to do a proper OF tree parsing */ - for (index = 0; of_irq_parse_one(np, index, &oirq) == 0; - index++) { - if (count > 15) - break; - virqs[count] = irq_create_of_mapping(&oirq); - if (virqs[count] == NO_IRQ) { - pr_err("event-sources: Unable to allocate " - "interrupt number for %s\n", - np->full_name); - WARN_ON(1); - } - else - count++; + /* First try to do a proper OF tree parsing */ + for (index = 0; of_irq_parse_one(np, index, &oirq) == 0; + index++) { + if (count > 15) + break; + virqs[count] = irq_create_of_mapping(&oirq); + if (virqs[count] == NO_IRQ) { + pr_err("event-sources: Unable to allocate " + "interrupt number for %s\n", + np->full_name); + WARN_ON(1); + } else { + count++; } } diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c index 8c80588ab..ea7f09bd7 100644 --- a/arch/powerpc/platforms/pseries/firmware.c +++ b/arch/powerpc/platforms/pseries/firmware.c @@ -22,6 +22,7 @@ */ +#include #include #include #include @@ -69,7 +70,8 @@ hypertas_fw_features_table[] = { * device-tree/ibm,hypertas-functions. Ultimately this functionality may * be moved into prom.c prom_init(). */ -void __init fw_hypertas_feature_init(const char *hypertas, unsigned long len) +static void __init fw_hypertas_feature_init(const char *hypertas, + unsigned long len) { const char *s; int i; @@ -113,7 +115,7 @@ vec5_fw_features_table[] = { {FW_FEATURE_PRRN, OV5_PRRN}, }; -void __init fw_vec5_feature_init(const char *vec5, unsigned long len) +static void __init fw_vec5_feature_init(const char *vec5, unsigned long len) { unsigned int index, feat; int i; @@ -131,3 +133,45 @@ void __init fw_vec5_feature_init(const char *vec5, unsigned long len) pr_debug(" <- fw_vec5_feature_init()\n"); } + +/* + * Called very early, MMU is off, device-tree isn't unflattened + */ +static int __init probe_fw_features(unsigned long node, const char *uname, int + depth, void *data) +{ + const char *prop; + int len; + static int hypertas_found; + static int vec5_found; + + if (depth != 1) + return 0; + + if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0")) { + prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions", + &len); + if (prop) { + powerpc_firmware_features |= FW_FEATURE_LPAR; + fw_hypertas_feature_init(prop, len); + } + + hypertas_found = 1; + } + + if (!strcmp(uname, "chosen")) { + prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5", + &len); + if (prop) + fw_vec5_feature_init(prop, len); + + vec5_found = 1; + } + + return hypertas_found && vec5_found; +} + +void __init pseries_probe_fw_features(void) +{ + of_scan_flat_dt(probe_fw_features, NULL); +} diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 282837a1d..a1b63e00b 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -903,8 +903,6 @@ static int parse_cede_parameters(void) static int __init pseries_cpu_hotplug_init(void) 
{ - struct device_node *np; - const char *typep; int cpu; int qcss_tok; @@ -913,17 +911,6 @@ static int __init pseries_cpu_hotplug_init(void) ppc_md.cpu_release = dlpar_cpu_release; #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ - for_each_node_by_name(np, "interrupt-controller") { - typep = of_get_property(np, "compatible", NULL); - if (strstr(typep, "open-pic")) { - of_node_put(np); - - printk(KERN_INFO "CPU Hotplug not supported on " - "systems using MPIC\n"); - return 0; - } - } - rtas_stop_self_token = rtas_token("stop-self"); qcss_tok = rtas_token("query-cpu-stopped-state"); diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 2ce138542..76ec104e8 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -69,13 +69,36 @@ unsigned long pseries_memory_block_size(void) return memblock_size; } -static void dlpar_free_drconf_property(struct property *prop) +static void dlpar_free_property(struct property *prop) { kfree(prop->name); kfree(prop->value); kfree(prop); } +static struct property *dlpar_clone_property(struct property *prop, + u32 prop_size) +{ + struct property *new_prop; + + new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL); + if (!new_prop) + return NULL; + + new_prop->name = kstrdup(prop->name, GFP_KERNEL); + new_prop->value = kzalloc(prop_size, GFP_KERNEL); + if (!new_prop->name || !new_prop->value) { + dlpar_free_property(new_prop); + return NULL; + } + + memcpy(new_prop->value, prop->value, prop->length); + new_prop->length = prop_size; + + of_property_set_flag(new_prop, OF_DYNAMIC); + return new_prop; +} + static struct property *dlpar_clone_drconf_property(struct device_node *dn) { struct property *prop, *new_prop; @@ -87,19 +110,10 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn) if (!prop) return NULL; - new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL); + new_prop = dlpar_clone_property(prop, prop->length); if (!new_prop) return NULL; - new_prop->name = kstrdup(prop->name, GFP_KERNEL); - new_prop->value = kmemdup(prop->value, prop->length, GFP_KERNEL); - if (!new_prop->name || !new_prop->value) { - dlpar_free_drconf_property(new_prop); - return NULL; - } - - new_prop->length = prop->length; - /* Convert the property to cpu endian-ness */ p = new_prop->value; *p = be32_to_cpu(*p); @@ -177,14 +191,74 @@ static int dlpar_update_device_tree_lmb(struct of_drconf_cell *lmb) return 0; } +static u32 find_aa_index(struct device_node *dr_node, + struct property *ala_prop, const u32 *lmb_assoc) +{ + u32 *assoc_arrays; + u32 aa_index; + int aa_arrays, aa_array_entries, aa_array_sz; + int i, index; + + /* + * The ibm,associativity-lookup-arrays property is defined to be + * a 32-bit value specifying the number of associativity arrays + * followed by a 32-bitvalue specifying the number of entries per + * array, followed by the associativity arrays. 
+ */ + assoc_arrays = ala_prop->value; + + aa_arrays = be32_to_cpu(assoc_arrays[0]); + aa_array_entries = be32_to_cpu(assoc_arrays[1]); + aa_array_sz = aa_array_entries * sizeof(u32); + + aa_index = -1; + for (i = 0; i < aa_arrays; i++) { + index = (i * aa_array_entries) + 2; + + if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz)) + continue; + + aa_index = i; + break; + } + + if (aa_index == -1) { + struct property *new_prop; + u32 new_prop_size; + + new_prop_size = ala_prop->length + aa_array_sz; + new_prop = dlpar_clone_property(ala_prop, new_prop_size); + if (!new_prop) + return -1; + + assoc_arrays = new_prop->value; + + /* increment the number of entries in the lookup array */ + assoc_arrays[0] = cpu_to_be32(aa_arrays + 1); + + /* copy the new associativity into the lookup array */ + index = aa_arrays * aa_array_entries + 2; + memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz); + + of_update_property(dr_node, new_prop); + + /* + * The associativity lookup array index for this lmb is + * number of entries - 1 since we added its associativity + * to the end of the lookup array. + */ + aa_index = be32_to_cpu(assoc_arrays[0]) - 1; + } + + return aa_index; +} + static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb) { struct device_node *parent, *lmb_node, *dr_node; + struct property *ala_prop; const u32 *lmb_assoc; - const u32 *assoc_arrays; u32 aa_index; - int aa_arrays, aa_array_entries, aa_array_sz; - int i; parent = of_find_node_by_path("/"); if (!parent) @@ -208,34 +282,15 @@ static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb) return -ENODEV; } - assoc_arrays = of_get_property(dr_node, - "ibm,associativity-lookup-arrays", - NULL); - of_node_put(dr_node); - if (!assoc_arrays) { + ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays", + NULL); + if (!ala_prop) { + of_node_put(dr_node); dlpar_free_cc_nodes(lmb_node); return -ENODEV; } - /* The ibm,associativity-lookup-arrays property is defined to be - * a 32-bit value specifying the number of associativity arrays - * followed by a 32-bitvalue specifying the number of entries per - * array, followed by the associativity arrays. 
- */ - aa_arrays = be32_to_cpu(assoc_arrays[0]); - aa_array_entries = be32_to_cpu(assoc_arrays[1]); - aa_array_sz = aa_array_entries * sizeof(u32); - - aa_index = -1; - for (i = 0; i < aa_arrays; i++) { - int indx = (i * aa_array_entries) + 2; - - if (memcmp(&assoc_arrays[indx], &lmb_assoc[1], aa_array_sz)) - continue; - - aa_index = i; - break; - } + aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc); dlpar_free_cc_nodes(lmb_node); return aa_index; @@ -265,19 +320,6 @@ static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb) return dlpar_update_device_tree_lmb(lmb); } -static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb) -{ - unsigned long section_nr; - struct mem_section *mem_sect; - struct memory_block *mem_block; - - section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr)); - mem_sect = __nr_to_section(section_nr); - - mem_block = find_memory_block(mem_sect); - return mem_block; -} - #ifdef CONFIG_MEMORY_HOTREMOVE static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) { @@ -365,6 +407,19 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb) static int dlpar_add_lmb(struct of_drconf_cell *); +static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb) +{ + unsigned long section_nr; + struct mem_section *mem_sect; + struct memory_block *mem_block; + + section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr)); + mem_sect = __nr_to_section(section_nr); + + mem_block = find_memory_block(mem_sect); + return mem_block; +} + static int dlpar_remove_lmb(struct of_drconf_cell *lmb) { struct memory_block *mem_block; @@ -533,50 +588,11 @@ static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop) #endif /* CONFIG_MEMORY_HOTREMOVE */ -static int dlpar_add_lmb_memory(struct of_drconf_cell *lmb) +static int dlpar_add_lmb(struct of_drconf_cell *lmb) { - struct memory_block *mem_block; unsigned long block_sz; int nid, rc; - block_sz = memory_block_size_bytes(); - - /* Find the node id for this address */ - nid = memory_add_physaddr_to_nid(lmb->base_addr); - - /* Add the memory */ - rc = add_memory(nid, lmb->base_addr, block_sz); - if (rc) - return rc; - - /* Register this block of memory */ - rc = memblock_add(lmb->base_addr, block_sz); - if (rc) { - remove_memory(nid, lmb->base_addr, block_sz); - return rc; - } - - mem_block = lmb_to_memblock(lmb); - if (!mem_block) { - remove_memory(nid, lmb->base_addr, block_sz); - return -EINVAL; - } - - rc = device_online(&mem_block->dev); - put_device(&mem_block->dev); - if (rc) { - remove_memory(nid, lmb->base_addr, block_sz); - return rc; - } - - lmb->flags |= DRCONF_MEM_ASSIGNED; - return 0; -} - -static int dlpar_add_lmb(struct of_drconf_cell *lmb) -{ - int rc; - if (lmb->flags & DRCONF_MEM_ASSIGNED) return -EINVAL; @@ -592,10 +608,18 @@ static int dlpar_add_lmb(struct of_drconf_cell *lmb) return rc; } - rc = dlpar_add_lmb_memory(lmb); + block_sz = memory_block_size_bytes(); + + /* Find the node id for this address */ + nid = memory_add_physaddr_to_nid(lmb->base_addr); + + /* Add the memory */ + rc = add_memory(nid, lmb->base_addr, block_sz); if (rc) { dlpar_remove_device_tree_lmb(lmb); dlpar_release_drc(lmb->drc_index); + } else { + lmb->flags |= DRCONF_MEM_ASSIGNED; } return rc; @@ -748,7 +772,7 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) break; } - dlpar_free_drconf_property(prop); + dlpar_free_property(prop); dlpar_memory_out: of_node_put(dn); diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c 
b/arch/powerpc/platforms/pseries/io_event_irq.c index 0240c4ff8..f053bda64 100644 --- a/arch/powerpc/platforms/pseries/io_event_irq.c +++ b/arch/powerpc/platforms/pseries/io_event_irq.c @@ -113,7 +113,7 @@ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog) * - The owner of an event is determined by combinations of scope, * event type, and sub-type. There is no easy way to pre-sort clients * by scope or event type alone. For example, Torrent ISR route change - * event is reported with scope 0x00 (Not Applicatable) rather than + * event is reported with scope 0x00 (Not Applicable) rather than * 0x3B (Torrent-hub). It is better to let the clients to identify * who owns the event. */ diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 3e8865b18..0024e451b 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -120,39 +120,10 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group, kfree(table_group); } -static void tce_invalidate_pSeries_sw(struct iommu_table *tbl, - __be64 *startp, __be64 *endp) -{ - u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; - unsigned long start, end, inc; - - start = __pa(startp); - end = __pa(endp); - inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */ - - /* If this is non-zero, change the format. We shift the - * address and or in the magic from the device tree. */ - if (tbl->it_busno) { - start <<= 12; - end <<= 12; - inc <<= 12; - start |= tbl->it_busno; - end |= tbl->it_busno; - } - - end |= inc - 1; /* round up end to be different than start */ - - mb(); /* Make sure TCEs in memory are written */ - while (start <= end) { - out_be64(invalidate, start); - start += inc; - } -} - static int tce_build_pSeries(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { u64 proto_tce; __be64 *tcep, *tces; @@ -173,9 +144,6 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, uaddr += TCE_PAGE_SIZE; tcep++; } - - if (tbl->it_type & TCE_PCI_SWINV_CREATE) - tce_invalidate_pSeries_sw(tbl, tces, tcep - 1); return 0; } @@ -188,9 +156,6 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) while (npages--) *(tcep++) = 0; - - if (tbl->it_type & TCE_PCI_SWINV_FREE) - tce_invalidate_pSeries_sw(tbl, tces, tcep - 1); } static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) @@ -208,7 +173,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long); static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { u64 rc = 0; u64 proto_tce, tce; @@ -251,7 +216,7 @@ static DEFINE_PER_CPU(__be64 *, tce_page); static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { u64 rc = 0; u64 proto_tce; @@ -537,7 +502,7 @@ static void iommu_table_setparms(struct pci_controller *phb, struct iommu_table *tbl) { struct device_node *node; - const unsigned long *basep, *sw_inval; + const unsigned long *basep; const u32 *sizep; node = phb->dn; @@ -575,22 +540,6 @@ static void iommu_table_setparms(struct pci_controller *phb, tbl->it_index = 0; tbl->it_blocksize = 16; tbl->it_type = TCE_PCI; - - sw_inval = 
of_get_property(node, "linux,tce-sw-invalidate-info", NULL); - if (sw_inval) { - /* - * This property contains information on how to - * invalidate the TCE entry. The first property is - * the base MMIO address used to invalidate entries. - * The second property tells us the format of the TCE - * invalidate (whether it needs to be shifted) and - * some magic routing info to add to our invalidate - * command. - */ - tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8); - tbl->it_busno = sw_inval[1]; /* overload this with magic */ - tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; - } } /* diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 13fa95b3a..6681ac97f 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c @@ -14,14 +14,13 @@ #include #include #include -#include #include #include #include #include "pseries.h" -static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) +void pseries_kexec_cpu_down(int crash_shutdown, int secondary) { /* Don't risk a hypervisor call if we're crashing */ if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { @@ -51,26 +50,6 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) "(hw %d) failed with %d\n", cpu, hwcpu, ret); } } -} - -static void pseries_kexec_cpu_down_mpic(int crash_shutdown, int secondary) -{ - pseries_kexec_cpu_down(crash_shutdown, secondary); - mpic_teardown_this_cpu(secondary); -} -void __init setup_kexec_cpu_down_mpic(void) -{ - ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_mpic; -} - -static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary) -{ - pseries_kexec_cpu_down(crash_shutdown, secondary); xics_kexec_teardown_cpu(secondary); } - -void __init setup_kexec_cpu_down_xics(void) -{ - ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics; -} diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 7f6100d91..86707e678 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -45,6 +45,7 @@ #include #include #include +#include #include "pseries.h" @@ -260,24 +261,8 @@ static void pSeries_lpar_hptab_clear(void) * This is also called on boot when a fadump happens. In that case we * must not change the exception endian mode. */ - if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) { - long rc; - - rc = pseries_big_endian_exceptions(); - /* - * At this point it is unlikely panic() will get anything - * out to the user, but at least this will stop us from - * continuing on further and creating an even more - * difficult to debug situation. - * - * There is a known problem when kdump'ing, if cpus are offline - * the above call will fail. Rather than panicking again, keep - * going and hope the kdump kernel is also little endian, which - * it usually is. 
- */ - if (rc && !kdump_in_progress()) - panic("Could not enable big endian exceptions"); - } + if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) + pseries_big_endian_exceptions(); #endif } @@ -604,17 +589,17 @@ static int __init disable_bulk_remove(char *str) __setup("bulk_remove=", disable_bulk_remove); -void __init hpte_init_lpar(void) +void __init hpte_init_pseries(void) { - ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate; - ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp; - ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp; - ppc_md.hpte_insert = pSeries_lpar_hpte_insert; - ppc_md.hpte_remove = pSeries_lpar_hpte_remove; - ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted; - ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range; - ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear; - ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; + mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate; + mmu_hash_ops.hpte_updatepp = pSeries_lpar_hpte_updatepp; + mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp; + mmu_hash_ops.hpte_insert = pSeries_lpar_hpte_insert; + mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove; + mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted; + mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; + mmu_hash_ops.hpte_clear_all = pSeries_lpar_hptab_clear; + mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; } #ifdef CONFIG_PPC_SMLPAR diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 9f8184175..79aef8c1c 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -17,8 +17,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index fe16a5070..09eba5a99 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -119,6 +119,10 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) bus = bridge->bus; + /* Rely on the pcibios_free_controller_deferred() callback. */ + pci_set_host_bridge_release(bridge, pcibios_free_controller_deferred, + (void *) pci_bus_to_host(bus)); + dn = pcibios_get_phb_of_node(bus); if (!dn) return 0; diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 906dbaa97..547fd13e4 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -106,8 +106,11 @@ int remove_phb_dynamic(struct pci_controller *phb) release_resource(res); } - /* Free pci_controller data structure */ - pcibios_free_controller(phb); + /* + * The pci_controller data structure is freed by + * the pcibios_free_controller_deferred() callback; + * see pseries_root_bridge_prepare(). 
+ */ return 0; } diff --git a/arch/powerpc/platforms/pseries/power.c b/arch/powerpc/platforms/pseries/power.c index c26eadde4..a4a0b57d1 100644 --- a/arch/powerpc/platforms/pseries/power.c +++ b/arch/powerpc/platforms/pseries/power.c @@ -27,6 +27,8 @@ #include #include +#include "pseries.h" + unsigned long rtas_poweron_auto; /* default and normal state is 0 */ static ssize_t auto_poweron_show(struct kobject *kobj, diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 7aa83f00a..b1be7b713 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -20,31 +20,18 @@ extern void request_event_sources_irqs(struct device_node *np, #include -extern void __init fw_hypertas_feature_init(const char *hypertas, - unsigned long len); -extern void __init fw_vec5_feature_init(const char *hypertas, - unsigned long len); - struct pt_regs; extern int pSeries_system_reset_exception(struct pt_regs *regs); extern int pSeries_machine_check_exception(struct pt_regs *regs); #ifdef CONFIG_SMP -extern void smp_init_pseries_mpic(void); -extern void smp_init_pseries_xics(void); +extern void smp_init_pseries(void); #else -static inline void smp_init_pseries_mpic(void) { }; -static inline void smp_init_pseries_xics(void) { }; +static inline void smp_init_pseries(void) { }; #endif -#ifdef CONFIG_KEXEC -extern void setup_kexec_cpu_down_xics(void); -extern void setup_kexec_cpu_down_mpic(void); -#else -static inline void setup_kexec_cpu_down_xics(void) { } -static inline void setup_kexec_cpu_down_mpic(void) { } -#endif +extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary); extern void pSeries_final_fixup(void); @@ -64,6 +51,8 @@ extern int dlpar_detach_node(struct device_node *); extern int dlpar_acquire_drc(u32 drc_index); extern int dlpar_release_drc(u32 drc_index); +void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog, + struct completion *hotplug_done, int *rc); #ifdef CONFIG_MEMORY_HOTPLUG int dlpar_memory(struct pseries_hp_errorlog *hp_elog); #else diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c index 92767791f..164a13d39 100644 --- a/arch/powerpc/platforms/pseries/pseries_energy.c +++ b/arch/powerpc/platforms/pseries/pseries_energy.c @@ -208,19 +208,19 @@ static ssize_t percpu_deactivate_hint_show(struct device *dev, * Per-cpu value of the hint */ -struct device_attribute attr_cpu_activate_hint_list = +static struct device_attribute attr_cpu_activate_hint_list = __ATTR(pseries_activate_hint_list, 0444, cpu_activate_hint_list_show, NULL); -struct device_attribute attr_cpu_deactivate_hint_list = +static struct device_attribute attr_cpu_deactivate_hint_list = __ATTR(pseries_deactivate_hint_list, 0444, cpu_deactivate_hint_list_show, NULL); -struct device_attribute attr_percpu_activate_hint = +static struct device_attribute attr_percpu_activate_hint = __ATTR(pseries_activate_hint, 0444, percpu_activate_hint_show, NULL); -struct device_attribute attr_percpu_deactivate_hint = +static struct device_attribute attr_percpu_deactivate_hint = __ATTR(pseries_deactivate_hint, 0444, percpu_deactivate_hint_show, NULL); diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 9a3e27b86..904a67720 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -43,6 +43,7 @@ static int ras_check_exception_token; /* EPOW events counter variable */ static int num_epow_events; +static 
irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id); static irqreturn_t ras_epow_interrupt(int irq, void *dev_id); static irqreturn_t ras_error_interrupt(int irq, void *dev_id); @@ -65,6 +66,14 @@ static int __init init_ras_IRQ(void) of_node_put(np); } + /* Hotplug Events */ + np = of_find_node_by_path("/event-sources/hot-plug-events"); + if (np != NULL) { + request_event_sources_irqs(np, ras_hotplug_interrupt, + "RAS_HOTPLUG"); + of_node_put(np); + } + /* EPOW Events */ np = of_find_node_by_path("/event-sources/epow-events"); if (np != NULL) { @@ -190,6 +199,36 @@ static void rtas_parse_epow_errlog(struct rtas_error_log *log) num_epow_events++; } +static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id) +{ + struct pseries_errorlog *pseries_log; + struct pseries_hp_errorlog *hp_elog; + + spin_lock(&ras_log_buf_lock); + + rtas_call(ras_check_exception_token, 6, 1, NULL, + RTAS_VECTOR_EXTERNAL_INTERRUPT, virq_to_hw(irq), + RTAS_HOTPLUG_EVENTS, 0, __pa(&ras_log_buf), + rtas_get_error_log_max()); + + pseries_log = get_pseries_errorlog((struct rtas_error_log *)ras_log_buf, + PSERIES_ELOG_SECT_ID_HOTPLUG); + hp_elog = (struct pseries_hp_errorlog *)pseries_log->data; + + /* + * Since PCI hotplug is not currently supported on pseries, put PCI + * hotplug events on the ras_log_buf to be handled by rtas_errd. + */ + if (hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_MEM || + hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_CPU) + queue_hotplug_event(hp_elog, NULL, NULL); + else + log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0); + + spin_unlock(&ras_log_buf_lock); + return IRQ_HANDLED; +} + /* Handle environmental and power warning (EPOW) interrupts. */ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) { diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 9883bc7ea..a39d20e86 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -41,7 +41,6 @@ #include #include #include -#include #include #include @@ -57,7 +56,6 @@ #include #include #include -#include #include #include #include @@ -67,6 +65,7 @@ #include #include #include +#include #include "pseries.h" @@ -77,8 +76,6 @@ EXPORT_SYMBOL(CMO_PageSize); int fwnmi_active; /* TRUE if an FWNMI handler is present */ -static struct device_node *pSeries_mpic_node; - static void pSeries_show_cpuinfo(struct seq_file *m) { struct device_node *root; @@ -172,48 +169,7 @@ static void __init pseries_setup_i8259_cascade(void) irq_set_chained_handler(cascade, pseries_8259_cascade); } -static void __init pseries_mpic_init_IRQ(void) -{ - struct device_node *np; - const unsigned int *opprop; - unsigned long openpic_addr = 0; - int naddr, n, i, opplen; - struct mpic *mpic; - - np = of_find_node_by_path("/"); - naddr = of_n_addr_cells(np); - opprop = of_get_property(np, "platform-open-pic", &opplen); - if (opprop != NULL) { - openpic_addr = of_read_number(opprop, naddr); - printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); - } - of_node_put(np); - - BUG_ON(openpic_addr == 0); - - /* Setup the openpic driver */ - mpic = mpic_alloc(pSeries_mpic_node, openpic_addr, - MPIC_NO_RESET, 16, 0, " MPIC "); - BUG_ON(mpic == NULL); - - /* Add ISUs */ - opplen /= sizeof(u32); - for (n = 0, i = naddr; i < opplen; i += naddr, n++) { - unsigned long isuaddr = of_read_number(opprop + i, naddr); - mpic_assign_isu(mpic, n, isuaddr); - } - - /* Setup top-level get_irq */ - ppc_md.get_irq = mpic_get_irq; - - /* All ISUs are setup, complete initialization */ - mpic_init(mpic); - - /* 
Look for cascade */ - pseries_setup_i8259_cascade(); -} - -static void __init pseries_xics_init_IRQ(void) +static void __init pseries_init_irq(void) { xics_init(); pseries_setup_i8259_cascade(); @@ -228,32 +184,6 @@ static void pseries_lpar_enable_pmcs(void) plpar_hcall_norets(H_PERFMON, set, reset); } -static void __init pseries_discover_pic(void) -{ - struct device_node *np; - const char *typep; - - for_each_node_by_name(np, "interrupt-controller") { - typep = of_get_property(np, "compatible", NULL); - if (!typep) - continue; - if (strstr(typep, "open-pic")) { - pSeries_mpic_node = of_node_get(np); - ppc_md.init_IRQ = pseries_mpic_init_IRQ; - setup_kexec_cpu_down_mpic(); - smp_init_pseries_mpic(); - return; - } else if (strstr(typep, "ppc-xicp")) { - ppc_md.init_IRQ = pseries_xics_init_IRQ; - setup_kexec_cpu_down_xics(); - smp_init_pseries_xics(); - return; - } - } - printk(KERN_ERR "pSeries_discover_pic: failed to recognize" - " interrupt-controller\n"); -} - static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct of_reconfig_data *rd = data; @@ -265,11 +195,8 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act case OF_RECONFIG_ATTACH_NODE: parent = of_get_parent(np); pdn = parent ? PCI_DN(parent) : NULL; - if (pdn) { - /* Create pdn and EEH device */ + if (pdn) pci_add_device_node_info(pdn->phb, np); - eeh_dev_init(PCI_DN(np), pdn->phb); - } of_node_put(parent); break; @@ -367,7 +294,7 @@ static void pseries_lpar_idle(void) { /* * Default handler to go into low thread priority and possibly - * low power mode by cedeing processor to hypervisor + * low power mode by ceding processor to hypervisor */ /* Indicate to hypervisor that we are idle. */ @@ -392,15 +319,23 @@ static void pseries_lpar_idle(void) * to ever be a problem in practice we can move this into a kernel thread to * finish off the process later in boot. 
*/ -long pSeries_enable_reloc_on_exc(void) +void pseries_enable_reloc_on_exc(void) { long rc; unsigned int delay, total_delay = 0; while (1) { rc = enable_reloc_on_exceptions(); - if (!H_IS_LONG_BUSY(rc)) - return rc; + if (!H_IS_LONG_BUSY(rc)) { + if (rc == H_P2) { + pr_info("Relocation on exceptions not" + " supported\n"); + } else if (rc != H_SUCCESS) { + pr_warn("Unable to enable relocation" + " on exceptions: %ld\n", rc); + } + break; + } delay = get_longbusy_msecs(rc); total_delay += delay; @@ -408,66 +343,81 @@ long pSeries_enable_reloc_on_exc(void) pr_warn("Warning: Giving up waiting to enable " "relocation on exceptions (%u msec)!\n", total_delay); - return rc; + return; } mdelay(delay); } } -EXPORT_SYMBOL(pSeries_enable_reloc_on_exc); +EXPORT_SYMBOL(pseries_enable_reloc_on_exc); -long pSeries_disable_reloc_on_exc(void) +void pseries_disable_reloc_on_exc(void) { long rc; while (1) { rc = disable_reloc_on_exceptions(); if (!H_IS_LONG_BUSY(rc)) - return rc; + break; mdelay(get_longbusy_msecs(rc)); } + if (rc != H_SUCCESS) + pr_warning("Warning: Failed to disable relocation on " + "exceptions: %ld\n", rc); } -EXPORT_SYMBOL(pSeries_disable_reloc_on_exc); +EXPORT_SYMBOL(pseries_disable_reloc_on_exc); #ifdef CONFIG_KEXEC static void pSeries_machine_kexec(struct kimage *image) { - long rc; - - if (firmware_has_feature(FW_FEATURE_SET_MODE)) { - rc = pSeries_disable_reloc_on_exc(); - if (rc != H_SUCCESS) - pr_warning("Warning: Failed to disable relocation on " - "exceptions: %ld\n", rc); - } + if (firmware_has_feature(FW_FEATURE_SET_MODE)) + pseries_disable_reloc_on_exc(); default_machine_kexec(image); } #endif #ifdef __LITTLE_ENDIAN__ -long pseries_big_endian_exceptions(void) +void pseries_big_endian_exceptions(void) { long rc; while (1) { rc = enable_big_endian_exceptions(); if (!H_IS_LONG_BUSY(rc)) - return rc; + break; mdelay(get_longbusy_msecs(rc)); } + + /* + * At this point it is unlikely panic() will get anything + * out to the user, since this is called very late in kexec + * but at least this will stop us from continuing on further + * and creating an even more difficult to debug situation. + * + * There is a known problem when kdump'ing, if cpus are offline + * the above call will fail. Rather than panicking again, keep + * going and hope the kdump kernel is also little endian, which + * it usually is. + */ + if (rc && !kdump_in_progress()) + panic("Could not enable big endian exceptions"); } -static long pseries_little_endian_exceptions(void) +void pseries_little_endian_exceptions(void) { long rc; while (1) { rc = enable_little_endian_exceptions(); if (!H_IS_LONG_BUSY(rc)) - return rc; + break; mdelay(get_longbusy_msecs(rc)); } + if (rc) { + ppc_md.progress("H_SET_MODE LE exception fail", 0); + panic("Could not enable little endian exceptions"); + } } #endif @@ -492,7 +442,6 @@ static void __init find_and_init_phbs(void) } of_node_put(root); - pci_devs_phb_init(); /* * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties @@ -506,7 +455,8 @@ static void __init pSeries_setup_arch(void) set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); /* Discover PIC type and setup ppc_md accordingly */ - pseries_discover_pic(); + smp_init_pseries(); + /* openpic global configuration register (64-bit format). */ /* openpic Interrupt Source Unit pointer (64-bit format). 
*/ @@ -537,18 +487,6 @@ static void __init pSeries_setup_arch(void) } ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare; - - if (firmware_has_feature(FW_FEATURE_SET_MODE)) { - long rc; - - rc = pSeries_enable_reloc_on_exc(); - if (rc == H_P2) { - pr_info("Relocation on exceptions not supported\n"); - } else if (rc != H_SUCCESS) { - pr_warn("Unable to enable relocation on exceptions: " - "%ld\n", rc); - } - } } static int __init pSeries_init_panel(void) @@ -682,9 +620,9 @@ static void pSeries_cmo_feature_init(void) /* * Early initialization. Relocation is on but do not reference unbolted pages */ -static void __init pSeries_init_early(void) +static void __init pseries_init(void) { - pr_debug(" -> pSeries_init_early()\n"); + pr_debug(" -> pseries_init()\n"); #ifdef CONFIG_HVC_CONSOLE if (firmware_has_feature(FW_FEATURE_LPAR)) @@ -701,7 +639,7 @@ static void __init pSeries_init_early(void) pSeries_cmo_feature_init(); iommu_init_early_pSeries(); - pr_debug(" <- pSeries_init_early()\n"); + pr_debug(" <- pseries_init()\n"); } /** @@ -732,49 +670,9 @@ static void pseries_power_off(void) for (;;); } -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ - -static int __init pseries_probe_fw_features(unsigned long node, - const char *uname, int depth, - void *data) -{ - const char *prop; - int len; - static int hypertas_found; - static int vec5_found; - - if (depth != 1) - return 0; - - if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0")) { - prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions", - &len); - if (prop) { - powerpc_firmware_features |= FW_FEATURE_LPAR; - fw_hypertas_feature_init(prop, len); - } - - hypertas_found = 1; - } - - if (!strcmp(uname, "chosen")) { - prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5", - &len); - if (prop) - fw_vec5_feature_init(prop, len); - - vec5_found = 1; - } - - return hypertas_found && vec5_found; -} - static int __init pSeries_probe(void) { - unsigned long root = of_get_flat_dt_root(); - const char *dtype = of_get_flat_dt_prop(root, "device_type", NULL); + const char *dtype = of_get_property(of_root, "device_type", NULL); if (dtype == NULL) return 0; @@ -784,41 +682,17 @@ static int __init pSeries_probe(void) /* Cell blades firmware claims to be chrp while it's not. Until this * is fixed, we need to avoid those here. */ - if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") || - of_flat_dt_is_compatible(root, "IBM,CBEA")) + if (of_machine_is_compatible("IBM,CPBW-1.0") || + of_machine_is_compatible("IBM,CBEA")) return 0; - pr_debug("pSeries detected, looking for LPAR capability...\n"); - - /* Now try to figure out if we are running on LPAR */ - of_scan_flat_dt(pseries_probe_fw_features, NULL); - -#ifdef __LITTLE_ENDIAN__ - if (firmware_has_feature(FW_FEATURE_SET_MODE)) { - long rc; - /* - * Tell the hypervisor that we want our exceptions to - * be taken in little endian mode. If this fails we don't - * want to use BUG() because it will trigger an exception. - */ - rc = pseries_little_endian_exceptions(); - if (rc) { - ppc_md.progress("H_SET_MODE LE exception fail", 0); - panic("Could not enable little endian exceptions"); - } - } -#endif - - if (firmware_has_feature(FW_FEATURE_LPAR)) - hpte_init_lpar(); - else - hpte_init_native(); - pm_power_off = pseries_power_off; pr_debug("Machine is%s LPAR !\n", (powerpc_firmware_features & FW_FEATURE_LPAR) ? 
"" : " not"); + pseries_init(); + return 1; } @@ -837,7 +711,7 @@ define_machine(pseries) { .name = "pSeries", .probe = pSeries_probe, .setup_arch = pSeries_setup_arch, - .init_early = pSeries_init_early, + .init_IRQ = pseries_init_irq, .show_cpuinfo = pSeries_show_cpuinfo, .log_error = pSeries_log_error, .pcibios_fixup = pSeries_final_fixup, @@ -853,6 +727,7 @@ define_machine(pseries) { .machine_check_exception = pSeries_machine_check_exception, #ifdef CONFIG_KEXEC .machine_kexec = pSeries_machine_kexec, + .kexec_cpu_down = pseries_kexec_cpu_down, #endif #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE .memory_block_size = pseries_memory_block_size, diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 6932ea803..f6f83aecc 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include @@ -140,7 +139,7 @@ out: return 1; } -static void smp_xics_setup_cpu(int cpu) +static void smp_setup_cpu(int cpu) { if (cpu != boot_cpuid) xics_setup_cpu(); @@ -207,28 +206,22 @@ static __init void pSeries_smp_probe(void) } } -static struct smp_ops_t pSeries_mpic_smp_ops = { - .message_pass = smp_mpic_message_pass, - .probe = smp_mpic_probe, - .kick_cpu = smp_pSeries_kick_cpu, - .setup_cpu = smp_mpic_setup_cpu, -}; - -static struct smp_ops_t pSeries_xics_smp_ops = { +static struct smp_ops_t pseries_smp_ops = { .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */ .cause_ipi = NULL, /* Filled at runtime by pSeries_smp_probe() */ .probe = pSeries_smp_probe, .kick_cpu = smp_pSeries_kick_cpu, - .setup_cpu = smp_xics_setup_cpu, + .setup_cpu = smp_setup_cpu, .cpu_bootable = smp_generic_cpu_bootable, }; /* This is called very early */ -static void __init smp_init_pseries(void) +void __init smp_init_pseries(void) { int i; pr_debug(" -> smp_init_pSeries()\n"); + smp_ops = &pseries_smp_ops; alloc_bootmem_cpumask_var(&of_spin_mask); @@ -258,17 +251,3 @@ static void __init smp_init_pseries(void) pr_debug(" <- smp_init_pSeries()\n"); } - -void __init smp_init_pseries_mpic(void) -{ - smp_ops = &pSeries_mpic_smp_ops; - - smp_init_pseries(); -} - -void __init smp_init_pseries_xics(void) -{ - smp_ops = &pSeries_xics_smp_ops; - - smp_init_pseries(); -} -- cgit v1.2.3-54-g00ecf