Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                    |  1
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c      |  6
-rw-r--r--  arch/ia64/include/asm/Kbuild         |  1
-rw-r--r--  arch/ia64/include/asm/atomic.h       | 24
-rw-r--r--  arch/ia64/include/asm/barrier.h      |  4
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h  | 50
-rw-r--r--  arch/ia64/include/asm/io.h           |  1
-rw-r--r--  arch/ia64/include/asm/unistd.h       |  2
-rw-r--r--  arch/ia64/include/uapi/asm/unistd.h  |  3
-rw-r--r--  arch/ia64/kernel/cyclone.c           |  2
-rw-r--r--  arch/ia64/kernel/entry.S             |  3
-rw-r--r--  arch/ia64/kernel/iosapic.c           |  8
-rw-r--r--  arch/ia64/kernel/irq.c               |  6
-rw-r--r--  arch/ia64/kernel/msi_ia64.c          |  6
-rw-r--r--  arch/ia64/kernel/uncached.c          |  2
-rw-r--r--  arch/ia64/mm/init.c                  |  4
-rw-r--r--  arch/ia64/mm/tlb.c                   |  4
-rw-r--r--  arch/ia64/sn/kernel/msi_sn.c         |  4
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c           |  2
19 files changed, 52 insertions, 81 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 42a91a7aa..eb0249e37 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -518,6 +518,7 @@ source "drivers/sn/Kconfig"
 config KEXEC
 	bool "kexec system call"
 	depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+	select KEXEC_CORE
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 344387a55..a6d6190c9 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1140,13 +1140,9 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 #ifdef CONFIG_NUMA
 	{
-		int node = ioc->node;
 		struct page *page;

-		if (node == NUMA_NO_NODE)
-			node = numa_node_id();
-
-		page = alloc_pages_exact_node(node, flags, get_order(size));
+		page = alloc_pages_node(ioc->node, flags, get_order(size));
 		if (unlikely(!page))
 			return NULL;
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 9de3ba12f..502a91d8d 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
 generic-y += vtime.h
+generic-y += word-at-a-time.h
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0bf03501f..be4beeb77 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -45,8 +45,6 @@ ia64_atomic_##op (int i, atomic_t *v)				\
 ATOMIC_OP(add, +)
 ATOMIC_OP(sub, -)

-#undef ATOMIC_OP
-
 #define atomic_add_return(i,v)					\
 ({								\
 	int __ia64_aar_i = (i);					\
@@ -71,6 +69,16 @@ ATOMIC_OP(sub, -)
 	: ia64_atomic_sub(__ia64_asr_i, v);			\
 })

+ATOMIC_OP(and, &)
+ATOMIC_OP(or, |)
+ATOMIC_OP(xor, ^)
+
+#define atomic_and(i,v)	(void)ia64_atomic_and(i,v)
+#define atomic_or(i,v)	(void)ia64_atomic_or(i,v)
+#define atomic_xor(i,v)	(void)ia64_atomic_xor(i,v)
+
+#undef ATOMIC_OP
+
 #define ATOMIC64_OP(op, c_op)					\
 static __inline__ long						\
 ia64_atomic64_##op (__s64 i, atomic64_t *v)			\
@@ -89,8 +97,6 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v)			\
 ATOMIC64_OP(add, +)
 ATOMIC64_OP(sub, -)

-#undef ATOMIC64_OP
-
 #define atomic64_add_return(i,v)				\
 ({								\
 	long __ia64_aar_i = (i);				\
@@ -115,6 +121,16 @@ ATOMIC64_OP(sub, -)
 	: ia64_atomic64_sub(__ia64_asr_i, v);			\
 })

+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#define atomic64_and(i,v)	(void)ia64_atomic64_and(i,v)
+#define atomic64_or(i,v)	(void)ia64_atomic64_or(i,v)
+#define atomic64_xor(i,v)	(void)ia64_atomic64_xor(i,v)
+
+#undef ATOMIC64_OP
+
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 843ba435e..df896a1c4 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -66,12 +66,12 @@
 do {								\
 	compiletime_assert_atomic_type(*p);			\
 	barrier();						\
-	ACCESS_ONCE(*p) = (v);					\
+	WRITE_ONCE(*p, v);					\
 } while (0)

 #define smp_load_acquire(p)					\
 ({								\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);			\
+	typeof(*p) ___p1 = READ_ONCE(*p);			\
 	compiletime_assert_atomic_type(*p);			\
 	barrier();						\
 	___p1;							\
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index cf3ab7e78..9beccf801 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -23,60 +23,10 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
 extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
 				enum dma_data_direction);

-#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *daddr, gfp_t gfp,
-				    struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	void *caddr;
-
-	caddr = ops->alloc(dev, size, daddr, gfp, attrs);
-	debug_dma_alloc_coherent(dev, size, *daddr, caddr);
-	return caddr;
-}
-
-#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *caddr, dma_addr_t daddr,
-				  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	debug_dma_free_coherent(dev, size, caddr, daddr);
-	ops->free(dev, size, caddr, daddr, attrs);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
 #define get_dma_ops(dev) platform_dma_get_ops(dev)

 #include <asm-generic/dma-mapping-common.h>

-static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	debug_dma_mapping_error(dev, daddr);
-	return ops->mapping_error(dev, daddr);
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	return ops->dma_supported(dev, mask);
-}
-
-static inline int
-dma_set_mask (struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-	*dev->dma_mask = mask;
-	return 0;
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 80a7e34be..9041bbe2b 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -435,6 +435,7 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
 {
 	return ioremap(phys_addr, size);
 }
+#define ioremap_cache ioremap_cache


 /*
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 95c39b95e..db7339056 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@



-#define NR_syscalls			319 /* length of syscall table */
+#define NR_syscalls			322 /* length of syscall table */

 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 461079560..9038726e7 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -332,5 +332,8 @@
 #define __NR_memfd_create		1340
 #define __NR_bpf			1341
 #define __NR_execveat			1342
+#define __NR_userfaultfd		1343
+#define __NR_membarrier			1344
+#define __NR_kcmp			1345

 #endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 4826ff957..5fa3848ba 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -4,7 +4,7 @@
 #include <linux/errno.h>
 #include <linux/timex.h>
 #include <linux/clocksource.h>
-#include <asm/io.h>
+#include <linux/io.h>

 /* IBM Summit (EXA) Cyclone counter code*/
 #define CYCLONE_CBAR_ADDR	0xFEB00CD0
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ae0de7bf5..dcd97f84d 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1768,5 +1768,8 @@ sys_call_table:
 	data8 sys_memfd_create			// 1340
 	data8 sys_bpf
 	data8 sys_execveat
+	data8 sys_userfaultfd
+	data8 sys_membarrier
+	data8 sys_kcmp				// 1345

 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index bc9501e36..d2fae054d 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -610,9 +610,9 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 			 chip->name, irq_type->name);
 		chip = irq_type;
 	}
-	__irq_set_chip_handler_name_locked(irq, chip, trigger == IOSAPIC_EDGE ?
-					   handle_edge_irq : handle_level_irq,
-					   NULL);
+	irq_set_chip_handler_name_locked(irq_get_irq_data(irq), chip,
+		trigger == IOSAPIC_EDGE ? handle_edge_irq : handle_level_irq,
+		NULL);
 	return 0;
 }

@@ -838,7 +838,7 @@ iosapic_unregister_intr (unsigned int gsi)
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpumask_setall(irq_get_irq_data(irq)->affinity);
+		cpumask_setall(irq_get_affinity_mask(irq));
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 812a1e6b3..de4fc00de 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -67,7 +67,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(irq_get_irq_data(irq)->affinity,
+		cpumask_copy(irq_get_affinity_mask(irq),
 			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
@@ -119,8 +119,8 @@ static void migrate_irqs(void)
 		if (irqd_is_per_cpu(data))
 			continue;

-		if (cpumask_any_and(data->affinity, cpu_online_mask)
-		    >= nr_cpu_ids) {
+		if (cpumask_any_and(irq_data_get_affinity_mask(data),
+				    cpu_online_mask) >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
 			 */
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index d70bf15c6..af4eaec0f 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -23,7 +23,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
 	if (irq_prepare_move(irq, cpu))
 		return -1;

-	__get_cached_msi_msg(idata->msi_desc, &msg);
+	__get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg);

 	addr = msg.address_lo;
 	addr &= MSI_ADDR_DEST_ID_MASK;
@@ -36,7 +36,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
 	msg.data = data;

 	pci_write_msi_msg(irq, &msg);
-	cpumask_copy(idata->affinity, cpumask_of(cpu));
+	cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu));

 	return 0;
 }
@@ -148,7 +148,7 @@ static int dmar_msi_set_affinity(struct irq_data *data,
 	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

 	dmar_msi_write(irq, &msg);
-	cpumask_copy(data->affinity, mask);
+	cpumask_copy(irq_data_get_affinity_mask(data), mask);

 	return 0;
 }
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 20e8a9b21..f3976da36 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -97,7 +97,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)

 	/* attempt to allocate a granule's worth of cached memory pages */

-	page = alloc_pages_exact_node(nid,
+	page = __alloc_pages_node(nid,
 				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
 	if (!page) {
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 97e48b0ee..1841ef691 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -645,7 +645,7 @@ mem_init (void)
 }

 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	pg_data_t *pgdat;
 	struct zone *zone;
@@ -656,7 +656,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	pgdat = NODE_DATA(nid);

 	zone = pgdat->node_zones +
-		zone_for_memory(nid, start, size, ZONE_NORMAL);
+		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
 	ret = __add_pages(nid, zone, start_pfn, nr_pages);

 	if (ret)
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ed6129768..46ecc5d94 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -444,7 +444,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 		if (p->pte & 0x1)
 			if (is_tr_overlap(p, va, log_size)) {
 				printk(KERN_DEBUG "Overlapped Entry"
-					"Inserted for TR Reigster!!\n");
+					"Inserted for TR Register!!\n");
 				goto out;
 			}
 		}
@@ -456,7 +456,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 		if (p->pte & 0x1)
 			if (is_tr_overlap(p, va, log_size)) {
 				printk(KERN_DEBUG "Overlapped Entry"
-					"Inserted for TR Reigster!!\n");
+					"Inserted for TR Register!!\n");
 				goto out;
 			}
 		}
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index a0eb27b66..fb25065b2 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -175,7 +175,7 @@ static int sn_set_msi_irq_affinity(struct irq_data *data,
 	 * Release XIO resources for the old MSI PCI address
 	 */

-	__get_cached_msi_msg(data->msi_desc, &msg);
+	__get_cached_msi_msg(irq_data_get_msi_desc(data), &msg);
 	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
 	pdev = sn_pdev->pdi_linux_pcidev;
 	provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -206,7 +206,7 @@ static int sn_set_msi_irq_affinity(struct irq_data *data,
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

 	pci_write_msi_msg(irq, &msg);
-	cpumask_copy(data->affinity, cpu_mask);
+	cpumask_copy(irq_data_get_affinity_mask(data), cpu_mask);

 	return 0;
 }
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index d0853e8e8..8f5990700 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -92,7 +92,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 	 */
 	node = pcibus_to_node(pdev->bus);
 	if (likely(node >=0)) {
-		struct page *p = alloc_pages_exact_node(node,
+		struct page *p = __alloc_pages_node(node,
 						flags, get_order(size));

 		if (likely(p))
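
Note on the atomic.h hunk above: each new invocation such as ATOMIC_OP(and, &), ATOMIC_OP(or, |) and ATOMIC_OP(xor, ^) stamps out the corresponding ia64_atomic_<op>() helper from the one macro already used for add and sub. As a rough orientation only, the sketch below shows the general shape of such a macro-generated op-and-return helper in portable user-space C, built on a compare-and-swap retry loop via the GCC/Clang __atomic builtins. The demo_atomic_t type and demo_atomic_* names are made up for this illustration; this is not the kernel's ia64 implementation.

/*
 * Sketch of the ATOMIC_OP() idea in user-space C: one macro generates an
 * atomic "apply <op> and return the new value" helper per operator, each
 * implemented as a load + compare-and-swap retry loop.
 */
#include <stdio.h>

typedef struct { int counter; } demo_atomic_t;

#define DEMO_ATOMIC_OP(op, c_op)                                          \
static inline int demo_atomic_##op(int i, demo_atomic_t *v)               \
{                                                                         \
        int old, new;                                                     \
        do {                                                              \
                /* snapshot the current value and compute the result */   \
                old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);     \
                new = old c_op i;                                         \
        } while (!__atomic_compare_exchange_n(&v->counter, &old, new,     \
                                              1 /* weak */,               \
                                              __ATOMIC_SEQ_CST,           \
                                              __ATOMIC_RELAXED));         \
        return new;                                                       \
}

DEMO_ATOMIC_OP(add, +)
DEMO_ATOMIC_OP(and, &)
DEMO_ATOMIC_OP(or, |)
DEMO_ATOMIC_OP(xor, ^)

int main(void)
{
        demo_atomic_t v = { .counter = 0x0f };

        demo_atomic_add(1, &v);         /* counter becomes 0x10 */
        demo_atomic_or(0x03, &v);       /* 0x13 */
        demo_atomic_and(0x11, &v);      /* 0x11 */
        demo_atomic_xor(0x01, &v);      /* 0x10 */
        printf("counter = 0x%x\n", v.counter);
        return 0;
}

Compiled with a recent gcc or clang (e.g. gcc -std=gnu11), this should print counter = 0x10. The kernel's atomic_and/atomic_or/atomic_xor wrappers added in the hunk additionally cast the result to (void), since those interfaces do not return a value.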