Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/Kconfig                   |   2
-rw-r--r-- | arch/ia64/hp/common/sba_iommu.c     |  22
-rw-r--r-- | arch/ia64/include/asm/acpi.h        |   3
-rw-r--r-- | arch/ia64/include/asm/atomic.h      | 130
-rw-r--r-- | arch/ia64/include/asm/machvec.h     |   1
-rw-r--r-- | arch/ia64/include/asm/mutex.h       |   2
-rw-r--r-- | arch/ia64/include/asm/rwsem.h       |  31
-rw-r--r-- | arch/ia64/include/asm/spinlock.h    |   4
-rw-r--r-- | arch/ia64/include/asm/thread_info.h |  28
-rw-r--r-- | arch/ia64/include/asm/tlb.h         |  31
-rw-r--r-- | arch/ia64/include/asm/uaccess.h     |  11
-rw-r--r-- | arch/ia64/kernel/acpi.c             |   2
-rw-r--r-- | arch/ia64/kernel/efi.c              |   4
-rw-r--r-- | arch/ia64/kernel/machine_kexec.c    |   2
-rw-r--r-- | arch/ia64/kernel/mca.c              |   2
-rw-r--r-- | arch/ia64/kernel/pci-swiotlb.c      |   4
-rw-r--r-- | arch/ia64/kernel/salinfo.c          |  38
-rw-r--r-- | arch/ia64/kernel/setup.c            |   1
-rw-r--r-- | arch/ia64/kernel/time.c             |   2
-rw-r--r-- | arch/ia64/mm/fault.c                |   2
-rw-r--r-- | arch/ia64/sn/pci/pci_dma.c          |  22
21 files changed, 209 insertions, 135 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index e109ee95e..18ca6a9ce 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -39,7 +39,6 @@ config IA64
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IRQ_LEGACY
-	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_IOMAP
 	select GENERIC_SMP_IDLE_THREAD
@@ -53,6 +52,7 @@ config IA64
 	select MODULES_USE_ELF_RELA
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HARDENED_USERCOPY
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index a6d6190c9..630ee8073 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -919,7 +919,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 			       unsigned long poff, size_t size,
 			       enum dma_data_direction dir,
-			       struct dma_attrs *attrs)
+			       unsigned long attrs)
 {
 	struct ioc *ioc;
 	void *addr = page_address(page) + poff;
@@ -1005,7 +1005,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
 				       size_t size, enum dma_data_direction dir,
-				       struct dma_attrs *attrs)
+				       unsigned long attrs)
 {
 	return sba_map_page(dev, virt_to_page(addr),
 			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
 }
@@ -1046,7 +1046,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
  * See Documentation/DMA-API-HOWTO.txt
  */
 static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
-			   enum dma_data_direction dir, struct dma_attrs *attrs)
+			   enum dma_data_direction dir, unsigned long attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1115,7 +1115,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
 }
 
 void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-			    enum dma_data_direction dir, struct dma_attrs *attrs)
+			    enum dma_data_direction dir, unsigned long attrs)
 {
 	sba_unmap_page(dev, iova, size, dir, attrs);
 }
@@ -1130,7 +1130,7 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
  */
 static void *
 sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flags, struct dma_attrs *attrs)
+		   gfp_t flags, unsigned long attrs)
 {
 	struct ioc *ioc;
 	void *addr;
@@ -1175,7 +1175,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 * device to map single to get an iova mapping.
 	 */
 	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
-					   size, 0, NULL);
+					   size, 0, 0);
 
 	return addr;
 }
@@ -1191,9 +1191,9 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
  * See Documentation/DMA-API-HOWTO.txt
  */
 static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
-			      dma_addr_t dma_handle, struct dma_attrs *attrs)
+			      dma_addr_t dma_handle, unsigned long attrs)
 {
-	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
+	sba_unmap_single_attrs(dev, dma_handle, size, 0, 0);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -1442,7 +1442,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 
 static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 			       int nents, enum dma_data_direction dir,
-			       struct dma_attrs *attrs);
+			       unsigned long attrs);
 /**
  * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
@@ -1455,7 +1455,7 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
  */
 static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 			    int nents, enum dma_data_direction dir,
-			    struct dma_attrs *attrs)
+			    unsigned long attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1551,7 +1551,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
  */
 static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 			       int nents, enum dma_data_direction dir,
-			       struct dma_attrs *attrs)
+			       unsigned long attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
 	struct ioc *ioc;
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index aa0fdf125..a3d021197 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -140,6 +140,9 @@ static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
 		}
 	}
 }
+
+extern void acpi_numa_fixup(void);
+
 #endif /* CONFIG_ACPI_NUMA */
 
 #endif /*__KERNEL__*/
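The sba_iommu.c hunks are part of the tree-wide switch from struct dma_attrs * to a plain unsigned long bitmask for DMA attributes. A minimal sketch of the new calling convention, assuming a hypothetical caller (the device and buffer names are illustrative, not from this patch):

	#include <linux/dma-mapping.h>

	/* Hypothetical caller: shows only the attrs convention change. */
	static dma_addr_t map_with_barrier(struct device *dev, void *buf, size_t len)
	{
		/*
		 * Before: DEFINE_DMA_ATTRS(attrs); dma_set_attr(DMA_ATTR_WRITE_BARRIER,
		 * &attrs); and passing &attrs.  Now DMA_ATTR_* flags are OR'ed
		 * together and passed directly as the last argument.
		 */
		return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
					    DMA_ATTR_WRITE_BARRIER);
	}

Passing 0 means "no attributes", which is why the NULL arguments above become 0.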
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 8dfb5f6f6..f565ad376 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -42,8 +42,27 @@ ia64_atomic_##op (int i, atomic_t *v)
 	return new;							\
 }
 
-ATOMIC_OP(add, +)
-ATOMIC_OP(sub, -)
+#define ATOMIC_FETCH_OP(op, c_op)					\
+static __inline__ int							\
+ia64_atomic_fetch_##op (int i, atomic_t *v)				\
+{									\
+	__s32 old, new;							\
+	CMPXCHG_BUGCHECK_DECL						\
+									\
+	do {								\
+		CMPXCHG_BUGCHECK(v);					\
+		old = atomic_read(v);					\
+		new = old c_op i;					\
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+	return old;							\
+}
+
+#define ATOMIC_OPS(op, c_op)						\
+	ATOMIC_OP(op, c_op)						\
+	ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(add, +)
+ATOMIC_OPS(sub, -)
 
 #define atomic_add_return(i,v)						\
 ({									\
@@ -69,14 +88,44 @@ ATOMIC_OP(sub, -)
 	: ia64_atomic_sub(__ia64_asr_i, v);				\
 })
 
-ATOMIC_OP(and, &)
-ATOMIC_OP(or, |)
-ATOMIC_OP(xor, ^)
+#define atomic_fetch_add(i,v)						\
+({									\
+	int __ia64_aar_i = (i);						\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
+	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
+	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
+	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
+		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
+		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
+})
+
+#define atomic_fetch_sub(i,v)						\
+({									\
+	int __ia64_asr_i = (i);						\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
+	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
+	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
+	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
+		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
+		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
+})
+
+ATOMIC_FETCH_OP(and, &)
+ATOMIC_FETCH_OP(or, |)
+ATOMIC_FETCH_OP(xor, ^)
+
+#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
+#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
+#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)
 
-#define atomic_and(i,v)	(void)ia64_atomic_and(i,v)
-#define atomic_or(i,v)	(void)ia64_atomic_or(i,v)
-#define atomic_xor(i,v)	(void)ia64_atomic_xor(i,v)
+#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
+#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
+#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)
 
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP
 
 #define ATOMIC64_OP(op, c_op)						\
@@ -94,8 +143,27 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v)
 	return new;							\
 }
 
-ATOMIC64_OP(add, +)
-ATOMIC64_OP(sub, -)
+#define ATOMIC64_FETCH_OP(op, c_op)					\
+static __inline__ long							\
+ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v)			\
+{									\
+	__s64 old, new;							\
+	CMPXCHG_BUGCHECK_DECL						\
+									\
+	do {								\
+		CMPXCHG_BUGCHECK(v);					\
+		old = atomic64_read(v);					\
+		new = old c_op i;					\
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+	return old;							\
+}
+
+#define ATOMIC64_OPS(op, c_op)						\
+	ATOMIC64_OP(op, c_op)						\
+	ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(add, +)
+ATOMIC64_OPS(sub, -)
 
 #define atomic64_add_return(i,v)					\
 ({									\
@@ -121,14 +189,44 @@ ATOMIC64_OP(sub, -)
 	: ia64_atomic64_sub(__ia64_asr_i, v);				\
 })
 
-ATOMIC64_OP(and, &)
-ATOMIC64_OP(or, |)
-ATOMIC64_OP(xor, ^)
+#define atomic64_fetch_add(i,v)						\
+({									\
+	long __ia64_aar_i = (i);					\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
+	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
+	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
+	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
+		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
+		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
+})
+
+#define atomic64_fetch_sub(i,v)						\
+({									\
+	long __ia64_asr_i = (i);					\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
+	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
+	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
+	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
+		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
+		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
+})
+
+ATOMIC64_FETCH_OP(and, &)
+ATOMIC64_FETCH_OP(or, |)
+ATOMIC64_FETCH_OP(xor, ^)
+
+#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
+#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
+#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)
 
-#define atomic64_and(i,v)	(void)ia64_atomic64_and(i,v)
-#define atomic64_or(i,v)	(void)ia64_atomic64_or(i,v)
-#define atomic64_xor(i,v)	(void)ia64_atomic64_xor(i,v)
+#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
+#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
+#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)
 
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
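The new ia64_atomic_fetch_* helpers back the generic atomic_fetch_*() family, which returns the value the counter held before the operation, whereas the existing atomic_*_return() macros return the value after it. A small illustration of the difference (the demo function and variable are hypothetical, not from the patch):

	#include <linux/atomic.h>

	static void fetch_op_demo(void)
	{
		atomic_t v = ATOMIC_INIT(5);
		int after, before;

		after  = atomic_add_return(3, &v);	/* v is now 8,  returns 8 (new value) */
		before = atomic_fetch_add(3, &v);	/* v is now 11, returns 8 (old value) */

		(void)after;
		(void)before;
	}

For small constant operands both paths compile to the ia64 fetchadd instruction, which already returns the old value; only the general case falls back to the cmpxchg loop defined above.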
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 9c39bdfc2..ed7f09089 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -22,7 +22,6 @@ struct pci_bus;
 struct task_struct;
 struct pci_dev;
 struct msi_desc;
-struct dma_attrs;
 
 typedef void ia64_mv_setup_t (char **);
 typedef void ia64_mv_cpu_init_t (void);
diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
index f41e66d65..28cb819e0 100644
--- a/arch/ia64/include/asm/mutex.h
+++ b/arch/ia64/include/asm/mutex.h
@@ -82,7 +82,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	if (cmpxchg_acq(count, 1, 0) == 1)
+	if (atomic_read(count) == 1 && cmpxchg_acq(count, 1, 0) == 1)
 		return 1;
 	return 0;
 }
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index 8b23e070b..8fa98dd30 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -40,7 +40,7 @@
 static inline void
 __down_read (struct rw_semaphore *sem)
 {
-	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
+	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
 
 	if (result < 0)
 		rwsem_down_read_failed(sem);
@@ -55,9 +55,9 @@ ___down_write (struct rw_semaphore *sem)
 	long old, new;
 
 	do {
-		old = sem->count;
+		old = atomic_long_read(&sem->count);
 		new = old + RWSEM_ACTIVE_WRITE_BIAS;
-	} while (cmpxchg_acq(&sem->count, old, new) != old);
+	} while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old);
 
 	return old;
 }
@@ -85,7 +85,7 @@ __down_write_killable (struct rw_semaphore *sem)
 static inline void
 __up_read (struct rw_semaphore *sem)
 {
-	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
+	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1);
 
 	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
 		rwsem_wake(sem);
@@ -100,9 +100,9 @@ __up_write (struct rw_semaphore *sem)
 	long old, new;
 
 	do {
-		old = sem->count;
+		old = atomic_long_read(&sem->count);
 		new = old - RWSEM_ACTIVE_WRITE_BIAS;
-	} while (cmpxchg_rel(&sem->count, old, new) != old);
+	} while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
 
 	if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
 		rwsem_wake(sem);
@@ -115,8 +115,8 @@ static inline int
 __down_read_trylock (struct rw_semaphore *sem)
 {
 	long tmp;
-	while ((tmp = sem->count) >= 0) {
-		if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
+	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
+		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) {
 			return 1;
 		}
 	}
@@ -129,8 +129,8 @@ __down_read_trylock (struct rw_semaphore *sem)
 static inline int
 __down_write_trylock (struct rw_semaphore *sem)
 {
-	long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
-			      RWSEM_ACTIVE_WRITE_BIAS);
+	long tmp = atomic_long_cmpxchg_acquire(&sem->count,
+			RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS);
 	return tmp == RWSEM_UNLOCKED_VALUE;
 }
 
@@ -143,19 +143,12 @@ __downgrade_write (struct rw_semaphore *sem)
 	long old, new;
 
 	do {
-		old = sem->count;
+		old = atomic_long_read(&sem->count);
 		new = old - RWSEM_WAITING_BIAS;
-	} while (cmpxchg_rel(&sem->count, old, new) != old);
+	} while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
 
 	if (old < 0)
 		rwsem_downgrade_wake(sem);
 }
 
-/*
- * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
- * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
- */
-#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
-
 #endif /* _ASM_IA64_RWSEM_H */
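The mutex.h change turns the trylock fastpath into a test-and-test-and-set: a plain read filters out already-locked mutexes, so the cmpxchg, which pulls the cache line into exclusive state, is only attempted when it can plausibly succeed. A generic sketch of the pattern written with GCC atomic builtins, not the kernel code itself ("count" follows the ia64 mutex convention of 1 == unlocked, 0 == locked):

	/* Illustrative user-space analogue of the fastpath trylock. */
	static int fastpath_trylock(int *count)
	{
		/* Cheap shared read first: if taken, fail without dirtying the line. */
		if (__atomic_load_n(count, __ATOMIC_RELAXED) != 1)
			return 0;

		int expected = 1;
		/* Only now attempt the exclusive-ownership cmpxchg (acquire). */
		return __atomic_compare_exchange_n(count, &expected, 0, false,
						   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
	}

The rwsem.h hunks are mechanical fallout of sem->count becoming an atomic_long_t in the generic code: raw loads and cmpxchg_acq/cmpxchg_rel become atomic_long_read() and atomic_long_cmpxchg_acquire/release(), and the now-unused rwsem_atomic_add/update helpers are dropped.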
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 45698cd15..ca9e76149 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -15,6 +15,8 @@
 
 #include <linux/atomic.h>
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 #define arch_spin_lock_init(x)			((x)->lock = 0)
 
@@ -86,6 +88,8 @@ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
 			return;
 		cpu_relax();
 	}
+
+	smp_acquire__after_ctrl_dep();
 }
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index d1212b84f..29bd59790 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -121,32 +121,4 @@ struct thread_info {
 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
 #define TIF_WORK_MASK		(TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
 
-#define TS_RESTORE_SIGMASK	2	/* restore signal mask in do_signal() */
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK	1
-static inline void set_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	ti->status |= TS_RESTORE_SIGMASK;
-	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
-	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
-	return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	if (!(ti->status & TS_RESTORE_SIGMASK))
-		return false;
-	ti->status &= ~TS_RESTORE_SIGMASK;
-	return true;
-}
-#endif	/* !__ASSEMBLY__ */
-
 #endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 39d64e0df..77e541cf0 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -205,17 +205,18 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  * must be delayed until after the TLB has been flushed (see comments at the beginning of
  * this file).
  */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
+	if (tlb->nr == tlb->max)
+		return true;
+
 	tlb->need_flush = 1;
 
 	if (!tlb->nr && tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 
 	tlb->pages[tlb->nr++] = page;
-	VM_BUG_ON(tlb->nr > tlb->max);
-
-	return tlb->max - tlb->nr;
+	return false;
 }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -235,8 +236,28 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (!__tlb_remove_page(tlb, page))
+	if (__tlb_remove_page(tlb, page)) {
 		tlb_flush_mmu(tlb);
+		__tlb_remove_page(tlb, page);
+	}
+}
+
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+					  struct page *page, int page_size)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+					 struct page *page)
+{
+	return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	return tlb_remove_page(tlb, page);
 }
 
 /*
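With this change __tlb_remove_page() no longer reports the remaining batch space; it returns true when the batch is already full and the page was not queued, in which case the caller must flush and retry, exactly as the new tlb_remove_page() does. The caller contract, restated as a sketch (the wrapper name is illustrative; it simply mirrors the hunk above):

	/* Illustrative restatement of the new __tlb_remove_page() contract. */
	static inline void queue_page_for_free(struct mmu_gather *tlb,
					       struct page *page)
	{
		if (__tlb_remove_page(tlb, page)) {	/* true: batch full, page NOT queued */
			tlb_flush_mmu(tlb);		/* drain the full batch... */
			__tlb_remove_page(tlb, page);	/* ...then queueing must succeed */
		}
	}

Checking tlb->nr == tlb->max up front also replaces the old VM_BUG_ON: overflow is now handled instead of asserted.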
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index ebcd8a153..bfe13196f 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -241,12 +241,16 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
+	check_object_size(from, count, true);
+
 	return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
+	check_object_size(to, count, false);
+
 	return __copy_user((__force void __user *) to, from, count);
 }
 
@@ -258,14 +262,17 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 	const void *__cu_from = (from);					\
 	long __cu_len = (n);						\
 									\
-	if (__access_ok(__cu_to, __cu_len, get_fs()))			\
-		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
+	if (__access_ok(__cu_to, __cu_len, get_fs())) {			\
+		check_object_size(__cu_from, __cu_len, true);		\
+		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
+	}								\
 	__cu_len;							\
 })
 
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	check_object_size(to, n, false);
 	if (likely(__access_ok(from, n, get_fs())))
 		n = __copy_user((__force void __user *) to, from, n);
 	else
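The check_object_size() calls hook ia64's copy routines into CONFIG_HARDENED_USERCOPY (made selectable by the Kconfig hunk at the top): when the kernel side of a usercopy lands in a slab object, the copy length is validated against that object's bounds, and the call compiles away when the option is off. A sketch of the kind of bug this catches; the handler and structure are hypothetical:

	#include <linux/uaccess.h>

	/* Hypothetical driver state living in a slab allocation. */
	struct foo_state {
		char name[32];
	};

	static long foo_get_name(struct foo_state *st, char __user *ubuf, size_t len)
	{
		/*
		 * If "len" exceeded the slab object backing "st", the
		 * check_object_size() now done inside copy_to_user() would
		 * refuse the copy instead of leaking adjacent heap memory.
		 */
		if (len > sizeof(st->name))
			return -EINVAL;
		return copy_to_user(ubuf, st->name, len) ? -EFAULT : 0;
	}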
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index b1698bc04..92b7bc956 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -524,7 +524,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 	return 0;
 }
 
-void __init acpi_numa_arch_fixup(void)
+void __init acpi_numa_fixup(void)
 {
 	int i, j, node_from, node_to;
 
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 3b7a60e40..121295637 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -236,7 +236,7 @@ STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
 STUB_RESET_SYSTEM(virt, id)
 
 void
-efi_gettimeofday (struct timespec *ts)
+efi_gettimeofday (struct timespec64 *ts)
 {
 	efi_time_t tm;
 
@@ -245,7 +245,7 @@ efi_gettimeofday (struct timespec *ts)
 		return;
 	}
 
-	ts->tv_sec = mktime(tm.year, tm.month, tm.day,
+	ts->tv_sec = mktime64(tm.year, tm.month, tm.day,
 			    tm.hour, tm.minute, tm.second);
 	ts->tv_nsec = tm.nanosecond;
 }
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index b72cd7a07..599507bce 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -163,7 +163,7 @@ void arch_crash_save_vmcoreinfo(void)
 #endif
 }
 
-unsigned long paddr_vmcoreinfo_note(void)
+phys_addr_t paddr_vmcoreinfo_note(void)
 {
 	return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note);
 }
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 07a4e32ae..eb9220cde 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1831,7 +1831,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 }
 
 /* Caller prevents this from being called after init */
-static void * __init_refok mca_bootmem(void)
+static void * __ref mca_bootmem(void)
 {
 	return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
 			    KERNEL_STACK_SIZE, 0);
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 939260aea..2933208c0 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -16,7 +16,7 @@ EXPORT_SYMBOL(swiotlb);
 
 static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
 					 dma_addr_t *dma_handle, gfp_t gfp,
-					 struct dma_attrs *attrs)
+					 unsigned long attrs)
 {
 	if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
 		gfp |= GFP_DMA;
@@ -25,7 +25,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
 
 static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
 				       void *vaddr, dma_addr_t dma_addr,
-				       struct dma_attrs *attrs)
+				       unsigned long attrs)
 {
 	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 1eeffb7fb..5313007d5 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -141,7 +141,7 @@ enum salinfo_state {
 
 struct salinfo_data {
 	cpumask_t		cpu_event;	/* which cpus have outstanding events */
-	struct semaphore	mutex;
+	wait_queue_head_t	read_wait;
 	u8			*log_buffer;
 	u64			log_size;
 	u8			*oemdata;	/* decoded oem data */
@@ -182,21 +182,6 @@ struct salinfo_platform_oemdata_parms {
 	int ret;
 };
 
-/* Kick the mutex that tells user space that there is work to do.  Instead of
- * trying to track the state of the mutex across multiple cpus, in user
- * context, interrupt context, non-maskable interrupt context and hotplug cpu,
- * it is far easier just to grab the mutex if it is free then release it.
- *
- * This routine must be called with data_saved_lock held, to make the down/up
- * operation atomic.
- */
-static void
-salinfo_work_to_do(struct salinfo_data *data)
-{
-	(void)(down_trylock(&data->mutex) ?: 0);
-	up(&data->mutex);
-}
-
 static void
 salinfo_platform_oemdata_cpu(void *context)
 {
@@ -258,7 +243,7 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
 	}
 	cpumask_set_cpu(smp_processor_id(), &data->cpu_event);
 	if (irqsafe) {
-		salinfo_work_to_do(data);
+		wake_up_interruptible(&data->read_wait);
 		spin_unlock_irqrestore(&data_saved_lock, flags);
 	}
 }
@@ -271,14 +256,10 @@ extern void ia64_mlogbuf_dump(void);
 static void
 salinfo_timeout_check(struct salinfo_data *data)
 {
-	unsigned long flags;
 	if (!data->open)
 		return;
-	if (!cpumask_empty(&data->cpu_event)) {
-		spin_lock_irqsave(&data_saved_lock, flags);
-		salinfo_work_to_do(data);
-		spin_unlock_irqrestore(&data_saved_lock, flags);
-	}
+	if (!cpumask_empty(&data->cpu_event))
+		wake_up_interruptible(&data->read_wait);
 }
 
 static void
@@ -308,10 +289,11 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
 	int i, n, cpu = -1;
 
 retry:
-	if (cpumask_empty(&data->cpu_event) && down_trylock(&data->mutex)) {
+	if (cpumask_empty(&data->cpu_event)) {
 		if (file->f_flags & O_NONBLOCK)
 			return -EAGAIN;
-		if (down_interruptible(&data->mutex))
+		if (wait_event_interruptible(data->read_wait,
+					     !cpumask_empty(&data->cpu_event)))
 			return -EINTR;
 	}
 
@@ -510,7 +492,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
 	if (data->state == STATE_LOG_RECORD) {
 		spin_lock_irqsave(&data_saved_lock, flags);
 		cpumask_set_cpu(cpu, &data->cpu_event);
-		salinfo_work_to_do(data);
+		wake_up_interruptible(&data->read_wait);
 		spin_unlock_irqrestore(&data_saved_lock, flags);
 	}
 	return 0;
@@ -582,7 +564,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 		     i < ARRAY_SIZE(salinfo_data);
 		     ++i, ++data) {
 			cpumask_set_cpu(cpu, &data->cpu_event);
-			salinfo_work_to_do(data);
+			wake_up_interruptible(&data->read_wait);
 		}
 		spin_unlock_irqrestore(&data_saved_lock, flags);
 		break;
@@ -640,7 +622,7 @@ salinfo_init(void)
 	for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
 		data = salinfo_data + i;
 		data->type = i;
-		sema_init(&data->mutex, 1);
+		init_waitqueue_head(&data->read_wait);
 		dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
 		if (!dir)
 			continue;
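The salinfo conversion replaces the old "kick a semaphore" trick with a wait queue: producers record the event and call wake_up_interruptible(), and the reader blocks in wait_event_interruptible() until the wake condition actually holds. The generic shape of that pattern, as a sketch with illustrative names (the real driver keys the condition on data->cpu_event under data_saved_lock):

	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(read_wait);
	static bool event_pending;

	/* Producer: safe to call from IRQ context, unlike down_trylock()/up() games. */
	static void post_event(void)
	{
		event_pending = true;
		wake_up_interruptible(&read_wait);
	}

	/* Consumer: sleeps until the condition holds; interrupted by signals. */
	static int wait_for_event(void)
	{
		if (wait_event_interruptible(read_wait, event_pending))
			return -EINTR;
		event_pending = false;
		return 0;
	}

Because wait_event_interruptible() re-tests the condition after every wakeup, the state-tracking contortions the deleted comment apologized for are no longer needed.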
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 2029a38a7..afddb3e80 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -552,6 +552,7 @@ setup_arch (char **cmdline_p)
 	early_acpi_boot_init();
 #  ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
+	acpi_numa_fixup();
 #  ifdef CONFIG_ACPI_HOTPLUG_CPU
 	prefill_possible_map();
 #  endif
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index c8dbe2acd..6f892b94e 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -355,7 +355,7 @@ static struct irqaction timer_irqaction = {
 	.name =		"timer"
 };
 
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
 {
 	efi_gettimeofday(ts);
 }
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 70b40d120..fa6ad95e9 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -159,7 +159,7 @@ retry:
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
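read_persistent_clock64() and the efi.c change above move ia64 to the timespec64/mktime64 interfaces, whose 64-bit tv_sec does not overflow in 2038 even on 32-bit builds (on ia64 the layout is unchanged; the switch keeps the generic timekeeping API uniform across architectures). A minimal usage sketch with an arbitrary, illustrative date:

	#include <linux/time64.h>

	/* Hypothetical helper; the date is arbitrary. */
	static void example_fill(struct timespec64 *ts)
	{
		ts->tv_sec  = mktime64(2038, 1, 20, 0, 0, 0);	/* still representable */
		ts->tv_nsec = 0;
	}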
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 8f5990700..74c934a99 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -77,7 +77,7 @@ EXPORT_SYMBOL(sn_dma_set_mask);
  */
 static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 				   dma_addr_t * dma_handle, gfp_t flags,
-				   struct dma_attrs *attrs)
+				   unsigned long attrs)
 {
 	void *cpuaddr;
 	unsigned long phys_addr;
@@ -138,7 +138,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
  * any associated IOMMU mappings.
  */
 static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-				 dma_addr_t dma_handle, struct dma_attrs *attrs)
+				 dma_addr_t dma_handle, unsigned long attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -176,21 +176,18 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
 				  unsigned long offset, size_t size,
 				  enum dma_data_direction dir,
-				  struct dma_attrs *attrs)
+				  unsigned long attrs)
 {
 	void *cpu_addr = page_address(page) + offset;
 	dma_addr_t dma_addr;
 	unsigned long phys_addr;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
-	int dmabarr;
-
-	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
 	BUG_ON(!dev_is_pci(dev));
 
 	phys_addr = __pa(cpu_addr);
-	if (dmabarr)
+	if (attrs & DMA_ATTR_WRITE_BARRIER)
 		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
 							size, SN_DMA_ADDR_PHYS);
 	else
@@ -218,7 +215,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
  */
 static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 			      size_t size, enum dma_data_direction dir,
-			      struct dma_attrs *attrs)
+			      unsigned long attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -240,7 +237,7 @@ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
  */
 static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 			    int nhwentries, enum dma_data_direction dir,
-			    struct dma_attrs *attrs)
+			    unsigned long attrs)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -273,16 +270,13 @@ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
  */
 static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
 			 int nhwentries, enum dma_data_direction dir,
-			 struct dma_attrs *attrs)
+			 unsigned long attrs)
 {
 	unsigned long phys_addr;
 	struct scatterlist *saved_sg = sgl, *sg;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 	int i;
-	int dmabarr;
-
-	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
 	BUG_ON(!dev_is_pci(dev));
 
@@ -292,7 +286,7 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
 	for_each_sg(sgl, sg, nhwentries, i) {
 		dma_addr_t dma_addr;
 		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
-		if (dmabarr)
+		if (attrs & DMA_ATTR_WRITE_BARRIER)
 			dma_addr = provider->dma_map_consistent(pdev,
 								phys_addr,
 								sg->length,