From 03dd4cb26d967f9588437b0fc9cc0e8353322bb7 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Fri, 25 Mar 2016 03:53:42 -0300
Subject: Linux-libre 4.5-gnu

---
 arch/metag/Kconfig                   |   3 -
 arch/metag/include/asm/barrier.h     |  55 ++++------
 arch/metag/include/asm/dma-mapping.h | 179 +----------------------------------
 arch/metag/kernel/dma.c              | 146 +++++++++++++++++++++-------
 arch/metag/kernel/ftrace.c           |  11 +--
 arch/metag/kernel/module.c           |   4 +-
 6 files changed, 138 insertions(+), 260 deletions(-)

(limited to 'arch/metag')

diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index 0b389a81c..a0fa88da3 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -36,9 +36,6 @@ config STACKTRACE_SUPPORT
 config LOCKDEP_SUPPORT
 	def_bool y
 
-config HAVE_LATENCYTOP_SUPPORT
-	def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 172b7e5ef..5418517aa 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -44,16 +44,6 @@ static inline void wr_fence(void)
 #define rmb()		barrier()
 #define wmb()		mb()
 
-#define dma_rmb()	rmb()
-#define dma_wmb()	wmb()
-
-#ifndef CONFIG_SMP
-#define fence()		do { } while (0)
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#else
-
 #ifdef CONFIG_METAG_SMP_WRITE_REORDERING
 /*
  * Write to the atomic memory unlock system event register (command 0). This is
@@ -63,45 +53,32 @@ static inline void wr_fence(void)
  * incoherence). It is therefore ineffective if used after and on the same
  * thread as a write.
  */
-static inline void fence(void)
+static inline void metag_fence(void)
 {
 	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
 	barrier();
 	*flushptr = 0;
 	barrier();
 }
-#define smp_mb()	fence()
-#define smp_rmb()	fence()
-#define smp_wmb()	barrier()
+#define __smp_mb()	metag_fence()
+#define __smp_rmb()	metag_fence()
+#define __smp_wmb()	barrier()
 #else
-#define fence()		do { } while (0)
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#endif
+#define metag_fence()	do { } while (0)
+#define __smp_mb()	barrier()
+#define __smp_rmb()	barrier()
+#define __smp_wmb()	barrier()
 #endif
 
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
-#define smp_store_release(p, v)						\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	WRITE_ONCE(*p, v);						\
-} while (0)
+#ifdef CONFIG_SMP
+#define fence()		metag_fence()
+#else
+#define fence()		do { } while (0)
+#endif
 
-#define smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = READ_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	___p1;								\
-})
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_METAG_BARRIER_H */
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h
index eb5cdec94..27af5d479 100644
--- a/arch/metag/include/asm/dma-mapping.h
+++ b/arch/metag/include/asm/dma-mapping.h
@@ -1,177 +1,11 @@
 #ifndef _ASM_METAG_DMA_MAPPING_H
 #define _ASM_METAG_DMA_MAPPING_H
 
-#include <linux/mm.h>
+extern struct dma_map_ops metag_dma_ops;
 
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <linux/scatterlist.h>
-#include <asm/bug.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle);
-
-void dma_sync_for_device(void *vaddr, size_t size, int dma_direction);
-void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction);
-
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-		      void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-			  void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	WARN_ON(size == 0);
-	dma_sync_for_device(ptr, size, direction);
-	return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-	   enum dma_data_direction direction)
-{
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(!valid_dma_direction(direction));
-	WARN_ON(nents == 0 || sglist[0].length == 0);
-
-	for_each_sg(sglist, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-
-		sg->dma_address = sg_phys(sg);
-		dma_sync_for_device(sg_virt(sg), sg->length, direction);
-	}
-
-	return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-	     size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
-			    direction);
-	return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(!valid_dma_direction(direction));
-	WARN_ON(nhwentries == 0 || sglist[0].length == 0);
-
-	for_each_sg(sglist, sg, nhwentries, i) {
-		BUG_ON(!sg_page(sg));
-
-		sg->dma_address = sg_phys(sg);
-		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
-	}
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-	dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-			   size_t size, enum dma_data_direction direction)
-{
-	dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-	dma_sync_for_cpu(phys_to_virt(dma_handle)+offset, size,
-			 direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-	dma_sync_for_device(phys_to_virt(dma_handle)+offset, size,
-			    direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
-		    enum dma_data_direction direction)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i)
-		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
-		       int nelems, enum dma_data_direction direction)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i)
-		dma_sync_for_device(sg_virt(sg), sg->length, direction);
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	return 0;
-}
-
-#define dma_supported(dev, mask)	(1)
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-
-	return 0;
+	return &metag_dma_ops;
 }
 
 /*
@@ -184,11 +18,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 }
 
-/* drivers/base/dma-mapping.c */
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-				  void *cpu_addr, dma_addr_t dma_addr,
-				  size_t size);
-
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
 #endif
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
index c700d6250..e12368d02 100644
--- a/arch/metag/kernel/dma.c
+++ b/arch/metag/kernel/dma.c
@@ -171,8 +171,8 @@ out:
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *handle, gfp_t gfp)
+static void *metag_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 	struct page *page;
 	struct metag_vm_region *c;
@@ -263,13 +263,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 no_page:
 	return NULL;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 /*
  * free a page as defined by the above mapping.
  */
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle)
+static void metag_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	struct metag_vm_region *c;
 	unsigned long flags, addr;
@@ -329,16 +328,19 @@ no_area:
 		__func__, vaddr);
 	dump_stack();
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
+static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		struct dma_attrs *attrs)
 {
-	int ret = -ENXIO;
-
 	unsigned long flags, user_size, kern_size;
 	struct metag_vm_region *c;
+	int ret = -ENXIO;
+
+	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
 	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
@@ -364,25 +366,6 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
-
-
-
 /*
  * Initialise the consistent memory allocation.
  */
@@ -423,7 +406,7 @@ early_initcall(dma_alloc_init);
 /*
  * make an area consistent to devices.
  */
-void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
+static void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
 {
 	/*
 	 * Ensure any writes get through the write combiner. This is necessary
@@ -465,12 +448,11 @@ void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
 
 	wmb();
 }
-EXPORT_SYMBOL(dma_sync_for_device);
 
 /*
  * make an area consistent to the core.
  */
-void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
+static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
 {
 	/*
 	 * Hardware L2 cache prefetch doesn't occur across 4K physical
@@ -497,4 +479,100 @@ void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
 
 	rmb();
 }
-EXPORT_SYMBOL(dma_sync_for_cpu);
+
+static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size,
+		enum dma_data_direction direction, struct dma_attrs *attrs)
+{
+	dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
+			    direction);
+	return page_to_phys(page) + offset;
+}
+
+static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+		size_t size, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
+{
+	dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+}
+
+static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+		int nents, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sglist, sg, nents, i) {
+		BUG_ON(!sg_page(sg));
+
+		sg->dma_address = sg_phys(sg);
+		dma_sync_for_device(sg_virt(sg), sg->length, direction);
+	}
+
+	return nents;
+}
+
+
+static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+		int nhwentries, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sglist, sg, nhwentries, i) {
+		BUG_ON(!sg_page(sg));
+
+		sg->dma_address = sg_phys(sg);
+		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+	}
+}
+
+static void metag_dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
+{
+	dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
+}
+
+static void metag_dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
+{
+	dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
+}
+
+static void metag_dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sglist, int nelems,
+		enum dma_data_direction direction)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nelems, i)
+		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+}
+
+static void metag_dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sglist, int nelems,
+		enum dma_data_direction direction)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nelems, i)
+		dma_sync_for_device(sg_virt(sg), sg->length, direction);
+}
+
+struct dma_map_ops metag_dma_ops = {
+	.alloc		= metag_dma_alloc,
+	.free		= metag_dma_free,
+	.map_page	= metag_dma_map_page,
+	.map_sg		= metag_dma_map_sg,
+	.sync_single_for_device = metag_dma_sync_single_for_device,
+	.sync_single_for_cpu = metag_dma_sync_single_for_cpu,
+	.sync_sg_for_cpu = metag_dma_sync_sg_for_cpu,
+	.mmap		= metag_dma_mmap,
+};
+EXPORT_SYMBOL(metag_dma_ops);
diff --git a/arch/metag/kernel/ftrace.c b/arch/metag/kernel/ftrace.c
index ed1d68515..ac8c039b0 100644
--- a/arch/metag/kernel/ftrace.c
+++ b/arch/metag/kernel/ftrace.c
@@ -54,12 +54,11 @@ static int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
 	/*
-	 * Note: Due to modules and __init, code can
-	 * disappear and change, we need to protect against faulting
-	 * as well as code changing.
-	 *
-	 * No real locking needed, this code is run through
-	 * kstop_machine.
+	 * Note:
+	 * We are paranoid about modifying text, as if a bug was to happen, it
+	 * could cause us to read or write to someplace that could cause harm.
+	 * Carefully read and modify the code with probe_kernel_*(), and make
+	 * sure what we read is what we expected it to be before modifying it.
 	 */
 
 	/* read the text we want to modify */
diff --git a/arch/metag/kernel/module.c b/arch/metag/kernel/module.c
index 986331cd0..bb8dfba9a 100644
--- a/arch/metag/kernel/module.c
+++ b/arch/metag/kernel/module.c
@@ -176,8 +176,8 @@ static uint32_t do_plt_call(void *location, Elf32_Addr val,
 	tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3);
 
 	/* Init, or core PLT? */
-	if (location >= mod->module_core
-	    && location < mod->module_core + mod->core_size)
+	if (location >= mod->core_layout.base
+	    && location < mod->core_layout.base + mod->core_layout.size)
 		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
 	else
 		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
-- 
cgit v1.2.3-54-g00ecf
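Note on the dma-mapping change above: after this patch the arch no longer open-codes dma_map_single() and friends; the generic DMA layer dispatches through the struct dma_map_ops returned by get_dma_ops(), which on metag always returns &metag_dma_ops. Below is a minimal C sketch of that dispatch path, modelled on the v4.5-era include/asm-generic/dma-mapping-common.h; the function name sketch_dma_map_single is illustrative only and is not part of the patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Roughly what dma_map_single() boiled down to in v4.5 once an arch
 * supplied a struct dma_map_ops.  On metag the ->map_page hook is
 * metag_dma_map_page(), which makes the buffer visible to the device
 * via dma_sync_for_device() and returns its physical address.
 */
static dma_addr_t sketch_dma_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* v4.5 still passed a struct dma_attrs *; NULL means no attributes. */
	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, NULL);
}

The same indirection is what lets metag_dma_mmap() fold the old dma_mmap_coherent()/dma_mmap_writecombine() pair into a single callback: the write-combine choice now arrives as DMA_ATTR_WRITE_COMBINE in the attrs argument (tested with dma_get_attr()) rather than as a separate exported entry point.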