Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--   arch/tile/kernel/compat.c      | 35
-rw-r--r--   arch/tile/kernel/pci-dma.c     | 28
-rw-r--r--   arch/tile/kernel/ptrace.c      | 11
-rw-r--r--   arch/tile/kernel/sys.c         | 13
-rw-r--r--   arch/tile/kernel/vmlinux.lds.S | 12
5 files changed, 64 insertions, 35 deletions
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 49120843f..bdaf71d31 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -23,42 +23,50 @@
 #include <linux/uaccess.h>
 #include <linux/signal.h>
 #include <asm/syscalls.h>
+#include <asm/byteorder.h>
 
 /*
  * Syscalls that take 64-bit numbers traditionally take them in 32-bit
  * "high" and "low" value parts on 32-bit architectures.
  * In principle, one could imagine passing some register arguments as
  * fully 64-bit on TILE-Gx in 32-bit mode, but it seems easier to
- * adapt the usual convention.
+ * adopt the usual convention.
  */
 
+#ifdef __BIG_ENDIAN
+#define SYSCALL_PAIR(name) u32, name ## _hi, u32, name ## _lo
+#else
+#define SYSCALL_PAIR(name) u32, name ## _lo, u32, name ## _hi
+#endif
+
 COMPAT_SYSCALL_DEFINE4(truncate64, char __user *, filename, u32, dummy,
-                       u32, low, u32, high)
+                       SYSCALL_PAIR(length))
 {
-        return sys_truncate(filename, ((loff_t)high << 32) | low);
+        return sys_truncate(filename, ((loff_t)length_hi << 32) | length_lo);
 }
 
 COMPAT_SYSCALL_DEFINE4(ftruncate64, unsigned int, fd, u32, dummy,
-                       u32, low, u32, high)
+                       SYSCALL_PAIR(length))
 {
-        return sys_ftruncate(fd, ((loff_t)high << 32) | low);
+        return sys_ftruncate(fd, ((loff_t)length_hi << 32) | length_lo);
 }
 
 COMPAT_SYSCALL_DEFINE6(pread64, unsigned int, fd, char __user *, ubuf,
-                       size_t, count, u32, dummy, u32, low, u32, high)
+                       size_t, count, u32, dummy, SYSCALL_PAIR(offset))
 {
-        return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low);
+        return sys_pread64(fd, ubuf, count,
+                           ((loff_t)offset_hi << 32) | offset_lo);
 }
 
 COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf,
-                       size_t, count, u32, dummy, u32, low, u32, high)
+                       size_t, count, u32, dummy, SYSCALL_PAIR(offset))
 {
-        return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
+        return sys_pwrite64(fd, ubuf, count,
+                            ((loff_t)offset_hi << 32) | offset_lo);
 }
 
 COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
-                       u32, offset_lo, u32, offset_hi,
-                       u32, nbytes_lo, u32, nbytes_hi)
+                       SYSCALL_PAIR(offset), SYSCALL_PAIR(nbytes))
 {
         return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
                                    ((loff_t)nbytes_hi << 32) | nbytes_lo,
@@ -66,8 +74,7 @@ COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
 }
 
 COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode,
-                       u32, offset_lo, u32, offset_hi,
-                       u32, len_lo, u32, len_hi)
+                       SYSCALL_PAIR(offset), SYSCALL_PAIR(len))
 {
         return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
                              ((loff_t)len_hi << 32) | len_lo);
@@ -77,6 +84,8 @@ COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode,
  * Avoid bug in generic sys_llseek() that specifies offset_high and
  * offset_low as "unsigned long", thus making it possible to pass
  * a sign-extended high 32 bits in offset_low.
+ * Note that we do not use SYSCALL_PAIR here since glibc passes the
+ * high and low parts explicitly in that order.
  */
 COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,
                        unsigned int, offset_low, loff_t __user *, result,
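The compat.c hunk above introduces SYSCALL_PAIR() so that the two 32-bit halves of a 64-bit syscall argument are declared in the order the ABI actually passes them (hi/lo on big-endian, lo/hi on little-endian); each handler then rebuilds the value as ((loff_t)hi << 32) | lo. A minimal userspace sketch of that reassembly, with hypothetical values (illustration only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical halves of a 64-bit length, as a 32-bit task
             * would pass them in two registers */
            uint32_t length_hi = 0x01234567u;
            uint32_t length_lo = 0x89abcdefu;

            /* same reassembly the compat handlers perform with loff_t */
            uint64_t length = ((uint64_t)length_hi << 32) | length_lo;

            printf("length = 0x%016llx\n", (unsigned long long)length);
            /* prints: length = 0x0123456789abcdef */
            return 0;
    }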
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index b6bc0547a..09bb774b3 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -34,7 +34,7 @@
 
 static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t gfp,
-                                     struct dma_attrs *attrs)
+                                     unsigned long attrs)
 {
         u64 dma_mask = (dev && dev->coherent_dma_mask) ?
                 dev->coherent_dma_mask : DMA_BIT_MASK(32);
@@ -78,7 +78,7 @@ static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
  */
 static void tile_dma_free_coherent(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t dma_handle,
-                                   struct dma_attrs *attrs)
+                                   unsigned long attrs)
 {
         homecache_free_pages((unsigned long)vaddr, get_order(size));
 }
@@ -202,7 +202,7 @@ static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size,
 
 static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                            int nents, enum dma_data_direction direction,
-                           struct dma_attrs *attrs)
+                           unsigned long attrs)
 {
         struct scatterlist *sg;
         int i;
@@ -224,7 +224,7 @@ static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                               int nents, enum dma_data_direction direction,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
         struct scatterlist *sg;
         int i;
@@ -240,7 +240,7 @@ static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
         BUG_ON(!valid_dma_direction(direction));
 
@@ -252,7 +252,7 @@ static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
 
 static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                 size_t size, enum dma_data_direction direction,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
         BUG_ON(!valid_dma_direction(direction));
 
@@ -343,7 +343,7 @@ EXPORT_SYMBOL(tile_dma_map_ops);
 
 static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t gfp,
-                                         struct dma_attrs *attrs)
+                                         unsigned long attrs)
 {
         int node = dev_to_node(dev);
         int order = get_order(size);
@@ -368,14 +368,14 @@ static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
  */
 static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t dma_handle,
-                                       struct dma_attrs *attrs)
+                                       unsigned long attrs)
 {
         homecache_free_pages((unsigned long)vaddr, get_order(size));
 }
 
 static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                                int nents, enum dma_data_direction direction,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
         struct scatterlist *sg;
         int i;
@@ -400,7 +400,7 @@ static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 static void tile_pci_dma_unmap_sg(struct device *dev,
                                   struct scatterlist *sglist, int nents,
                                   enum dma_data_direction direction,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
         struct scatterlist *sg;
         int i;
@@ -416,7 +416,7 @@ static void tile_pci_dma_unmap_sg(struct device *dev,
 static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction direction,
-                                        struct dma_attrs *attrs)
+                                        unsigned long attrs)
 {
         BUG_ON(!valid_dma_direction(direction));
 
@@ -429,7 +429,7 @@ static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
 static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                     size_t size,
                                     enum dma_data_direction direction,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
         BUG_ON(!valid_dma_direction(direction));
 
@@ -531,7 +531,7 @@ EXPORT_SYMBOL(gx_pci_dma_map_ops);
 #ifdef CONFIG_SWIOTLB
 static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t gfp,
-                                         struct dma_attrs *attrs)
+                                         unsigned long attrs)
 {
         gfp |= GFP_DMA;
         return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
@@ -539,7 +539,7 @@ static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
 
 static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t dma_addr,
-                                       struct dma_attrs *attrs)
+                                       unsigned long attrs)
 {
         swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
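In pci-dma.c the only change is the dma_map_ops callback signature: the struct dma_attrs pointer becomes a plain unsigned long bitmask of DMA_ATTR_* flags, and the tile implementations continue to ignore it. A sketch of how a callback that does honor an attribute would test it under the new signature (hypothetical function, not from this patch):

    #include <linux/dma-mapping.h>

    static void example_unmap_page(struct device *dev, dma_addr_t dma_addr,
                                   size_t size, enum dma_data_direction dir,
                                   unsigned long attrs)
    {
            /* attributes are now plain bits rather than a struct dma_attrs */
            if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                    return;  /* caller handles CPU cache maintenance itself */

            /* a real implementation would sync CPU caches for the buffer here */
    }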
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 54e7b723d..d89b70116 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -255,14 +255,15 @@ int do_syscall_trace_enter(struct pt_regs *regs)
 {
         u32 work = ACCESS_ONCE(current_thread_info()->flags);
 
-        if (secure_computing() == -1)
+        if ((work & _TIF_SYSCALL_TRACE) &&
+            tracehook_report_syscall_entry(regs)) {
+                regs->regs[TREG_SYSCALL_NR] = -1;
                 return -1;
-
-        if (work & _TIF_SYSCALL_TRACE) {
-                if (tracehook_report_syscall_entry(regs))
-                        regs->regs[TREG_SYSCALL_NR] = -1;
         }
 
+        if (secure_computing(NULL) == -1)
+                return -1;
+
         if (work & _TIF_SYSCALL_TRACEPOINT)
                 trace_sys_enter(regs, regs->regs[TREG_SYSCALL_NR]);
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index 38debe706..c7418dcbb 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -33,6 +33,7 @@
 #include <asm/pgtable.h>
 #include <asm/homecache.h>
 #include <asm/cachectl.h>
+#include <asm/byteorder.h>
 #include <arch/chip.h>
 
 SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
@@ -59,13 +60,19 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
 
 #if !defined(__tilegx__) || defined(CONFIG_COMPAT)
 
-ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count)
+#ifdef __BIG_ENDIAN
+#define SYSCALL_PAIR(name) u32 name ## _hi, u32 name ## _lo
+#else
+#define SYSCALL_PAIR(name) u32 name ## _lo, u32 name ## _hi
+#endif
+
+ssize_t sys32_readahead(int fd, SYSCALL_PAIR(offset), u32 count)
 {
         return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count);
 }
 
-int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
-                       u32 len_lo, u32 len_hi, int advice)
+int sys32_fadvise64_64(int fd, SYSCALL_PAIR(offset),
+                       SYSCALL_PAIR(len), int advice)
 {
         return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo,
                                 ((loff_t)len_hi << 32) | len_lo, advice);
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 378f5d8d1..9d449caf8 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -60,6 +60,18 @@ SECTIONS
   /* "Init" is divided into two areas with very different virtual addresses. */
   INIT_TEXT_SECTION(PAGE_SIZE)
 
+  /*
+   * Some things, like the __jump_table, may contain symbol references
+   * to __exit text, so include such text in the final image if so.
+   * In that case we also override the _einittext from INIT_TEXT_SECTION.
+   */
+#ifdef CONFIG_JUMP_LABEL
+  .exit.text : {
+    EXIT_TEXT
+    _einittext = .;
+  }
+#endif
+
   /* Now we skip back to PAGE_OFFSET for the data. */
   . = (. - TEXT_OFFSET + PAGE_OFFSET);
   #undef LOAD_OFFSET
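The vmlinux.lds.S comment above describes the case the new .exit.text rule guards against: with CONFIG_JUMP_LABEL, a static key used inside an __exit function emits a __jump_table entry that points into .exit.text, so that text has to survive into the final image. A sketch of such a function (hypothetical code, not from this patch):

    #include <linux/init.h>
    #include <linux/jump_label.h>
    #include <linux/module.h>
    #include <linux/printk.h>

    static DEFINE_STATIC_KEY_FALSE(demo_key);

    static void __exit demo_exit(void)
    {
            /* this branch emits a __jump_table entry referencing __exit text */
            if (static_branch_unlikely(&demo_key))
                    pr_info("demo key was enabled\n");
    }
    module_exit(demo_exit);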