Diffstat (limited to 'arch/tile')
-rw-r--r--   arch/tile/Kconfig                        6
-rw-r--r--   arch/tile/include/asm/barrier.h          9
-rw-r--r--   arch/tile/include/asm/cmpxchg.h          2
-rw-r--r--   arch/tile/include/asm/dma-mapping.h     32
-rw-r--r--   arch/tile/include/asm/insn.h            59
-rw-r--r--   arch/tile/include/asm/io.h              16
-rw-r--r--   arch/tile/include/asm/jump_label.h      58
-rw-r--r--   arch/tile/include/asm/page.h            10
-rw-r--r--   arch/tile/include/asm/pgtable.h         10
-rw-r--r--   arch/tile/include/asm/processor.h        2
-rw-r--r--   arch/tile/include/asm/thread_info.h      8
-rw-r--r--   arch/tile/include/asm/topology.h         3
-rw-r--r--   arch/tile/kernel/Makefile                1
-rw-r--r--   arch/tile/kernel/ftrace.c               13
-rw-r--r--   arch/tile/kernel/intvec_32.S            51
-rw-r--r--   arch/tile/kernel/intvec_64.S            56
-rw-r--r--   arch/tile/kernel/jump_label.c           64
-rw-r--r--   arch/tile/kernel/kgdb.c                  2
-rw-r--r--   arch/tile/kernel/kprobes.c               4
-rw-r--r--   arch/tile/kernel/pci-dma.c              29
-rw-r--r--   arch/tile/kernel/process.c              79
-rw-r--r--   arch/tile/kernel/ptrace.c               15
-rw-r--r--   arch/tile/kernel/setup.c                 2
-rw-r--r--   arch/tile/kernel/single_step.c           3
-rw-r--r--   arch/tile/kernel/stack.c                17
-rw-r--r--   arch/tile/kernel/traps.c                13
-rw-r--r--   arch/tile/kernel/unaligned.c            13
-rw-r--r--   arch/tile/mm/fault.c                     3
28 files changed, 345 insertions, 235 deletions
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 8ec7a4599..de4a4fff9 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -5,7 +5,6 @@ config TILE
def_bool y
select HAVE_PERF_EVENTS
select USE_PMC if PERF_EVENTS
- select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_KVM if !TILEGX
select GENERIC_FIND_FIRST_BIT
@@ -19,6 +18,7 @@ config TILE
select VIRT_TO_BUS
select SYS_HYPERVISOR
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
@@ -116,9 +116,6 @@ config ARCH_DISCONTIGMEM_DEFAULT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-config STRICT_DEVMEM
- def_bool y
-
# SMP is required for Tilera Linux.
config SMP
def_bool y
@@ -143,6 +140,7 @@ config TILEGX
select HAVE_KRETPROBES
select HAVE_ARCH_KGDB
select ARCH_SUPPORTS_ATOMIC_RMW
+ select HAVE_ARCH_JUMP_LABEL
config TILEPRO
def_bool !TILEGX
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index 96a42ae79..d55222806 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -79,11 +79,12 @@ mb_incoherent(void)
* But after the word is updated, the routine issues an "mf" before returning,
* and since it's a function call, we don't even need a compiler barrier.
*/
-#define smp_mb__before_atomic() smp_mb()
-#define smp_mb__after_atomic() do { } while (0)
+#define __smp_mb__before_atomic() __smp_mb()
+#define __smp_mb__after_atomic() do { } while (0)
+#define smp_mb__after_atomic() __smp_mb__after_atomic()
#else /* 64 bit */
-#define smp_mb__before_atomic() smp_mb()
-#define smp_mb__after_atomic() smp_mb()
+#define __smp_mb__before_atomic() __smp_mb()
+#define __smp_mb__after_atomic() __smp_mb()
#endif
#include <asm-generic/barrier.h>
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 0ccda3c42..25d589949 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -127,8 +127,6 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n);
#endif
-#define tas(ptr) xchg((ptr), 1)
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_TILE_CMPXCHG_H */
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 96ac6cce4..01ceb4a89 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -73,37 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
}
#define HAVE_ARCH_DMA_SET_MASK 1
-
-#include <asm-generic/dma-mapping-common.h>
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- /*
- * For PCI devices with 64-bit DMA addressing capability, promote
- * the dma_ops to hybrid, with the consistent memory DMA space limited
- * to 32-bit. For 32-bit capable devices, limit the streaming DMA
- * address range to max_direct_dma_addr.
- */
- if (dma_ops == gx_pci_dma_map_ops ||
- dma_ops == gx_hybrid_pci_dma_map_ops ||
- dma_ops == gx_legacy_pci_dma_map_ops) {
- if (mask == DMA_BIT_MASK(64) &&
- dma_ops == gx_legacy_pci_dma_map_ops)
- set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
- else if (mask > dev->archdata.max_direct_dma_addr)
- mask = dev->archdata.max_direct_dma_addr;
- }
-
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
-}
+int dma_set_mask(struct device *dev, u64 mask);
/*
* dma_alloc_noncoherent() is #defined to return coherent memory,
diff --git a/arch/tile/include/asm/insn.h b/arch/tile/include/asm/insn.h
new file mode 100644
index 000000000..f78ba5c16
--- /dev/null
+++ b/arch/tile/include/asm/insn.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+#ifndef __ASM_TILE_INSN_H
+#define __ASM_TILE_INSN_H
+
+#include <arch/opcode.h>
+
+static inline tilegx_bundle_bits NOP(void)
+{
+ return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
+ create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
+ create_Opcode_X0(RRR_0_OPCODE_X0) |
+ create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
+ create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
+ create_Opcode_X1(RRR_0_OPCODE_X1);
+}
+
+static inline tilegx_bundle_bits tilegx_gen_branch(unsigned long pc,
+ unsigned long addr,
+ bool link)
+{
+ tilegx_bundle_bits opcode_x0, opcode_x1;
+ long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;
+
+ if (link) {
+ /* opcode: jal addr */
+ opcode_x1 =
+ create_Opcode_X1(JUMP_OPCODE_X1) |
+ create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
+ create_JumpOff_X1(pcrel_by_instr);
+ } else {
+ /* opcode: j addr */
+ opcode_x1 =
+ create_Opcode_X1(JUMP_OPCODE_X1) |
+ create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
+ create_JumpOff_X1(pcrel_by_instr);
+ }
+
+ /* opcode: fnop */
+ opcode_x0 =
+ create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
+ create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
+ create_Opcode_X0(RRR_0_OPCODE_X0);
+
+ return opcode_x1 | opcode_x0;
+}
+
+#endif /* __ASM_TILE_INSN_H */
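[Side note, not part of the patch: a minimal hypothetical sketch of the offset arithmetic used by tilegx_gen_branch() above. The literal 3 stands in for TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES (TILE-Gx bundles are 8 bytes), and the function name is invented for illustration.]

    /* Hypothetical sketch: the branch target is encoded as an offset
     * counted in 8-byte instruction bundles relative to pc.
     */
    static inline long example_pcrel_bundles(unsigned long pc, unsigned long addr)
    {
            return (addr - pc) >> 3;   /* e.g. addr = pc + 0x40  =>  8 bundles */
    }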
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index 322b5fe94..30f4a210d 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -161,14 +161,14 @@ extern void _tile_writew(u16 val, unsigned long addr);
extern void _tile_writel(u32 val, unsigned long addr);
extern void _tile_writeq(u64 val, unsigned long addr);
-#define __raw_readb(addr) _tile_readb((unsigned long)addr)
-#define __raw_readw(addr) _tile_readw((unsigned long)addr)
-#define __raw_readl(addr) _tile_readl((unsigned long)addr)
-#define __raw_readq(addr) _tile_readq((unsigned long)addr)
-#define __raw_writeb(val, addr) _tile_writeb(val, (unsigned long)addr)
-#define __raw_writew(val, addr) _tile_writew(val, (unsigned long)addr)
-#define __raw_writel(val, addr) _tile_writel(val, (unsigned long)addr)
-#define __raw_writeq(val, addr) _tile_writeq(val, (unsigned long)addr)
+#define __raw_readb(addr) _tile_readb((unsigned long)(addr))
+#define __raw_readw(addr) _tile_readw((unsigned long)(addr))
+#define __raw_readl(addr) _tile_readl((unsigned long)(addr))
+#define __raw_readq(addr) _tile_readq((unsigned long)(addr))
+#define __raw_writeb(val, addr) _tile_writeb(val, (unsigned long)(addr))
+#define __raw_writew(val, addr) _tile_writew(val, (unsigned long)(addr))
+#define __raw_writel(val, addr) _tile_writel(val, (unsigned long)(addr))
+#define __raw_writeq(val, addr) _tile_writeq(val, (unsigned long)(addr))
#else /* CONFIG_PCI */
diff --git a/arch/tile/include/asm/jump_label.h b/arch/tile/include/asm/jump_label.h
new file mode 100644
index 000000000..cde7573f3
--- /dev/null
+++ b/arch/tile/include/asm/jump_label.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_JUMP_LABEL_H
+#define _ASM_TILE_JUMP_LABEL_H
+
+#include <arch/opcode.h>
+
+#define JUMP_LABEL_NOP_SIZE TILE_BUNDLE_SIZE_IN_BYTES
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ "nop" "\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".quad 1b, %l[l_yes], %0 + %1 \n\t"
+ ".popsection\n\t"
+ : : "i" (key), "i" (branch) : : l_yes);
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ "j %l[l_yes]" "\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".quad 1b, %l[l_yes], %0 + %1 \n\t"
+ ".popsection\n\t"
+ : : "i" (key), "i" (branch) : : l_yes);
+ return false;
+l_yes:
+ return true;
+}
+
+typedef u64 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* _ASM_TILE_JUMP_LABEL_H */
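[Side note, not part of the patch: a hedged sketch of how generic kernel code reaches the arch hooks above via the standard static-key API; "my_key" and feature_slow_path() are invented names for illustration only.]

    #include <linux/jump_label.h>

    extern void feature_slow_path(void);

    static DEFINE_STATIC_KEY_FALSE(my_key);

    void hot_path(void)
    {
            /* Compiles to the single nop bundle emitted by arch_static_branch()
             * until static_branch_enable(&my_key) is called, at which point
             * arch_jump_label_transform() rewrites it into a "j" bundle.
             */
            if (static_branch_unlikely(&my_key))
                    feature_slow_path();
    }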
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 8eca6a0e1..498a5f712 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -321,6 +321,16 @@ static inline int pfn_valid(unsigned long pfn)
#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn((void *)(kaddr)))
#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))
+/*
+ * The kernel text is mapped at MEM_SV_START as read-only. To allow
+ * modifying kernel text, it is also mapped at PAGE_OFFSET as read-write.
+ * This macro converts a kernel address to its writable kernel text mapping,
+ * which is used to modify the text code on a running kernel by kgdb,
+ * ftrace, kprobe, jump label, etc.
+ */
+#define ktext_writable_addr(kaddr) \
+ ((unsigned long)(kaddr) - MEM_SV_START + PAGE_OFFSET)
+
struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
extern pte_t *virt_to_kpte(unsigned long kaddr);
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index 2b05ccbeb..96cecf555 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -489,16 +489,6 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
#define pmd_trans_huge pmd_huge_page
-
-static inline pmd_t pmd_mksplitting(pmd_t pmd)
-{
- return pte_pmd(hv_pte_set_client2(pmd_pte(pmd)));
-}
-
-static inline int pmd_trans_splitting(pmd_t pmd)
-{
- return hv_pte_get_client2(pmd_pte(pmd));
-}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 139dfdee0..0684e88aa 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -212,7 +212,7 @@ static inline void release_thread(struct task_struct *dead_task)
/* Nothing for now */
}
-extern int do_work_pending(struct pt_regs *regs, u32 flags);
+extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);
/*
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index dc1fb28d9..4b7cef9e9 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -140,10 +140,14 @@ extern void _cpu_idle(void);
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_NOHZ (1<<TIF_NOHZ)
+/* Work to do as we loop to exit to user space. */
+#define _TIF_WORK_MASK \
+ (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME)
+
/* Work to do on any return to user space. */
#define _TIF_ALLWORK_MASK \
- (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_SINGLESTEP | \
- _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME | _TIF_NOHZ)
+ (_TIF_WORK_MASK | _TIF_SINGLESTEP | _TIF_NOHZ)
/* Work to do at syscall entry. */
#define _TIF_SYSCALL_ENTRY_WORK \
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h
index 76b0d0ebb..b11d5fcd2 100644
--- a/arch/tile/include/asm/topology.h
+++ b/arch/tile/include/asm/topology.h
@@ -44,9 +44,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
/* For now, use numa node -1 for global allocation. */
#define pcibus_to_node(bus) ((void)(bus), -1)
-/* By definition, we create nodes based on online memory. */
-#define node_has_online_mem(nid) 1
-
#endif /* CONFIG_NUMA */
#include <asm-generic/topology.h>
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 21f77bf68..09936d0bc 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -32,5 +32,6 @@ obj-$(CONFIG_TILE_HVGLUE_TRACE) += hvglue_trace.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount_64.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-y += vdso/
diff --git a/arch/tile/kernel/ftrace.c b/arch/tile/kernel/ftrace.c
index 0c0996175..4a572088b 100644
--- a/arch/tile/kernel/ftrace.c
+++ b/arch/tile/kernel/ftrace.c
@@ -20,21 +20,12 @@
#include <asm/cacheflush.h>
#include <asm/ftrace.h>
#include <asm/sections.h>
+#include <asm/insn.h>
#include <arch/opcode.h>
#ifdef CONFIG_DYNAMIC_FTRACE
-static inline tilegx_bundle_bits NOP(void)
-{
- return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
- create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
- create_Opcode_X0(RRR_0_OPCODE_X0) |
- create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
- create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
- create_Opcode_X1(RRR_0_OPCODE_X1);
-}
-
static int machine_stopped __read_mostly;
int ftrace_arch_code_modify_prepare(void)
@@ -117,7 +108,7 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
return -EINVAL;
/* Operate on writable kernel text mapping. */
- pc_wr = pc - MEM_SV_START + PAGE_OFFSET;
+ pc_wr = ktext_writable_addr(pc);
if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE))
return -EPERM;
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index fbbe2ea88..9ff75e3a3 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -572,7 +572,7 @@ intvec_\vecname:
}
wh64 r52
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
.ifnc \function,handle_nmi
/*
* We finally have enough state set up to notify the irq
@@ -588,6 +588,9 @@ intvec_\vecname:
{ move r32, r2; move r33, r3 }
.endif
TRACE_IRQS_OFF
+#ifdef CONFIG_CONTEXT_TRACKING
+ jal context_tracking_user_exit
+#endif
.ifnc \function,handle_syscall
{ move r0, r30; move r1, r31 }
{ move r2, r32; move r3, r33 }
@@ -846,18 +849,6 @@ STD_ENTRY(interrupt_return)
FEEDBACK_REENTER(interrupt_return)
/*
- * Use r33 to hold whether we have already loaded the callee-saves
- * into ptregs. We don't want to do it twice in this loop, since
- * then we'd clobber whatever changes are made by ptrace, etc.
- * Get base of stack in r32.
- */
- {
- GET_THREAD_INFO(r32)
- movei r33, 0
- }
-
-.Lretry_work_pending:
- /*
* Disable interrupts so as to make sure we don't
* miss an interrupt that sets any of the thread flags (like
* need_resched or sigpending) between sampling and the iret.
@@ -867,33 +858,27 @@ STD_ENTRY(interrupt_return)
IRQ_DISABLE(r20, r21)
TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
-
- /* Check to see if there is any work to do before returning to user. */
+ /*
+ * See if there are any work items (including single-shot items)
+ * to do. If so, save the callee-save registers to pt_regs
+ * and then dispatch to C code.
+ */
+ GET_THREAD_INFO(r21)
{
- addi r29, r32, THREAD_INFO_FLAGS_OFFSET
- moveli r1, lo16(_TIF_ALLWORK_MASK)
+ addi r22, r21, THREAD_INFO_FLAGS_OFFSET
+ moveli r20, lo16(_TIF_ALLWORK_MASK)
}
{
- lw r29, r29
- auli r1, r1, ha16(_TIF_ALLWORK_MASK)
+ lw r22, r22
+ auli r20, r20, ha16(_TIF_ALLWORK_MASK)
}
- and r1, r29, r1
- bzt r1, .Lrestore_all
-
- /*
- * Make sure we have all the registers saved for signal
- * handling, notify-resume, or single-step. Call out to C
- * code to figure out exactly what we need to do for each flag bit,
- * then if necessary, reload the flags and recheck.
- */
+ and r1, r22, r20
{
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- bnz r33, 1f
+ bzt r1, .Lrestore_all
}
push_extra_callee_saves r0
- movei r33, 1
-1: jal do_work_pending
- bnz r0, .Lretry_work_pending
+ jal prepare_exit_to_usermode
/*
* In the NMI case we
@@ -1327,7 +1312,7 @@ STD_ENTRY(ret_from_kernel_thread)
FEEDBACK_REENTER(ret_from_kernel_thread)
{
movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
+ j interrupt_return
}
STD_ENDPROC(ret_from_kernel_thread)
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 58964d209..3b51bdf37 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -658,7 +658,7 @@ intvec_\vecname:
*/
mfspr r32, SPR_EX_CONTEXT_K_1
{
- IS_KERNEL_EX1(r22, r22)
+ IS_KERNEL_EX1(r32, r32)
PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
}
beqzt r32, 1f /* zero if from user space */
@@ -753,7 +753,7 @@ intvec_\vecname:
}
wh64 r52
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
.ifnc \function,handle_nmi
/*
* We finally have enough state set up to notify the irq
@@ -769,6 +769,9 @@ intvec_\vecname:
{ move r32, r2; move r33, r3 }
.endif
TRACE_IRQS_OFF
+#ifdef CONFIG_CONTEXT_TRACKING
+ jal context_tracking_user_exit
+#endif
.ifnc \function,handle_syscall
{ move r0, r30; move r1, r31 }
{ move r2, r32; move r3, r33 }
@@ -879,20 +882,6 @@ STD_ENTRY(interrupt_return)
FEEDBACK_REENTER(interrupt_return)
/*
- * Use r33 to hold whether we have already loaded the callee-saves
- * into ptregs. We don't want to do it twice in this loop, since
- * then we'd clobber whatever changes are made by ptrace, etc.
- */
- {
- movei r33, 0
- move r32, sp
- }
-
- /* Get base of stack in r32. */
- EXTRACT_THREAD_INFO(r32)
-
-.Lretry_work_pending:
- /*
* Disable interrupts so as to make sure we don't
* miss an interrupt that sets any of the thread flags (like
* need_resched or sigpending) between sampling and the iret.
@@ -902,33 +891,28 @@ STD_ENTRY(interrupt_return)
IRQ_DISABLE(r20, r21)
TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
-
- /* Check to see if there is any work to do before returning to user. */
+ /*
+ * See if there are any work items (including single-shot items)
+ * to do. If so, save the callee-save registers to pt_regs
+ * and then dispatch to C code.
+ */
+ move r21, sp
+ EXTRACT_THREAD_INFO(r21)
{
- addi r29, r32, THREAD_INFO_FLAGS_OFFSET
- moveli r1, hw1_last(_TIF_ALLWORK_MASK)
+ addi r22, r21, THREAD_INFO_FLAGS_OFFSET
+ moveli r20, hw1_last(_TIF_ALLWORK_MASK)
}
{
- ld r29, r29
- shl16insli r1, r1, hw0(_TIF_ALLWORK_MASK)
+ ld r22, r22
+ shl16insli r20, r20, hw0(_TIF_ALLWORK_MASK)
}
- and r1, r29, r1
- beqzt r1, .Lrestore_all
-
- /*
- * Make sure we have all the registers saved for signal
- * handling or notify-resume. Call out to C code to figure out
- * exactly what we need to do for each flag bit, then if
- * necessary, reload the flags and recheck.
- */
+ and r1, r22, r20
{
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- bnez r33, 1f
+ beqzt r1, .Lrestore_all
}
push_extra_callee_saves r0
- movei r33, 1
-1: jal do_work_pending
- bnez r0, .Lretry_work_pending
+ jal prepare_exit_to_usermode
/*
* In the NMI case we
@@ -1411,7 +1395,7 @@ STD_ENTRY(ret_from_kernel_thread)
FEEDBACK_REENTER(ret_from_kernel_thread)
{
movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
+ j interrupt_return
}
STD_ENDPROC(ret_from_kernel_thread)
diff --git a/arch/tile/kernel/jump_label.c b/arch/tile/kernel/jump_label.c
new file mode 100644
index 000000000..07802d586
--- /dev/null
+++ b/arch/tile/kernel/jump_label.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * jump label TILE-Gx support
+ */
+
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/insn.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static void __jump_label_transform(struct jump_entry *e,
+ enum jump_label_type type)
+{
+ tilegx_bundle_bits opcode;
+ /* Operate on writable kernel text mapping. */
+ unsigned long pc_wr = ktext_writable_addr(e->code);
+
+ if (type == JUMP_LABEL_JMP)
+ opcode = tilegx_gen_branch(e->code, e->target, false);
+ else
+ opcode = NOP();
+
+ *(tilegx_bundle_bits *)pc_wr = opcode;
+ /* Make sure that above mem writes were issued towards the memory. */
+ smp_wmb();
+}
+
+void arch_jump_label_transform(struct jump_entry *e,
+ enum jump_label_type type)
+{
+ get_online_cpus();
+ mutex_lock(&text_mutex);
+
+ __jump_label_transform(e, type);
+ flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
+
+ mutex_unlock(&text_mutex);
+ put_online_cpus();
+}
+
+__init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
+ enum jump_label_type type)
+{
+ __jump_label_transform(e, type);
+}
+
+#endif /* HAVE_JUMP_LABEL */
diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c
index ff5335ae0..a506c2c28 100644
--- a/arch/tile/kernel/kgdb.c
+++ b/arch/tile/kernel/kgdb.c
@@ -164,7 +164,7 @@ static unsigned long writable_address(unsigned long addr)
unsigned long ret = 0;
if (core_kernel_text(addr))
- ret = addr - MEM_SV_START + PAGE_OFFSET;
+ ret = ktext_writable_addr(addr);
else if (is_module_text_address(addr))
ret = addr;
else
diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c
index f8a45c51e..c68694bb1 100644
--- a/arch/tile/kernel/kprobes.c
+++ b/arch/tile/kernel/kprobes.c
@@ -116,7 +116,7 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
unsigned long addr_wr;
/* Operate on writable kernel text mapping. */
- addr_wr = (unsigned long)p->addr - MEM_SV_START + PAGE_OFFSET;
+ addr_wr = ktext_writable_addr(p->addr);
if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
sizeof(breakpoint_insn)))
@@ -131,7 +131,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *kp)
unsigned long addr_wr;
/* Operate on writable kernel text mapping. */
- addr_wr = (unsigned long)kp->addr - MEM_SV_START + PAGE_OFFSET;
+ addr_wr = ktext_writable_addr(kp->addr);
if (probe_kernel_write((void *)addr_wr, &kp->opcode,
sizeof(kp->opcode)))
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 09b58703a..b6bc0547a 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -583,6 +583,35 @@ struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
+int dma_set_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ /*
+ * For PCI devices with 64-bit DMA addressing capability, promote
+ * the dma_ops to hybrid, with the consistent memory DMA space limited
+ * to 32-bit. For 32-bit capable devices, limit the streaming DMA
+ * address range to max_direct_dma_addr.
+ */
+ if (dma_ops == gx_pci_dma_map_ops ||
+ dma_ops == gx_hybrid_pci_dma_map_ops ||
+ dma_ops == gx_legacy_pci_dma_map_ops) {
+ if (mask == DMA_BIT_MASK(64) &&
+ dma_ops == gx_legacy_pci_dma_map_ops)
+ set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
+ else if (mask > dev->archdata.max_direct_dma_addr)
+ mask = dev->archdata.max_direct_dma_addr;
+ }
+
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
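[Side note, not part of the patch: a hedged sketch of the driver-side view of the dma_set_mask() exported above, using standard DMA API calls and an assumed struct pci_dev *pdev.]

    /* Hypothetical probe fragment: requesting 64-bit DMA on a device using
     * the legacy map ops promotes it to the hybrid ops; otherwise the mask
     * is clamped to max_direct_dma_addr. Fall back to 32-bit on failure.
     */
    if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
        dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
            return -EIO;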
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 7d5769310..b5f30d376 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -462,54 +462,57 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
/*
* This routine is called on return from interrupt if any of the
- * TIF_WORK_MASK flags are set in thread_info->flags. It is
- * entered with interrupts disabled so we don't miss an event
- * that modified the thread_info flags. If any flag is set, we
- * handle it and return, and the calling assembly code will
- * re-disable interrupts, reload the thread flags, and call back
- * if more flags need to be handled.
- *
- * We return whether we need to check the thread_info flags again
- * or not. Note that we don't clear TIF_SINGLESTEP here, so it's
- * important that it be tested last, and then claim that we don't
- * need to recheck the flags.
+ * TIF_ALLWORK_MASK flags are set in thread_info->flags. It is
+ * entered with interrupts disabled so we don't miss an event that
+ * modified the thread_info flags. We loop until all the tested flags
+ * are clear. Note that the function is called on certain conditions
+ * that are not listed in the loop condition here (e.g. SINGLESTEP)
+ * which guarantees we will do those things once, and redo them if any
+ * of the other work items is re-done, but won't continue looping if
+ * all the other work is done.
*/
-int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
+void prepare_exit_to_usermode(struct pt_regs *regs, u32 thread_info_flags)
{
- /* If we enter in kernel mode, do nothing and exit the caller loop. */
- if (!user_mode(regs))
- return 0;
+ if (WARN_ON(!user_mode(regs)))
+ return;
- user_exit();
+ do {
+ local_irq_enable();
- /* Enable interrupts; they are disabled again on return to caller. */
- local_irq_enable();
+ if (thread_info_flags & _TIF_NEED_RESCHED)
+ schedule();
- if (thread_info_flags & _TIF_NEED_RESCHED) {
- schedule();
- return 1;
- }
#if CHIP_HAS_TILE_DMA()
- if (thread_info_flags & _TIF_ASYNC_TLB) {
- do_async_page_fault(regs);
- return 1;
- }
+ if (thread_info_flags & _TIF_ASYNC_TLB)
+ do_async_page_fault(regs);
#endif
- if (thread_info_flags & _TIF_SIGPENDING) {
- do_signal(regs);
- return 1;
- }
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- return 1;
- }
- if (thread_info_flags & _TIF_SINGLESTEP)
+
+ if (thread_info_flags & _TIF_SIGPENDING)
+ do_signal(regs);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ }
+
+ local_irq_disable();
+ thread_info_flags = READ_ONCE(current_thread_info()->flags);
+
+ } while (thread_info_flags & _TIF_WORK_MASK);
+
+ if (thread_info_flags & _TIF_SINGLESTEP) {
single_step_once(regs);
+#ifndef __tilegx__
+ /*
+ * FIXME: on tilepro, since we enable interrupts in
+ * this routine, it's possible that we miss a signal
+ * or other asynchronous event.
+ */
+ local_irq_disable();
+#endif
+ }
user_enter();
-
- return 0;
}
unsigned long get_wchan(struct task_struct *p)
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index bdc126faf..54e7b723d 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -255,13 +255,6 @@ int do_syscall_trace_enter(struct pt_regs *regs)
{
u32 work = ACCESS_ONCE(current_thread_info()->flags);
- /*
- * If TIF_NOHZ is set, we are required to call user_exit() before
- * doing anything that could touch RCU.
- */
- if (work & _TIF_NOHZ)
- user_exit();
-
if (secure_computing() == -1)
return -1;
@@ -281,12 +274,6 @@ void do_syscall_trace_exit(struct pt_regs *regs)
long errno;
/*
- * We may come here right after calling schedule_user()
- * in which case we can be in RCU user mode.
- */
- user_exit();
-
- /*
* The standard tile calling convention returns the value (or negative
* errno) in r0, and zero (or positive errno) in r1.
* It saves a couple of cycles on the hot path to do this work in
@@ -322,7 +309,5 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs)
/* Handle synthetic interrupt delivered only by the simulator. */
void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num)
{
- enum ctx_state prev_state = exception_enter();
send_sigtrap(current, regs);
- exception_exit(prev_state);
}
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 6b755d125..bbb855de6 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -882,7 +882,7 @@ static int __init node_neighbors(int node, int cpu,
static void __init setup_numa_mapping(void)
{
- int distance[MAX_NUMNODES][NR_CPUS];
+ u8 distance[MAX_NUMNODES][NR_CPUS];
HV_Coord coord;
int cpu, node, cpus, i, x, y;
int num_nodes = num_online_nodes();
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 53f7b9def..862973074 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -23,7 +23,6 @@
#include <linux/types.h>
#include <linux/err.h>
#include <linux/prctl.h>
-#include <linux/context_tracking.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
@@ -739,7 +738,6 @@ static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
- enum ctx_state prev_state = exception_enter();
unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
struct thread_info *info = (void *)current_thread_info();
int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
@@ -756,7 +754,6 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
send_sigtrap(current, regs);
}
- exception_exit(prev_state);
}
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 402b9c85a..22bbbd3ff 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -78,8 +78,7 @@ static bool read_memory_func(void *result, unsigned long address,
/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
{
- const char *fault = NULL; /* happy compiler */
- char fault_buf[64];
+ char fault[64];
unsigned long sp = kbt->it.sp;
struct pt_regs *p;
@@ -90,14 +89,14 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
return NULL;
p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
- if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
- fault = "syscall";
- else {
- if (kbt->verbose) { /* else we aren't going to use it */
- snprintf(fault_buf, sizeof(fault_buf),
+ if (kbt->verbose) { /* else we aren't going to use it */
+ if (p->faultnum == INT_SWINT_1 ||
+ p->faultnum == INT_SWINT_1_SIGRETURN)
+ snprintf(fault, sizeof(fault),
+ "syscall %ld", p->regs[TREG_SYSCALL_NR]);
+ else
+ snprintf(fault, sizeof(fault),
"interrupt %ld", p->faultnum);
- fault = fault_buf;
- }
}
if (EX1_PL(p->ex1) == KERNEL_PL &&
__kernel_text_address(p->pc) &&
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 0011a9ff0..4d9651c5b 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -20,7 +20,6 @@
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
-#include <linux/context_tracking.h>
#include <asm/stack.h>
#include <asm/traps.h>
#include <asm/setup.h>
@@ -254,7 +253,6 @@ static int do_bpt(struct pt_regs *regs)
void __kprobes do_trap(struct pt_regs *regs, int fault_num,
unsigned long reason)
{
- enum ctx_state prev_state = exception_enter();
siginfo_t info = { 0 };
int signo, code;
unsigned long address = 0;
@@ -263,7 +261,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
/* Handle breakpoints, etc. */
if (is_kernel && fault_num == INT_ILL && do_bpt(regs))
- goto done;
+ return;
/* Re-enable interrupts, if they were previously enabled. */
if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
@@ -277,7 +275,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
const char *name;
char buf[100];
if (fixup_exception(regs)) /* ILL_TRANS or UNALIGN_DATA */
- goto done;
+ return;
if (fault_num >= 0 &&
fault_num < ARRAY_SIZE(int_name) &&
int_name[fault_num] != NULL)
@@ -319,7 +317,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
case INT_GPV:
#if CHIP_HAS_TILE_DMA()
if (retry_gpv(reason))
- goto done;
+ return;
#endif
/*FALLTHROUGH*/
case INT_UDN_ACCESS:
@@ -346,7 +344,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
if (!state ||
(void __user *)(regs->pc) != state->buffer) {
single_step_once(regs);
- goto done;
+ return;
}
}
#endif
@@ -390,9 +388,6 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
if (signo != SIGTRAP)
trace_unhandled_signal("trap", regs, address, signo);
force_sig_info(signo, &info, current);
-
-done:
- exception_exit(prev_state);
}
void do_nmi(struct pt_regs *regs, int fault_num, unsigned long reason)
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index d075f92cc..0db5f7c9d 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/prctl.h>
-#include <linux/context_tracking.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
@@ -1449,7 +1448,6 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
void do_unaligned(struct pt_regs *regs, int vecnum)
{
- enum ctx_state prev_state = exception_enter();
tilegx_bundle_bits __user *pc;
tilegx_bundle_bits bundle;
struct thread_info *info = current_thread_info();
@@ -1503,7 +1501,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
*((tilegx_bundle_bits *)(regs->pc)));
jit_bundle_gen(regs, bundle, align_ctl);
}
- goto done;
+ return;
}
/*
@@ -1527,7 +1525,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
trace_unhandled_signal("unaligned fixup trap", regs, 0, SIGBUS);
force_sig_info(info.si_signo, &info, current);
- goto done;
+ return;
}
@@ -1544,7 +1542,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
trace_unhandled_signal("segfault in unalign fixup", regs,
(unsigned long)info.si_addr, SIGSEGV);
force_sig_info(info.si_signo, &info, current);
- goto done;
+ return;
}
if (!info->unalign_jit_base) {
@@ -1579,7 +1577,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
if (IS_ERR((void __force *)user_page)) {
pr_err("Out of kernel pages trying do_mmap\n");
- goto done;
+ return;
}
/* Save the address in the thread_info struct */
@@ -1592,9 +1590,6 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
/* Generate unalign JIT */
jit_bundle_gen(regs, GX_INSN_BSWAP(bundle), align_ctl);
-
-done:
- exception_exit(prev_state);
}
#endif /* __tilegx__ */
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 13eac59bf..267342148 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -35,7 +35,6 @@
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
-#include <linux/context_tracking.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
@@ -845,9 +844,7 @@ static inline void __do_page_fault(struct pt_regs *regs, int fault_num,
void do_page_fault(struct pt_regs *regs, int fault_num,
unsigned long address, unsigned long write)
{
- enum ctx_state prev_state = exception_enter();
__do_page_fault(regs, fault_num, address, write);
- exception_exit(prev_state);
}
#if CHIP_HAS_TILE_DMA()