Diffstat (limited to 'arch/tile/mm')
-rw-r--r--  arch/tile/mm/elf.c          |  2
-rw-r--r--  arch/tile/mm/fault.c        | 21
-rw-r--r--  arch/tile/mm/highmem.c      |  3
-rw-r--r--  arch/tile/mm/hugetlbpage.c  |  5
4 files changed, 15 insertions(+), 16 deletions(-)
diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c
index f7ddae372..6225cc998 100644
--- a/arch/tile/mm/elf.c
+++ b/arch/tile/mm/elf.c
@@ -56,7 +56,7 @@ static int notify_exec(struct mm_struct *mm)
if (exe_file == NULL)
goto done_free;
- path = d_path(&exe_file->f_path, buf, PAGE_SIZE);
+ path = file_path(exe_file, buf, PAGE_SIZE);
if (IS_ERR(path))
goto done_put;
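
The helper that replaces the open-coded d_path() call is a thin VFS wrapper that resolves a struct file's own f_path. A minimal sketch of its shape, reproduced from memory of fs/d_path.c and therefore an approximation rather than a verbatim quote:

	/* Thin wrapper: resolve the path of a struct file into buf,
	 * forwarding to d_path() on the file's embedded f_path. */
	char *file_path(struct file *filp, char *buf, int buflen)
	{
		return d_path(&filp->f_path, buf, buflen);
	}

With this, callers such as notify_exec() no longer reach into exe_file->f_path directly.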
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index e83cc999d..13eac59bf 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -354,9 +354,9 @@ static int handle_page_fault(struct pt_regs *regs,
/*
* If we're in an interrupt, have no user context or are running in an
- * atomic region then we must not take the fault.
+ * region with pagefaults disabled then we must not take the fault.
*/
- if (in_atomic() || !mm) {
+ if (pagefault_disabled() || !mm) {
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
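
The new check keys off the per-task pagefault-disable count rather than the preempt count, so it stays meaningful even when CONFIG_PREEMPT_COUNT is off. An approximate sketch of the helpers involved, based on include/linux/uaccess.h and hedged accordingly:

	/* pagefault_disable() bumps a per-task counter; the fault handler
	 * now tests that counter instead of in_atomic(). Approximate shape. */
	static inline void pagefault_disable(void)
	{
		current->pagefault_disabled++;
		barrier();	/* the store must be visible before a fault can hit */
	}

	static inline bool pagefault_disabled(void)
	{
		return current->pagefault_disabled != 0;
	}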
@@ -699,11 +699,10 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
* interrupt away appropriately and return immediately. We can't do
* page faults for user code while in kernel mode.
*/
-void do_page_fault(struct pt_regs *regs, int fault_num,
- unsigned long address, unsigned long write)
+static inline void __do_page_fault(struct pt_regs *regs, int fault_num,
+ unsigned long address, unsigned long write)
{
int is_page_fault;
- enum ctx_state prev_state = exception_enter();
#ifdef CONFIG_KPROBES
/*
@@ -713,7 +712,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
*/
if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
regs->faultnum, SIGSEGV) == NOTIFY_STOP)
- goto done;
+ return;
#endif
#ifdef __tilegx__
@@ -835,18 +834,22 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
async->is_fault = is_page_fault;
async->is_write = write;
async->address = address;
- goto done;
+ return;
}
}
#endif
handle_page_fault(regs, fault_num, is_page_fault, address, write);
+}
-done:
+void do_page_fault(struct pt_regs *regs, int fault_num,
+ unsigned long address, unsigned long write)
+{
+ enum ctx_state prev_state = exception_enter();
+ __do_page_fault(regs, fault_num, address, write);
exception_exit(prev_state);
}
-
#if CHIP_HAS_TILE_DMA()
/*
* This routine effectively re-issues asynchronous page faults
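
The refactor above exists so that early exits no longer have to funnel through a done: label just to reach exception_exit(); the worker can simply return, and the thin wrapper keeps context tracking balanced. A generic sketch of the pattern with illustrative names (only exception_enter()/exception_exit() are real kernel APIs here):

	/* Wrapper keeps exception_enter()/exception_exit() paired no matter
	 * where the worker bails out; the worker itself stays label-free. */
	static inline void __handle_event(struct pt_regs *regs)	/* hypothetical worker */
	{
		if (early_out_condition(regs))		/* hypothetical check */
			return;				/* no goto needed */
		/* ... main handling ... */
	}

	void handle_event(struct pt_regs *regs)		/* hypothetical entry point */
	{
		enum ctx_state prev_state = exception_enter();

		__handle_event(regs);
		exception_exit(prev_state);
	}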
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 6aa2f2625..fcd545014 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -201,7 +201,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
int idx, type;
pte_t *pte;
- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+ preempt_disable();
pagefault_disable();
/* Avoid icache flushes by disallowing atomic executable mappings. */
@@ -259,6 +259,7 @@ void __kunmap_atomic(void *kvaddr)
}
pagefault_enable();
+ preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
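
Since the fault handler no longer consults in_atomic(), kmap_atomic_prot() must disable preemption explicitly; pagefault_disable() alone no longer implies it. The caller-visible discipline is unchanged: between map and unmap the code may neither sleep nor fault. A hypothetical caller, just to illustrate the nesting:

	/* copy_into_highpage() is an illustrative caller, not part of this
	 * patch: both preemption and page faults are off inside the window. */
	static void copy_into_highpage(struct page *page, const void *src, size_t len)
	{
		void *vaddr = kmap_atomic(page);	/* preempt_disable() + pagefault_disable() */

		memcpy(vaddr, src, len);		/* must not sleep or fault here */
		kunmap_atomic(vaddr);			/* pagefault_enable() + preempt_enable() */
	}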
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 8416240c3..c034dc3fe 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -160,11 +160,6 @@ int pud_huge(pud_t pud)
return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
- return 0;
-}
-
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len,
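
The huge_pmd_unshare() stub removed above is redundant because generic code now provides the same trivial fallback for architectures that do not implement huge-PMD sharing. Roughly, from memory of mm/hugetlb.c (so an approximation, not a quote):

	/* Approximate shape of the generic fallback that supersedes the
	 * per-arch stub on architectures without huge-PMD sharing. */
	#ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
	int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
	{
		return 0;
	}
	#endif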