From 863981e96738983919de841ec669e157e6bdaeb0 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Sun, 11 Sep 2016 04:34:46 -0300
Subject: Linux-libre 4.7.1-gnu

---
 arch/powerpc/mm/mmu_context_hash64.c | 151 -----------------------------------
 1 file changed, 151 deletions(-)
 delete mode 100644 arch/powerpc/mm/mmu_context_hash64.c

diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
deleted file mode 100644
index 9ca6fe16c..000000000
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * MMU context allocation for 64-bit kernels.
- *
- * Copyright (C) 2004 Anton Blanchard, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/idr.h>
-#include <linux/export.h>
-#include <linux/gfp.h>
-#include <linux/slab.h>
-
-#include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
-
-#include "icswx.h"
-
-static DEFINE_SPINLOCK(mmu_context_lock);
-static DEFINE_IDA(mmu_context_ida);
-
-int __init_new_context(void)
-{
-	int index;
-	int err;
-
-again:
-	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
-		return -ENOMEM;
-
-	spin_lock(&mmu_context_lock);
-	err = ida_get_new_above(&mmu_context_ida, 1, &index);
-	spin_unlock(&mmu_context_lock);
-
-	if (err == -EAGAIN)
-		goto again;
-	else if (err)
-		return err;
-
-	if (index > MAX_USER_CONTEXT) {
-		spin_lock(&mmu_context_lock);
-		ida_remove(&mmu_context_ida, index);
-		spin_unlock(&mmu_context_lock);
-		return -ENOMEM;
-	}
-
-	return index;
-}
-EXPORT_SYMBOL_GPL(__init_new_context);
-
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-	int index;
-
-	index = __init_new_context();
-	if (index < 0)
-		return index;
-
-	/* The old code would re-promote on fork, we don't do that
-	 * when using slices as it could cause problem promoting slices
-	 * that have been forced down to 4K
-	 */
-	if (slice_mm_new_context(mm))
-		slice_set_user_psize(mm, mmu_virtual_psize);
-	subpage_prot_init_new_context(mm);
-	mm->context.id = index;
-#ifdef CONFIG_PPC_ICSWX
-	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
-	if (!mm->context.cop_lockp) {
-		__destroy_context(index);
-		subpage_prot_free(mm);
-		mm->context.id = MMU_NO_CONTEXT;
-		return -ENOMEM;
-	}
-	spin_lock_init(mm->context.cop_lockp);
-#endif /* CONFIG_PPC_ICSWX */
-
-#ifdef CONFIG_PPC_64K_PAGES
-	mm->context.pte_frag = NULL;
-#endif
-#ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_init(&mm->context);
-#endif
-	return 0;
-}
-
-void __destroy_context(int context_id)
-{
-	spin_lock(&mmu_context_lock);
-	ida_remove(&mmu_context_ida, context_id);
-	spin_unlock(&mmu_context_lock);
-}
-EXPORT_SYMBOL_GPL(__destroy_context);
-
-#ifdef CONFIG_PPC_64K_PAGES
-static void destroy_pagetable_page(struct mm_struct *mm)
-{
-	int count;
-	void *pte_frag;
-	struct page *page;
-
-	pte_frag = mm->context.pte_frag;
-	if (!pte_frag)
-		return;
-
-	page = virt_to_page(pte_frag);
-	/* drop all the pending references */
-	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
-	/* We allow PTE_FRAG_NR fragments from a PTE page */
-	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
-		pgtable_page_dtor(page);
-		free_hot_cold_page(page, 0);
-	}
-}
-
-#else
-static inline void destroy_pagetable_page(struct mm_struct *mm)
-{
-	return;
-}
-#endif
-
-
-void destroy_context(struct mm_struct *mm)
-{
-#ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_cleanup(&mm->context);
-#endif
-
-#ifdef CONFIG_PPC_ICSWX
-	drop_cop(mm->context.acop, mm);
-	kfree(mm->context.cop_lockp);
-	mm->context.cop_lockp = NULL;
-#endif /* CONFIG_PPC_ICSWX */
-
-	destroy_pagetable_page(mm);
-	__destroy_context(mm->context.id);
-	subpage_prot_free(mm);
-	mm->context.id = MMU_NO_CONTEXT;
-}
--
cgit v1.2.3-54-g00ecf