Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c | 44
1 file changed, 3 insertions(+), 41 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index be5ee9f7f..76d1ec291 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -43,7 +43,6 @@
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
-#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
@@ -293,7 +292,6 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
if (vma->vm_file)
fput(vma->vm_file);
mpol_put(vma_policy(vma));
- uksm_remove_vma(vma);
kmem_cache_free(vm_area_cachep, vma);
return next;
}
@@ -758,16 +756,9 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
long adjust_next = 0;
int remove_next = 0;
-/*
- * to avoid deadlock, ksm_remove_vma must be done before any spin_lock is
- * acquired
- */
- uksm_remove_vma(vma);
-
if (next && !insert) {
struct vm_area_struct *exporter = NULL;
- uksm_remove_vma(next);
if (end >= next->vm_end) {
/*
* vma expands, overlapping all the next, and
@@ -861,7 +852,6 @@ again: remove_next = 1 + (end > next->vm_end);
end_changed = true;
}
vma->vm_pgoff = pgoff;
-
if (adjust_next) {
next->vm_start += adjust_next << PAGE_SHIFT;
next->vm_pgoff += adjust_next;
@@ -932,22 +922,16 @@ again: remove_next = 1 + (end > next->vm_end);
* up the code too much to do both in one go.
*/
next = vma->vm_next;
- if (remove_next == 2) {
- uksm_remove_vma(next);
+ if (remove_next == 2)
goto again;
- } else if (next) {
+ else if (next)
vma_gap_update(next);
- } else {
+ else
mm->highest_vm_end = end;
- }
- } else {
- if (next && !insert)
- uksm_vma_add_new(next);
}
if (insert && file)
uprobe_mmap(insert);
- uksm_vma_add_new(vma);
validate_mm(mm);
return 0;
@@ -1332,9 +1316,6 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
vm_flags |= calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
- /* If uksm is enabled, we add VM_MERGABLE to new VMAs. */
- uksm_vm_flags_mod(&vm_flags);
-
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
@@ -1673,7 +1654,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
allow_write_access(file);
}
file = vma->vm_file;
- uksm_vma_add_new(vma);
out:
perf_event_mmap(vma);
@@ -1715,7 +1695,6 @@ allow_write_and_free_vma:
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
free_vma:
- uksm_remove_vma(vma);
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
@@ -2511,8 +2490,6 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
- uksm_vma_add_new(new);
-
/* Success. */
if (!err)
return 0;
@@ -2773,7 +2750,6 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
return addr;
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
- uksm_vm_flags_mod(&flags);
error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
if (offset_in_page(error))
@@ -2831,7 +2807,6 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
vma->vm_flags = flags;
vma->vm_page_prot = vm_get_page_prot(flags);
vma_link(mm, vma, prev, rb_link, rb_parent);
- uksm_vma_add_new(vma);
out:
perf_event_mmap(vma);
mm->total_vm += len >> PAGE_SHIFT;
@@ -2868,12 +2843,6 @@ void exit_mmap(struct mm_struct *mm)
/* mm's last user has gone, and its about to be pulled down */
mmu_notifier_release(mm);
- /*
- * Taking write lock on mmap_sem does not harm others,
- * but it's crucial for uksm to avoid races.
- */
- down_write(&mm->mmap_sem);
-
if (mm->locked_vm) {
vma = mm->mmap;
while (vma) {
@@ -2909,11 +2878,6 @@ void exit_mmap(struct mm_struct *mm)
vma = remove_vma(vma);
}
vm_unacct_memory(nr_accounted);
-
- mm->mmap = NULL;
- mm->mm_rb = RB_ROOT;
- vmacache_invalidate(mm);
- up_write(&mm->mmap_sem);
}
/* Insert vm structure into process list sorted by address
@@ -3023,7 +2987,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
new_vma->vm_ops->open(new_vma);
vma_link(mm, new_vma, prev, rb_link, rb_parent);
*need_rmap_locks = false;
- uksm_vma_add_new(new_vma);
}
return new_vma;
@@ -3153,7 +3116,6 @@ static struct vm_area_struct *__install_special_mapping(
vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
perf_event_mmap(vma);
- uksm_vma_add_new(vma);
return vma;
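
Note on the pattern behind this cleanup: UKSM instruments mm/mmap.c through three hooks (uksm_vm_flags_mod(), uksm_vma_add_new(), uksm_remove_vma()), and this patch deletes every call site along with the <linux/ksm.h> include that pulled in their declarations. As a minimal sketch of how such optional hooks are commonly declared with no-op fallbacks when the feature is compiled out — the CONFIG_UKSM guard, header placement, and exact parameter types here are assumptions inferred from the call sites in this diff, not copied from the UKSM patch:

/* Hypothetical hook declarations; prototypes are inferred from the
 * call sites removed in this diff. */
struct vm_area_struct;

#ifdef CONFIG_UKSM
void uksm_vm_flags_mod(unsigned long *vm_flags_p); /* mark new VMAs mergeable */
void uksm_vma_add_new(struct vm_area_struct *vma); /* begin tracking a VMA */
void uksm_remove_vma(struct vm_area_struct *vma);  /* stop tracking a VMA */
#else
/* With the feature disabled, the hooks collapse to no-ops, which is why
 * deleting the call sites, as this patch does, is behavior-neutral for a
 * kernel built without UKSM. */
static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p) {}
static inline void uksm_vma_add_new(struct vm_area_struct *vma) {}
static inline void uksm_remove_vma(struct vm_area_struct *vma) {}
#endif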