Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/pgtable.h |  17
-rw-r--r--  include/linux/ksm.h           |  44
-rw-r--r--  include/linux/mm_types.h      |   3
-rw-r--r--  include/linux/mmzone.h        |   5
-rw-r--r--  include/linux/sradix-tree.h   |  77
-rw-r--r--  include/linux/uksm.h          | 146
6 files changed, 18 insertions(+), 274 deletions(-)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f387ed12c..c370b261c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -601,25 +601,12 @@ extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
extern void untrack_pfn_moved(struct vm_area_struct *vma);
#endif
-#ifdef CONFIG_UKSM
-static inline int is_uksm_zero_pfn(unsigned long pfn)
-{
- extern unsigned long uksm_zero_pfn;
- return pfn == uksm_zero_pfn;
-}
-#else
-static inline int is_uksm_zero_pfn(unsigned long pfn)
-{
- return 0;
-}
-#endif
-
#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
extern unsigned long zero_pfn;
unsigned long offset_from_zero_pfn = pfn - zero_pfn;
- return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT) || is_uksm_zero_pfn(pfn);
+ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}
#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
@@ -628,7 +615,7 @@ static inline int is_zero_pfn(unsigned long pfn)
static inline int is_zero_pfn(unsigned long pfn)
{
extern unsigned long zero_pfn;
- return (pfn == zero_pfn) || (is_uksm_zero_pfn(pfn));
+ return pfn == zero_pfn;
}
static inline unsigned long my_zero_pfn(unsigned long addr)
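
With the UKSM hook gone, the __HAVE_COLOR_ZERO_PAGE variant of is_zero_pfn() is again a pure range
check against the colored zero-page area. A standalone userspace sketch of that check (the zero_pfn
and zero_page_mask values below are made-up placeholders, not the kernel's real values) shows why
no explicit lower bound is needed:

#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long zero_pfn = 0x1000;        /* assumed placement of the region  */
static unsigned long zero_page_mask = 0xf000;  /* assumed: 16 colored 4 KiB pages  */

static int is_zero_pfn(unsigned long pfn)
{
	/* Unsigned subtraction: a pfn below zero_pfn wraps to a huge
	 * offset and fails the <= test, so one comparison covers both
	 * ends of the range. */
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;

	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

int main(void)
{
	printf("%d %d %d\n",
	       is_zero_pfn(0x1000),  /* 1: first colored zero page */
	       is_zero_pfn(0x100f),  /* 1: last colored zero page  */
	       is_zero_pfn(0x1010)); /* 0: just past the region    */
	return 0;
}
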
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 06861d8ef..7ae216a39 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -19,6 +19,21 @@ struct mem_cgroup;
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags);
+int __ksm_enter(struct mm_struct *mm);
+void __ksm_exit(struct mm_struct *mm);
+
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+ return __ksm_enter(mm);
+ return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+ if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
+ __ksm_exit(mm);
+}
static inline struct stable_node *page_stable_node(struct page *page)
{
@@ -49,33 +64,6 @@ struct page *ksm_might_need_to_copy(struct page *page,
int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
-#ifdef CONFIG_KSM_LEGACY
-int __ksm_enter(struct mm_struct *mm);
-void __ksm_exit(struct mm_struct *mm);
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-{
- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
- return __ksm_enter(mm);
- return 0;
-}
-
-static inline void ksm_exit(struct mm_struct *mm)
-{
- if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
- __ksm_exit(mm);
-}
-
-#elif defined(CONFIG_UKSM)
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-{
- return 0;
-}
-
-static inline void ksm_exit(struct mm_struct *mm)
-{
-}
-#endif /* !CONFIG_UKSM */
-
#else /* !CONFIG_KSM */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
@@ -118,6 +106,4 @@ static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */
-#include <linux/uksm.h>
-
#endif /* __LINUX_KSM_H */
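
The ksm_fork()/ksm_exit() wrappers moved back under plain CONFIG_KSM above are the hooks core mm
code drives around address-space creation and teardown (the real call sites are in the fork and
mmput paths of kernel/fork.c). A simplified sketch of that call pattern, with error handling,
locking and unrelated setup omitted and the function names suffixed _sketch to mark them as
illustrative:

#include <linux/ksm.h>
#include <linux/mm_types.h>

/* Sketch only: the parent's MMF_VM_MERGEABLE bit decides whether the
 * new mm is registered with ksmd via __ksm_enter(). */
static int dup_mm_sketch(struct mm_struct *mm, struct mm_struct *oldmm)
{
	int err = ksm_fork(mm, oldmm);

	if (err)
		return err;
	/* ... copy VMAs and page tables ... */
	return 0;
}

/* Sketch only: unregister from ksmd before the mm is torn down. */
static void mmput_sketch(struct mm_struct *mm)
{
	ksm_exit(mm);
	/* ... exit_mmap(mm), mmdrop(mm) ... */
}
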
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index b1f9fe171..624b78b84 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -352,9 +352,6 @@ struct vm_area_struct {
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
-#ifdef CONFIG_UKSM
- struct vma_slot *uksm_vma_slot;
-#endif
};
struct core_thread {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 038651383..7b6c2cfee 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -157,9 +157,6 @@ enum zone_stat_item {
WORKINGSET_NODERECLAIM,
NR_ANON_TRANSPARENT_HUGEPAGES,
NR_FREE_CMA_PAGES,
-#ifdef CONFIG_UKSM
- NR_UKSM_ZERO_PAGES,
-#endif
NR_VM_ZONE_STAT_ITEMS };
/*
@@ -811,7 +808,7 @@ static inline int is_highmem_idx(enum zone_type idx)
}
/**
- * is_highmem - helper function to quickly check if a struct zone is a
+ * is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
* to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
* @zone - pointer to struct zone variable
diff --git a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h
deleted file mode 100644
index 6780fdb0a..000000000
--- a/include/linux/sradix-tree.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef _LINUX_SRADIX_TREE_H
-#define _LINUX_SRADIX_TREE_H
-
-
-#define INIT_SRADIX_TREE(root, mask) \
-do { \
- (root)->height = 0; \
- (root)->gfp_mask = (mask); \
- (root)->rnode = NULL; \
-} while (0)
-
-#define ULONG_BITS (sizeof(unsigned long) * 8)
-#define SRADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
-//#define SRADIX_TREE_MAP_SHIFT 6
-//#define SRADIX_TREE_MAP_SIZE (1UL << SRADIX_TREE_MAP_SHIFT)
-//#define SRADIX_TREE_MAP_MASK (SRADIX_TREE_MAP_SIZE-1)
-
-struct sradix_tree_node {
- unsigned int height; /* Height from the bottom */
- unsigned int count;
- unsigned int fulls; /* Number of full sublevel trees */
- struct sradix_tree_node *parent;
- void *stores[0];
-};
-
-/* A simple radix tree implementation */
-struct sradix_tree_root {
- unsigned int height;
- struct sradix_tree_node *rnode;
-
- /* Where found to have available empty stores in its sublevels */
- struct sradix_tree_node *enter_node;
- unsigned int shift;
- unsigned int stores_size;
- unsigned int mask;
- unsigned long min; /* The first hole index */
- unsigned long num;
- //unsigned long *height_to_maxindex;
-
- /* How the node is allocated and freed. */
- struct sradix_tree_node *(*alloc)(void);
- void (*free)(struct sradix_tree_node *node);
-
- /* When a new node is added and removed */
- void (*extend)(struct sradix_tree_node *parent, struct sradix_tree_node *child);
- void (*assign)(struct sradix_tree_node *node, unsigned index, void *item);
- void (*rm)(struct sradix_tree_node *node, unsigned offset);
-};
-
-struct sradix_tree_path {
- struct sradix_tree_node *node;
- int offset;
-};
-
-static inline
-void init_sradix_tree_root(struct sradix_tree_root *root, unsigned long shift)
-{
- root->height = 0;
- root->rnode = NULL;
- root->shift = shift;
- root->stores_size = 1UL << shift;
- root->mask = root->stores_size - 1;
-}
-
-
-extern void *sradix_tree_next(struct sradix_tree_root *root,
- struct sradix_tree_node *node, unsigned long index,
- int (*iter)(void *, unsigned long));
-
-extern int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num);
-
-extern void sradix_tree_delete_from_leaf(struct sradix_tree_root *root,
- struct sradix_tree_node *node, unsigned long index);
-
-extern void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index);
-
-#endif /* _LINUX_SRADIX_TREE_H */
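
For readers unfamiliar with the interface being dropped: an sradix_tree_root embeds its own node
allocation and bookkeeping callbacks, so a user had to wire those up before inserting anything.
A hypothetical setup sketch follows; the MY_SHIFT value, the my_slot type and the kzalloc-based
callbacks are illustrative assumptions (UKSM's real implementation was slab-backed), and it assumes
the ->assign() hook is how callers learned where an item landed, as the snode/sindex fields of
struct vma_slot suggest:

#include <linux/slab.h>
#include <linux/sradix-tree.h>

#define MY_SHIFT	4	/* 1 << 4 = 16 store slots per node (assumed) */

/* Each node carries a flexible array of 1 << MY_SHIFT store pointers. */
static struct sradix_tree_node *my_node_alloc(void)
{
	return kzalloc(sizeof(struct sradix_tree_node) +
		       (1UL << MY_SHIFT) * sizeof(void *), GFP_KERNEL);
}

static void my_node_free(struct sradix_tree_node *node)
{
	kfree(node);
}

/* Record where an item was stored, mirroring the snode/sindex pair
 * that struct vma_slot kept for this purpose. */
struct my_slot {
	struct sradix_tree_node *snode;
	unsigned long sindex;
};

static void my_assign(struct sradix_tree_node *node, unsigned index, void *item)
{
	struct my_slot *slot = item;

	slot->snode = node;
	slot->sindex = index;
}

/* No-op hooks so the sketch does not rely on NULL callbacks being tolerated. */
static void my_extend(struct sradix_tree_node *parent, struct sradix_tree_node *child) { }
static void my_rm(struct sradix_tree_node *node, unsigned offset) { }

static struct sradix_tree_root my_root;

static void my_tree_init(void)
{
	init_sradix_tree_root(&my_root, MY_SHIFT);
	my_root.alloc  = my_node_alloc;
	my_root.free   = my_node_free;
	my_root.assign = my_assign;
	my_root.extend = my_extend;
	my_root.rm     = my_rm;
}
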
diff --git a/include/linux/uksm.h b/include/linux/uksm.h
deleted file mode 100644
index 206f10958..000000000
--- a/include/linux/uksm.h
+++ /dev/null
@@ -1,146 +0,0 @@
-#ifndef __LINUX_UKSM_H
-#define __LINUX_UKSM_H
-/*
- * Memory merging support.
- *
- * This code enables dynamic sharing of identical pages found in different
- * memory areas, even if they are not shared by fork().
- */
-
-/* if !CONFIG_UKSM this file should not be compiled at all. */
-#ifdef CONFIG_UKSM
-
-#include <linux/bitops.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/rmap.h>
-#include <linux/sched.h>
-
-extern unsigned long zero_pfn __read_mostly;
-extern unsigned long uksm_zero_pfn __read_mostly;
-extern struct page *empty_uksm_zero_page;
-
-/* must be done before linked to mm */
-extern void uksm_vma_add_new(struct vm_area_struct *vma);
-extern void uksm_remove_vma(struct vm_area_struct *vma);
-
-#define UKSM_SLOT_NEED_SORT (1 << 0)
-#define UKSM_SLOT_NEED_RERAND (1 << 1)
-#define UKSM_SLOT_SCANNED (1 << 2) /* It's scanned in this round */
-#define UKSM_SLOT_FUL_SCANNED (1 << 3)
-#define UKSM_SLOT_IN_UKSM (1 << 4)
-
-struct vma_slot {
- struct sradix_tree_node *snode;
- unsigned long sindex;
-
- struct list_head slot_list;
- unsigned long fully_scanned_round;
- unsigned long dedup_num;
- unsigned long pages_scanned;
- unsigned long last_scanned;
- unsigned long pages_to_scan;
- struct scan_rung *rung;
- struct page **rmap_list_pool;
- unsigned int *pool_counts;
- unsigned long pool_size;
- struct vm_area_struct *vma;
- struct mm_struct *mm;
- unsigned long ctime_j;
- unsigned long pages;
- unsigned long flags;
- unsigned long pages_cowed; /* pages cowed this round */
- unsigned long pages_merged; /* pages merged this round */
- unsigned long pages_bemerged;
-
- /* when it has page merged in this eval round */
- struct list_head dedup_list;
-};
-
-static inline void uksm_unmap_zero_page(pte_t pte)
-{
- if (pte_pfn(pte) == uksm_zero_pfn)
- __dec_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
-}
-
-static inline void uksm_map_zero_page(pte_t pte)
-{
- if (pte_pfn(pte) == uksm_zero_pfn)
- __inc_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
-}
-
-static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
-{
- if (vma->uksm_vma_slot && PageKsm(page))
- vma->uksm_vma_slot->pages_cowed++;
-}
-
-static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
-{
- if (vma->uksm_vma_slot && pte_pfn(pte) == uksm_zero_pfn)
- vma->uksm_vma_slot->pages_cowed++;
-}
-
-static inline int uksm_flags_can_scan(unsigned long vm_flags)
-{
-#ifndef VM_SAO
-#define VM_SAO 0
-#endif
- return !(vm_flags & (VM_PFNMAP | VM_IO | VM_DONTEXPAND |
- VM_HUGETLB | VM_MIXEDMAP | VM_SHARED
- | VM_MAYSHARE | VM_GROWSUP | VM_GROWSDOWN | VM_SAO));
-}
-
-static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
-{
- if (uksm_flags_can_scan(*vm_flags_p))
- *vm_flags_p |= VM_MERGEABLE;
-}
-
-/*
- * Just a wrapper for BUG_ON for where ksm_zeropage must not be. TODO: it will
- * be removed when uksm zero page patch is stable enough.
- */
-static inline void uksm_bugon_zeropage(pte_t pte)
-{
- BUG_ON(pte_pfn(pte) == uksm_zero_pfn);
-}
-#else
-static inline void uksm_vma_add_new(struct vm_area_struct *vma)
-{
-}
-
-static inline void uksm_remove_vma(struct vm_area_struct *vma)
-{
-}
-
-static inline void uksm_unmap_zero_page(pte_t pte)
-{
-}
-
-static inline void uksm_map_zero_page(pte_t pte)
-{
-}
-
-static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
-{
-}
-
-static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
-{
-}
-
-static inline int uksm_flags_can_scan(unsigned long vm_flags)
-{
- return 0;
-}
-
-static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
-{
-}
-
-static inline void uksm_bugon_zeropage(pte_t pte)
-{
-}
-#endif /* !CONFIG_UKSM */
-#endif /* __LINUX_UKSM_H */
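
The deleted helpers above were UKSM's per-VMA entry points: uksm_vm_flags_mod() opted suitable
mappings into VM_MERGEABLE, and uksm_vma_add_new()/uksm_remove_vma() kept the scanner's vma_slot
bookkeeping in sync (the header's own comment notes the add must happen before the VMA is linked
into the mm). A hypothetical caller-side sketch of that pattern, roughly how map/unmap paths would
have used it while CONFIG_UKSM was still enabled; the _sketch functions are illustrative, not the
patch's actual call sites:

#include <linux/mm.h>
#include <linux/uksm.h>

static void map_region_sketch(struct vm_area_struct *vma)
{
	/* Opt the mapping into VM_MERGEABLE unless its flags rule out
	 * scanning (VM_PFNMAP, VM_IO, VM_SHARED, ...). */
	uksm_vm_flags_mod(&vma->vm_flags);

	/* Register the VMA with the scanner; per the header's comment,
	 * this must be done before the VMA is linked into the mm. */
	uksm_vma_add_new(vma);

	/* ... link the VMA into the mm ... */
}

static void unmap_region_sketch(struct vm_area_struct *vma)
{
	/* Drop the scanner's vma_slot before the VMA goes away. */
	uksm_remove_vma(vma);
	/* ... actual unmap and teardown ... */
}
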