Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--   mm/swapfile.c   67
1 file changed, 65 insertions(+), 2 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 41e4581af..3ce3f2978 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -9,6 +9,7 @@
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
@@ -43,7 +44,6 @@
static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
-static sector_t map_swap_entry(swp_entry_t, struct block_device**);

DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
@@ -722,6 +722,64 @@ swp_entry_t get_swap_page_of_type(int type)
	return (swp_entry_t) {0};
}

+static unsigned int find_next_to_unuse(struct swap_info_struct *si,
+					unsigned int prev, bool frontswap);
+
+void get_swap_range_of_type(int type, swp_entry_t *start, swp_entry_t *end,
+			    unsigned int limit)
+{
+	struct swap_info_struct *si;
+	pgoff_t start_at;
+	unsigned int i;
+
+	*start = swp_entry(0, 0);
+	*end = swp_entry(0, 0);
+	si = swap_info[type];
+	if (!si)
+		return;
+	spin_lock(&si->lock);
+	if (si->flags & SWP_WRITEOK) {
+		atomic_long_dec(&nr_swap_pages);
+		/* This is called for allocating swap entry, not cache */
+		start_at = scan_swap_map(si, 1);
+		if (start_at) {
+			unsigned long stop_at =
+				find_next_to_unuse(si, start_at, false);
+
+			if (stop_at > start_at)
+				stop_at--;
+			else
+				stop_at = si->max - 1;
+			if (stop_at - start_at + 1 > limit)
+				stop_at = min_t(unsigned int,
+						start_at + limit - 1,
+						si->max - 1);
+			/* Mark the whole range in use */
+			for (i = start_at; i <= stop_at; i++)
+				si->swap_map[i] = 1;
+			/* scan_swap_map() already counted the first page */
+			si->inuse_pages += stop_at - start_at;
+			atomic_long_sub(stop_at - start_at, &nr_swap_pages);
+			if (start_at == si->lowest_bit)
+				si->lowest_bit = stop_at + 1;
+			if (stop_at == si->highest_bit)
+				si->highest_bit = start_at - 1;
+			if (si->inuse_pages == si->pages) {
+				si->lowest_bit = si->max;
+				si->highest_bit = 0;
+			}
+			for (i = start_at + 1; i <= stop_at; i++)
+				inc_cluster_info_page(si, si->cluster_info, i);
+			si->cluster_next = stop_at + 1;
+			*start = swp_entry(type, start_at);
+			*end = swp_entry(type, stop_at);
+		} else {
+			atomic_long_inc(&nr_swap_pages);
+		}
+	}
+	spin_unlock(&si->lock);
+}
+
static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;
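A minimal caller sketch for the new range allocator (hypothetical, not part of this patch; assumes a prototype of get_swap_range_of_type() is made visible, e.g. via linux/swap.h). Failure leaves *start at swp_entry(0, 0), and offset 0 is never handed out as a slot, so checking swp_offset() is sufficient:

#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/swapops.h>

/* Hypothetical helper: grab up to 32 contiguous slots on device `type`. */
static int alloc_swap_run(int type, swp_entry_t *first, unsigned int *nr)
{
	swp_entry_t start, end;

	get_swap_range_of_type(type, &start, &end, 32);
	if (!swp_offset(start))
		return -ENOMEM;	/* device full, missing, or not writable */
	*first = start;
	*nr = swp_offset(end) - swp_offset(start) + 1;
	return 0;
}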
@@ -1576,7 +1634,7 @@ static void drain_mmlist(void)
* Note that the type of this function is sector_t, but it returns page offset
* into the bdev, not sector offset.
*/
-static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
+sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
{
	struct swap_info_struct *sis;
	struct swap_extent *start_se;
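With map_swap_entry() made non-static above, other kernel code can resolve a swap entry to a device position. A hedged sketch of such a caller (helper name is illustrative; assumes the prototype is exported in a header): per the comment in this hunk the return value is a page offset, so a real 512-byte sector number needs a shift by PAGE_SHIFT - 9:

#include <linux/blkdev.h>
#include <linux/swap.h>

/* Hypothetical helper: convert a swap entry to a sector on its bdev. */
static sector_t swap_entry_to_sector(swp_entry_t entry,
				     struct block_device **bdev)
{
	/* map_swap_entry() returns a page offset despite the sector_t type */
	return map_swap_entry(entry, bdev) << (PAGE_SHIFT - 9);
}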
@@ -2721,8 +2779,13 @@ pgoff_t __page_file_index(struct page *page)
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	return swp_offset(swap);
}
EXPORT_SYMBOL_GPL(__page_file_index);
+
+struct swap_info_struct *get_swap_info_struct(unsigned int type)
+{
+	return swap_info[type];
+}

/*
* add_swap_count_continuation - called when a swap count is duplicated
* beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
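A hypothetical consumer of get_swap_info_struct() (illustrative only; the patch itself adds no users here). The lookup returns the raw table entry, which may be NULL for an unused type and is not reference-counted, so fields should be sampled under si->lock:

#include <linux/spinlock.h>
#include <linux/swap.h>

/* Hypothetical helper: sample the in-use page count of one swap device. */
static unsigned int swap_type_inuse(unsigned int type)
{
	struct swap_info_struct *si = get_swap_info_struct(type);
	unsigned int inuse;

	if (!si)
		return 0;
	spin_lock(&si->lock);
	inuse = si->inuse_pages;
	spin_unlock(&si->lock);
	return inuse;
}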