path: root/mm/zsmalloc.c
Diffstat (limited to 'mm/zsmalloc.c')
-rw-r--r--  mm/zsmalloc.c  208
1 file changed, 97 insertions(+), 111 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index fe47fbba9..b6d4f258c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -45,6 +45,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -247,7 +249,6 @@ struct zs_pool {
struct size_class **size_class;
struct kmem_cache *handle_cachep;
- gfp_t flags; /* allocation flags used when growing pool */
atomic_long_t pages_allocated;
struct zs_pool_stats stats;
@@ -295,10 +296,10 @@ static void destroy_handle_cache(struct zs_pool *pool)
kmem_cache_destroy(pool->handle_cachep);
}
-static unsigned long alloc_handle(struct zs_pool *pool)
+static unsigned long alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
- pool->flags & ~__GFP_HIGHMEM);
+ gfp & ~__GFP_HIGHMEM);
}
static void free_handle(struct zs_pool *pool, unsigned long handle)
@@ -324,7 +325,12 @@ static void *zs_zpool_create(const char *name, gfp_t gfp,
const struct zpool_ops *zpool_ops,
struct zpool *zpool)
{
- return zs_create_pool(name, gfp);
+ /*
+ * Ignore global gfp flags: zs_malloc() may be invoked from
+ * different contexts and its caller must provide a valid
+ * gfp mask.
+ */
+ return zs_create_pool(name);
}
static void zs_zpool_destroy(void *pool)
@@ -335,7 +341,7 @@ static void zs_zpool_destroy(void *pool)
static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
unsigned long *handle)
{
- *handle = zs_malloc(pool, size);
+ *handle = zs_malloc(pool, size, gfp);
return *handle ? 0 : -1;
}
static void zs_zpool_free(void *pool, unsigned long handle)
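For illustration only (not part of this patch): with the per-call gfp argument, a zsmalloc user now picks the allocation context for each zs_malloc() call instead of relying on a pool-wide flags field. The helper below is a hypothetical sketch against the updated zs_malloc()/zs_map_object() API; the function name and error handling are illustrative.

#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zsmalloc.h>

/* Hypothetical caller: store one compressed buffer into a zsmalloc pool. */
static int store_compressed(struct zs_pool *my_pool, const void *src,
			    size_t len, unsigned long *out_handle)
{
	unsigned long handle;
	void *dst;

	/* e.g. GFP_NOIO when invoked from a reclaim/writeback path */
	handle = zs_malloc(my_pool, len, GFP_NOIO | __GFP_HIGHMEM);
	if (!handle)
		return -ENOMEM;

	dst = zs_map_object(my_pool, handle, ZS_MM_WO);
	memcpy(dst, src, len);
	zs_unmap_object(my_pool, handle);

	*out_handle = handle;
	return 0;
}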
@@ -413,26 +419,28 @@ static int is_last_page(struct page *page)
return PagePrivate2(page);
}
-static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
+static void get_zspage_mapping(struct page *first_page,
+ unsigned int *class_idx,
enum fullness_group *fullness)
{
unsigned long m;
- BUG_ON(!is_first_page(page));
+ VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
- m = (unsigned long)page->mapping;
+ m = (unsigned long)first_page->mapping;
*fullness = m & FULLNESS_MASK;
*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}
-static void set_zspage_mapping(struct page *page, unsigned int class_idx,
+static void set_zspage_mapping(struct page *first_page,
+ unsigned int class_idx,
enum fullness_group fullness)
{
unsigned long m;
- BUG_ON(!is_first_page(page));
+ VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
(fullness & FULLNESS_MASK);
- page->mapping = (struct address_space *)m;
+ first_page->mapping = (struct address_space *)m;
}
/*
@@ -477,16 +485,16 @@ static inline unsigned long zs_stat_get(struct size_class *class,
#ifdef CONFIG_ZSMALLOC_STAT
-static int __init zs_stat_init(void)
+static void __init zs_stat_init(void)
{
- if (!debugfs_initialized())
- return -ENODEV;
+ if (!debugfs_initialized()) {
+ pr_warn("debugfs not available, stat dir not created\n");
+ return;
+ }
zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
if (!zs_stat_root)
- return -ENOMEM;
-
- return 0;
+ pr_warn("debugfs 'zsmalloc' stat dir creation failed\n");
}
static void __exit zs_stat_exit(void)
@@ -567,17 +575,19 @@ static const struct file_operations zs_stat_size_ops = {
.release = single_release,
};
-static int zs_pool_stat_create(const char *name, struct zs_pool *pool)
+static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
struct dentry *entry;
- if (!zs_stat_root)
- return -ENODEV;
+ if (!zs_stat_root) {
+ pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
+ return;
+ }
entry = debugfs_create_dir(name, zs_stat_root);
if (!entry) {
pr_warn("debugfs dir <%s> creation failed\n", name);
- return -ENOMEM;
+ return;
}
pool->stat_dentry = entry;
@@ -586,10 +596,9 @@ static int zs_pool_stat_create(const char *name, struct zs_pool *pool)
if (!entry) {
pr_warn("%s: debugfs file entry <%s> creation failed\n",
name, "classes");
- return -ENOMEM;
+ debugfs_remove_recursive(pool->stat_dentry);
+ pool->stat_dentry = NULL;
}
-
- return 0;
}
static void zs_pool_stat_destroy(struct zs_pool *pool)
@@ -598,18 +607,16 @@ static void zs_pool_stat_destroy(struct zs_pool *pool)
}
#else /* CONFIG_ZSMALLOC_STAT */
-static int __init zs_stat_init(void)
+static void __init zs_stat_init(void)
{
- return 0;
}
static void __exit zs_stat_exit(void)
{
}
-static inline int zs_pool_stat_create(const char *name, struct zs_pool *pool)
+static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
- return 0;
}
static inline void zs_pool_stat_destroy(struct zs_pool *pool)
@@ -617,7 +624,6 @@ static inline void zs_pool_stat_destroy(struct zs_pool *pool)
}
#endif
-
/*
* For each size class, zspages are divided into different groups
* depending on how "full" they are. This was done so that we could
@@ -625,14 +631,15 @@ static inline void zs_pool_stat_destroy(struct zs_pool *pool)
* the pool (not yet implemented). This function returns fullness
* status of the given page.
*/
-static enum fullness_group get_fullness_group(struct page *page)
+static enum fullness_group get_fullness_group(struct page *first_page)
{
int inuse, max_objects;
enum fullness_group fg;
- BUG_ON(!is_first_page(page));
- inuse = page->inuse;
- max_objects = page->objects;
+ VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
+
+ inuse = first_page->inuse;
+ max_objects = first_page->objects;
if (inuse == 0)
fg = ZS_EMPTY;
@@ -652,12 +659,13 @@ static enum fullness_group get_fullness_group(struct page *page)
* have. This functions inserts the given zspage into the freelist
* identified by <class, fullness_group>.
*/
-static void insert_zspage(struct page *page, struct size_class *class,
- enum fullness_group fullness)
+static void insert_zspage(struct size_class *class,
+ enum fullness_group fullness,
+ struct page *first_page)
{
struct page **head;
- BUG_ON(!is_first_page(page));
+ VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
if (fullness >= _ZS_NR_FULLNESS_GROUPS)
return;
@@ -667,7 +675,7 @@ static void insert_zspage(struct page *page, struct size_class *class,
head = &class->fullness_list[fullness];
if (!*head) {
- *head = page;
+ *head = first_page;
return;
}
@@ -675,34 +683,35 @@ static void insert_zspage(struct page *page, struct size_class *class,
* We want to see more ZS_FULL pages and less almost
* empty/full. Put pages with higher ->inuse first.
*/
- list_add_tail(&page->lru, &(*head)->lru);
- if (page->inuse >= (*head)->inuse)
- *head = page;
+ list_add_tail(&first_page->lru, &(*head)->lru);
+ if (first_page->inuse >= (*head)->inuse)
+ *head = first_page;
}
/*
* This function removes the given zspage from the freelist identified
* by <class, fullness_group>.
*/
-static void remove_zspage(struct page *page, struct size_class *class,
- enum fullness_group fullness)
+static void remove_zspage(struct size_class *class,
+ enum fullness_group fullness,
+ struct page *first_page)
{
struct page **head;
- BUG_ON(!is_first_page(page));
+ VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
if (fullness >= _ZS_NR_FULLNESS_GROUPS)
return;
head = &class->fullness_list[fullness];
- BUG_ON(!*head);
+ VM_BUG_ON_PAGE(!*head, first_page);
if (list_empty(&(*head)->lru))
*head = NULL;
- else if (*head == page)
+ else if (*head == first_page)
*head = (struct page *)list_entry((*head)->lru.next,
struct page, lru);
- list_del_init(&page->lru);
+ list_del_init(&first_page->lru);
zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ?
CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
}
@@ -717,21 +726,19 @@ static void remove_zspage(struct page *page, struct size_class *class,
* fullness group.
*/
static enum fullness_group fix_fullness_group(struct size_class *class,
- struct page *page)
+ struct page *first_page)
{
int class_idx;
enum fullness_group currfg, newfg;
- BUG_ON(!is_first_page(page));
-
- get_zspage_mapping(page, &class_idx, &currfg);
- newfg = get_fullness_group(page);
+ get_zspage_mapping(first_page, &class_idx, &currfg);
+ newfg = get_fullness_group(first_page);
if (newfg == currfg)
goto out;
- remove_zspage(page, class, currfg);
- insert_zspage(page, class, newfg);
- set_zspage_mapping(page, class_idx, newfg);
+ remove_zspage(class, currfg, first_page);
+ insert_zspage(class, newfg, first_page);
+ set_zspage_mapping(first_page, class_idx, newfg);
out:
return newfg;
@@ -809,7 +816,7 @@ static void *location_to_obj(struct page *page, unsigned long obj_idx)
unsigned long obj;
if (!page) {
- BUG_ON(obj_idx);
+ VM_BUG_ON(obj_idx);
return NULL;
}
@@ -842,7 +849,7 @@ static unsigned long obj_to_head(struct size_class *class, struct page *page,
void *obj)
{
if (class->huge) {
- VM_BUG_ON(!is_first_page(page));
+ VM_BUG_ON_PAGE(!is_first_page(page), page);
return page_private(page);
} else
return *(unsigned long *)obj;
@@ -892,8 +899,8 @@ static void free_zspage(struct page *first_page)
{
struct page *nextp, *tmp, *head_extra;
- BUG_ON(!is_first_page(first_page));
- BUG_ON(first_page->inuse);
+ VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
+ VM_BUG_ON_PAGE(first_page->inuse, first_page);
head_extra = (struct page *)page_private(first_page);
@@ -914,12 +921,13 @@ static void free_zspage(struct page *first_page)
}
/* Initialize a newly allocated zspage */
-static void init_zspage(struct page *first_page, struct size_class *class)
+static void init_zspage(struct size_class *class, struct page *first_page)
{
unsigned long off = 0;
struct page *page = first_page;
- BUG_ON(!is_first_page(first_page));
+ VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
+
while (page) {
struct page *next_page;
struct link_free *link;
@@ -1001,7 +1009,7 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
prev_page = page;
}
- init_zspage(first_page, class);
+ init_zspage(class, first_page);
first_page->freelist = location_to_obj(first_page, 0);
/* Maximum number of objects we can store in this zspage */
@@ -1234,11 +1242,11 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
return true;
}
-static bool zspage_full(struct page *page)
+static bool zspage_full(struct page *first_page)
{
- BUG_ON(!is_first_page(page));
+ VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
- return page->inuse == page->objects;
+ return first_page->inuse == first_page->objects;
}
unsigned long zs_get_total_pages(struct zs_pool *pool)
@@ -1274,14 +1282,12 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
struct page *pages[2];
void *ret;
- BUG_ON(!handle);
-
/*
* Because we use per-cpu mapping areas shared among the
* pools/users, we can't allow mapping in interrupt context
* because it can corrupt another users mappings.
*/
- BUG_ON(in_interrupt());
+ WARN_ON_ONCE(in_interrupt());
/* From now on, migration cannot move the object */
pin_tag(handle);
@@ -1325,8 +1331,6 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
struct size_class *class;
struct mapping_area *area;
- BUG_ON(!handle);
-
obj = handle_to_obj(handle);
obj_to_location(obj, &page, &obj_idx);
get_zspage_mapping(get_first_page(page), &class_idx, &fg);
@@ -1350,8 +1354,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
-static unsigned long obj_malloc(struct page *first_page,
- struct size_class *class, unsigned long handle)
+static unsigned long obj_malloc(struct size_class *class,
+ struct page *first_page, unsigned long handle)
{
unsigned long obj;
struct link_free *link;
@@ -1391,7 +1395,7 @@ static unsigned long obj_malloc(struct page *first_page,
* otherwise 0.
* Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
*/
-unsigned long zs_malloc(struct zs_pool *pool, size_t size)
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
unsigned long handle, obj;
struct size_class *class;
@@ -1400,7 +1404,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
return 0;
- handle = alloc_handle(pool);
+ handle = alloc_handle(pool, gfp);
if (!handle)
return 0;
@@ -1413,7 +1417,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
if (!first_page) {
spin_unlock(&class->lock);
- first_page = alloc_zspage(class, pool->flags);
+ first_page = alloc_zspage(class, gfp);
if (unlikely(!first_page)) {
free_handle(pool, handle);
return 0;
@@ -1428,7 +1432,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
class->size, class->pages_per_zspage));
}
- obj = obj_malloc(first_page, class, handle);
+ obj = obj_malloc(class, first_page, handle);
/* Now move the zspage to another fullness group, if required */
fix_fullness_group(class, first_page);
record_obj(handle, obj);
@@ -1438,16 +1442,13 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
}
EXPORT_SYMBOL_GPL(zs_malloc);
-static void obj_free(struct zs_pool *pool, struct size_class *class,
- unsigned long obj)
+static void obj_free(struct size_class *class, unsigned long obj)
{
struct link_free *link;
struct page *first_page, *f_page;
unsigned long f_objidx, f_offset;
void *vaddr;
- BUG_ON(!obj);
-
obj &= ~OBJ_ALLOCATED_TAG;
obj_to_location(obj, &f_page, &f_objidx);
first_page = get_first_page(f_page);
@@ -1487,7 +1488,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
class = pool->size_class[class_idx];
spin_lock(&class->lock);
- obj_free(pool, class, obj);
+ obj_free(class, obj);
fullness = fix_fullness_group(class, first_page);
if (fullness == ZS_EMPTY) {
zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
@@ -1503,8 +1504,8 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
}
EXPORT_SYMBOL_GPL(zs_free);
-static void zs_object_copy(unsigned long dst, unsigned long src,
- struct size_class *class)
+static void zs_object_copy(struct size_class *class, unsigned long dst,
+ unsigned long src)
{
struct page *s_page, *d_page;
unsigned long s_objidx, d_objidx;
@@ -1547,7 +1548,6 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
kunmap_atomic(d_addr);
kunmap_atomic(s_addr);
s_page = get_next_page(s_page);
- BUG_ON(!s_page);
s_addr = kmap_atomic(s_page);
d_addr = kmap_atomic(d_page);
s_size = class->size - written;
@@ -1557,7 +1557,6 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
if (d_off >= PAGE_SIZE) {
kunmap_atomic(d_addr);
d_page = get_next_page(d_page);
- BUG_ON(!d_page);
d_addr = kmap_atomic(d_page);
d_size = class->size - written;
d_off = 0;
@@ -1572,8 +1571,8 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
* Find alloced object in zspage from index object and
* return handle.
*/
-static unsigned long find_alloced_obj(struct page *page, int index,
- struct size_class *class)
+static unsigned long find_alloced_obj(struct size_class *class,
+ struct page *page, int index)
{
unsigned long head;
int offset = 0;
@@ -1623,7 +1622,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
int ret = 0;
while (1) {
- handle = find_alloced_obj(s_page, index, class);
+ handle = find_alloced_obj(class, s_page, index);
if (!handle) {
s_page = get_next_page(s_page);
if (!s_page)
@@ -1640,8 +1639,8 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
}
used_obj = handle_to_obj(handle);
- free_obj = obj_malloc(d_page, class, handle);
- zs_object_copy(free_obj, used_obj, class);
+ free_obj = obj_malloc(class, d_page, handle);
+ zs_object_copy(class, free_obj, used_obj);
index++;
/*
* record_obj updates handle's value to free_obj and it will
@@ -1652,7 +1651,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
free_obj |= BIT(HANDLE_PIN_BIT);
record_obj(handle, free_obj);
unpin_tag(handle);
- obj_free(pool, class, used_obj);
+ obj_free(class, used_obj);
}
/* Remember last position in this iteration */
@@ -1670,7 +1669,7 @@ static struct page *isolate_target_page(struct size_class *class)
for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
page = class->fullness_list[i];
if (page) {
- remove_zspage(page, class, i);
+ remove_zspage(class, i, page);
break;
}
}
@@ -1692,10 +1691,8 @@ static enum fullness_group putback_zspage(struct zs_pool *pool,
{
enum fullness_group fullness;
- BUG_ON(!is_first_page(first_page));
-
fullness = get_fullness_group(first_page);
- insert_zspage(first_page, class, fullness);
+ insert_zspage(class, fullness, first_page);
set_zspage_mapping(first_page, class->index, fullness);
if (fullness == ZS_EMPTY) {
@@ -1720,7 +1717,7 @@ static struct page *isolate_source_page(struct size_class *class)
if (!page)
continue;
- remove_zspage(page, class, i);
+ remove_zspage(class, i, page);
break;
}
@@ -1757,8 +1754,6 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
spin_lock(&class->lock);
while ((src_page = isolate_source_page(class))) {
- BUG_ON(!is_first_page(src_page));
-
if (!zs_can_compact(class))
break;
@@ -1887,7 +1882,7 @@ static int zs_register_shrinker(struct zs_pool *pool)
* On success, a pointer to the newly created pool is returned,
* otherwise NULL.
*/
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+struct zs_pool *zs_create_pool(const char *name)
{
int i;
struct zs_pool *pool;
@@ -1957,10 +1952,8 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
prev_class = class;
}
- pool->flags = flags;
-
- if (zs_pool_stat_create(name, pool))
- goto err;
+ /* debug only, don't abort if it fails */
+ zs_pool_stat_create(pool, name);
/*
* Not critical, we still can use the pool
@@ -2022,17 +2015,10 @@ static int __init zs_init(void)
zpool_register_driver(&zs_zpool_driver);
#endif
- ret = zs_stat_init();
- if (ret) {
- pr_err("zs stat initialization failed\n");
- goto stat_fail;
- }
+ zs_stat_init();
+
return 0;
-stat_fail:
-#ifdef CONFIG_ZPOOL
- zpool_unregister_driver(&zs_zpool_driver);
-#endif
notifier_fail:
zs_unregister_cpu_notifier();