author    André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-01-20 14:01:31 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-01-20 14:01:31 -0300
commit    b4b7ff4b08e691656c9d77c758fc355833128ac0 (patch)
tree      82fcb00e6b918026dc9f2d1f05ed8eee83874cc0 /mm/slab_common.c
parent    35acfa0fc609f2a2cd95cef4a6a9c3a5c38f1778 (diff)
Linux-libre 4.4-gnupck-4.4-gnu
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--  mm/slab_common.c | 148
1 file changed, 105 insertions(+), 43 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 5ce4faeb1..3c6a86b4e 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -112,7 +112,7 @@ void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
kmem_cache_free(s, p[i]);
}
-bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
+int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
void **p)
{
size_t i;
@@ -121,10 +121,10 @@ bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
void *x = p[i] = kmem_cache_alloc(s, flags);
if (!x) {
__kmem_cache_free_bulk(s, i, p);
- return false;
+ return 0;
}
}
- return true;
+ return i;
}
#ifdef CONFIG_MEMCG_KMEM
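With the hunks above, the bulk allocator reports how many objects it allocated (0 on failure) instead of a bare true/false, and partial allocations are cleaned up internally via __kmem_cache_free_bulk(). A minimal caller sketch of the public wrapper under the new convention (fill_batch and its batch size are hypothetical, not part of this patch):

#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/slab.h>

static int fill_batch(struct kmem_cache *cache)
{
	void *objs[16];
	int allocated;

	/*
	 * 0 means the whole batch failed and any partially allocated
	 * objects were already freed; a nonzero return is the number of
	 * objects now held in objs[].
	 */
	allocated = kmem_cache_alloc_bulk(cache, GFP_KERNEL,
					  ARRAY_SIZE(objs), objs);
	if (!allocated)
		return -ENOMEM;

	/* ... use objs[0..allocated-1] ... */

	kmem_cache_free_bulk(cache, allocated, objs);
	return 0;
}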
@@ -316,10 +316,10 @@ unsigned long calculate_alignment(unsigned long flags,
return ALIGN(align, sizeof(void *));
}
-static struct kmem_cache *
-do_kmem_cache_create(const char *name, size_t object_size, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *),
- struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+static struct kmem_cache *create_cache(const char *name,
+ size_t object_size, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *),
+ struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
struct kmem_cache *s;
int err;
@@ -384,7 +384,7 @@ struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void *))
{
- struct kmem_cache *s;
+ struct kmem_cache *s = NULL;
const char *cache_name;
int err;
@@ -396,7 +396,6 @@ kmem_cache_create(const char *name, size_t size, size_t align,
err = kmem_cache_sanity_check(name, size);
if (err) {
- s = NULL; /* suppress uninit var warning */
goto out_unlock;
}
@@ -418,9 +417,9 @@ kmem_cache_create(const char *name, size_t size, size_t align,
goto out_unlock;
}
- s = do_kmem_cache_create(cache_name, size, size,
- calculate_alignment(flags, align, size),
- flags, ctor, NULL, NULL);
+ s = create_cache(cache_name, size, size,
+ calculate_alignment(flags, align, size),
+ flags, ctor, NULL, NULL);
if (IS_ERR(s)) {
err = PTR_ERR(s);
kfree_const(cache_name);
@@ -448,29 +447,20 @@ out_unlock:
}
EXPORT_SYMBOL(kmem_cache_create);
-static int do_kmem_cache_shutdown(struct kmem_cache *s,
+static int shutdown_cache(struct kmem_cache *s,
struct list_head *release, bool *need_rcu_barrier)
{
- if (__kmem_cache_shutdown(s) != 0) {
- printk(KERN_ERR "kmem_cache_destroy %s: "
- "Slab cache still has objects\n", s->name);
- dump_stack();
+ if (__kmem_cache_shutdown(s) != 0)
return -EBUSY;
- }
if (s->flags & SLAB_DESTROY_BY_RCU)
*need_rcu_barrier = true;
-#ifdef CONFIG_MEMCG_KMEM
- if (!is_root_cache(s))
- list_del(&s->memcg_params.list);
-#endif
list_move(&s->list, release);
return 0;
}
-static void do_kmem_cache_release(struct list_head *release,
- bool need_rcu_barrier)
+static void release_caches(struct list_head *release, bool need_rcu_barrier)
{
struct kmem_cache *s, *s2;
@@ -536,10 +526,10 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
if (!cache_name)
goto out_unlock;
- s = do_kmem_cache_create(cache_name, root_cache->object_size,
- root_cache->size, root_cache->align,
- root_cache->flags, root_cache->ctor,
- memcg, root_cache);
+ s = create_cache(cache_name, root_cache->object_size,
+ root_cache->size, root_cache->align,
+ root_cache->flags, root_cache->ctor,
+ memcg, root_cache);
/*
* If we could not create a memcg cache, do not complain, because
* that's not critical at all as we can always proceed with the root
@@ -598,6 +588,18 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
put_online_cpus();
}
+static int __shutdown_memcg_cache(struct kmem_cache *s,
+ struct list_head *release, bool *need_rcu_barrier)
+{
+ BUG_ON(is_root_cache(s));
+
+ if (shutdown_cache(s, release, need_rcu_barrier))
+ return -EBUSY;
+
+ list_del(&s->memcg_params.list);
+ return 0;
+}
+
void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
LIST_HEAD(release);
@@ -615,14 +617,76 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
* The cgroup is about to be freed and therefore has no charges
* left. Hence, all its caches must be empty by now.
*/
- BUG_ON(do_kmem_cache_shutdown(s, &release, &need_rcu_barrier));
+ BUG_ON(__shutdown_memcg_cache(s, &release, &need_rcu_barrier));
}
mutex_unlock(&slab_mutex);
put_online_mems();
put_online_cpus();
- do_kmem_cache_release(&release, need_rcu_barrier);
+ release_caches(&release, need_rcu_barrier);
+}
+
+static int shutdown_memcg_caches(struct kmem_cache *s,
+ struct list_head *release, bool *need_rcu_barrier)
+{
+ struct memcg_cache_array *arr;
+ struct kmem_cache *c, *c2;
+ LIST_HEAD(busy);
+ int i;
+
+ BUG_ON(!is_root_cache(s));
+
+ /*
+ * First, shutdown active caches, i.e. caches that belong to online
+ * memory cgroups.
+ */
+ arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
+ lockdep_is_held(&slab_mutex));
+ for_each_memcg_cache_index(i) {
+ c = arr->entries[i];
+ if (!c)
+ continue;
+ if (__shutdown_memcg_cache(c, release, need_rcu_barrier))
+ /*
+ * The cache still has objects. Move it to a temporary
+ * list so as not to try to destroy it for a second
+ * time while iterating over inactive caches below.
+ */
+ list_move(&c->memcg_params.list, &busy);
+ else
+ /*
+ * The cache is empty and will be destroyed soon. Clear
+ * the pointer to it in the memcg_caches array so that
+ * it will never be accessed even if the root cache
+ * stays alive.
+ */
+ arr->entries[i] = NULL;
+ }
+
+ /*
+ * Second, shutdown all caches left from memory cgroups that are now
+ * offline.
+ */
+ list_for_each_entry_safe(c, c2, &s->memcg_params.list,
+ memcg_params.list)
+ __shutdown_memcg_cache(c, release, need_rcu_barrier);
+
+ list_splice(&busy, &s->memcg_params.list);
+
+ /*
+ * A cache being destroyed must be empty. In particular, this means
+ * that all per memcg caches attached to it must be empty too.
+ */
+ if (!list_empty(&s->memcg_params.list))
+ return -EBUSY;
+ return 0;
+}
+#else
+static inline int shutdown_memcg_caches(struct kmem_cache *s,
+ struct list_head *release, bool *need_rcu_barrier)
+{
+ return 0;
}
#endif /* CONFIG_MEMCG_KMEM */
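The shutdown_memcg_caches() hunk above leans on a move-aside/splice-back idiom from <linux/list.h>: busy per-memcg caches are parked on a temporary list so the second pass over memcg_params.list does not visit them twice, then spliced back so the caller can detect leftovers. A self-contained sketch of the same idiom (struct item and drain_idle_items are hypothetical):

#include <linux/list.h>
#include <linux/types.h>

struct item {			/* hypothetical element type */
	struct list_head node;
	bool busy;
};

static void drain_idle_items(struct list_head *all)
{
	struct item *it, *tmp;
	LIST_HEAD(busy);

	list_for_each_entry_safe(it, tmp, all, node) {
		if (it->busy)
			list_move(&it->node, &busy);	/* set aside */
		else
			list_del(&it->node);		/* consumed */
	}

	/*
	 * Splice the busy entries back; a non-empty 'all' tells the
	 * caller something was left over, as -EBUSY does above.
	 */
	list_splice(&busy, all);
}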
@@ -635,16 +699,13 @@ void slab_kmem_cache_release(struct kmem_cache *s)
void kmem_cache_destroy(struct kmem_cache *s)
{
- struct kmem_cache *c, *c2;
LIST_HEAD(release);
bool need_rcu_barrier = false;
- bool busy = false;
+ int err;
if (unlikely(!s))
return;
- BUG_ON(!is_root_cache(s));
-
get_online_cpus();
get_online_mems();
@@ -654,21 +715,22 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (s->refcount)
goto out_unlock;
- for_each_memcg_cache_safe(c, c2, s) {
- if (do_kmem_cache_shutdown(c, &release, &need_rcu_barrier))
- busy = true;
- }
-
- if (!busy)
- do_kmem_cache_shutdown(s, &release, &need_rcu_barrier);
+ err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
+ if (!err)
+ err = shutdown_cache(s, &release, &need_rcu_barrier);
+ if (err) {
+ pr_err("kmem_cache_destroy %s: "
+ "Slab cache still has objects\n", s->name);
+ dump_stack();
+ }
out_unlock:
mutex_unlock(&slab_mutex);
put_online_mems();
put_online_cpus();
- do_kmem_cache_release(&release, need_rcu_barrier);
+ release_caches(&release, need_rcu_barrier);
}
EXPORT_SYMBOL(kmem_cache_destroy);
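The caller-visible contract of kmem_cache_destroy() is unchanged: the cache must be empty, and after this patch the "still has objects" report is issued from kmem_cache_destroy() itself rather than from shutdown_cache(). A minimal module-style usage sketch (the foo names are hypothetical):

#include <linux/module.h>
#include <linux/slab.h>

struct foo {			/* hypothetical object type */
	int value;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	/*
	 * Every object must have been freed by now; if not, this call
	 * prints "kmem_cache_destroy foo_cache: Slab cache still has
	 * objects" and dumps a stack trace, per the hunk above.
	 */
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");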
@@ -692,7 +754,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
}
EXPORT_SYMBOL(kmem_cache_shrink);
-int slab_is_available(void)
+bool slab_is_available(void)
{
return slab_state >= UP;
}