Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 56
1 file changed, 28 insertions, 28 deletions
@@ -338,11 +338,13 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
  */
 static __always_inline void slab_lock(struct page *page)
 {
+	VM_BUG_ON_PAGE(PageTail(page), page);
 	bit_spin_lock(PG_locked, &page->flags);
 }
 
 static __always_inline void slab_unlock(struct page *page)
 {
+	VM_BUG_ON_PAGE(PageTail(page), page);
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
@@ -1377,7 +1379,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 	struct page *page;
 	int order = oo_order(oo);
 
-	flags |= (__GFP_NOTRACK | __GFP_TOI_NOTRACK);
+	flags |= __GFP_NOTRACK;
 
 	if (node == NUMA_NO_NODE)
 		page = alloc_pages(flags, order);
@@ -1590,18 +1592,12 @@ static inline void add_partial(struct kmem_cache_node *n,
 	__add_partial(n, page, tail);
 }
 
-static inline void
-__remove_partial(struct kmem_cache_node *n, struct page *page)
-{
-	list_del(&page->lru);
-	n->nr_partial--;
-}
-
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	__remove_partial(n, page);
+	list_del(&page->lru);
+	n->nr_partial--;
 }
 
 /*
@@ -3182,6 +3178,12 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 	}
 }
 
+void __kmem_cache_release(struct kmem_cache *s)
+{
+	free_percpu(s->cpu_slab);
+	free_kmem_cache_nodes(s);
+}
+
 static int init_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
@@ -3441,28 +3443,31 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 
 /*
  * Attempt to free all partial slabs on a node.
- * This is called from kmem_cache_close(). We must be the last thread
- * using the cache and therefore we do not need to lock anymore.
+ * This is called from __kmem_cache_shutdown(). We must take list_lock
+ * because sysfs files might still access the partial list after shutdown.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	struct page *page, *h;
 
+	BUG_ON(irqs_disabled());
+	spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			__remove_partial(n, page);
+			remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-			"Objects remaining in %s on kmem_cache_close()");
+			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
+	spin_unlock_irq(&n->list_lock);
 }
 
 /*
  * Release all resources used by a slab cache.
  */
-static inline int kmem_cache_close(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
 	int node;
 	struct kmem_cache_node *n;
@@ -3474,16 +3479,9 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
-	free_percpu(s->cpu_slab);
-	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-int __kmem_cache_shutdown(struct kmem_cache *s)
-{
-	return kmem_cache_close(s);
-}
-
 /********************************************************************
  *		Kmalloc subsystem
  *******************************************************************/
@@ -3545,7 +3543,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	struct page *page;
 	void *ptr = NULL;
 
-	flags |= __GFP_COMP | __GFP_NOTRACK | __GFP_TOI_NOTRACK;
+	flags |= __GFP_COMP | __GFP_NOTRACK;
 	page = alloc_kmem_pages_node(node, flags, get_order(size));
 	if (page)
 		ptr = page_address(page);
@@ -3978,7 +3976,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 	memcg_propagate_slab_attrs(s);
 	err = sysfs_slab_add(s);
 	if (err)
-		kmem_cache_close(s);
+		__kmem_cache_release(s);
 
 	return err;
 }
@@ -5205,7 +5203,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 		return -EIO;
 
 	err = attribute->store(s, buf, len);
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
 		struct kmem_cache *c;
 
@@ -5240,7 +5238,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 
 static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 {
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	int i;
 	char *buffer = NULL;
 	struct kmem_cache *root_cache;
@@ -5326,7 +5324,7 @@ static struct kset *slab_kset;
 
 static inline struct kset *cache_kset(struct kmem_cache *s)
 {
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	if (!is_root_cache(s))
 		return s->memcg_params.root_cache->memcg_kset;
 #endif
@@ -5362,6 +5360,8 @@ static char *create_unique_id(struct kmem_cache *s)
 		*p++ = 'F';
 	if (!(s->flags & SLAB_NOTRACK))
 		*p++ = 't';
+	if (s->flags & SLAB_ACCOUNT)
+		*p++ = 'A';
 	if (p != name + 1)
 		*p++ = '-';
 	p += sprintf(p, "%07d", s->size);
@@ -5401,7 +5401,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	if (err)
 		goto out_del_kobj;
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	if (is_root_cache(s)) {
 		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
 		if (!s->memcg_kset) {
@@ -5434,7 +5434,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
 		 */
 		return;
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
 	kset_unregister(s->memcg_kset);
 #endif
 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
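
A note on the slab_lock() hunk: SLUB keeps the per-slab lock in the PG_locked bit of page->flags, and only a head page's flags word may carry it, so the new VM_BUG_ON_PAGE(PageTail(page), page) assertions catch any attempt to lock a tail page. The following is a minimal userspace sketch of such a bit spinlock, assuming GCC/Clang __atomic builtins; it models the idea only and is not the kernel's bit_spin_lock() implementation.

#include <sched.h>

#define PG_LOCKED_MASK (1UL << 0)	/* stand-in for PG_locked */

/* Acquire: atomically set the bit; spin while another holder owns it. */
void bit_lock_sketch(unsigned long *flags)
{
	while (__atomic_fetch_or(flags, PG_LOCKED_MASK,
				 __ATOMIC_ACQUIRE) & PG_LOCKED_MASK)
		sched_yield();	/* the kernel busy-waits instead */
}

/* Release: clear the bit with release ordering so the critical
 * section's stores are visible before the next acquirer proceeds. */
void bit_unlock_sketch(unsigned long *flags)
{
	__atomic_fetch_and(flags, ~PG_LOCKED_MASK, __ATOMIC_RELEASE);
}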
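
The split of kmem_cache_close() into __kmem_cache_shutdown() and __kmem_cache_release() lets the common slab code destroy the cache's sysfs file before the per-cpu and per-node structures are freed, which is what the new list_lock protection in free_partial() guards against. Below is a hedged sketch of the teardown ordering the split enables; cache_destroy_sketch() is an invented name, the real call chain lives in mm/slab_common.c, and the release step is deferred further for RCU-destroyed caches.

/* Kernel-side sketch, not compilable standalone; the helpers are
 * declared in mm/slab.h. */
void cache_destroy_sketch(struct kmem_cache *s)
{
	if (__kmem_cache_shutdown(s))
		return;			/* objects still allocated: abort */

	sysfs_slab_remove(s);		/* no sysfs readers past this point */
	__kmem_cache_release(s);	/* now freeing cpu_slab and the
					 * kmem_cache_node array is safe */
}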
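
free_partial() may discard slabs while walking n->partial only because list_for_each_entry_safe() keeps a second cursor (h here) pointing at the successor before the loop body runs. A self-contained userspace model of the same idiom, using a singly linked stand-in type rather than the kernel's struct list_head (allocation error handling elided):

#include <stdlib.h>

struct slab_node {
	struct slab_node *next;
	int inuse;
};

/* Free every unused node; the successor is saved before the body may
 * free the current node, mirroring list_for_each_entry_safe(). */
void free_unused(struct slab_node **head)
{
	struct slab_node **link = head;
	struct slab_node *page, *h;

	for (page = *head; page; page = h) {
		h = page->next;			/* successor saved first */
		if (!page->inuse) {
			*link = h;		/* unlink, then free safely */
			free(page);
		} else {
			link = &page->next;
		}
	}
}

int main(void)
{
	struct slab_node *c = malloc(sizeof(*c));
	struct slab_node *b = malloc(sizeof(*b));
	struct slab_node *a = malloc(sizeof(*a));

	*a = (struct slab_node){ .next = b, .inuse = 1 };
	*b = (struct slab_node){ .next = c, .inuse = 0 };
	*c = (struct slab_node){ .next = NULL, .inuse = 1 };

	free_unused(&a);	/* b is freed; a -> c remains */
	return 0;
}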
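
The create_unique_id() hunk appends 'A' to the merge-alias id of SLAB_ACCOUNT caches so that accounted and unaccounted caches of the same geometry are not merged under one alias. A small userspace rehearsal of the encoding; the flag values below are stand-ins, not the kernel's bit assignments:

#include <stdio.h>

#define SLAB_NOTRACK 0x1UL
#define SLAB_ACCOUNT 0x2UL

int main(void)
{
	unsigned long flags = SLAB_ACCOUNT;	/* accounted, tracked cache */
	char name[32], *p = name;

	*p++ = ':';
	if (!(flags & SLAB_NOTRACK))
		*p++ = 't';
	if (flags & SLAB_ACCOUNT)
		*p++ = 'A';		/* the suffix the hunk introduces */
	if (p != name + 1)
		*p++ = '-';
	sprintf(p, "%07d", 64);		/* object size */
	printf("%s\n", name);		/* prints ":tA-0000064" */
	return 0;
}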