author	Linus Torvalds <torvalds@linux-foundation.org>	2016-02-19 16:36:00 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-02-19 16:36:00 -0500
commit	87d9ac712b727705dc07e05e851e84387397d696 (patch)
tree	fabd1de9442136eadf6dda41f184f5000272534d
parent	23300f657594656e7ebac3130b43460ebc4381cc (diff)
parent	52b4b950b50740bff507a62907e86710743c22e7 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: slab: free kmem_cache_node after destroy sysfs file
  ipc/shm: handle removed segments gracefully in shm_mmap()
  MAINTAINERS: update Kselftest Framework mailing list
  devm_memremap_release(): fix memremap'd addr handling
  mm/hugetlb.c: fix incorrect proc nr_hugepages value
  mm, x86: fix pte_page() crash in gup_pte_range()
  fsnotify: turn fsnotify reaper thread into a workqueue job
  Revert "fsnotify: destroy marks with call_srcu instead of dedicated thread"
  mm: fix regression in remap_file_pages() emulation
  thp, dax: do not try to withdraw pgtable from non-anon VMA
-rw-r--r--	MAINTAINERS	2
-rw-r--r--	arch/x86/mm/gup.c	2
-rw-r--r--	fs/notify/mark.c	53
-rw-r--r--	include/linux/fsnotify_backend.h	5
-rw-r--r--	ipc/shm.c	53
-rw-r--r--	kernel/memremap.c	2
-rw-r--r--	mm/huge_memory.c	3
-rw-r--r--	mm/hugetlb.c	6
-rw-r--r--	mm/mmap.c	34
-rw-r--r--	mm/slab.c	12
-rw-r--r--	mm/slab.h	1
-rw-r--r--	mm/slab_common.c	1
-rw-r--r--	mm/slob.c	4
-rw-r--r--	mm/slub.c	38
14 files changed, 150 insertions, 66 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 28eb61bbecf4..4978dc19a4d2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6128,7 +6128,7 @@ F: include/uapi/linux/sunrpc/
 
 KERNEL SELFTEST FRAMEWORK
 M:	Shuah Khan <shuahkh@osg.samsung.com>
-L:	linux-api@vger.kernel.org
+L:	linux-kselftest@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/shuah/linux-kselftest
 S:	Maintained
 F:	tools/testing/selftests
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 6d5eb5900372..d8a798d8bf50 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -102,7 +102,6 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 			return 0;
 		}
 
-		page = pte_page(pte);
 		if (pte_devmap(pte)) {
 			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
 			if (unlikely(!pgmap)) {
@@ -115,6 +114,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 			return 0;
 		}
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+		page = pte_page(pte);
 		get_page(page);
 		put_dev_pagemap(pgmap);
 		SetPageReferenced(page);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index cfcbf114676e..7115c5d7d373 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -91,7 +91,14 @@
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
 
+#define FSNOTIFY_REAPER_DELAY	(1)	/* 1 jiffy */
+
 struct srcu_struct fsnotify_mark_srcu;
+static DEFINE_SPINLOCK(destroy_lock);
+static LIST_HEAD(destroy_list);
+
+static void fsnotify_mark_destroy(struct work_struct *work);
+static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy);
 
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
@@ -165,19 +172,10 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
 	atomic_dec(&group->num_marks);
 }
 
-static void
-fsnotify_mark_free_rcu(struct rcu_head *rcu)
-{
-	struct fsnotify_mark *mark;
-
-	mark = container_of(rcu, struct fsnotify_mark, g_rcu);
-	fsnotify_put_mark(mark);
-}
-
 /*
- * Free fsnotify mark. The freeing is actually happening from a call_srcu
- * callback. Caller must have a reference to the mark or be protected by
- * fsnotify_mark_srcu.
+ * Free fsnotify mark. The freeing is actually happening from a kthread which
+ * first waits for srcu period end. Caller must have a reference to the mark
+ * or be protected by fsnotify_mark_srcu.
  */
 void fsnotify_free_mark(struct fsnotify_mark *mark)
 {
@@ -192,7 +190,11 @@ void fsnotify_free_mark(struct fsnotify_mark *mark)
 	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
 	spin_unlock(&mark->lock);
 
-	call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu);
+	spin_lock(&destroy_lock);
+	list_add(&mark->g_list, &destroy_list);
+	spin_unlock(&destroy_lock);
+	queue_delayed_work(system_unbound_wq, &reaper_work,
+			   FSNOTIFY_REAPER_DELAY);
 
 	/*
 	 * Some groups like to know that marks are being freed. This is a
@@ -388,7 +390,12 @@ err:
 
 	spin_unlock(&mark->lock);
 
-	call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu);
+	spin_lock(&destroy_lock);
+	list_add(&mark->g_list, &destroy_list);
+	spin_unlock(&destroy_lock);
+	queue_delayed_work(system_unbound_wq, &reaper_work,
+			   FSNOTIFY_REAPER_DELAY);
+
 	return ret;
 }
 
@@ -491,3 +498,21 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
 	atomic_set(&mark->refcnt, 1);
 	mark->free_mark = free_mark;
 }
+
+static void fsnotify_mark_destroy(struct work_struct *work)
+{
+	struct fsnotify_mark *mark, *next;
+	struct list_head private_destroy_list;
+
+	spin_lock(&destroy_lock);
+	/* exchange the list head */
+	list_replace_init(&destroy_list, &private_destroy_list);
+	spin_unlock(&destroy_lock);
+
+	synchronize_srcu(&fsnotify_mark_srcu);
+
+	list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
+		list_del_init(&mark->g_list);
+		fsnotify_put_mark(mark);
+	}
+}
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 6b7e89f45aa4..533c4408529a 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -220,10 +220,7 @@ struct fsnotify_mark {
 	/* List of marks by group->i_fsnotify_marks. Also reused for queueing
 	 * mark into destroy_list when it's waiting for the end of SRCU period
 	 * before it can be freed. [group->mark_mutex] */
-	union {
-		struct list_head g_list;
-		struct rcu_head g_rcu;
-	};
+	struct list_head g_list;
 	/* Protects inode / mnt pointers, flags, masks */
 	spinlock_t lock;
 	/* List of marks for inode / vfsmount [obj_lock] */
diff --git a/ipc/shm.c b/ipc/shm.c
index ed3027d0f277..331fc1b0b3c7 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -156,11 +156,12 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
 
 	/*
-	 * We raced in the idr lookup or with shm_destroy(). Either way, the
-	 * ID is busted.
+	 * Callers of shm_lock() must validate the status of the returned ipc
+	 * object pointer (as returned by ipc_lock()), and error out as
+	 * appropriate.
 	 */
-	WARN_ON(IS_ERR(ipcp));
-
+	if (IS_ERR(ipcp))
+		return (void *)ipcp;
 	return container_of(ipcp, struct shmid_kernel, shm_perm);
 }
 
@@ -186,18 +187,33 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 }
 
 
-/* This is called by fork, once for every shm attach. */
-static void shm_open(struct vm_area_struct *vma)
+static int __shm_open(struct vm_area_struct *vma)
 {
 	struct file *file = vma->vm_file;
 	struct shm_file_data *sfd = shm_file_data(file);
 	struct shmid_kernel *shp;
 
 	shp = shm_lock(sfd->ns, sfd->id);
+
+	if (IS_ERR(shp))
+		return PTR_ERR(shp);
+
 	shp->shm_atim = get_seconds();
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_nattch++;
 	shm_unlock(shp);
+	return 0;
+}
+
+/* This is called by fork, once for every shm attach. */
+static void shm_open(struct vm_area_struct *vma)
+{
+	int err = __shm_open(vma);
+	/*
+	 * We raced in the idr lookup or with shm_destroy().
+	 * Either way, the ID is busted.
+	 */
+	WARN_ON_ONCE(err);
 }
 
 /*
@@ -260,6 +276,14 @@ static void shm_close(struct vm_area_struct *vma)
 	down_write(&shm_ids(ns).rwsem);
 	/* remove from the list of attaches of the shm segment */
 	shp = shm_lock(ns, sfd->id);
+
+	/*
+	 * We raced in the idr lookup or with shm_destroy().
+	 * Either way, the ID is busted.
+	 */
+	if (WARN_ON_ONCE(IS_ERR(shp)))
+		goto done; /* no-op */
+
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_dtim = get_seconds();
 	shp->shm_nattch--;
@@ -267,6 +291,7 @@ static void shm_close(struct vm_area_struct *vma)
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
+done:
 	up_write(&shm_ids(ns).rwsem);
 }
 
@@ -388,17 +413,25 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 	struct shm_file_data *sfd = shm_file_data(file);
 	int ret;
 
+	/*
+	 * In case of remap_file_pages() emulation, the file can represent
+	 * removed IPC ID: propogate shm_lock() error to caller.
+	 */
+	ret = __shm_open(vma);
+	if (ret)
+		return ret;
+
 	ret = sfd->file->f_op->mmap(sfd->file, vma);
-	if (ret != 0)
+	if (ret) {
+		shm_close(vma);
 		return ret;
+	}
 	sfd->vm_ops = vma->vm_ops;
 #ifdef CONFIG_MMU
 	WARN_ON(!sfd->vm_ops->fault);
 #endif
 	vma->vm_ops = &shm_vm_ops;
-	shm_open(vma);
-
-	return ret;
+	return 0;
 }
 
 static int shm_release(struct inode *ino, struct file *file)
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 2c468dea60bc..7a1b5c3ef14e 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(memunmap);
 
 static void devm_memremap_release(struct device *dev, void *res)
 {
-	memunmap(res);
+	memunmap(*(void **)res);
 }
 
 static int devm_memremap_match(struct device *dev, void *res, void *match_data)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 08fc0ba2207e..722546dcfb7e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1700,7 +1700,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
 		VM_BUG_ON(!pmd_none(*new_pmd));
 
-		if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
+		if (pmd_move_must_withdraw(new_ptl, old_ptl) &&
+				vma_is_anonymous(vma)) {
 			pgtable_t pgtable;
 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06ae13e869d0..01f2b48c8618 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2630,8 +2630,10 @@ static int __init hugetlb_init(void)
 		hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
 	}
 	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
-	if (default_hstate_max_huge_pages)
-		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+	if (default_hstate_max_huge_pages) {
+		if (!default_hstate.max_huge_pages)
+			default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+	}
 
 	hugetlb_init_hstates();
 	gather_bootmem_prealloc();
diff --git a/mm/mmap.c b/mm/mmap.c
index 2f2415a7a688..76d1ec29149b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2664,12 +2664,29 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	if (!vma || !(vma->vm_flags & VM_SHARED))
 		goto out;
 
-	if (start < vma->vm_start || start + size > vma->vm_end)
+	if (start < vma->vm_start)
 		goto out;
 
-	if (pgoff == linear_page_index(vma, start)) {
-		ret = 0;
-		goto out;
+	if (start + size > vma->vm_end) {
+		struct vm_area_struct *next;
+
+		for (next = vma->vm_next; next; next = next->vm_next) {
+			/* hole between vmas ? */
+			if (next->vm_start != next->vm_prev->vm_end)
+				goto out;
+
+			if (next->vm_file != vma->vm_file)
+				goto out;
+
+			if (next->vm_flags != vma->vm_flags)
+				goto out;
+
+			if (start + size <= next->vm_end)
+				break;
+		}
+
+		if (!next)
+			goto out;
 	}
 
 	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
@@ -2679,9 +2696,16 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	flags &= MAP_NONBLOCK;
 	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
 	if (vma->vm_flags & VM_LOCKED) {
+		struct vm_area_struct *tmp;
 		flags |= MAP_LOCKED;
+
 		/* drop PG_Mlocked flag for over-mapped range */
-		munlock_vma_pages_range(vma, start, start + size);
+		for (tmp = vma; tmp->vm_start >= start + size;
+				tmp = tmp->vm_next) {
+			munlock_vma_pages_range(tmp,
+					max(tmp->vm_start, start),
+					min(tmp->vm_end, start + size));
+		}
 	}
 
 	file = get_file(vma->vm_file);
diff --git a/mm/slab.c b/mm/slab.c
index 6ecc697a8bc4..621fbcb35a36 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2275,7 +2275,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 
 	err = setup_cpu_cache(cachep, gfp);
 	if (err) {
-		__kmem_cache_shutdown(cachep);
+		__kmem_cache_release(cachep);
 		return err;
 	}
 
@@ -2414,12 +2414,13 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
+	return __kmem_cache_shrink(cachep, false);
+}
+
+void __kmem_cache_release(struct kmem_cache *cachep)
+{
 	int i;
 	struct kmem_cache_node *n;
-	int rc = __kmem_cache_shrink(cachep, false);
-
-	if (rc)
-		return rc;
 
 	free_percpu(cachep->cpu_cache);
 
@@ -2430,7 +2431,6 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
 		kfree(n);
 		cachep->node[i] = NULL;
 	}
-	return 0;
 }
 
 /*
diff --git a/mm/slab.h b/mm/slab.h
index 834ad240c0bb..2eedacea439d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -140,6 +140,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
 
 int __kmem_cache_shutdown(struct kmem_cache *);
+void __kmem_cache_release(struct kmem_cache *);
 int __kmem_cache_shrink(struct kmem_cache *, bool);
 void slab_kmem_cache_release(struct kmem_cache *);
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b50aef01ccf7..065b7bdabdc3 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -693,6 +693,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s,
 
 void slab_kmem_cache_release(struct kmem_cache *s)
 {
+	__kmem_cache_release(s);
 	destroy_memcg_params(s);
 	kfree_const(s->name);
 	kmem_cache_free(kmem_cache, s);
diff --git a/mm/slob.c b/mm/slob.c
index 17e8f8cc7c53..5ec158054ffe 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -630,6 +630,10 @@ int __kmem_cache_shutdown(struct kmem_cache *c)
 	return 0;
 }
 
+void __kmem_cache_release(struct kmem_cache *c)
+{
+}
+
 int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
 {
 	return 0;
diff --git a/mm/slub.c b/mm/slub.c
index 2e1355ac056b..d8fbd4a6ed59 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1592,18 +1592,12 @@ static inline void add_partial(struct kmem_cache_node *n,
 	__add_partial(n, page, tail);
 }
 
-static inline void
-__remove_partial(struct kmem_cache_node *n, struct page *page)
-{
-	list_del(&page->lru);
-	n->nr_partial--;
-}
-
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	__remove_partial(n, page);
+	list_del(&page->lru);
+	n->nr_partial--;
 }
 
 /*
@@ -3184,6 +3178,12 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 	}
 }
 
+void __kmem_cache_release(struct kmem_cache *s)
+{
+	free_percpu(s->cpu_slab);
+	free_kmem_cache_nodes(s);
+}
+
 static int init_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
@@ -3443,28 +3443,31 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 
 /*
  * Attempt to free all partial slabs on a node.
- * This is called from kmem_cache_close(). We must be the last thread
- * using the cache and therefore we do not need to lock anymore.
+ * This is called from __kmem_cache_shutdown(). We must take list_lock
+ * because sysfs file might still access partial list after the shutdowning.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	struct page *page, *h;
 
+	BUG_ON(irqs_disabled());
+	spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			__remove_partial(n, page);
+			remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-			"Objects remaining in %s on kmem_cache_close()");
+			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
+	spin_unlock_irq(&n->list_lock);
 }
 
 /*
  * Release all resources used by a slab cache.
  */
-static inline int kmem_cache_close(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
 	int node;
 	struct kmem_cache_node *n;
@@ -3476,16 +3479,9 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
-	free_percpu(s->cpu_slab);
-	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-int __kmem_cache_shutdown(struct kmem_cache *s)
-{
-	return kmem_cache_close(s);
-}
-
 /********************************************************************
  *			Kmalloc subsystem
  *******************************************************************/
@@ -3980,7 +3976,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 	memcg_propagate_slab_attrs(s);
 	err = sysfs_slab_add(s);
 	if (err)
-		kmem_cache_close(s);
+		__kmem_cache_release(s);
 
 	return err;
 }