diff options
author | Ingo Molnar <mingo@kernel.org> | 2017-04-23 05:12:44 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2017-04-23 05:12:44 -0400 |
commit | 58d30c36d472b75e8e9962d6a640be19d9389128 (patch) | |
tree | ce161b15e844d081f527f02a4f74ffd1171b2b14 /mm/rmap.c | |
parent | 94836ecf1e7378b64d37624fbb81fe48fbd4c772 (diff) | |
parent | f2094107ac82bf867184efd77cee30b6a98e2e20 (diff) |
Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU updates from Paul E. McKenney:
- Documentation updates.
- Miscellaneous fixes.
- Parallelize SRCU callback handling (plus overlapping patches).
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 4
1 file changed, 2 insertions, 2 deletions
@@ -430,7 +430,7 @@ static void anon_vma_ctor(void *data)
430 | void __init anon_vma_init(void) | 430 | void __init anon_vma_init(void) |
431 | { | 431 | { |
432 | anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), | 432 | anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), |
433 | 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, | 433 | 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, |
434 | anon_vma_ctor); | 434 | anon_vma_ctor); |
435 | anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, | 435 | anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, |
436 | SLAB_PANIC|SLAB_ACCOUNT); | 436 | SLAB_PANIC|SLAB_ACCOUNT); |
@@ -481,7 +481,7 @@ struct anon_vma *page_get_anon_vma(struct page *page)
481 | * If this page is still mapped, then its anon_vma cannot have been | 481 | * If this page is still mapped, then its anon_vma cannot have been |
482 | * freed. But if it has been unmapped, we have no security against the | 482 | * freed. But if it has been unmapped, we have no security against the |
483 | * anon_vma structure being freed and reused (for another anon_vma: | 483 | * anon_vma structure being freed and reused (for another anon_vma: |
484 | * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero() | 484 | * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() |
485 | * above cannot corrupt). | 485 | * above cannot corrupt). |
486 | */ | 486 | */ |
487 | if (!page_mapped(page)) { | 487 | if (!page_mapped(page)) { |