-rw-r--r--	Documentation/ABI/testing/sysfs-kernel-slab	109
-rw-r--r--	mm/slab.c	118
-rw-r--r--	mm/slub.c	20
3 files changed, 145 insertions, 102 deletions

diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab
index 6dcf75e594fb..8b093f8222d3 100644
--- a/Documentation/ABI/testing/sysfs-kernel-slab
+++ b/Documentation/ABI/testing/sysfs-kernel-slab
@@ -45,8 +45,9 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The alloc_fastpath file is read-only and specifies how many
-		objects have been allocated using the fast path.
+		The alloc_fastpath file shows how many objects have been
+		allocated using the fast path. It can be written to clear the
+		current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/alloc_from_partial
@@ -55,9 +56,10 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The alloc_from_partial file is read-only and specifies how
-		many times a cpu slab has been full and it has been refilled
-		by using a slab from the list of partially used slabs.
+		The alloc_from_partial file shows how many times a cpu slab has
+		been full and it has been refilled by using a slab from the list
+		of partially used slabs. It can be written to clear the current
+		count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/alloc_refill
@@ -66,9 +68,9 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The alloc_refill file is read-only and specifies how many
-		times the per-cpu freelist was empty but there were objects
-		available as the result of remote cpu frees.
+		The alloc_refill file shows how many times the per-cpu freelist
+		was empty but there were objects available as the result of
+		remote cpu frees. It can be written to clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/alloc_slab
@@ -77,8 +79,9 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The alloc_slab file is read-only and specifies how many times
-		a new slab had to be allocated from the page allocator.
+		The alloc_slab file shows how many times a new slab had to
+		be allocated from the page allocator. It can be written to
+		clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/alloc_slowpath
@@ -87,9 +90,10 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The alloc_slowpath file is read-only and specifies how many
-		objects have been allocated using the slow path because of a
-		refill or allocation from a partial or new slab.
+		The alloc_slowpath file shows how many objects have been
+		allocated using the slow path because of a refill or
+		allocation from a partial or new slab. It can be written to
+		clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/cache_dma
@@ -117,10 +121,11 @@ KernelVersion: 2.6.31
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The file cpuslab_flush is read-only and specifies how many
-		times a cache's cpu slabs have been flushed as the result of
-		destroying or shrinking a cache, a cpu going offline, or as
-		the result of forcing an allocation from a certain node.
+		The file cpuslab_flush shows how many times a cache's cpu slabs
+		have been flushed as the result of destroying or shrinking a
+		cache, a cpu going offline, or as the result of forcing an
+		allocation from a certain node. It can be written to clear the
+		current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/ctor
@@ -139,8 +144,8 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The file deactivate_empty is read-only and specifies how many
-		times an empty cpu slab was deactivated.
+		The deactivate_empty file shows how many times an empty cpu slab
+		was deactivated. It can be written to clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/deactivate_full
@@ -149,8 +154,8 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The file deactivate_full is read-only and specifies how many
-		times a full cpu slab was deactivated.
+		The deactivate_full file shows how many times a full cpu slab
+		was deactivated. It can be written to clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/deactivate_remote_frees
@@ -159,9 +164,9 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The file deactivate_remote_frees is read-only and specifies how
-		many times a cpu slab has been deactivated and contained free
-		objects that were freed remotely.
+		The deactivate_remote_frees file shows how many times a cpu slab
+		has been deactivated and contained free objects that were freed
+		remotely. It can be written to clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/deactivate_to_head
@@ -170,9 +175,9 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The file deactivate_to_head is read-only and specifies how
-		many times a partial cpu slab was deactivated and added to the
-		head of its node's partial list.
+		The deactivate_to_head file shows how many times a partial cpu
+		slab was deactivated and added to the head of its node's partial
+		list. It can be written to clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/deactivate_to_tail
@@ -181,9 +186,9 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The file deactivate_to_tail is read-only and specifies how
-		many times a partial cpu slab was deactivated and added to the
-		tail of its node's partial list.
+		The deactivate_to_tail file shows how many times a partial cpu
+		slab was deactivated and added to the tail of its node's partial
+		list. It can be written to clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/destroy_by_rcu
@@ -201,9 +206,9 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The file free_add_partial is read-only and specifies how many
-		times an object has been freed in a full slab so that it had to
-		added to its node's partial list.
+		The free_add_partial file shows how many times an object has
+		been freed in a full slab so that it had to be added to its
+		node's partial list. It can be written to clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/free_calls
@@ -222,9 +227,9 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The free_fastpath file is read-only and specifies how many
-		objects have been freed using the fast path because it was an
-		object from the cpu slab.
+		The free_fastpath file shows how many objects have been freed
+		using the fast path because it was an object from the cpu slab.
+		It can be written to clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/free_frozen
@@ -233,9 +238,9 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The free_frozen file is read-only and specifies how many
-		objects have been freed to a frozen slab (i.e. a remote cpu
-		slab).
+		The free_frozen file shows how many objects have been freed to
+		a frozen slab (i.e. a remote cpu slab). It can be written to
+		clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/free_remove_partial
@@ -244,9 +249,10 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The file free_remove_partial is read-only and specifies how
-		many times an object has been freed to a now-empty slab so
-		that it had to be removed from its node's partial list.
+		The free_remove_partial file shows how many times an object has
+		been freed to a now-empty slab so that it had to be removed from
+		its node's partial list. It can be written to clear the current
+		count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/free_slab
@@ -255,8 +261,9 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The free_slab file is read-only and specifies how many times an
-		empty slab has been freed back to the page allocator.
+		The free_slab file shows how many times an empty slab has been
+		freed back to the page allocator. It can be written to clear
+		the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/free_slowpath
@@ -265,9 +272,9 @@ KernelVersion: 2.6.25
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The free_slowpath file is read-only and specifies how many
-		objects have been freed using the slow path (i.e. to a full or
-		partial slab).
+		The free_slowpath file shows how many objects have been freed
+		using the slow path (i.e. to a full or partial slab). It can
+		be written to clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/hwcache_align
@@ -346,10 +353,10 @@ KernelVersion: 2.6.26
 Contact:	Pekka Enberg <penberg@cs.helsinki.fi>,
 		Christoph Lameter <cl@linux-foundation.org>
 Description:
-		The file order_fallback is read-only and specifies how many
-		times an allocation of a new slab has not been possible at the
-		cache's order and instead fallen back to its minimum possible
-		order.
+		The order_fallback file shows how many times an allocation of a
+		new slab has not been possible at the cache's order and instead
+		fallen back to its minimum possible order. It can be written to
+		clear the current count.
 		Available when CONFIG_SLUB_STATS is enabled.
 
 What:		/sys/kernel/slab/cache/partial
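
For reference, a minimal user-space sketch of the new behaviour these entries describe: each statistics file can still be read, and can now be cleared by writing "0". It assumes CONFIG_SLUB_STATS=y, root privileges, and an example cache name of "kmalloc-64" (any cache directory under /sys/kernel/slab that exposes these files would do):

/* Illustrative only: read a SLUB stat counter, then clear it. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/slab/kmalloc-64/alloc_fastpath";
	char line[256];
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("alloc_fastpath: %s", line);	/* total, then per-cpu counts */
	fclose(f);

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("0", f);	/* anything other than '0' is rejected with -EINVAL */
	fclose(f);
	return 0;
}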
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -604,6 +604,26 @@ static struct kmem_cache cache_cache = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
+/*
+ * chicken and egg problem: delay the per-cpu array allocation
+ * until the general caches are up.
+ */
+static enum {
+	NONE,
+	PARTIAL_AC,
+	PARTIAL_L3,
+	EARLY,
+	FULL
+} g_cpucache_up;
+
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up >= EARLY;
+}
+
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -620,40 +640,52 @@ static struct kmem_cache cache_cache = {
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
-static inline void init_lock_keys(void)
-
+static void init_node_lock_keys(int q)
 {
-	int q;
 	struct cache_sizes *s = malloc_sizes;
 
-	while (s->cs_size != ULONG_MAX) {
-		for_each_node(q) {
-			struct array_cache **alc;
-			int r;
-			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
-			if (!l3 || OFF_SLAB(s->cs_cachep))
-				continue;
-			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-			alc = l3->alien;
-			/*
-			 * FIXME: This check for BAD_ALIEN_MAGIC
-			 * should go away when common slab code is taught to
-			 * work even without alien caches.
-			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-			 * for alloc_alien_cache,
-			 */
-			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-				continue;
-			for_each_node(r) {
-				if (alc[r])
-					lockdep_set_class(&alc[r]->lock,
-						&on_slab_alc_key);
-			}
+	if (g_cpucache_up != FULL)
+		return;
+
+	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+		struct array_cache **alc;
+		struct kmem_list3 *l3;
+		int r;
+
+		l3 = s->cs_cachep->nodelists[q];
+		if (!l3 || OFF_SLAB(s->cs_cachep))
+			return;
+		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+		alc = l3->alien;
+		/*
+		 * FIXME: This check for BAD_ALIEN_MAGIC
+		 * should go away when common slab code is taught to
+		 * work even without alien caches.
+		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+		 * for alloc_alien_cache,
+		 */
+		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+			return;
+		for_each_node(r) {
+			if (alc[r])
+				lockdep_set_class(&alc[r]->lock,
+					&on_slab_alc_key);
 		}
-		s++;
 	}
 }
+
+static inline void init_lock_keys(void)
+{
+	int node;
+
+	for_each_node(node)
+		init_node_lock_keys(node);
+}
 #else
+static void init_node_lock_keys(int q)
+{
+}
+
 static inline void init_lock_keys(void)
 {
 }
@@ -665,26 +697,6 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-	NONE,
-	PARTIAL_AC,
-	PARTIAL_L3,
-	EARLY,
-	FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-	return g_cpucache_up >= EARLY;
-}
-
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1254,6 +1266,8 @@ static int __cpuinit cpuup_prepare(long cpu)
 		kfree(shared);
 		free_alien_cache(alien);
 	}
+	init_node_lock_keys(node);
+
 	return 0;
 bad:
 	cpuup_canceled(cpu);
@@ -3103,13 +3117,19 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	} else {
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
+		/*
+		 * the 'ac' may be updated by cache_alloc_refill(),
+		 * and kmemleak_erase() requires its correct value.
+		 */
+		ac = cpu_cache_get(cachep);
 	}
 	/*
 	 * To avoid a false negative, if an object that is in one of the
 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
 	 * treat the array pointers as a reference to the object.
 	 */
-	kmemleak_erase(&ac->entry[ac->avail]);
+	if (objp)
+		kmemleak_erase(&ac->entry[ac->avail]);
 	return objp;
 }
 
@@ -3306,7 +3326,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
-	if (unlikely(nodeid == -1))
+	if (nodeid == -1)
 		nodeid = numa_node_id();
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
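
For reference, a sketch of the kind of boot-time check that slab_is_available() supports, which is why its definition is hoisted above the lockdep helpers that now test g_cpucache_up. early_buffer_alloc() is a hypothetical caller, not a function in this patch:

#include <linux/slab.h>
#include <linux/bootmem.h>

static void *early_buffer_alloc(unsigned long size)
{
	if (slab_is_available())
		return kmalloc(size, GFP_KERNEL);	/* general caches are up */
	return alloc_bootmem(size);			/* still too early: use the boot allocator */
}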
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1735,7 +1735,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	}
 	local_irq_restore(flags);
 
-	if (unlikely((gfpflags & __GFP_ZERO) && object))
+	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, objsize);
 
 	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
@@ -4371,12 +4371,28 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 	return len + sprintf(buf + len, "\n");
 }
 
+static void clear_stat(struct kmem_cache *s, enum stat_item si)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		get_cpu_slab(s, cpu)->stat[si] = 0;
+}
+
 #define STAT_ATTR(si, text) \
 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
 { \
 	return show_stat(s, buf, si); \
 } \
-SLAB_ATTR_RO(text); \
+static ssize_t text##_store(struct kmem_cache *s, \
+				const char *buf, size_t length) \
+{ \
+	if (buf[0] != '0') \
+		return -EINVAL; \
+	clear_stat(s, si); \
+	return length; \
+} \
+SLAB_ATTR(text); \
 
 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
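
For reference, roughly what STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) expands to once the store handler is added; this mirrors the macro body above with whitespace simplified and is illustrative only:

static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
{
	return show_stat(s, buf, ALLOC_FASTPATH);
}

static ssize_t alloc_fastpath_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (buf[0] != '0')		/* only a write of "0" is accepted */
		return -EINVAL;
	clear_stat(s, ALLOC_FASTPATH);	/* zero the counter on every online cpu */
	return length;
}
SLAB_ATTR(alloc_fastpath);		/* previously SLAB_ATTR_RO(alloc_fastpath) */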
