Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  310
1 files changed, 153 insertions, 157 deletions
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
 			struct kmem_list3 *l3, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
-static void enable_cpucache(struct kmem_cache *cachep);
+static int enable_cpucache(struct kmem_cache *cachep);
 static void cache_reap(void *unused);
 
 /*
@@ -674,6 +674,8 @@ static struct kmem_cache cache_cache = {
 #endif
 };
 
+#define BAD_ALIEN_MAGIC 0x01020304ul
+
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -682,42 +684,58 @@ static struct kmem_cache cache_cache = {
  * The locking for this is tricky in that it nests within the locks
  * of all other slabs in a few places; to deal with this special
  * locking we put on-slab caches into a separate lock-class.
+ *
+ * We set lock class for alien array caches which are up during init.
+ * The lock annotation will be lost if all cpus of a node goes down and
+ * then comes back up during hotplug
  */
-static struct lock_class_key on_slab_key;
+static struct lock_class_key on_slab_l3_key;
+static struct lock_class_key on_slab_alc_key;
+
+static inline void init_lock_keys(void)
 
-static inline void init_lock_keys(struct cache_sizes *s)
 {
 	int q;
-
-	for (q = 0; q < MAX_NUMNODES; q++) {
-		if (!s->cs_cachep->nodelists[q] || OFF_SLAB(s->cs_cachep))
-			continue;
-		lockdep_set_class(&s->cs_cachep->nodelists[q]->list_lock,
-				  &on_slab_key);
+	struct cache_sizes *s = malloc_sizes;
+
+	while (s->cs_size != ULONG_MAX) {
+		for_each_node(q) {
+			struct array_cache **alc;
+			int r;
+			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
+			if (!l3 || OFF_SLAB(s->cs_cachep))
+				continue;
+			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+			alc = l3->alien;
+			/*
+			 * FIXME: This check for BAD_ALIEN_MAGIC
+			 * should go away when common slab code is taught to
+			 * work even without alien caches.
+			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+			 * for alloc_alien_cache,
+			 */
+			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+				continue;
+			for_each_node(r) {
+				if (alc[r])
+					lockdep_set_class(&alc[r]->lock,
+						&on_slab_alc_key);
+			}
+		}
+		s++;
 	}
 }
-
 #else
-static inline void init_lock_keys(struct cache_sizes *s)
+static inline void init_lock_keys(void)
 {
 }
 #endif
 
-
-
 /* Guard access to the cache-chain. */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
 /*
- * vm_enough_memory() looks at this to determine how many slab-allocated pages
- * are possibly freeable under pressure
- *
- * SLAB_RECLAIM_ACCOUNT turns this on per-slab
- */
-atomic_t slab_reclaim_pages;
-
-/*
  * chicken and egg problem: delay the per-cpu array allocation
  * until the general caches are up.
  */
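
The hunk above replaces the single on_slab_key with two lock classes, one for the per-node l3->list_lock and one for the alien array_cache locks, so that the legitimate nesting of these locks across caches is not flagged by lockdep as recursive locking. A minimal sketch of that lockdep pattern, with hypothetical lock and key names (not part of the patch):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* One static key per logical lock class (hypothetical names). */
static struct lock_class_key demo_l3_key;
static struct lock_class_key demo_alien_key;

static void demo_annotate(spinlock_t *list_lock, spinlock_t *alien_lock)
{
	/*
	 * Distinct classes tell lockdep that taking alien_lock while
	 * holding list_lock is an intended ordering, not recursion on
	 * a single lock class.
	 */
	lockdep_set_class(list_lock, &demo_l3_key);
	lockdep_set_class(alien_lock, &demo_alien_key);
}
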
@@ -768,11 +786,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	return csizep->cs_cachep;
 }
 
-struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
-EXPORT_SYMBOL(kmem_find_general_cachep);
 
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
@@ -1092,7 +1109,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 
 static inline struct array_cache **alloc_alien_cache(int node, int limit)
 {
-	return (struct array_cache **) 0x01020304ul;
+	return (struct array_cache **)BAD_ALIEN_MAGIC;
 }
 
 static inline void free_alien_cache(struct array_cache **ac_ptr)
@@ -1422,7 +1439,6 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 		}
-		init_lock_keys(sizes);
 
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
@@ -1491,10 +1507,15 @@ void __init kmem_cache_init(void)
 		struct kmem_cache *cachep;
 		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
-			enable_cpucache(cachep);
+			if (enable_cpucache(cachep))
+				BUG();
 		mutex_unlock(&cache_chain_mutex);
 	}
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
+
 	/* Done! */
 	g_cpucache_up = FULL;
 
@@ -1551,8 +1572,11 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 
 	nr_pages = (1 << cachep->gfporder);
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		atomic_add(nr_pages, &slab_reclaim_pages);
-	add_zone_page_state(page_zone(page), NR_SLAB, nr_pages);
+		add_zone_page_state(page_zone(page),
+			NR_SLAB_RECLAIMABLE, nr_pages);
+	else
+		add_zone_page_state(page_zone(page),
+			NR_SLAB_UNRECLAIMABLE, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
 	return page_address(page);
@@ -1567,7 +1591,12 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
-	sub_zone_page_state(page_zone(page), NR_SLAB, nr_freed);
+	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+		sub_zone_page_state(page_zone(page),
+				NR_SLAB_RECLAIMABLE, nr_freed);
+	else
+		sub_zone_page_state(page_zone(page),
+				NR_SLAB_UNRECLAIMABLE, nr_freed);
 	while (i--) {
 		BUG_ON(!PageSlab(page));
 		__ClearPageSlab(page);
@@ -1576,8 +1605,6 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
 	free_pages((unsigned long)addr, cachep->gfporder);
-	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
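
With the global atomic_t slab_reclaim_pages gone, slab memory is accounted per zone in the NR_SLAB_RECLAIMABLE and NR_SLAB_UNRECLAIMABLE vmstat counters, selected by the cache's SLAB_RECLAIM_ACCOUNT flag at page allocation and free time. A sketch of how a consumer might read the totals, assuming the global_page_state() helper from the same vmstat infrastructure (hypothetical function, not part of the patch):

#include <linux/kernel.h>
#include <linux/vmstat.h>

static void demo_print_slab_usage(void)
{
	/* Sums the per-zone counters maintained by the hunks above. */
	printk(KERN_INFO "slab: %lu reclaimable, %lu unreclaimable pages\n",
	       global_page_state(NR_SLAB_RECLAIMABLE),
	       global_page_state(NR_SLAB_UNRECLAIMABLE));
}
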
@@ -1834,6 +1861,27 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
 	}
 }
 
+static void __kmem_cache_destroy(struct kmem_cache *cachep)
+{
+	int i;
+	struct kmem_list3 *l3;
+
+	for_each_online_cpu(i)
+		kfree(cachep->array[i]);
+
+	/* NUMA: free the list3 structures */
+	for_each_online_node(i) {
+		l3 = cachep->nodelists[i];
+		if (l3) {
+			kfree(l3->shared);
+			free_alien_cache(l3->alien);
+			kfree(l3);
+		}
+	}
+	kmem_cache_free(&cache_cache, cachep);
+}
+
+
 /**
  * calculate_slab_order - calculate size (page order) of slabs
  * @cachep: pointer to the cache that is being created
@@ -1904,12 +1952,11 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	return left_over;
 }
 
-static void setup_cpu_cache(struct kmem_cache *cachep)
+static int setup_cpu_cache(struct kmem_cache *cachep)
 {
-	if (g_cpucache_up == FULL) {
-		enable_cpucache(cachep);
-		return;
-	}
+	if (g_cpucache_up == FULL)
+		return enable_cpucache(cachep);
+
 	if (g_cpucache_up == NONE) {
 		/*
 		 * Note: the first kmem_cache_create must create the cache
@@ -1956,6 +2003,7 @@ static void setup_cpu_cache(struct kmem_cache *cachep)
 	cpu_cache_get(cachep)->touched = 0;
 	cachep->batchcount = 1;
 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
+	return 0;
 }
 
 /**
@@ -2097,6 +2145,15 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	} else {
 		ralign = BYTES_PER_WORD;
 	}
+
+	/*
+	 * Redzoning and user store require word alignment. Note this will be
+	 * overridden by architecture or caller mandated alignment if either
+	 * is greater than BYTES_PER_WORD.
+	 */
+	if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
+		ralign = BYTES_PER_WORD;
+
 	/* 2) arch mandated alignment: disables debug if necessary */
 	if (ralign < ARCH_SLAB_MINALIGN) {
 		ralign = ARCH_SLAB_MINALIGN;
@@ -2110,8 +2167,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	}
 	/*
-	 * 4) Store it. Note that the debug code below can reduce
-	 * the alignment to BYTES_PER_WORD.
+	 * 4) Store it.
 	 */
 	align = ralign;
 
@@ -2123,20 +2179,19 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 #if DEBUG
 	cachep->obj_size = size;
 
+	/*
+	 * Both debugging options require word-alignment which is calculated
+	 * into align above.
+	 */
 	if (flags & SLAB_RED_ZONE) {
-		/* redzoning only works with word aligned caches */
-		align = BYTES_PER_WORD;
-
 		/* add space for red zone words */
 		cachep->obj_offset += BYTES_PER_WORD;
 		size += 2 * BYTES_PER_WORD;
 	}
 	if (flags & SLAB_STORE_USER) {
-		/* user store requires word alignment and
-		 * one word storage behind the end of the real
-		 * object.
+		/* user store requires one word storage behind the end of
+		 * the real object.
 		 */
-		align = BYTES_PER_WORD;
 		size += BYTES_PER_WORD;
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
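
The two debug hunks above stop forcing align back down to BYTES_PER_WORD inside the #if DEBUG section; word alignment for SLAB_RED_ZONE and SLAB_STORE_USER is now requested once, before the arch- and caller-mandated alignment checks, so a larger alignment can no longer be silently reduced. The size bookkeeping itself is unchanged; a rough worked example with hypothetical numbers (userspace sketch, not kernel code):

#include <stdio.h>

#define BYTES_PER_WORD sizeof(void *)	/* 4 on 32-bit, 8 on 64-bit */

int main(void)
{
	size_t size = 20;		/* hypothetical object payload */
	size_t obj_offset = 0;		/* where the payload starts */
	int red_zone = 1, store_user = 1;

	if (red_zone) {
		obj_offset += BYTES_PER_WORD;	/* leading red zone word */
		size += 2 * BYTES_PER_WORD;	/* leading + trailing word */
	}
	if (store_user)
		size += BYTES_PER_WORD;		/* caller address behind the object */

	printf("obj_offset=%zu, managed size=%zu\n", obj_offset, size);
	return 0;
}
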
@@ -2200,14 +2255,26 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		cachep->gfpflags |= GFP_DMA;
 	cachep->buffer_size = size;
 
-	if (flags & CFLGS_OFF_SLAB)
+	if (flags & CFLGS_OFF_SLAB) {
 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
+		/*
+		 * This is a possibility for one of the malloc_sizes caches.
+		 * But since we go off slab only for object size greater than
+		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
+		 * this should not happen at all.
+		 * But leave a BUG_ON for some lucky dude.
+		 */
+		BUG_ON(!cachep->slabp_cache);
+	}
 	cachep->ctor = ctor;
 	cachep->dtor = dtor;
 	cachep->name = name;
 
-
-	setup_cpu_cache(cachep);
+	if (setup_cpu_cache(cachep)) {
+		__kmem_cache_destroy(cachep);
+		cachep = NULL;
+		goto oops;
+	}
 
 	/* cache setup completed, link it into the list */
 	list_add(&cachep->next, &cache_chain);
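
With setup_cpu_cache() failures now unwound through __kmem_cache_destroy(), kmem_cache_create() can return NULL instead of leaving a half-built cache behind (unless SLAB_PANIC is set), so callers are expected to check the result. A minimal sketch with a hypothetical cache, using the 2.6.18-era six-argument kmem_cache_create() signature:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct foo {				/* hypothetical object */
	int a, b;
};

static struct kmem_cache *foo_cache;	/* hypothetical cache */

static int __init foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, 0, NULL, NULL);
	if (!foo_cache)
		return -ENOMEM;
	return 0;
}
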
@@ -2389,9 +2456,6 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  */
 int kmem_cache_destroy(struct kmem_cache *cachep)
 {
-	int i;
-	struct kmem_list3 *l3;
-
 	BUG_ON(!cachep || in_interrupt());
 
 	/* Don't let CPUs to come and go */
@@ -2417,25 +2481,23 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
 		synchronize_rcu();
 
-	for_each_online_cpu(i)
-		kfree(cachep->array[i]);
-
-	/* NUMA: free the list3 structures */
-	for_each_online_node(i) {
-		l3 = cachep->nodelists[i];
-		if (l3) {
-			kfree(l3->shared);
-			free_alien_cache(l3->alien);
-			kfree(l3);
-		}
-	}
-	kmem_cache_free(&cache_cache, cachep);
+	__kmem_cache_destroy(cachep);
 	unlock_cpu_hotplug();
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
-/* Get the memory for a slab management obj. */
+/*
+ * Get the memory for a slab management obj.
+ * For a slab cache when the slab descriptor is off-slab, slab descriptors
+ * always come from malloc_sizes caches. The slab descriptor cannot
+ * come from the same cache which is getting created because,
+ * when we are searching for an appropriate cache for these
+ * descriptors in kmem_cache_create, we search through the malloc_sizes array.
+ * If we are creating a malloc_sizes cache here it would not be visible to
+ * kmem_find_general_cachep till the initialization is complete.
+ * Hence we cannot have slabp_cache same as the original cache.
+ */
 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 				   int colour_off, gfp_t local_flags,
 				   int nodeid)
@@ -3119,6 +3181,12 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		if (slabp->inuse == 0) {
 			if (l3->free_objects > l3->free_limit) {
 				l3->free_objects -= cachep->num;
+				/* No need to drop any previously held
+				 * lock here, even if we have a off-slab slab
+				 * descriptor it is guaranteed to come from
+				 * a different cache, refer to comments before
+				 * alloc_slabmgmt.
+				 */
 				slab_destroy(cachep, slabp);
 			} else {
 				list_add(&slabp->list, &l3->slabs_free);
@@ -3317,7 +3385,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *kmalloc_node(size_t size, gfp_t flags, int node)
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *cachep;
 
@@ -3326,7 +3394,7 @@ void *kmalloc_node(size_t size, gfp_t flags, int node)
 		return NULL;
 	return kmem_cache_alloc_node(cachep, flags, node);
 }
-EXPORT_SYMBOL(kmalloc_node);
+EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
 /**
@@ -3370,55 +3438,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
 EXPORT_SYMBOL(__kmalloc_track_caller);
 #endif
 
-#ifdef CONFIG_SMP
-/**
- * __alloc_percpu - allocate one copy of the object for every present
- * cpu in the system, zeroing them.
- * Objects should be dereferenced using the per_cpu_ptr macro only.
- *
- * @size: how many bytes of memory are required.
- */
-void *__alloc_percpu(size_t size)
-{
-	int i;
-	struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
-
-	if (!pdata)
-		return NULL;
-
-	/*
-	 * Cannot use for_each_online_cpu since a cpu may come online
-	 * and we have no way of figuring out how to fix the array
-	 * that we have allocated then....
-	 */
-	for_each_possible_cpu(i) {
-		int node = cpu_to_node(i);
-
-		if (node_online(node))
-			pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, node);
-		else
-			pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
-
-		if (!pdata->ptrs[i])
-			goto unwind_oom;
-		memset(pdata->ptrs[i], 0, size);
-	}
-
-	/* Catch derefs w/o wrappers */
-	return (void *)(~(unsigned long)pdata);
-
-unwind_oom:
-	while (--i >= 0) {
-		if (!cpu_possible(i))
-			continue;
-		kfree(pdata->ptrs[i]);
-	}
-	kfree(pdata);
-	return NULL;
-}
-EXPORT_SYMBOL(__alloc_percpu);
-#endif
-
 /**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
@@ -3464,29 +3483,6 @@ void kfree(const void *objp)
 }
 EXPORT_SYMBOL(kfree);
 
-#ifdef CONFIG_SMP
-/**
- * free_percpu - free previously allocated percpu memory
- * @objp: pointer returned by alloc_percpu.
- *
- * Don't free memory not originally allocated by alloc_percpu()
- * The complemented objp is to check for that.
- */
-void free_percpu(const void *objp)
-{
-	int i;
-	struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);
-
-	/*
-	 * We allocate for all cpus so we cannot use for online cpu here.
-	 */
-	for_each_possible_cpu(i)
-		kfree(p->ptrs[i]);
-	kfree(p);
-}
-EXPORT_SYMBOL(free_percpu);
-#endif
-
 unsigned int kmem_cache_size(struct kmem_cache *cachep)
 {
 	return obj_size(cachep);
@@ -3603,22 +3599,26 @@ static void do_ccupdate_local(void *info)
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 			    int batchcount, int shared)
 {
-	struct ccupdate_struct new;
-	int i, err;
+	struct ccupdate_struct *new;
+	int i;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return -ENOMEM;
 
-	memset(&new.new, 0, sizeof(new.new));
 	for_each_online_cpu(i) {
-		new.new[i] = alloc_arraycache(cpu_to_node(i), limit,
+		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
 						batchcount);
-		if (!new.new[i]) {
+		if (!new->new[i]) {
 			for (i--; i >= 0; i--)
-				kfree(new.new[i]);
+				kfree(new->new[i]);
+			kfree(new);
 			return -ENOMEM;
 		}
 	}
-	new.cachep = cachep;
+	new->cachep = cachep;
 
-	on_each_cpu(do_ccupdate_local, (void *)&new, 1, 1);
+	on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
 
 	check_irq_on();
 	cachep->batchcount = batchcount;
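
struct ccupdate_struct embeds one array_cache pointer per possible CPU, so with large NR_CPUS configurations it is far too big to keep on the kernel stack; the hunk above therefore allocates it with kzalloc() and frees it on every exit path. A userspace sketch of the size concern, using a stand-in struct and a hypothetical CPU count:

#include <stdio.h>

#define DEMO_NR_CPUS 1024	/* hypothetical large SMP configuration */

/* Stand-in for the kernel's ccupdate_struct: one pointer per CPU slot. */
struct demo_ccupdate {
	void *cachep;
	void *new[DEMO_NR_CPUS];
};

int main(void)
{
	/* Roughly 8KB with 8-byte pointers -- too large for an on-stack local. */
	printf("sizeof(struct demo_ccupdate) = %zu bytes\n",
	       sizeof(struct demo_ccupdate));
	return 0;
}
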
@@ -3626,7 +3626,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 	cachep->shared = shared;
 
 	for_each_online_cpu(i) {
-		struct array_cache *ccold = new.new[i];
+		struct array_cache *ccold = new->new[i];
 		if (!ccold)
 			continue;
 		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
@@ -3634,18 +3634,12 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
 		kfree(ccold);
 	}
-
-	err = alloc_kmemlist(cachep);
-	if (err) {
-		printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
-		       cachep->name, -err);
-		BUG();
-	}
-	return 0;
+	kfree(new);
+	return alloc_kmemlist(cachep);
 }
 
 /* Called with cache_chain_mutex held always */
-static void enable_cpucache(struct kmem_cache *cachep)
+static int enable_cpucache(struct kmem_cache *cachep)
 {
 	int err;
 	int limit, shared;
@@ -3697,6 +3691,7 @@ static void enable_cpucache(struct kmem_cache *cachep)
 	if (err)
 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 		       cachep->name, -err);
+	return err;
 }
 
 /*
@@ -4157,6 +4152,7 @@ static int leaks_show(struct seq_file *m, void *p)
 		show_symbol(m, n[2*i+2]);
 		seq_putc(m, '\n');
 	}
+
 	return 0;
 }
 