| field | value | date |
|---|---|---|
| author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-07-03 13:49:45 -0400 |
| committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-07-03 13:49:45 -0400 |
| commit | 026477c1141b67e98e3bd8bdedb7d4b88a3ecd09 | |
| tree | 2624a44924c625c367f3cebf937853b9da2de282 /mm/slab.c | |
| parent | 9f2fa466383ce100b90fe52cb4489d7a26bf72a9 | |
| parent | 29454dde27d8e340bb1987bad9aa504af7081eba | |
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Diffstat (limited to 'mm/slab.c')

| mode | path | changes |
|---|---|---|
| -rw-r--r-- | mm/slab.c | 124 |

1 file changed, 54 insertions(+), 70 deletions(-)
```diff
@@ -309,6 +309,13 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define SIZE_AC 1
 #define SIZE_L3 (1 + MAX_NUMNODES)
 
+static int drain_freelist(struct kmem_cache *cache,
+			struct kmem_list3 *l3, int tofree);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len,
+			int node);
+static void enable_cpucache(struct kmem_cache *cachep);
+static void cache_reap(void *unused);
+
 /*
  * This function must be completely optimized away if a constant is passed to
  * it. Mostly the same as what is in linux/slab.h except it returns an index.
```
```diff
@@ -456,7 +463,7 @@ struct kmem_cache {
 #define STATS_DEC_ACTIVE(x)	((x)->num_active--)
 #define STATS_INC_ALLOCED(x)	((x)->num_allocations++)
 #define STATS_INC_GROWN(x)	((x)->grown++)
-#define STATS_INC_REAPED(x)	((x)->reaped++)
+#define STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
 #define STATS_SET_HIGH(x)						\
 	do {								\
 		if ((x)->num_active > (x)->high_mark)			\
@@ -480,7 +487,7 @@ struct kmem_cache {
 #define STATS_DEC_ACTIVE(x)	do { } while (0)
 #define STATS_INC_ALLOCED(x)	do { } while (0)
 #define STATS_INC_GROWN(x)	do { } while (0)
-#define STATS_INC_REAPED(x)	do { } while (0)
+#define STATS_ADD_REAPED(x,y)	do { } while (0)
 #define STATS_SET_HIGH(x)	do { } while (0)
 #define STATS_INC_ERR(x)	do { } while (0)
 #define STATS_INC_NODEALLOCS(x)	do { } while (0)
```
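The reap statistic changes shape here: drain_freelist() (introduced further down) frees a batch of slabs and returns the count, so the per-event STATS_INC_REAPED(x) becomes the accumulating STATS_ADD_REAPED(x,y). When STATS is off, the macro must still parse as a single statement wherever it is used, hence the do { } while (0) body. A minimal standalone sketch of the pattern; the STATS toggle, struct, and values below are stand-ins for illustration, not the kernel's configuration:

```c
#include <stdio.h>

/* Toggle to emulate a CONFIG_DEBUG_SLAB-style stats build. */
#define STATS 1

struct cache_stats { unsigned long reaped; };

#if STATS
/* Stats enabled: accumulate the number of slabs freed per call. */
#define STATS_ADD_REAPED(x, y)	((x)->reaped += (y))
#else
/* Stats disabled: compiles away, but still a single statement. */
#define STATS_ADD_REAPED(x, y)	do { } while (0)
#endif

int main(void)
{
	struct cache_stats st = { 0 };

	STATS_ADD_REAPED(&st, 3);	/* one reap pass freed 3 slabs */
	STATS_ADD_REAPED(&st, 2);	/* a later pass freed 2 more */
	printf("reaped: %lu\n", st.reaped);	/* prints 5 with STATS on */
	return 0;
}
```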
```diff
@@ -700,12 +707,6 @@ int slab_is_available(void)
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(struct kmem_cache *cachep, void **objpp, int len,
-			int node);
-static void enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
-static int __node_shrink(struct kmem_cache *cachep, int node);
-
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
 	return cachep->array[smp_processor_id()];
```
```diff
@@ -1241,10 +1242,7 @@ free_array_cache:
 			l3 = cachep->nodelists[node];
 			if (!l3)
 				continue;
-			spin_lock_irq(&l3->list_lock);
-			/* free slabs belonging to this node */
-			__node_shrink(cachep, node);
-			spin_unlock_irq(&l3->list_lock);
+			drain_freelist(cachep, l3, l3->free_objects);
 		}
 		mutex_unlock(&cache_chain_mutex);
 		break;
```
```diff
@@ -1507,7 +1505,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	nr_pages = (1 << cachep->gfporder);
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		atomic_add(nr_pages, &slab_reclaim_pages);
-	add_page_state(nr_slab, nr_pages);
+	add_zone_page_state(page_zone(page), NR_SLAB, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
 	return page_address(page);
@@ -1522,12 +1520,12 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	sub_zone_page_state(page_zone(page), NR_SLAB, nr_freed);
 	while (i--) {
 		BUG_ON(!PageSlab(page));
 		__ClearPageSlab(page);
 		page++;
 	}
-	sub_page_state(nr_slab, nr_freed);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
 	free_pages((unsigned long)addr, cachep->gfporder);
```
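These two hunks arrive via the merged branch's per-zone VM counter work: the global nr_slab page state becomes a per-zone NR_SLAB counter keyed off page_zone(page), so slab memory is charged to the zone it was actually allocated from. Note the subtraction also moves before the loop, since the new call needs the original page pointer before page++ advances it. A rough model of the idea; every type and name below is invented for the sketch, and the kernel uses per-CPU differentials and atomics rather than a plain array:

```c
#include <stdio.h>

enum zone_stat_demo { NR_SLAB_DEMO, NR_STAT_ITEMS_DEMO };

struct zone_demo {
	const char *name;
	long vm_stat[NR_STAT_ITEMS_DEMO];	/* one counter per item, per zone */
};

/* Charge `delta` pages of a given stat item to one zone. */
static void add_zone_page_state_demo(struct zone_demo *z,
				     enum zone_stat_demo item, long delta)
{
	z->vm_stat[item] += delta;
}

int main(void)
{
	struct zone_demo normal = { "Normal", { 0 } };
	struct zone_demo dma = { "DMA", { 0 } };

	/* a 2-page slab from ZONE_NORMAL ... */
	add_zone_page_state_demo(&normal, NR_SLAB_DEMO, 2);
	/* ... and a 1-page slab from ZONE_DMA are accounted separately */
	add_zone_page_state_demo(&dma, NR_SLAB_DEMO, 1);

	printf("%s slab pages: %ld\n", normal.name, normal.vm_stat[NR_SLAB_DEMO]);
	printf("%s slab pages: %ld\n", dma.name, dma.vm_stat[NR_SLAB_DEMO]);
	return 0;
}
```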
```diff
@@ -2248,32 +2246,45 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	}
 }
 
-static int __node_shrink(struct kmem_cache *cachep, int node)
+/*
+ * Remove slabs from the list of free slabs.
+ * Specify the number of slabs to drain in tofree.
+ *
+ * Returns the actual number of slabs released.
+ */
+static int drain_freelist(struct kmem_cache *cache,
+			struct kmem_list3 *l3, int tofree)
 {
+	struct list_head *p;
+	int nr_freed;
 	struct slab *slabp;
-	struct kmem_list3 *l3 = cachep->nodelists[node];
-	int ret;
 
-	for (;;) {
-		struct list_head *p;
-
+	nr_freed = 0;
+	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
+
+		spin_lock_irq(&l3->list_lock);
 		p = l3->slabs_free.prev;
-		if (p == &l3->slabs_free)
-			break;
+		if (p == &l3->slabs_free) {
+			spin_unlock_irq(&l3->list_lock);
+			goto out;
+		}
 
-		slabp = list_entry(l3->slabs_free.prev, struct slab, list);
+		slabp = list_entry(p, struct slab, list);
 #if DEBUG
 		BUG_ON(slabp->inuse);
 #endif
 		list_del(&slabp->list);
-
-		l3->free_objects -= cachep->num;
+		/*
+		 * Safe to drop the lock. The slab is no longer linked
+		 * to the cache.
+		 */
+		l3->free_objects -= cache->num;
 		spin_unlock_irq(&l3->list_lock);
-		slab_destroy(cachep, slabp);
-		spin_lock_irq(&l3->list_lock);
+		slab_destroy(cache, slabp);
+		nr_freed++;
 	}
-	ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
-	return ret;
+out:
+	return nr_freed;
 }
 
 static int __cache_shrink(struct kmem_cache *cachep)
```
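drain_freelist() consolidates the freeing loops previously duplicated in __node_shrink() and cache_reap(). It takes l3->list_lock once per slab and drops it before slab_destroy(), so interrupts are not held off while pages are returned; tofree bounds the work, and the return value is the number of slabs actually released. Callers that want the freelist emptied, like the CPU-hotplug path above, pass l3->free_objects: every free slab accounts for cache->num >= 1 objects, so free_objects is always at least the number of free slabs. A toy model of the bounded-drain contract, with all types and names invented and the locking elided:

```c
#include <stdio.h>
#include <stdlib.h>

struct demo_slab {
	struct demo_slab *next;
};

struct demo_freelist {
	struct demo_slab *head;
	int free_objects;
	int objs_per_slab;	/* plays the role of cache->num */
};

/* Free at most `tofree` slabs; return how many were actually freed. */
static int drain_freelist_demo(struct demo_freelist *fl, int tofree)
{
	int nr_freed = 0;

	while (nr_freed < tofree && fl->head) {
		struct demo_slab *slabp = fl->head;

		fl->head = slabp->next;			/* list_del() */
		fl->free_objects -= fl->objs_per_slab;
		free(slabp);				/* slab_destroy() */
		nr_freed++;
	}
	return nr_freed;	/* may be < tofree if the list ran dry */
}

int main(void)
{
	struct demo_freelist fl = { NULL, 0, 4 };

	for (int i = 0; i < 5; i++) {	/* build 5 free slabs */
		struct demo_slab *s = malloc(sizeof(*s));
		s->next = fl.head;
		fl.head = s;
		fl.free_objects += fl.objs_per_slab;
	}
	printf("freed %d of 2 requested\n", drain_freelist_demo(&fl, 2));
	/* free_objects >= number of free slabs, so this drains the rest */
	printf("freed %d draining fully\n",
	       drain_freelist_demo(&fl, fl.free_objects));
	return 0;
}
```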
```diff
@@ -2286,11 +2297,13 @@ static int __cache_shrink(struct kmem_cache *cachep)
 	check_irq_on();
 	for_each_online_node(i) {
 		l3 = cachep->nodelists[i];
-		if (l3) {
-			spin_lock_irq(&l3->list_lock);
-			ret += __node_shrink(cachep, i);
-			spin_unlock_irq(&l3->list_lock);
-		}
+		if (!l3)
+			continue;
+
+		drain_freelist(cachep, l3, l3->free_objects);
+
+		ret += !list_empty(&l3->slabs_full) ||
+			!list_empty(&l3->slabs_partial);
 	}
 	return (ret ? 1 : 0);
 }
```
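__node_shrink() used to report "still busy" as its return value; since drain_freelist() returns a count instead, __cache_shrink() now performs the busy check itself, returning 1 if any node still has full or partial slabs after the drain. A compact sketch of that contract; the types are invented, and the kernel walks cachep->nodelists[] over online nodes rather than a plain array:

```c
#include <stdio.h>

struct demo_node {
	int full_slabs;
	int partial_slabs;
};

/* Returns 1 if any node still holds live objects, 0 if fully shrunk. */
static int cache_shrink_demo(struct demo_node *nodes, int nr_nodes)
{
	int ret = 0;

	for (int i = 0; i < nr_nodes; i++) {
		/* drain_freelist() would run here; free slabs go away */
		ret += nodes[i].full_slabs > 0 || nodes[i].partial_slabs > 0;
	}
	return ret ? 1 : 0;
}

int main(void)
{
	struct demo_node nodes[2] = { { 0, 0 }, { 1, 0 } };

	printf("busy: %d\n", cache_shrink_demo(nodes, 2));	/* 1 */
	nodes[1].full_slabs = 0;
	printf("busy: %d\n", cache_shrink_demo(nodes, 2));	/* 0 */
	return 0;
}
```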
```diff
@@ -3694,10 +3707,6 @@ static void cache_reap(void *unused)
 	}
 
 	list_for_each_entry(searchp, &cache_chain, next) {
-		struct list_head *p;
-		int tofree;
-		struct slab *slabp;
-
 		check_irq_on();
 
 		/*
```
```diff
@@ -3722,47 +3731,22 @@ static void cache_reap(void *unused)
 
 		drain_array(searchp, l3, l3->shared, 0, node);
 
-		if (l3->free_touched) {
+		if (l3->free_touched)
 			l3->free_touched = 0;
-			goto next;
-		}
-
-		tofree = (l3->free_limit + 5 * searchp->num - 1) /
-			(5 * searchp->num);
-		do {
-			/*
-			 * Do not lock if there are no free blocks.
-			 */
-			if (list_empty(&l3->slabs_free))
-				break;
-
-			spin_lock_irq(&l3->list_lock);
-			p = l3->slabs_free.next;
-			if (p == &(l3->slabs_free)) {
-				spin_unlock_irq(&l3->list_lock);
-				break;
-			}
+		else {
+			int freed;
 
-			slabp = list_entry(p, struct slab, list);
-			BUG_ON(slabp->inuse);
-			list_del(&slabp->list);
-			STATS_INC_REAPED(searchp);
-
-			/*
-			 * Safe to drop the lock. The slab is no longer linked
-			 * to the cache. searchp cannot disappear, we hold
-			 * cache_chain_lock
-			 */
-			l3->free_objects -= searchp->num;
-			spin_unlock_irq(&l3->list_lock);
-			slab_destroy(searchp, slabp);
-		} while (--tofree > 0);
+			freed = drain_freelist(searchp, l3, (l3->free_limit +
+				5 * searchp->num - 1) / (5 * searchp->num));
+			STATS_ADD_REAPED(searchp, freed);
+		}
 next:
 		cond_resched();
 	}
 	check_irq_on();
 	mutex_unlock(&cache_chain_mutex);
 	next_reap_node();
+	refresh_cpu_vm_stats(smp_processor_id());
 	/* Set up the next iteration */
 	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
 }
```
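With untouched freelists, cache_reap() now frees at most (l3->free_limit + 5 * searchp->num - 1) / (5 * searchp->num) slabs per pass, the classic round-up idiom for ceil(free_limit / (5 * num)): roughly a fifth of the node's free limit per reap tick rather than the whole freelist at once. The refresh_cpu_vm_stats() call comes in from the merged per-zone counter work and piggybacks periodic counter folding on the existing per-CPU reap timer. A worked example of the tofree arithmetic; the numbers below are made up, since free_limit and num are per-cache runtime values:

```c
#include <stdio.h>

/* (a + b - 1) / b == ceil(a / b) for positive integers. */
static int tofree(int free_limit, int num)
{
	return (free_limit + 5 * num - 1) / (5 * num);
}

int main(void)
{
	/* free_limit = 120 objects, 16 objects per slab:
	 * ceil(120 / 80) = 2 slabs drained this pass. */
	printf("tofree = %d\n", tofree(120, 16));	/* 2 */
	/* even a small free limit always drains at least one slab */
	printf("tofree = %d\n", tofree(60, 16));	/* 1 */
	return 0;
}
```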
