Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	57
1 file changed, 30 insertions(+), 27 deletions(-)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1124,6 +1124,7 @@ void __init kmem_cache_init(void)
 	struct cache_sizes *sizes;
 	struct cache_names *names;
 	int i;
+	int order;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
@@ -1167,11 +1168,15 @@ void __init kmem_cache_init(void)
 
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
 
-	cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
-				&left_over, &cache_cache.num);
+	for (order = 0; order < MAX_ORDER; order++) {
+		cache_estimate(order, cache_cache.buffer_size,
+			cache_line_size(), 0, &left_over, &cache_cache.num);
+		if (cache_cache.num)
+			break;
+	}
 	if (!cache_cache.num)
 		BUG();
-
+	cache_cache.gfporder = order;
 	cache_cache.colour = left_over / cache_cache.colour_off;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
 				sizeof(struct slab), cache_line_size());
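Before this change, kmem_cache_init() assumed a single page (order 0) always holds at least one cache_cache object; the loop above instead walks the orders until cache_estimate() reports a non-zero object count, then records the winning order in cache_cache.gfporder. A minimal userspace sketch of the same first-fit search, with a deliberately simplified stand-in for cache_estimate() (the real function also accounts for alignment, the kmem_bufctl_t array, and on/off-slab management) and hypothetical values for MAX_ORDER and the object size:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define MAX_ORDER 11			/* assumption: common 2.6-era value */

/* Simplified stand-in for cache_estimate(). */
static void estimate(int order, size_t size, size_t *left_over,
		     unsigned int *num)
{
	size_t slab_bytes = PAGE_SIZE << order;

	*num = slab_bytes / size;		/* objects that fit */
	*left_over = slab_bytes - *num * size;	/* wasted tail */
}

int main(void)
{
	size_t size = 6000;	/* hypothetical object, bigger than a page */
	size_t left_over = 0;
	unsigned int num = 0;
	int order;

	/* First-fit search, mirroring the new kmem_cache_init() loop. */
	for (order = 0; order < MAX_ORDER; order++) {
		estimate(order, size, &left_over, &num);
		if (num)	/* lowest order holding >= 1 object wins */
			break;
	}
	printf("order %d: %u object(s), %zu bytes left over\n",
	       order, num, left_over);
	return 0;
}

For a 6000-byte object this prints "order 1: 1 object(s), 2192 bytes left over": two pages are needed before even one object fits.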
@@ -1628,36 +1633,44 @@ static inline size_t calculate_slab_order(struct kmem_cache *cachep,
 			size_t size, size_t align, unsigned long flags)
 {
 	size_t left_over = 0;
+	int gfporder;
 
-	for (;; cachep->gfporder++) {
+	for (gfporder = 0 ; gfporder <= MAX_GFP_ORDER; gfporder++) {
 		unsigned int num;
 		size_t remainder;
 
-		if (cachep->gfporder > MAX_GFP_ORDER) {
-			cachep->num = 0;
-			break;
-		}
-
-		cache_estimate(cachep->gfporder, size, align, flags,
-				&remainder, &num);
+		cache_estimate(gfporder, size, align, flags, &remainder, &num);
 		if (!num)
 			continue;
+
 		/* More than offslab_limit objects will cause problems */
-		if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit)
+		if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
 			break;
 
+		/* Found something acceptable - save it away */
 		cachep->num = num;
+		cachep->gfporder = gfporder;
 		left_over = remainder;
 
 		/*
+		 * A VFS-reclaimable slab tends to have most allocations
+		 * as GFP_NOFS and we really don't want to have to be allocating
+		 * higher-order pages when we are unable to shrink dcache.
+		 */
+		if (flags & SLAB_RECLAIM_ACCOUNT)
+			break;
+
+		/*
 		 * Large number of objects is good, but very large slabs are
 		 * currently bad for the gfp()s.
 		 */
-		if (cachep->gfporder >= slab_break_gfp_order)
+		if (gfporder >= slab_break_gfp_order)
 			break;
 
-		if ((left_over * 8) <= (PAGE_SIZE << cachep->gfporder))
-			/* Acceptable internal fragmentation */
+		/*
+		 * Acceptable internal fragmentation?
+		 */
+		if ((left_over * 8) <= (PAGE_SIZE << gfporder))
 			break;
 	}
 	return left_over;
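All of the order-selection policy now lives in this loop. After saving the first acceptable (num, gfporder) pair it keeps searching for a denser order, but stops early in three cases: SLAB_RECLAIM_ACCOUNT caches stay at the lowest usable order (their allocations are mostly GFP_NOFS, where higher orders hurt), the order has reached slab_break_gfp_order, or internal fragmentation has dropped to 1/8 of the slab or less. A worked example of that last test, using a hypothetical 300-byte object:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long size = 300;	/* hypothetical object size */
	int gfporder = 0;		/* a single page */
	unsigned long slab_bytes = PAGE_SIZE << gfporder;
	unsigned long num = slab_bytes / size;			/* 13 */
	unsigned long left_over = slab_bytes - num * size;	/* 196 */

	/* The same 1/8 test as calculate_slab_order(). */
	if ((left_over * 8) <= (PAGE_SIZE << gfporder))
		printf("order %d accepted: %lu of %lu bytes wasted\n",
		       gfporder, left_over, slab_bytes);
	return 0;
}

Here 196 * 8 = 1568 <= 4096, so order 0 already wastes little enough and the search ends without trying larger slabs.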
@@ -1869,17 +1882,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 	size = ALIGN(size, align);
 
-	if ((flags & SLAB_RECLAIM_ACCOUNT) && size <= PAGE_SIZE) {
-		/*
-		 * A VFS-reclaimable slab tends to have most allocations
-		 * as GFP_NOFS and we really don't want to have to be allocating
-		 * higher-order pages when we are unable to shrink dcache.
-		 */
-		cachep->gfporder = 0;
-		cache_estimate(cachep->gfporder, size, align, flags,
-				&left_over, &cachep->num);
-	} else
-		left_over = calculate_slab_order(cachep, size, align, flags);
+	left_over = calculate_slab_order(cachep, size, align, flags);
 
 	if (!cachep->num) {
 		printk("kmem_cache_create: couldn't create cache %s.\n", name);
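With the SLAB_RECLAIM_ACCOUNT special case folded into calculate_slab_order(), kmem_cache_create() has a single sizing path. Callers are unchanged; a reclaimable cache is still created as before. A hypothetical example (the cache name and object type are invented, and the six-argument ctor/dtor signature is the 2.6-era one assumed here):

#include <linux/module.h>
#include <linux/slab.h>

struct my_obj {				/* hypothetical object */
	unsigned long data[16];
};

static struct kmem_cache *my_cachep;

static int __init my_init(void)
{
	/*
	 * SLAB_RECLAIM_ACCOUNT caches are now capped at the lowest
	 * usable order inside calculate_slab_order() rather than
	 * being special-cased in kmem_cache_create().
	 */
	my_cachep = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
				      0, SLAB_RECLAIM_ACCOUNT, NULL, NULL);
	return my_cachep ? 0 : -ENOMEM;
}
module_init(my_init);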
@@ -2554,7 +2557,7 @@ static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
 		"slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
 		cachep->name, cachep->num, slabp, slabp->inuse);
 	for (i = 0;
-	     i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t);
+	     i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
 	     i++) {
 		if ((i % 16) == 0)
 			printk("\n%03x:", i);
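The final hunk fixes a classic sizeof slip: sizeof(slabp) is the size of the pointer (4 or 8 bytes), while sizeof(*slabp) is the size of the struct slab header the hexdump is meant to cover, so the old loop truncated the dump. A standalone illustration of the difference (the struct below is a stand-in, not the kernel's struct slab):

#include <stdio.h>

struct slab_like {		/* stand-in, not the kernel's struct slab */
	void *prev, *next;
	unsigned long colouroff;
	void *s_mem;
	unsigned int inuse;
};

int main(void)
{
	struct slab_like s = { 0 };
	struct slab_like *slabp = &s;

	printf("sizeof(slabp)  = %zu (the pointer)\n", sizeof(slabp));
	printf("sizeof(*slabp) = %zu (the struct)\n", sizeof(*slabp));
	return 0;
}

On a 64-bit build this prints 8 versus 40: the difference between dumping a pointer's worth of bytes and dumping the whole slab header.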