author    Christoph Lameter <clameter@sgi.com>    2007-07-17 07:03:20 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-07-17 13:23:01 -0400
commit    6300ea75031e7aebfe3331245b7f750d82621223 (patch)
tree      ccd49c8173ac6b8449b57555bd0070ceafe3f3b8
parent    68dff6a9af9f27df5aeee6d0339818b0e36c1b51 (diff)
SLUB: ensure that the number of objects per slab stays low for high orders
Currently SLUB has no provision to deal with too high page orders that may be specified on the kernel boot line. If an order higher than 6 (on a 4k platform) is generated, then we will BUG() because slabs get more than 65535 objects.

Add some logic that decreases the order for slabs that would have too many objects. This allows booting with slab sizes up to MAX_ORDER. For example, slub_min_order=10 will boot with a default slab size of 4M and reduce slab sizes for small object sizes to lower orders if the number of objects would become too big.

Large slab sizes like that allow a concentration of objects of the same slab cache under as few as possible TLB entries and thus potentially reduce TLB pressure.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
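To make the clamp concrete before reading the diff, here is a minimal userspace sketch of the order-reduction arithmetic (PAGE_SIZE, MAX_OBJECTS_PER_SLAB and clamp_order() are illustrative stand-ins for a 4k platform, not the kernel's code):

    #include <stdio.h>

    /* Stand-in constants for a 4k platform; not the kernel's definitions. */
    #define PAGE_SIZE 4096UL
    #define MAX_OBJECTS_PER_SLAB 65535UL

    /* Walk the order down until a slab holds at most 65535 objects. */
    static unsigned long clamp_order(unsigned long min_order, unsigned long size)
    {
        while (min_order > 0 &&
               (PAGE_SIZE << min_order) >= MAX_OBJECTS_PER_SLAB * size)
            min_order--;
        return min_order;
    }

    int main(void)
    {
        /* slub_min_order=10 (4M slabs) with 8-byte objects would mean
         * 4194304 / 8 = 524288 objects per slab; the clamp drops the
         * order to 6, where a slab holds 262144 / 8 = 32768 objects. */
        printf("order for size 8: %lu\n", clamp_order(10, 8));
        /* 4096-byte objects fit only 1024 per 4M slab; order 10 stands. */
        printf("order for size 4096: %lu\n", clamp_order(10, 4096));
        return 0;
    }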
-rw-r--r--    mm/slub.c    21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a5832f82234c..03ae5490c3dd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -205,6 +205,11 @@ static inline void ClearSlabDebug(struct page *page)
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+/*
+ * The page->inuse field is 16 bit thus we have this limitation
+ */
+#define MAX_OBJECTS_PER_SLAB 65535
+
 /* Internal SLUB flags */
 #define __OBJECT_POISON 0x80000000 /* Poison object */
 
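The 65535 in MAX_OBJECTS_PER_SLAB is simply the largest value a 16-bit counter can represent (2^16 - 1). A tiny sketch of why exceeding it is fatal (the local variable stands in for the real page->inuse field; this is not the kernel's struct page):

    #include <stdio.h>

    int main(void)
    {
        /* A 16-bit counter tops out at 2^16 - 1 = 65535 objects. */
        unsigned short inuse = 65535;
        printf("max count: %u\n", inuse);
        inuse++; /* a 65536th object would wrap the counter to 0 */
        printf("after one more: %u\n", inuse);
        return 0;
    }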
@@ -1736,8 +1741,17 @@ static inline int slab_order(int size, int min_objects,
 {
 	int order;
 	int rem;
+	int min_order = slub_min_order;
 
-	for (order = max(slub_min_order,
+	/*
+	 * If we would create too many object per slab then reduce
+	 * the slab order even if it goes below slub_min_order.
+	 */
+	while (min_order > 0 &&
+			(PAGE_SIZE << min_order) >= MAX_OBJECTS_PER_SLAB * size)
+		min_order--;
+
+	for (order = max(min_order,
 			fls(min_objects * size - 1) - PAGE_SHIFT);
 			order <= max_order; order++) {
 
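For the unchanged part of the loop, the starting point fls(min_objects * size - 1) - PAGE_SHIFT is the smallest order whose slab can hold min_objects objects. A userspace sketch of that computation (fls() is approximated here with a GCC builtin, an assumption about the host toolchain rather than the kernel helper):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Approximate the kernel's fls() (find last set bit, 1-based). */
    static int fls_ul(unsigned long x)
    {
        return x ? 64 - __builtin_clzl(x) : 0;
    }

    int main(void)
    {
        int min_objects = 4;
        unsigned long size = 1000;

        /* 4 * 1000 - 1 = 3999, fls(3999) = 12, minus PAGE_SHIFT gives
         * order 0 -- and an order-0 slab (4096 bytes) does indeed fit
         * four 1000-byte objects. */
        int order = fls_ul(min_objects * size - 1) - PAGE_SHIFT;
        printf("starting order: %d\n", order > 0 ? order : 0);
        return 0;
    }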
@@ -1751,6 +1765,9 @@ static inline int slab_order(int size, int min_objects,
 		if (rem <= slab_size / fract_leftover)
 			break;
 
+		/* If the next size is too high then exit now */
+		if (slab_size * 2 >= MAX_OBJECTS_PER_SLAB * size)
+			break;
 	}
 
 	return order;
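This early exit mirrors the clamp above: if doubling the current slab_size already reaches MAX_OBJECTS_PER_SLAB * size, the next order would push the object count past the 16-bit limit, so trying larger orders is pointless. With 8-byte objects at order 6, for example, slab_size * 2 = 524288 >= 65535 * 8 = 524280, and the loop stops rather than move on to order 7.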
@@ -2037,7 +2054,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 * The page->inuse field is only 16 bit wide! So we cannot have
 	 * more than 64k objects per slab.
 	 */
-	if (!s->objects || s->objects > 65535)
+	if (!s->objects || s->objects > MAX_OBJECTS_PER_SLAB)
 		return 0;
 	return 1;
 
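Closing the loop on the earlier example: after the clamp, an 8-byte cache at order 6 gives s->objects = 262144 / 8 = 32768, which passes this check with room to spare. Replacing the bare 65535 with MAX_OBJECTS_PER_SLAB also ties this guard and the new logic in slab_order() to a single definition of the limit.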