author     Pekka Enberg <penberg@cs.helsinki.fi>      2006-01-08 04:00:36 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>      2006-01-08 23:12:39 -0500
commit     4d268eba1187ef66844a6a33b9431e5d0dadd4ad (patch)
tree       575aa29016688a07b2a80132a15cc8b5a5027f60
parent     85289f98ddc13f6cea82c59d6ff78f9d205dfccc (diff)
[PATCH] slab: extract slab order calculation to separate function
This patch moves the ugly loop that determines the 'optimal' size (page order)
of cache slabs from kmem_cache_create() to a separate function and cleans it
up a bit.
Thanks to Matthew Wilcox for the help with this patch.
Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--   mm/slab.c   89
1 file changed, 49 insertions, 40 deletions
@@ -1474,6 +1474,53 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index)
 }
 
 /**
+ * calculate_slab_order - calculate size (page order) of slabs and the number
+ * of objects per slab.
+ *
+ * This could be made much more intelligent. For now, try to avoid using
+ * high order pages for slabs. When the gfp() functions are more friendly
+ * towards high-order requests, this should be changed.
+ */
+static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
+			size_t align, gfp_t flags)
+{
+	size_t left_over = 0;
+
+	for ( ; ; cachep->gfporder++) {
+		unsigned int num;
+		size_t remainder;
+
+		if (cachep->gfporder > MAX_GFP_ORDER) {
+			cachep->num = 0;
+			break;
+		}
+
+		cache_estimate(cachep->gfporder, size, align, flags,
+			       &remainder, &num);
+		if (!num)
+			continue;
+		/* More than offslab_limit objects will cause problems */
+		if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit)
+			break;
+
+		cachep->num = num;
+		left_over = remainder;
+
+		/*
+		 * Large number of objects is good, but very large slabs are
+		 * currently bad for the gfp()s.
+		 */
+		if (cachep->gfporder >= slab_break_gfp_order)
+			break;
+
+		if ((left_over * 8) <= (PAGE_SIZE << cachep->gfporder))
+			/* Acceptable internal fragmentation */
+			break;
+	}
+	return left_over;
+}
+
+/**
  * kmem_cache_create - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
  * @size: The size of objects to be created in this cache.
@@ -1682,46 +1729,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		cachep->gfporder = 0;
 		cache_estimate(cachep->gfporder, size, align, flags,
 			       &left_over, &cachep->num);
-	} else {
-		/*
-		 * Calculate size (in pages) of slabs, and the num of objs per
-		 * slab. This could be made much more intelligent. For now,
-		 * try to avoid using high page-orders for slabs. When the
-		 * gfp() funcs are more friendly towards high-order requests,
-		 * this should be changed.
-		 */
-		do {
-			unsigned int break_flag = 0;
-cal_wastage:
-			cache_estimate(cachep->gfporder, size, align, flags,
-					&left_over, &cachep->num);
-			if (break_flag)
-				break;
-			if (cachep->gfporder >= MAX_GFP_ORDER)
-				break;
-			if (!cachep->num)
-				goto next;
-			if (flags & CFLGS_OFF_SLAB &&
-					cachep->num > offslab_limit) {
-				/* This num of objs will cause problems. */
-				cachep->gfporder--;
-				break_flag++;
-				goto cal_wastage;
-			}
-
-			/*
-			 * Large num of objs is good, but v. large slabs are
-			 * currently bad for the gfp()s.
-			 */
-			if (cachep->gfporder >= slab_break_gfp_order)
-				break;
-
-			if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
-				break;	/* Acceptable internal fragmentation. */
-next:
-			cachep->gfporder++;
-		} while (1);
-	}
+	} else
+		left_over = calculate_slab_order(cachep, size, align, flags);
 
 	if (!cachep->num) {
 		printk("kmem_cache_create: couldn't create cache %s.\n", name);
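
The heuristic that the extracted calculate_slab_order() implements can be tried out in isolation. The sketch below is a simplified, standalone model, not kernel code: pack_estimate() is a hypothetical stand-in for cache_estimate() that ignores per-slab management overhead, alignment, and the off-slab case, and MAX_ORDER/BREAK_ORDER are assumed stand-ins for MAX_GFP_ORDER and slab_break_gfp_order. It keeps only the decision rule from the patch: grow the page order until at least one object fits, and stop once the order reaches the break limit, or earlier if the leftover space is at most 1/8 of the slab.

/*
 * Simplified, standalone sketch of the order-selection heuristic above.
 * Not kernel code: pack_estimate(), MAX_ORDER and BREAK_ORDER are assumed
 * stand-ins for cache_estimate(), MAX_GFP_ORDER and slab_break_gfp_order.
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE	4096UL	/* assume 4 KiB pages */
#define MAX_ORDER		5	/* stand-in for MAX_GFP_ORDER */
#define BREAK_ORDER		1	/* stand-in for slab_break_gfp_order */

/* Naive packing: how many objects fit, and how much space is left over. */
static void pack_estimate(int order, unsigned long objsize,
			  unsigned long *left_over, unsigned int *num)
{
	unsigned long slab_size = SKETCH_PAGE_SIZE << order;

	*num = slab_size / objsize;
	*left_over = slab_size - *num * objsize;
}

/* Loosely mirrors the loop in calculate_slab_order(): smallest usable order. */
static int pick_order(unsigned long objsize, unsigned long *left_over,
		      unsigned int *num)
{
	int order;

	for (order = 0; ; order++) {
		if (order > MAX_ORDER)
			return -1;		/* nothing fits at all */

		pack_estimate(order, objsize, left_over, num);
		if (!*num)
			continue;		/* object larger than this slab */

		if (order >= BREAK_ORDER)
			break;			/* avoid very large slabs */

		if (*left_over * 8 <= (SKETCH_PAGE_SIZE << order))
			break;			/* waste is at most 1/8 of the slab */
	}
	return order;
}

int main(void)
{
	unsigned long left_over;
	unsigned int num;
	int order = pick_order(1100, &left_over, &num);

	/*
	 * With 4 KiB pages and 1100-byte objects: order 0 packs 3 objects and
	 * wastes 796 bytes (796 * 8 > 4096, rejected); order 1 packs 7 and is
	 * accepted because it reaches BREAK_ORDER, wasting 492 bytes.
	 */
	printf("order=%d num=%u left_over=%lu\n", order, num, left_over);
	return 0;
}

Compared with the old do/while block in kmem_cache_create(), the extracted helper also drops the cal_wastage/next labels and the break_flag retry, replacing them with a plain continue/break structure while keeping the same order-selection rules.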