author	Christoph Lameter <clameter@sgi.com>	2007-10-16 04:26:06 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:43:01 -0400
commit	b3fba8da653999c67d7517050f196e92da6f8d3b (patch)
tree	12ee00ca91ceafe4fa9bb75d62debaf1feebb7b9 /mm/slub.c
parent	8e65d24c7caf2a4c69b3ae0ce170bf3082ba359f (diff)
SLUB: Move page->offset to kmem_cache_cpu->offset
We need the offset from the page struct during slab_alloc and slab_free. In
both cases we also reference the cacheline of the kmem_cache_cpu structure.
We can therefore move the offset field into the kmem_cache_cpu structure,
freeing up 16 bits in the page struct.
Moving the offset allows an allocation from slab_alloc() without touching the
page struct in the hot path.
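
The field itself is added to struct kmem_cache_cpu in include/linux/slub_def.h,
which this view (filtered to mm/slub.c) does not show. A minimal sketch of the
per-cpu structure after this patch, with the layout assumed from the accessors
used in the hunks below:

	struct kmem_cache_cpu {
		void **freelist;	/* pointer to the next free object */
		struct page *page;	/* slab page we are allocating from */
		int node;		/* node of the current slab page */
		unsigned int offset;	/* free-pointer offset, in words */
	};
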
The only thing left in slab_free() that touches the page struct cacheline for
per-cpu freeing is the check of SlabDebug(page). The next patch deals with
that.
Use the available 16 bits to broaden page->inuse. More than 64k objects per
slab become possible, and we can get rid of the checks for that limitation.
There is no longer any need to shrink the slab order when booting with 2M
sized slabs (slub_min_order=9).
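
To see why the 16-bit limit used to force that shrinking, take 4K pages and
(for illustration) 8-byte objects:

	PAGE_SIZE << 9 = 4096 * 512 = 2 MiB per slab
	2 MiB / 8 bytes per object  = 262144 objects
	262144 > 65535 (MAX_OBJECTS_PER_SLAB, the old 16-bit page->inuse ceiling)
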
Nor is there any need to switch off the offset calculation for very large
slabs: the offset field in the kmem_cache_cpu structure is 32 bits wide, so it
can handle slab sizes of up to 8GB.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 52 +++++++++++-----------------------------------------
 1 file changed, 11 insertions(+), 41 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -200,11 +200,6 @@ static inline void ClearSlabDebug(struct page *page)
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
-/*
- * The page->inuse field is 16 bit thus we have this limitation
- */
-#define MAX_OBJECTS_PER_SLAB 65535
-
 /* Internal SLUB flags */
 #define __OBJECT_POISON 0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
@@ -729,11 +724,6 @@ static int check_slab(struct kmem_cache *s, struct page *page)
 		slab_err(s, page, "Not a valid slab page");
 		return 0;
 	}
-	if (page->offset * sizeof(void *) != s->offset) {
-		slab_err(s, page, "Corrupted offset %lu",
-			(unsigned long)(page->offset * sizeof(void *)));
-		return 0;
-	}
 	if (page->inuse > s->objects) {
 		slab_err(s, page, "inuse %u > max %u",
 			s->name, page->inuse, s->objects);
@@ -872,8 +862,6 @@ bad:
 		slab_fix(s, "Marking all objects used");
 		page->inuse = s->objects;
 		page->freelist = NULL;
-		/* Fix up fields that may be corrupted */
-		page->offset = s->offset / sizeof(void *);
 	}
 	return 0;
 }
@@ -1104,7 +1092,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	n = get_node(s, page_to_nid(page));
 	if (n)
 		atomic_long_inc(&n->nr_slabs);
-	page->offset = s->offset / sizeof(void *);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1398,10 +1385,10 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
-		c->freelist = c->freelist[page->offset];
+		c->freelist = c->freelist[c->offset];
 
 		/* And put onto the regular freelist */
-		object[page->offset] = page->freelist;
+		object[c->offset] = page->freelist;
 		page->freelist = object;
 		page->inuse--;
 	}
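
The indexing above works because a free object stores the address of the next
free object inside itself, at a word offset from its own start. A minimal
userspace sketch of that linkage, with illustrative names and sizes rather
than the kernel's:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		size_t size = 64;		/* object size in bytes */
		unsigned int offset = 0;	/* free pointer at word 0, like c->offset */
		char *slab = malloc(4 * size);
		void **freelist = NULL;

		/* Link four objects: each stores the next free object at 'offset'. */
		for (int i = 3; i >= 0; i--) {
			void **object = (void **)(slab + i * size);
			object[offset] = freelist;
			freelist = object;
		}

		/* Allocate: pop the head, the same pattern as
		 * c->freelist = object[c->offset] in the hunks here. */
		void **object = freelist;
		freelist = object[offset];
		printf("allocated %p, next free %p\n", (void *)object, (void *)freelist);

		free(slab);
		return 0;
	}
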
@@ -1497,7 +1484,7 @@ load_freelist:
 		goto debug;
 
 	object = c->page->freelist;
-	c->freelist = object[c->page->offset];
+	c->freelist = object[c->offset];
 	c->page->inuse = s->objects;
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
@@ -1549,7 +1536,7 @@ debug:
 		goto another_slab;
 
 	c->page->inuse++;
-	c->page->freelist = object[c->page->offset];
+	c->page->freelist = object[c->offset];
 	slab_unlock(c->page);
 	return object;
 }
@@ -1580,7 +1567,7 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 
 	else {
 		object = c->freelist;
-		c->freelist = object[c->page->offset];
+		c->freelist = object[c->offset];
 	}
 	local_irq_restore(flags);
 
@@ -1613,7 +1600,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-				void *x, void *addr)
+				void *x, void *addr, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1623,7 +1610,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	if (unlikely(SlabDebug(page)))
 		goto debug;
 checks_ok:
-	prior = object[page->offset] = page->freelist;
+	prior = object[offset] = page->freelist;
 	page->freelist = object;
 	page->inuse--;
 
@@ -1684,10 +1671,10 @@ static void __always_inline slab_free(struct kmem_cache *s,
 	debug_check_no_locks_freed(object, s->objsize);
 	c = get_cpu_slab(s, smp_processor_id());
 	if (likely(page == c->page && !SlabDebug(page))) {
-		object[page->offset] = c->freelist;
+		object[c->offset] = c->freelist;
 		c->freelist = object;
 	} else
-		__slab_free(s, page, x, addr);
+		__slab_free(s, page, x, addr, c->offset);
 
 	local_irq_restore(flags);
 }
@@ -1774,14 +1761,6 @@ static inline int slab_order(int size, int min_objects,
 	int rem;
 	int min_order = slub_min_order;
 
-	/*
-	 * If we would create too many object per slab then reduce
-	 * the slab order even if it goes below slub_min_order.
-	 */
-	while (min_order > 0 &&
-		(PAGE_SIZE << min_order) >= MAX_OBJECTS_PER_SLAB * size)
-		min_order--;
-
 	for (order = max(min_order,
 				fls(min_objects * size - 1) - PAGE_SHIFT);
 			order <= max_order; order++) {
@@ -1796,9 +1775,6 @@ static inline int slab_order(int size, int min_objects,
 		if (rem <= slab_size / fract_leftover)
 			break;
 
-		/* If the next size is too high then exit now */
-		if (slab_size * 2 >= MAX_OBJECTS_PER_SLAB * size)
-			break;
 	}
 
 	return order;
@@ -1878,6 +1854,7 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 {
 	c->page = NULL;
 	c->freelist = NULL;
+	c->offset = s->offset / sizeof(void *);
 	c->node = 0;
 }
 
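
Note the unit conversion here: s->offset is a byte offset into the object,
while c->offset is kept in words so it can index a void ** directly. A sketch
of the conversion, with illustrative values:

	#include <stddef.h>

	/* e.g. a 16-byte free-pointer offset becomes word index 2 on 64-bit */
	static unsigned int freeptr_word_index(size_t byte_offset)
	{
		return byte_offset / sizeof(void *);
	}
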
@@ -2110,14 +2087,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	s->objects = (PAGE_SIZE << s->order) / size;
 
-	/*
-	 * Verify that the number of objects is within permitted limits.
-	 * The page->inuse field is only 16 bit wide! So we cannot have
-	 * more than 64k objects per slab.
-	 */
-	if (!s->objects || s->objects > MAX_OBJECTS_PER_SLAB)
-		return 0;
-	return 1;
+	return !!s->objects;
 
 }
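
The double negation is the usual C idiom for collapsing any non-zero value to
1, so calculate_sizes() still returns 1 on success and 0 only when not even
one object fits. For instance:

	#include <assert.h>

	int main(void)
	{
		assert(!!262144 == 1);	/* any non-zero object count -> success */
		assert(!!0 == 0);	/* zero objects -> failure */
		return 0;
	}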