author	Christoph Lameter <cl@linux.com>	2011-08-09 17:12:26 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-08-19 12:34:27 -0400
commit	497b66f2ecc97844493e6a147fd5a7e73f73f408 (patch)
tree	6005da56ead66fca5fb413193bc52ad72a743b7c /mm/slub.c
parent	acd19fd1a7b5152cf29f67aaab23aa61078aaa74 (diff)
slub: return object pointer from get_partial() / new_slab().
There is no need anymore to return the pointer to a slab page from get_partial(), since the page reference can be stored in the "page" field of the kmem_cache_cpu structure. Return an object pointer instead.

That in turn allows a simplification of the spaghetti code in __slab_alloc().

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	133
1 file changed, 73 insertions(+), 60 deletions(-)
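The essence of the change, condensed from the hunks below (a simplified before/after sketch of the __slab_alloc() refill path, not a complete listing):

	/* Before: get_partial() handed back the slab page; the caller
	 * bumped the statistics counter and fetched the freelist itself. */
	page = get_partial(s, gfpflags, node, c);
	if (page) {
		stat(s, ALLOC_FROM_PARTIAL);
		object = c->freelist;
		...
	}

	/* After: acquire_slab() stores the page in c->page and counts
	 * ALLOC_FROM_PARTIAL, so get_partial() can return the first free
	 * object directly; allocation of a fresh slab is folded into
	 * new_slab_objects(). */
	object = get_partial(s, gfpflags, node, c);
	if (unlikely(!object))
		object = new_slab_objects(s, gfpflags, node, &c);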
diff --git a/mm/slub.c b/mm/slub.c
index cb53affecca7..df381af963b7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1554,9 +1554,11 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * Lock slab, remove from the partial list and put the object into the
  * per cpu freelist.
  *
+ * Returns a list of objects or NULL if it fails.
+ *
  * Must hold list_lock.
  */
-static inline int acquire_slab(struct kmem_cache *s,
+static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
 		struct kmem_cache_cpu *c)
 {
@@ -1587,10 +1589,11 @@ static inline int acquire_slab(struct kmem_cache *s,
 
 	if (freelist) {
 		/* Populate the per cpu freelist */
-		c->freelist = freelist;
 		c->page = page;
 		c->node = page_to_nid(page);
-		return 1;
+		stat(s, ALLOC_FROM_PARTIAL);
+
+		return freelist;
 	} else {
 		/*
 		 * Slab page came from the wrong list. No object to allocate
@@ -1599,17 +1602,18 @@ static inline int acquire_slab(struct kmem_cache *s,
 		 */
 		printk(KERN_ERR "SLUB: %s : Page without available objects on"
 			" partial list\n", s->name);
-		return 0;
+		return NULL;
 	}
 }
 
 /*
  * Try to allocate a partial slab from a specific node.
  */
-static struct page *get_partial_node(struct kmem_cache *s,
+static void *get_partial_node(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct kmem_cache_cpu *c)
 {
 	struct page *page;
+	void *object;
 
 	/*
 	 * Racy check. If we mistakenly see no partial slabs then we
@@ -1621,13 +1625,15 @@ static struct page *get_partial_node(struct kmem_cache *s,
 		return NULL;
 
 	spin_lock(&n->list_lock);
-	list_for_each_entry(page, &n->partial, lru)
-		if (acquire_slab(s, n, page, c))
+	list_for_each_entry(page, &n->partial, lru) {
+		object = acquire_slab(s, n, page, c);
+		if (object)
 			goto out;
-	page = NULL;
+	}
+	object = NULL;
 out:
 	spin_unlock(&n->list_lock);
-	return page;
+	return object;
 }
 
 /*
@@ -1641,7 +1647,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
 	struct zoneref *z;
 	struct zone *zone;
 	enum zone_type high_zoneidx = gfp_zone(flags);
-	struct page *page;
+	void *object;
 
 	/*
 	 * The defrag ratio allows a configuration of the tradeoffs between
@@ -1674,10 +1680,10 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > s->min_partial) {
-			page = get_partial_node(s, n, c);
-			if (page) {
+			object = get_partial_node(s, n, c);
+			if (object) {
 				put_mems_allowed();
-				return page;
+				return object;
 			}
 		}
 	}
@@ -1689,15 +1695,15 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
 /*
  * Get a partial page, lock it and return it.
  */
-static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node,
+static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 		struct kmem_cache_cpu *c)
 {
-	struct page *page;
+	void *object;
 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
-	page = get_partial_node(s, get_node(s, searchnode), c);
-	if (page || node != NUMA_NO_NODE)
-		return page;
+	object = get_partial_node(s, get_node(s, searchnode), c);
+	if (object || node != NUMA_NO_NODE)
+		return object;
 
 	return get_any_partial(s, flags, c);
 }
@@ -2027,6 +2033,35 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 	}
 }
 
+static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
+			int node, struct kmem_cache_cpu **pc)
+{
+	void *object;
+	struct kmem_cache_cpu *c;
+	struct page *page = new_slab(s, flags, node);
+
+	if (page) {
+		c = __this_cpu_ptr(s->cpu_slab);
+		if (c->page)
+			flush_slab(s, c);
+
+		/*
+		 * No other reference to the page yet so we can
+		 * muck around with it freely without cmpxchg
+		 */
+		object = page->freelist;
+		page->freelist = NULL;
+
+		stat(s, ALLOC_SLAB);
+		c->node = page_to_nid(page);
+		c->page = page;
+		*pc = c;
+	} else
+		object = NULL;
+
+	return object;
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
@@ -2049,7 +2084,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void **object;
-	struct page *page;
 	unsigned long flags;
 	struct page new;
 	unsigned long counters;
@@ -2064,8 +2098,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-	page = c->page;
-	if (!page)
+	if (!c->page)
 		goto new_slab;
 
 	if (unlikely(!node_match(c, node))) {
@@ -2077,8 +2110,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	stat(s, ALLOC_SLOWPATH);
 
 	do {
-		object = page->freelist;
-		counters = page->counters;
+		object = c->page->freelist;
+		counters = c->page->counters;
 		new.counters = counters;
 		VM_BUG_ON(!new.frozen);
 
@@ -2090,12 +2123,12 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		 *
 		 * If there are objects left then we retrieve them
 		 * and use them to refill the per cpu queue.
 		 */
 
-		new.inuse = page->objects;
+		new.inuse = c->page->objects;
 		new.frozen = object != NULL;
 
-	} while (!__cmpxchg_double_slab(s, page,
+	} while (!__cmpxchg_double_slab(s, c->page,
 			object, counters,
 			NULL, new.counters,
 			"__slab_alloc"));
@@ -2109,53 +2142,33 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
-	VM_BUG_ON(!page->frozen);
 	c->freelist = get_freepointer(s, object);
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
 	return object;
 
 new_slab:
-	page = get_partial(s, gfpflags, node, c);
-	if (page) {
-		stat(s, ALLOC_FROM_PARTIAL);
-		object = c->freelist;
+	object = get_partial(s, gfpflags, node, c);
 
-		if (kmem_cache_debug(s))
-			goto debug;
-		goto load_freelist;
-	}
+	if (unlikely(!object)) {
 
-	page = new_slab(s, gfpflags, node);
+		object = new_slab_objects(s, gfpflags, node, &c);
 
-	if (page) {
-		c = __this_cpu_ptr(s->cpu_slab);
-		if (c->page)
-			flush_slab(s, c);
+		if (unlikely(!object)) {
+			if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+				slab_out_of_memory(s, gfpflags, node);
 
-		/*
-		 * No other reference to the page yet so we can
-		 * muck around with it freely without cmpxchg
-		 */
-		object = page->freelist;
-		page->freelist = NULL;
-
-		stat(s, ALLOC_SLAB);
-		c->node = page_to_nid(page);
-		c->page = page;
+			local_irq_restore(flags);
+			return NULL;
+		}
+	}
 
-		if (kmem_cache_debug(s))
-			goto debug;
+	if (likely(!kmem_cache_debug(s)))
 		goto load_freelist;
-	}
-	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
-		slab_out_of_memory(s, gfpflags, node);
-	local_irq_restore(flags);
-	return NULL;
 
-debug:
-	if (!object || !alloc_debug_processing(s, page, object, addr))
-		goto new_slab;
+	/* Only entered in the debug case */
+	if (!alloc_debug_processing(s, c->page, object, addr))
+		goto new_slab;	/* Slab failed checks. Next slab needed */
 
 	c->freelist = get_freepointer(s, object);
 	deactivate_slab(s, c);