Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	30
1 file changed, 15 insertions, 15 deletions
@@ -1557,7 +1557,8 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * Must hold list_lock.
  */
 static inline int acquire_slab(struct kmem_cache *s,
-		struct kmem_cache_node *n, struct page *page)
+		struct kmem_cache_node *n, struct page *page,
+		struct kmem_cache_cpu *c)
 {
 	void *freelist;
 	unsigned long counters;
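Note: reassembled from the hunk above for readability, the new prototype is shown below. The added argument names the kmem_cache_cpu slot that acquire_slab() should populate, rather than implicitly targeting the running CPU's slot.

	static inline int acquire_slab(struct kmem_cache *s,
			struct kmem_cache_node *n, struct page *page,
			struct kmem_cache_cpu *c);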
@@ -1586,9 +1587,9 @@ static inline int acquire_slab(struct kmem_cache *s,

 	if (freelist) {
 		/* Populate the per cpu freelist */
-		this_cpu_write(s->cpu_slab->freelist, freelist);
-		this_cpu_write(s->cpu_slab->page, page);
-		this_cpu_write(s->cpu_slab->node, page_to_nid(page));
+		c->freelist = freelist;
+		c->page = page;
+		c->node = page_to_nid(page);
 		return 1;
 	} else {
 		/*
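Note: with the kmem_cache_cpu pointer handed in, the this_cpu_write() accessors are no longer needed; plain stores through c suffice. A minimal sketch of the equivalence, assuming (context not shown in this hunk) that preemption is disabled and c was resolved on the running CPU:

	/* Both forms store to the same per-cpu slot provided preemption
	 * stays disabled and c == this_cpu_ptr(s->cpu_slab): */
	this_cpu_write(s->cpu_slab->freelist, freelist);	/* old: per-cpu accessor */
	c->freelist = freelist;					/* new: plain store via pointer */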
@@ -1606,7 +1607,7 @@ static inline int acquire_slab(struct kmem_cache *s,
  * Try to allocate a partial slab from a specific node.
  */
 static struct page *get_partial_node(struct kmem_cache *s,
-					struct kmem_cache_node *n)
+		struct kmem_cache_node *n, struct kmem_cache_cpu *c)
 {
 	struct page *page;

@@ -1621,7 +1622,7 @@ static struct page *get_partial_node(struct kmem_cache *s,

 	spin_lock(&n->list_lock);
 	list_for_each_entry(page, &n->partial, lru)
-		if (acquire_slab(s, n, page))
+		if (acquire_slab(s, n, page, c))
 			goto out;
 	page = NULL;
 out:
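Note: the loop above is the common claim-the-first-matching-element pattern over a list_head list. A generic, self-contained sketch of the same shape; claim_first(), try_claim() and struct item are hypothetical names, not from this diff:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct item {
		struct list_head link;
		bool taken;
	};

	/* Hypothetical predicate standing in for acquire_slab(): try to
	 * take ownership of one element; true on success. */
	static bool try_claim(struct item *it)
	{
		if (it->taken)
			return false;
		it->taken = true;
		return true;
	}

	/* Walk a list under its lock and return the first element that is
	 * successfully claimed, or NULL. Same shape as get_partial_node(). */
	static struct item *claim_first(struct list_head *head, spinlock_t *lock)
	{
		struct item *it;

		spin_lock(lock);
		list_for_each_entry(it, head, link)
			if (try_claim(it))
				goto out;
		it = NULL;
	out:
		spin_unlock(lock);
		return it;
	}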
@@ -1632,7 +1633,8 @@ out:
 /*
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
+static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+		struct kmem_cache_cpu *c)
 {
 #ifdef CONFIG_NUMA
 	struct zonelist *zonelist;
@@ -1672,7 +1674,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)

 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > s->min_partial) {
-			page = get_partial_node(s, n);
+			page = get_partial_node(s, n, c);
 			if (page) {
 				put_mems_allowed();
 				return page;
@@ -1687,16 +1689,17 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 /*
  * Get a partial page, lock it and return it.
  */
-static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
+static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node,
+		struct kmem_cache_cpu *c)
 {
 	struct page *page;
 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;

-	page = get_partial_node(s, get_node(s, searchnode));
+	page = get_partial_node(s, get_node(s, searchnode), c);
 	if (page || node != NUMA_NO_NODE)
 		return page;

-	return get_any_partial(s, flags);
+	return get_any_partial(s, flags, c);
 }

 #ifdef CONFIG_PREEMPT
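Note: taken together, the hunks above thread one kmem_cache_cpu pointer through every level of the partial-slab search. Reassembled call flow (the enclosing slow-path function is not shown in this diff):

	caller (allocation slow path, see the last hunk)
	  -> get_partial(s, gfpflags, node, c)
	       -> get_partial_node(s, get_node(s, searchnode), c)	/* requested or local node first */
	       -> get_any_partial(s, flags, c)				/* then increasing NUMA distance */
	            -> get_partial_node(s, n, c)
	                 -> acquire_slab(s, n, page, c)		/* on success fills c->freelist, c->page, c->node */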
@@ -1765,9 +1768,6 @@ void init_kmem_cache_cpus(struct kmem_cache *s)
 	for_each_possible_cpu(cpu)
 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
 }
-/*
- * Remove the cpu slab
- */

 /*
  * Remove the cpu slab
@@ -2116,7 +2116,7 @@ load_freelist:
 	return object;

 new_slab:
-	page = get_partial(s, gfpflags, node);
+	page = get_partial(s, gfpflags, node, c);
 	if (page) {
 		stat(s, ALLOC_FROM_PARTIAL);
 		object = c->freelist;
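Note: c is already in scope at this call site (it is dereferenced two lines below), so the slow path resolves the per-cpu pointer once rather than at every level. A minimal sketch of how a caller could obtain the pointer itself, assuming preemption is already disabled:

	/* Resolve this CPU's kmem_cache_cpu once, then pass it down: */
	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);

	page = get_partial(s, gfpflags, node, c);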