Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	15
1 file changed, 8 insertions(+), 7 deletions(-)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1457,8 +1457,8 @@ static inline void remove_partial(struct kmem_cache_node *n,
  *
  * Must hold list_lock.
  */
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
-						struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache *s,
+		struct kmem_cache_node *n, struct page *page)
 {
 	if (slab_trylock(page)) {
 		remove_partial(n, page);
@@ -1470,7 +1470,8 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
 /*
  * Try to allocate a partial slab from a specific node.
  */
-static struct page *get_partial_node(struct kmem_cache_node *n)
+static struct page *get_partial_node(struct kmem_cache *s,
+					struct kmem_cache_node *n)
 {
 	struct page *page;
 
@@ -1485,7 +1486,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)
 
 	spin_lock(&n->list_lock);
 	list_for_each_entry(page, &n->partial, lru)
-		if (lock_and_freeze_slab(n, page))
+		if (lock_and_freeze_slab(s, n, page))
 			goto out;
 	page = NULL;
 out:
@@ -1536,7 +1537,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > s->min_partial) {
-			page = get_partial_node(n);
+			page = get_partial_node(s, n);
 			if (page) {
 				put_mems_allowed();
 				return page;
@@ -1556,7 +1557,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
-	page = get_partial_node(get_node(s, searchnode));
+	page = get_partial_node(s, get_node(s, searchnode));
 	if (page || node != NUMA_NO_NODE)
 		return page;
 
@@ -2081,7 +2082,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
 	void *prior;
 	void **object = (void *)x;
-	unsigned long flags;
+	unsigned long uninitialized_var(flags);
 
 	local_irq_save(flags);
 	slab_lock(page);
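
The first five hunks thread the struct kmem_cache pointer from get_partial() down through get_partial_node() into lock_and_freeze_slab(), so the innermost helper can see which cache it is freezing a slab for while it holds the node's list_lock. Below is a minimal user-space sketch of that calling pattern; it is not kernel code, all types and names are simplified stand-ins, and the trylock/freeze step is reduced to setting a flag.

/*
 * User-space sketch of the refactor: the cache pointer s is passed
 * through the partial-list scan so the innermost helper can consult
 * the cache, not just the node.  Simplified stand-in types only.
 */
#include <stdio.h>
#include <stddef.h>

struct page { int frozen; struct page *next; };
struct kmem_cache_node { struct page *partial; };
struct kmem_cache { const char *name; struct kmem_cache_node *node; };

/* After the patch, the helper receives the cache as well as the node. */
static int lock_and_freeze_slab(struct kmem_cache *s,
		struct kmem_cache_node *n, struct page *page)
{
	(void)n;
	page->frozen = 1;	/* stand-in for slab_trylock() + freeze */
	printf("froze a slab of cache %s\n", s->name);
	return 1;
}

static struct page *get_partial_node(struct kmem_cache *s,
					struct kmem_cache_node *n)
{
	struct page *page;

	/* stand-in for list_for_each_entry() over n->partial */
	for (page = n->partial; page; page = page->next)
		if (lock_and_freeze_slab(s, n, page))
			return page;
	return NULL;
}

int main(void)
{
	struct page p = { 0, NULL };
	struct kmem_cache_node n = { &p };
	struct kmem_cache s = { "demo-cache", &n };

	if (get_partial_node(&s, &n))
		printf("got a partial slab\n");
	return 0;
}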
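The final hunk wraps flags in uninitialized_var(). In kernels of this era the gcc variant of that macro was defined roughly as #define uninitialized_var(x) x = x, a self-assignment that suppresses a false-positive "may be used uninitialized" warning without emitting extra code; in __slab_free() flags is in fact always set by local_irq_save() before it is read. A compilable sketch of the idiom follows; read_flags() is a hypothetical example function, not a kernel one.

/*
 * Sketch of the uninitialized_var() idiom.  The self-assignment
 * silences the compiler's "may be used uninitialized" warning on
 * paths it cannot prove are guarded.
 */
#define uninitialized_var(x) x = x

unsigned long read_flags(int cond)
{
	unsigned long uninitialized_var(flags);	/* expands to: flags = flags */

	if (cond)
		flags = 1UL;	/* only conditionally assigned... */
	if (cond)
		return flags;	/* ...but never read on an unassigned path */
	return 0UL;
}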