aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slub.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/slub.c')
-rw-r--r--mm/slub.c18
1 files changed, 7 insertions, 11 deletions
diff --git a/mm/slub.c b/mm/slub.c
index d821ce6fff39..992ecd4f0d39 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -186,11 +186,6 @@ static inline void ClearSlabDebug(struct page *page)
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
 
-/* Not all arches define cache_line_size */
-#ifndef cache_line_size
-#define cache_line_size()	L1_CACHE_BYTES
-#endif
-
 static int kmem_size = sizeof(struct kmem_cache);
 
 #ifdef CONFIG_SMP
@@ -1330,7 +1325,9 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 {
 #ifdef CONFIG_NUMA
 	struct zonelist *zonelist;
-	struct zone **z;
+	struct zoneref *z;
+	struct zone *zone;
+	enum zone_type high_zoneidx = gfp_zone(flags);
 	struct page *page;
 
 	/*
@@ -1355,14 +1352,13 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
 		return NULL;
 
-	zonelist = &NODE_DATA(
-		slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
-	for (z = zonelist->zones; *z; z++) {
+	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 		struct kmem_cache_node *n;
 
-		n = get_node(s, zone_to_nid(*z));
+		n = get_node(s, zone_to_nid(zone));
 
-		if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
+		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > MIN_PARTIAL) {
 			page = get_partial_node(n);
 			if (page)