author:    David Rientjes <rientjes@google.com>  2014-06-04 19:08:28 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2014-06-04 19:54:06 -0400
commit:    e0b9daeb453e602a95ea43853dc12d385558ce1f
tree:      febaa53d23065ef97ab8608ca4e9ec9c47e36f2c
parent:    35979ef3393110ff3c12c6b94552208d3bdf1a36
mm, compaction: embed migration mode in compact_control
We're going to want to manipulate the migration mode for compaction in the
page allocator, but compact_control's sync field is currently only a bool:
depending on its value, we do either MIGRATE_ASYNC or MIGRATE_SYNC_LIGHT
compaction. Convert the bool to enum migrate_mode (sketched below) and pass
the migration mode in directly. Later, in the page-fault patch of this
series, we'll want to avoid MIGRATE_SYNC_LIGHT for thp allocations to avoid
unnecessary latency.
This also alters compaction triggered from sysfs, either for the entire
system or for a node, to force MIGRATE_SYNC.
[akpm@linux-foundation.org: fix build]
[iamjoonsoo.kim@lge.com: use MIGRATE_SYNC in alloc_contig_range()]
Signed-off-by: David Rientjes <rientjes@google.com>
Suggested-by: Mel Gorman <mgorman@suse.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
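
[Editor's note] The conversion replaces compact_control's bool with the
pre-existing enum migrate_mode. For orientation, here is a minimal sketch of
that enum and the new field; the comments are paraphrased from
include/linux/migrate_mode.h, not quoted verbatim:

    /*
     * Sketch (not verbatim) of the three migration modes this patch
     * plumbs through compaction, cf. include/linux/migrate_mode.h:
     */
    enum migrate_mode {
            MIGRATE_ASYNC,          /* never blocks */
            MIGRATE_SYNC_LIGHT,     /* may block, but skips the costliest
                                     * waits, e.g. on page writeback */
            MIGRATE_SYNC,           /* may block on anything */
    };

    /* compact_control after this patch (see the mm/internal.h hunk below) */
    struct compact_control {
            /* ... other fields elided ... */
            enum migrate_mode mode; /* Async or sync migration mode */
    };

With the mode embedded, sysfs-triggered compaction (compact_node() below)
can simply set .mode = MIGRATE_SYNC, and the page allocator can choose the
mode per compaction attempt.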
 include/linux/compaction.h |  4
 mm/compaction.c            | 36
 mm/internal.h              |  2
 mm/page_alloc.c            | 39
 4 files changed, 39 insertions(+), 42 deletions(-)
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 7e1c76e3cd68..01e3132820da 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
             int order, gfp_t gfp_mask, nodemask_t *mask,
-            bool sync, bool *contended);
+            enum migrate_mode mode, bool *contended);
 extern void compact_pgdat(pg_data_t *pgdat, int order);
 extern void reset_isolation_suitable(pg_data_t *pgdat);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
@@ -91,7 +91,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
             int order, gfp_t gfp_mask, nodemask_t *nodemask,
-            bool sync, bool *contended)
+            enum migrate_mode mode, bool *contended)
 {
     return COMPACT_CONTINUE;
 }
diff --git a/mm/compaction.c b/mm/compaction.c
index 70c0f8cda33f..217a6ad9a20e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -161,7 +161,8 @@ static void update_pageblock_skip(struct compact_control *cc,
             return;
         if (pfn > zone->compact_cached_migrate_pfn[0])
             zone->compact_cached_migrate_pfn[0] = pfn;
-        if (cc->sync && pfn > zone->compact_cached_migrate_pfn[1])
+        if (cc->mode != MIGRATE_ASYNC &&
+            pfn > zone->compact_cached_migrate_pfn[1])
             zone->compact_cached_migrate_pfn[1] = pfn;
     } else {
         if (cc->finished_update_free)
@@ -208,7 +209,7 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
     }

     /* async aborts if taking too long or contended */
-    if (!cc->sync) {
+    if (cc->mode == MIGRATE_ASYNC) {
         cc->contended = true;
         return false;
     }
@@ -473,7 +474,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
     bool locked = false;
     struct page *page = NULL, *valid_page = NULL;
     bool set_unsuitable = true;
-    const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
+    const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
+                    ISOLATE_ASYNC_MIGRATE : 0) |
                     (unevictable ? ISOLATE_UNEVICTABLE : 0);

     /*
@@ -483,7 +485,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
      */
     while (unlikely(too_many_isolated(zone))) {
         /* async migration should just abort */
-        if (!cc->sync)
+        if (cc->mode == MIGRATE_ASYNC)
             return 0;

         congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -548,7 +550,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
          * the minimum amount of work satisfies the allocation
          */
         mt = get_pageblock_migratetype(page);
-        if (!cc->sync && !migrate_async_suitable(mt)) {
+        if (cc->mode == MIGRATE_ASYNC &&
+            !migrate_async_suitable(mt)) {
             set_unsuitable = false;
             goto next_pageblock;
         }
@@ -981,6 +984,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
     int ret;
     unsigned long start_pfn = zone->zone_start_pfn;
     unsigned long end_pfn = zone_end_pfn(zone);
+    const bool sync = cc->mode != MIGRATE_ASYNC;

     ret = compaction_suitable(zone, cc->order);
     switch (ret) {
@@ -1006,7 +1010,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
      * information on where the scanners should start but check that it
      * is initialised by ensuring the values are within zone boundaries.
      */
-    cc->migrate_pfn = zone->compact_cached_migrate_pfn[cc->sync];
+    cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
     cc->free_pfn = zone->compact_cached_free_pfn;
     if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
         cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
@@ -1040,8 +1044,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)

         nr_migrate = cc->nr_migratepages;
         err = migrate_pages(&cc->migratepages, compaction_alloc,
-                compaction_free, (unsigned long)cc,
-                cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
+                compaction_free, (unsigned long)cc, cc->mode,
                 MR_COMPACTION);
         update_nr_listpages(cc);
         nr_remaining = cc->nr_migratepages;
@@ -1074,9 +1077,8 @@ out:
     return ret;
 }

-static unsigned long compact_zone_order(struct zone *zone,
-                 int order, gfp_t gfp_mask,
-                 bool sync, bool *contended)
+static unsigned long compact_zone_order(struct zone *zone, int order,
+        gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
 {
     unsigned long ret;
     struct compact_control cc = {
@@ -1085,7 +1087,7 @@ static unsigned long compact_zone_order(struct zone *zone,
         .order = order,
         .migratetype = allocflags_to_migratetype(gfp_mask),
         .zone = zone,
-        .sync = sync,
+        .mode = mode,
     };
     INIT_LIST_HEAD(&cc.freepages);
     INIT_LIST_HEAD(&cc.migratepages);
@@ -1107,7 +1109,7 @@ int sysctl_extfrag_threshold = 500;
  * @order: The order of the current allocation
  * @gfp_mask: The GFP mask of the current allocation
  * @nodemask: The allowed nodes to allocate from
- * @sync: Whether migration is synchronous or not
+ * @mode: The migration mode for async, sync light, or sync migration
  * @contended: Return value that is true if compaction was aborted due to lock contention
  * @page: Optionally capture a free page of the requested order during compaction
  *
@@ -1115,7 +1117,7 @@ int sysctl_extfrag_threshold = 500;
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
             int order, gfp_t gfp_mask, nodemask_t *nodemask,
-            bool sync, bool *contended)
+            enum migrate_mode mode, bool *contended)
 {
     enum zone_type high_zoneidx = gfp_zone(gfp_mask);
     int may_enter_fs = gfp_mask & __GFP_FS;
@@ -1140,7 +1142,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                                 nodemask) {
         int status;

-        status = compact_zone_order(zone, order, gfp_mask, sync,
+        status = compact_zone_order(zone, order, gfp_mask, mode,
                         contended);
         rc = max(status, rc);

@@ -1190,7 +1192,7 @@ void compact_pgdat(pg_data_t *pgdat, int order)
 {
     struct compact_control cc = {
         .order = order,
-        .sync = false,
+        .mode = MIGRATE_ASYNC,
     };

     if (!order)
@@ -1203,7 +1205,7 @@ static void compact_node(int nid)
 {
     struct compact_control cc = {
         .order = -1,
-        .sync = true,
+        .mode = MIGRATE_SYNC,
         .ignore_skip_hint = true,
     };

diff --git a/mm/internal.h b/mm/internal.h
index 6ee580d69ddd..a25424a24e0c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -134,7 +134,7 @@ struct compact_control {
     unsigned long nr_migratepages;  /* Number of pages to migrate */
     unsigned long free_pfn;         /* isolate_freepages search base */
     unsigned long migrate_pfn;      /* isolate_migratepages search base */
-    bool sync;                      /* Synchronous migration */
+    enum migrate_mode mode;         /* Async or sync migration mode */
     bool ignore_skip_hint;          /* Scan blocks even if marked skip */
     bool finished_update_free;      /* True when the zone cached pfns are
                                      * no longer being updated
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 027d0294413a..afb29da0576c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2217,7 +2217,7 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
     struct zonelist *zonelist, enum zone_type high_zoneidx,
     nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-    int migratetype, bool sync_migration,
+    int migratetype, enum migrate_mode mode,
     bool *contended_compaction, bool *deferred_compaction,
     unsigned long *did_some_progress)
 {
@@ -2231,7 +2231,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,

     current->flags |= PF_MEMALLOC;
     *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-                        nodemask, sync_migration,
+                        nodemask, mode,
                         contended_compaction);
     current->flags &= ~PF_MEMALLOC;

@@ -2264,7 +2264,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
          * As async compaction considers a subset of pageblocks, only
          * defer if the failure was a sync compaction failure.
          */
-        if (sync_migration)
+        if (mode != MIGRATE_ASYNC)
             defer_compaction(preferred_zone, order);

         cond_resched();
@@ -2277,9 +2277,8 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
     struct zonelist *zonelist, enum zone_type high_zoneidx,
     nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-    int migratetype, bool sync_migration,
-    bool *contended_compaction, bool *deferred_compaction,
-    unsigned long *did_some_progress)
+    int migratetype, enum migrate_mode mode, bool *contended_compaction,
+    bool *deferred_compaction, unsigned long *did_some_progress)
 {
     return NULL;
 }
@@ -2474,7 +2473,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
     int alloc_flags;
     unsigned long pages_reclaimed = 0;
     unsigned long did_some_progress;
-    bool sync_migration = false;
+    enum migrate_mode migration_mode = MIGRATE_ASYNC;
     bool deferred_compaction = false;
     bool contended_compaction = false;

@@ -2568,17 +2567,15 @@ rebalance:
     * Try direct compaction. The first pass is asynchronous. Subsequent
     * attempts after direct reclaim are synchronous
     */
-    page = __alloc_pages_direct_compact(gfp_mask, order,
-                    zonelist, high_zoneidx,
-                    nodemask,
-                    alloc_flags, preferred_zone,
-                    migratetype, sync_migration,
-                    &contended_compaction,
+    page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
+                    high_zoneidx, nodemask, alloc_flags,
+                    preferred_zone, migratetype,
+                    migration_mode, &contended_compaction,
                     &deferred_compaction,
                     &did_some_progress);
     if (page)
         goto got_pg;
-    sync_migration = true;
+    migration_mode = MIGRATE_SYNC_LIGHT;

     /*
      * If compaction is deferred for high-order allocations, it is because
@@ -2653,12 +2650,10 @@ rebalance:
      * direct reclaim and reclaim/compaction depends on compaction
      * being called after reclaim so call directly if necessary
      */
-    page = __alloc_pages_direct_compact(gfp_mask, order,
-                    zonelist, high_zoneidx,
-                    nodemask,
-                    alloc_flags, preferred_zone,
-                    migratetype, sync_migration,
-                    &contended_compaction,
+    page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
+                    high_zoneidx, nodemask, alloc_flags,
+                    preferred_zone, migratetype,
+                    migration_mode, &contended_compaction,
                     &deferred_compaction,
                     &did_some_progress);
     if (page)
@@ -6218,7 +6213,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
         cc->nr_migratepages -= nr_reclaimed;

         ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
-                    NULL, 0, MIGRATE_SYNC, MR_CMA);
+                    NULL, 0, cc->mode, MR_CMA);
     }
     if (ret < 0) {
         putback_movable_pages(&cc->migratepages);
@@ -6257,7 +6252,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
         .nr_migratepages = 0,
         .order = -1,
         .zone = page_zone(pfn_to_page(start)),
-        .sync = true,
+        .mode = MIGRATE_SYNC,
         .ignore_skip_hint = true,
     };
     INIT_LIST_HEAD(&cc.migratepages);
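
[Editor's note] Taken together, the mm/page_alloc.c hunks make the
allocator slow path escalate the migration mode across compaction
attempts. The standalone sketch below mirrors only that control flow;
direct_compact() is a made-up stub, not the kernel function:

    /*
     * Standalone sketch of the mode escalation that the mm/page_alloc.c
     * hunks above introduce in __alloc_pages_slowpath(). Everything here
     * is an illustrative stub -- only the control flow mirrors the patch.
     */
    #include <stdio.h>

    enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

    /* Stub standing in for __alloc_pages_direct_compact(): pretend the
     * async pass fails and the sync-light pass succeeds. */
    static void *direct_compact(enum migrate_mode mode)
    {
            return mode == MIGRATE_ASYNC ? NULL : "page";
    }

    int main(void)
    {
            /* The first compaction pass is asynchronous... */
            enum migrate_mode migration_mode = MIGRATE_ASYNC;
            void *page = direct_compact(migration_mode);

            if (!page) {
                    /* ...and later attempts escalate to sync-light,
                     * as the rebalance: hunks above do. */
                    migration_mode = MIGRATE_SYNC_LIGHT;
                    page = direct_compact(migration_mode);
            }
            printf("allocated page: %s\n", page ? "yes" : "no");
            return 0;
    }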