aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/linux/compaction.h17
-rw-r--r--mm/compaction.c23
-rw-r--r--mm/internal.h22
-rw-r--r--mm/page_alloc.c27
4 files changed, 44 insertions, 45 deletions
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 3238ffa33f68..f2efda2e6ac6 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -21,6 +21,8 @@
21/* Zone lock or lru_lock was contended in async compaction */ 21/* Zone lock or lru_lock was contended in async compaction */
22#define COMPACT_CONTENDED_LOCK 2 22#define COMPACT_CONTENDED_LOCK 2
23 23
24struct alloc_context; /* in mm/internal.h */
25
24#ifdef CONFIG_COMPACTION 26#ifdef CONFIG_COMPACTION
25extern int sysctl_compact_memory; 27extern int sysctl_compact_memory;
26extern int sysctl_compaction_handler(struct ctl_table *table, int write, 28extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@@ -30,10 +32,9 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
30 void __user *buffer, size_t *length, loff_t *ppos); 32 void __user *buffer, size_t *length, loff_t *ppos);
31 33
32extern int fragmentation_index(struct zone *zone, unsigned int order); 34extern int fragmentation_index(struct zone *zone, unsigned int order);
33extern unsigned long try_to_compact_pages(struct zonelist *zonelist, 35extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
34 int order, gfp_t gfp_mask, nodemask_t *mask, 36 int alloc_flags, const struct alloc_context *ac,
35 enum migrate_mode mode, int *contended, 37 enum migrate_mode mode, int *contended);
36 int alloc_flags, int classzone_idx);
37extern void compact_pgdat(pg_data_t *pgdat, int order); 38extern void compact_pgdat(pg_data_t *pgdat, int order);
38extern void reset_isolation_suitable(pg_data_t *pgdat); 39extern void reset_isolation_suitable(pg_data_t *pgdat);
39extern unsigned long compaction_suitable(struct zone *zone, int order, 40extern unsigned long compaction_suitable(struct zone *zone, int order,
@@ -101,10 +102,10 @@ static inline bool compaction_restarting(struct zone *zone, int order)
101} 102}
102 103
103#else 104#else
104static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, 105static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
105 int order, gfp_t gfp_mask, nodemask_t *nodemask, 106 unsigned int order, int alloc_flags,
106 enum migrate_mode mode, int *contended, 107 const struct alloc_context *ac,
107 int alloc_flags, int classzone_idx) 108 enum migrate_mode mode, int *contended)
108{ 109{
109 return COMPACT_CONTINUE; 110 return COMPACT_CONTINUE;
110} 111}
diff --git a/mm/compaction.c b/mm/compaction.c
index 546e571e9d60..9c7e6909dd29 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1335,22 +1335,20 @@ int sysctl_extfrag_threshold = 500;
1335 1335
1336/** 1336/**
1337 * try_to_compact_pages - Direct compact to satisfy a high-order allocation 1337 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
1338 * @zonelist: The zonelist used for the current allocation
1339 * @order: The order of the current allocation
1340 * @gfp_mask: The GFP mask of the current allocation 1338 * @gfp_mask: The GFP mask of the current allocation
1341 * @nodemask: The allowed nodes to allocate from 1339 * @order: The order of the current allocation
1340 * @alloc_flags: The allocation flags of the current allocation
1341 * @ac: The context of current allocation
1342 * @mode: The migration mode for async, sync light, or sync migration 1342 * @mode: The migration mode for async, sync light, or sync migration
1343 * @contended: Return value that determines if compaction was aborted due to 1343 * @contended: Return value that determines if compaction was aborted due to
1344 * need_resched() or lock contention 1344 * need_resched() or lock contention
1345 * 1345 *
1346 * This is the main entry point for direct page compaction. 1346 * This is the main entry point for direct page compaction.
1347 */ 1347 */
1348unsigned long try_to_compact_pages(struct zonelist *zonelist, 1348unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1349 int order, gfp_t gfp_mask, nodemask_t *nodemask, 1349 int alloc_flags, const struct alloc_context *ac,
1350 enum migrate_mode mode, int *contended, 1350 enum migrate_mode mode, int *contended)
1351 int alloc_flags, int classzone_idx)
1352{ 1351{
1353 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1354 int may_enter_fs = gfp_mask & __GFP_FS; 1352 int may_enter_fs = gfp_mask & __GFP_FS;
1355 int may_perform_io = gfp_mask & __GFP_IO; 1353 int may_perform_io = gfp_mask & __GFP_IO;
1356 struct zoneref *z; 1354 struct zoneref *z;
@@ -1365,8 +1363,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
1365 return COMPACT_SKIPPED; 1363 return COMPACT_SKIPPED;
1366 1364
1367 /* Compact each zone in the list */ 1365 /* Compact each zone in the list */
1368 for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, 1366 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1369 nodemask) { 1367 ac->nodemask) {
1370 int status; 1368 int status;
1371 int zone_contended; 1369 int zone_contended;
1372 1370
@@ -1374,7 +1372,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
1374 continue; 1372 continue;
1375 1373
1376 status = compact_zone_order(zone, order, gfp_mask, mode, 1374 status = compact_zone_order(zone, order, gfp_mask, mode,
1377 &zone_contended, alloc_flags, classzone_idx); 1375 &zone_contended, alloc_flags,
1376 ac->classzone_idx);
1378 rc = max(status, rc); 1377 rc = max(status, rc);
1379 /* 1378 /*
1380 * It takes at least one zone that wasn't lock contended 1379 * It takes at least one zone that wasn't lock contended
@@ -1384,7 +1383,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
1384 1383
1385 /* If a normal allocation would succeed, stop compacting */ 1384 /* If a normal allocation would succeed, stop compacting */
1386 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 1385 if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
1387 classzone_idx, alloc_flags)) { 1386 ac->classzone_idx, alloc_flags)) {
1388 /* 1387 /*
1389 * We think the allocation will succeed in this zone, 1388 * We think the allocation will succeed in this zone,
1390 * but it is not certain, hence the false. The caller 1389 * but it is not certain, hence the false. The caller
diff --git a/mm/internal.h b/mm/internal.h
index efad241f7014..c4d6c9b43491 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -110,6 +110,28 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
110 */ 110 */
111 111
112/* 112/*
113 * Structure for holding the mostly immutable allocation parameters passed
114 * between functions involved in allocations, including the alloc_pages*
115 * family of functions.
116 *
117 * nodemask, migratetype and high_zoneidx are initialized only once in
118 * __alloc_pages_nodemask() and then never change.
119 *
120 * zonelist, preferred_zone and classzone_idx are set first in
121 * __alloc_pages_nodemask() for the fast path, and might be later changed
122 * in __alloc_pages_slowpath(). All other functions pass the whole structure
123 * by a const pointer.
124 */
125struct alloc_context {
126 struct zonelist *zonelist;
127 nodemask_t *nodemask;
128 struct zone *preferred_zone;
129 int classzone_idx;
130 int migratetype;
131 enum zone_type high_zoneidx;
132};
133
134/*
113 * Locate the struct page for both the matching buddy in our 135 * Locate the struct page for both the matching buddy in our
114 * pair (buddy1) and the combined O(n+1) page they form (page). 136 * pair (buddy1) and the combined O(n+1) page they form (page).
115 * 137 *
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4aead0bd8d44..d664eb922a7d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -232,27 +232,6 @@ EXPORT_SYMBOL(nr_node_ids);
232EXPORT_SYMBOL(nr_online_nodes); 232EXPORT_SYMBOL(nr_online_nodes);
233#endif 233#endif
234 234
235/*
236 * Structure for holding the mostly immutable allocation parameters passed
237 * between alloc_pages* family of functions.
238 *
239 * nodemask, migratetype and high_zoneidx are initialized only once in
240 * __alloc_pages_nodemask() and then never change.
241 *
242 * zonelist, preferred_zone and classzone_idx are set first in
243 * __alloc_pages_nodemask() for the fast path, and might be later changed
244 * in __alloc_pages_slowpath(). All other functions pass the whole strucure
245 * by a const pointer.
246 */
247struct alloc_context {
248 struct zonelist *zonelist;
249 nodemask_t *nodemask;
250 struct zone *preferred_zone;
251 int classzone_idx;
252 int migratetype;
253 enum zone_type high_zoneidx;
254};
255
256int page_group_by_mobility_disabled __read_mostly; 235int page_group_by_mobility_disabled __read_mostly;
257 236
258void set_pageblock_migratetype(struct page *page, int migratetype) 237void set_pageblock_migratetype(struct page *page, int migratetype)
@@ -2429,10 +2408,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2429 return NULL; 2408 return NULL;
2430 2409
2431 current->flags |= PF_MEMALLOC; 2410 current->flags |= PF_MEMALLOC;
2432 compact_result = try_to_compact_pages(ac->zonelist, order, gfp_mask, 2411 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
2433 ac->nodemask, mode, 2412 mode, contended_compaction);
2434 contended_compaction,
2435 alloc_flags, ac->classzone_idx);
2436 current->flags &= ~PF_MEMALLOC; 2413 current->flags &= ~PF_MEMALLOC;
2437 2414
2438 switch (compact_result) { 2415 switch (compact_result) {