summaryrefslogtreecommitdiffstats
path: root/mm/compaction.c
diff options
context:
space:
mode:
author Vlastimil Babka <vbabka@suse.cz> 2016-05-19 20:11:48 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2016-05-19 22:12:14 -0400
commit 06b6640a3902d6d50c1bb4fb1f29a46b207dbf08 (patch)
tree 9cfd71e35519fd5b073e7a03d1f487bf12df2131 /mm/compaction.c
parent e4c5800a3991f0c6a766983535dfc10d51802cf6 (diff)
mm, compaction: wrap calculating first and last pfn of pageblock
Compaction code has accumulated numerous instances of manual calculations of the first (inclusive) and last (exclusive) pfn of a pageblock (or a smaller block of given order), given a pfn within the pageblock. Wrap these calculations by introducing pageblock_start_pfn(pfn) and pageblock_end_pfn(pfn) macros. [vbabka@suse.cz: fix crash in get_pfnblock_flags_mask() from isolate_freepages():] Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Rik van Riel <riel@redhat.com> Cc: David Rientjes <rientjes@google.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Michal Hocko <mhocko@suse.com> Cc: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--mm/compaction.c33
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 8fa254043801..017a1a1963cb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -42,6 +42,11 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
42#define CREATE_TRACE_POINTS 42#define CREATE_TRACE_POINTS
43#include <trace/events/compaction.h> 43#include <trace/events/compaction.h>
44 44
45#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
46#define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
47#define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
48#define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
49
45static unsigned long release_freepages(struct list_head *freelist) 50static unsigned long release_freepages(struct list_head *freelist)
46{ 51{
47 struct page *page, *next; 52 struct page *page, *next;
@@ -161,7 +166,7 @@ static void reset_cached_positions(struct zone *zone)
161 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; 166 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
162 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; 167 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
163 zone->compact_cached_free_pfn = 168 zone->compact_cached_free_pfn =
164 round_down(zone_end_pfn(zone) - 1, pageblock_nr_pages); 169 pageblock_start_pfn(zone_end_pfn(zone) - 1);
165} 170}
166 171
167/* 172/*
@@ -519,10 +524,10 @@ isolate_freepages_range(struct compact_control *cc,
519 LIST_HEAD(freelist); 524 LIST_HEAD(freelist);
520 525
521 pfn = start_pfn; 526 pfn = start_pfn;
522 block_start_pfn = pfn & ~(pageblock_nr_pages - 1); 527 block_start_pfn = pageblock_start_pfn(pfn);
523 if (block_start_pfn < cc->zone->zone_start_pfn) 528 if (block_start_pfn < cc->zone->zone_start_pfn)
524 block_start_pfn = cc->zone->zone_start_pfn; 529 block_start_pfn = cc->zone->zone_start_pfn;
525 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); 530 block_end_pfn = pageblock_end_pfn(pfn);
526 531
527 for (; pfn < end_pfn; pfn += isolated, 532 for (; pfn < end_pfn; pfn += isolated,
528 block_start_pfn = block_end_pfn, 533 block_start_pfn = block_end_pfn,
@@ -538,8 +543,8 @@ isolate_freepages_range(struct compact_control *cc,
538 * scanning range to right one. 543 * scanning range to right one.
539 */ 544 */
540 if (pfn >= block_end_pfn) { 545 if (pfn >= block_end_pfn) {
541 block_start_pfn = pfn & ~(pageblock_nr_pages - 1); 546 block_start_pfn = pageblock_start_pfn(pfn);
542 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); 547 block_end_pfn = pageblock_end_pfn(pfn);
543 block_end_pfn = min(block_end_pfn, end_pfn); 548 block_end_pfn = min(block_end_pfn, end_pfn);
544 } 549 }
545 550
@@ -834,10 +839,10 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
834 839
835 /* Scan block by block. First and last block may be incomplete */ 840 /* Scan block by block. First and last block may be incomplete */
836 pfn = start_pfn; 841 pfn = start_pfn;
837 block_start_pfn = pfn & ~(pageblock_nr_pages - 1); 842 block_start_pfn = pageblock_start_pfn(pfn);
838 if (block_start_pfn < cc->zone->zone_start_pfn) 843 if (block_start_pfn < cc->zone->zone_start_pfn)
839 block_start_pfn = cc->zone->zone_start_pfn; 844 block_start_pfn = cc->zone->zone_start_pfn;
840 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); 845 block_end_pfn = pageblock_end_pfn(pfn);
841 846
842 for (; pfn < end_pfn; pfn = block_end_pfn, 847 for (; pfn < end_pfn; pfn = block_end_pfn,
843 block_start_pfn = block_end_pfn, 848 block_start_pfn = block_end_pfn,
@@ -924,10 +929,10 @@ static void isolate_freepages(struct compact_control *cc)
924 * is using. 929 * is using.
925 */ 930 */
926 isolate_start_pfn = cc->free_pfn; 931 isolate_start_pfn = cc->free_pfn;
927 block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1); 932 block_start_pfn = pageblock_start_pfn(cc->free_pfn);
928 block_end_pfn = min(block_start_pfn + pageblock_nr_pages, 933 block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
929 zone_end_pfn(zone)); 934 zone_end_pfn(zone));
930 low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); 935 low_pfn = pageblock_end_pfn(cc->migrate_pfn);
931 936
932 /* 937 /*
933 * Isolate free pages until enough are available to migrate the 938 * Isolate free pages until enough are available to migrate the
@@ -1081,12 +1086,12 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
1081 * initialized by compact_zone() 1086 * initialized by compact_zone()
1082 */ 1087 */
1083 low_pfn = cc->migrate_pfn; 1088 low_pfn = cc->migrate_pfn;
1084 block_start_pfn = cc->migrate_pfn & ~(pageblock_nr_pages - 1); 1089 block_start_pfn = pageblock_start_pfn(low_pfn);
1085 if (block_start_pfn < zone->zone_start_pfn) 1090 if (block_start_pfn < zone->zone_start_pfn)
1086 block_start_pfn = zone->zone_start_pfn; 1091 block_start_pfn = zone->zone_start_pfn;
1087 1092
1088 /* Only scan within a pageblock boundary */ 1093 /* Only scan within a pageblock boundary */
1089 block_end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages); 1094 block_end_pfn = pageblock_end_pfn(low_pfn);
1090 1095
1091 /* 1096 /*
1092 * Iterate over whole pageblocks until we find the first suitable. 1097 * Iterate over whole pageblocks until we find the first suitable.
@@ -1343,7 +1348,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
1343 cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; 1348 cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1344 cc->free_pfn = zone->compact_cached_free_pfn; 1349 cc->free_pfn = zone->compact_cached_free_pfn;
1345 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { 1350 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
1346 cc->free_pfn = round_down(end_pfn - 1, pageblock_nr_pages); 1351 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1347 zone->compact_cached_free_pfn = cc->free_pfn; 1352 zone->compact_cached_free_pfn = cc->free_pfn;
1348 } 1353 }
1349 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { 1354 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
@@ -1411,7 +1416,7 @@ check_drain:
1411 if (cc->order > 0 && cc->last_migrated_pfn) { 1416 if (cc->order > 0 && cc->last_migrated_pfn) {
1412 int cpu; 1417 int cpu;
1413 unsigned long current_block_start = 1418 unsigned long current_block_start =
1414 cc->migrate_pfn & ~((1UL << cc->order) - 1); 1419 block_start_pfn(cc->migrate_pfn, cc->order);
1415 1420
1416 if (cc->last_migrated_pfn < current_block_start) { 1421 if (cc->last_migrated_pfn < current_block_start) {
1417 cpu = get_cpu(); 1422 cpu = get_cpu();
@@ -1436,7 +1441,7 @@ out:
1436 cc->nr_freepages = 0; 1441 cc->nr_freepages = 0;
1437 VM_BUG_ON(free_pfn == 0); 1442 VM_BUG_ON(free_pfn == 0);
1438 /* The cached pfn is always the first in a pageblock */ 1443 /* The cached pfn is always the first in a pageblock */
1439 free_pfn &= ~(pageblock_nr_pages-1); 1444 free_pfn = pageblock_start_pfn(free_pfn);
1440 /* 1445 /*
1441 * Only go back, not forward. The cached pfn might have been 1446 * Only go back, not forward. The cached pfn might have been
1442 * already reset to zone end in compact_finished() 1447 * already reset to zone end in compact_finished()