diff options
| author | Andrey Ryabinin <aryabinin@virtuozzo.com> | 2019-03-05 18:49:39 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-06 00:07:21 -0500 |
| commit | f4b7e272b5c0425915e2115068e0a5a20a3a628e (patch) | |
| tree | 7b5894a4cc657a7606aa183187392b7eae7e22bc /mm/compaction.c | |
| parent | a7ca12f9d905e7437dd3beb9cbb8e85bc2b991f4 (diff) | |
mm: remove zone_lru_lock() function, access ->lru_lock directly
We have a common pattern for accessing the lru_lock from a page pointer:
zone_lru_lock(page_zone(page))
Which is silly, because it unfolds to this:
&NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]->zone_pgdat->lru_lock
while we can simply do
&NODE_DATA(page_to_nid(page))->lru_lock
Remove the zone_lru_lock() function, since it only complicates things. Use
'page_pgdat(page)->lru_lock' pattern instead.
[aryabinin@virtuozzo.com: a slightly better version of __split_huge_page()]
Link: http://lkml.kernel.org/r/20190301121651.7741-1-aryabinin@virtuozzo.com
Link: http://lkml.kernel.org/r/20190228083329.31892-2-aryabinin@virtuozzo.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
| -rw-r--r-- | mm/compaction.c | 15 |
1 files changed, 8 insertions, 7 deletions
diff --git a/mm/compaction.c b/mm/compaction.c index 1cc871da3fda..e054276cf397 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
| @@ -775,6 +775,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, | |||
| 775 | unsigned long end_pfn, isolate_mode_t isolate_mode) | 775 | unsigned long end_pfn, isolate_mode_t isolate_mode) |
| 776 | { | 776 | { |
| 777 | struct zone *zone = cc->zone; | 777 | struct zone *zone = cc->zone; |
| 778 | pg_data_t *pgdat = zone->zone_pgdat; | ||
| 778 | unsigned long nr_scanned = 0, nr_isolated = 0; | 779 | unsigned long nr_scanned = 0, nr_isolated = 0; |
| 779 | struct lruvec *lruvec; | 780 | struct lruvec *lruvec; |
| 780 | unsigned long flags = 0; | 781 | unsigned long flags = 0; |
| @@ -839,8 +840,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, | |||
| 839 | * if contended. | 840 | * if contended. |
| 840 | */ | 841 | */ |
| 841 | if (!(low_pfn % SWAP_CLUSTER_MAX) | 842 | if (!(low_pfn % SWAP_CLUSTER_MAX) |
| 842 | && compact_unlock_should_abort(zone_lru_lock(zone), flags, | 843 | && compact_unlock_should_abort(&pgdat->lru_lock, |
| 843 | &locked, cc)) | 844 | flags, &locked, cc)) |
| 844 | break; | 845 | break; |
| 845 | 846 | ||
| 846 | if (!pfn_valid_within(low_pfn)) | 847 | if (!pfn_valid_within(low_pfn)) |
| @@ -910,7 +911,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, | |||
| 910 | if (unlikely(__PageMovable(page)) && | 911 | if (unlikely(__PageMovable(page)) && |
| 911 | !PageIsolated(page)) { | 912 | !PageIsolated(page)) { |
| 912 | if (locked) { | 913 | if (locked) { |
| 913 | spin_unlock_irqrestore(zone_lru_lock(zone), | 914 | spin_unlock_irqrestore(&pgdat->lru_lock, |
| 914 | flags); | 915 | flags); |
| 915 | locked = false; | 916 | locked = false; |
| 916 | } | 917 | } |
| @@ -940,7 +941,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, | |||
| 940 | 941 | ||
| 941 | /* If we already hold the lock, we can skip some rechecking */ | 942 | /* If we already hold the lock, we can skip some rechecking */ |
| 942 | if (!locked) { | 943 | if (!locked) { |
| 943 | locked = compact_lock_irqsave(zone_lru_lock(zone), | 944 | locked = compact_lock_irqsave(&pgdat->lru_lock, |
| 944 | &flags, cc); | 945 | &flags, cc); |
| 945 | 946 | ||
| 946 | /* Try get exclusive access under lock */ | 947 | /* Try get exclusive access under lock */ |
| @@ -965,7 +966,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, | |||
| 965 | } | 966 | } |
| 966 | } | 967 | } |
| 967 | 968 | ||
| 968 | lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); | 969 | lruvec = mem_cgroup_page_lruvec(page, pgdat); |
| 969 | 970 | ||
| 970 | /* Try isolate the page */ | 971 | /* Try isolate the page */ |
| 971 | if (__isolate_lru_page(page, isolate_mode) != 0) | 972 | if (__isolate_lru_page(page, isolate_mode) != 0) |
| @@ -1007,7 +1008,7 @@ isolate_fail: | |||
| 1007 | */ | 1008 | */ |
| 1008 | if (nr_isolated) { | 1009 | if (nr_isolated) { |
| 1009 | if (locked) { | 1010 | if (locked) { |
| 1010 | spin_unlock_irqrestore(zone_lru_lock(zone), flags); | 1011 | spin_unlock_irqrestore(&pgdat->lru_lock, flags); |
| 1011 | locked = false; | 1012 | locked = false; |
| 1012 | } | 1013 | } |
| 1013 | putback_movable_pages(&cc->migratepages); | 1014 | putback_movable_pages(&cc->migratepages); |
| @@ -1034,7 +1035,7 @@ isolate_fail: | |||
| 1034 | 1035 | ||
| 1035 | isolate_abort: | 1036 | isolate_abort: |
| 1036 | if (locked) | 1037 | if (locked) |
| 1037 | spin_unlock_irqrestore(zone_lru_lock(zone), flags); | 1038 | spin_unlock_irqrestore(&pgdat->lru_lock, flags); |
| 1038 | 1039 | ||
| 1039 | /* | 1040 | /* |
| 1040 | * Updated the cached scanner pfn once the pageblock has been scanned | 1041 | * Updated the cached scanner pfn once the pageblock has been scanned |
