diff options
author | Aaron Lu <aaron.lu@intel.com> | 2018-04-05 19:24:06 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-06 00:36:26 -0400 |
commit | 77ba9062e43c7e4966d9ff3afd87dca86542f86a (patch) | |
tree | c9cf12435f9382e2929621e7aa92b48d664cb247 /mm/page_alloc.c | |
parent | bc3106b26cf6a6f214fd1a8538736afc39ae1b5c (diff) |
mm/free_pcppages_bulk: update pcp->count inside
Matthew Wilcox found that all callers of free_pcppages_bulk() currently
update pcp->count immediately after so it's natural to do it inside
free_pcppages_bulk().
No functionality or performance change is expected from this patch.
Link: http://lkml.kernel.org/r/20180301062845.26038-2-aaron.lu@intel.com
Signed-off-by: Aaron Lu <aaron.lu@intel.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kemi Wang <kemi.wang@intel.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 10 |
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 86b7f0430e02..08c195cdf161 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1112,6 +1112,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, | |||
1112 | page = list_last_entry(list, struct page, lru); | 1112 | page = list_last_entry(list, struct page, lru); |
1113 | /* must delete as __free_one_page list manipulates */ | 1113 | /* must delete as __free_one_page list manipulates */ |
1114 | list_del(&page->lru); | 1114 | list_del(&page->lru); |
1115 | pcp->count--; | ||
1115 | 1116 | ||
1116 | mt = get_pcppage_migratetype(page); | 1117 | mt = get_pcppage_migratetype(page); |
1117 | /* MIGRATE_ISOLATE page should not go to pcplists */ | 1118 | /* MIGRATE_ISOLATE page should not go to pcplists */ |
@@ -2495,10 +2496,8 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) | |||
2495 | local_irq_save(flags); | 2496 | local_irq_save(flags); |
2496 | batch = READ_ONCE(pcp->batch); | 2497 | batch = READ_ONCE(pcp->batch); |
2497 | to_drain = min(pcp->count, batch); | 2498 | to_drain = min(pcp->count, batch); |
2498 | if (to_drain > 0) { | 2499 | if (to_drain > 0) |
2499 | free_pcppages_bulk(zone, to_drain, pcp); | 2500 | free_pcppages_bulk(zone, to_drain, pcp); |
2500 | pcp->count -= to_drain; | ||
2501 | } | ||
2502 | local_irq_restore(flags); | 2501 | local_irq_restore(flags); |
2503 | } | 2502 | } |
2504 | #endif | 2503 | #endif |
@@ -2520,10 +2519,8 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) | |||
2520 | pset = per_cpu_ptr(zone->pageset, cpu); | 2519 | pset = per_cpu_ptr(zone->pageset, cpu); |
2521 | 2520 | ||
2522 | pcp = &pset->pcp; | 2521 | pcp = &pset->pcp; |
2523 | if (pcp->count) { | 2522 | if (pcp->count) |
2524 | free_pcppages_bulk(zone, pcp->count, pcp); | 2523 | free_pcppages_bulk(zone, pcp->count, pcp); |
2525 | pcp->count = 0; | ||
2526 | } | ||
2527 | local_irq_restore(flags); | 2524 | local_irq_restore(flags); |
2528 | } | 2525 | } |
2529 | 2526 | ||
@@ -2747,7 +2744,6 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn) | |||
2747 | if (pcp->count >= pcp->high) { | 2744 | if (pcp->count >= pcp->high) { |
2748 | unsigned long batch = READ_ONCE(pcp->batch); | 2745 | unsigned long batch = READ_ONCE(pcp->batch); |
2749 | free_pcppages_bulk(zone, batch, pcp); | 2746 | free_pcppages_bulk(zone, batch, pcp); |
2750 | pcp->count -= batch; | ||
2751 | } | 2747 | } |
2752 | } | 2748 | } |
2753 | 2749 | ||