diff options
author | Joonsoo Kim <iamjoonsoo.kim@lge.com> | 2017-09-08 19:12:59 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-09-08 21:26:47 -0400 |
commit | 9472f23c9eeba3b32e65a62fe2a9b3e827888afa (patch) | |
tree | 89192d7497a5b90f8b232737df2737489f29d1f3 /mm/mlock.c | |
parent | 638032224ed762a29baca1fc37f1168efc2554ae (diff) |
mm/mlock.c: use page_zone() instead of page_zone_id()
page_zone_id() is a specialized function to compare the zone for the pages
that are within the section range. If the sections of the pages are
different, page_zone_id() can differ even if their zone is the same.
This wrong usage doesn't cause any actual problem since
__munlock_pagevec_fill() would be called again with the failed index.
However, it's better to use more appropriate function here.
Link: http://lkml.kernel.org/r/1503559211-10259-1-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mlock.c')
-rw-r--r-- | mm/mlock.c | 10 |
1 files changed, 4 insertions, 6 deletions
diff --git a/mm/mlock.c b/mm/mlock.c index b562b5523a65..dfc6f1912176 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -365,8 +365,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) | |||
365 | * @start + PAGE_SIZE when no page could be added by the pte walk. | 365 | * @start + PAGE_SIZE when no page could be added by the pte walk. |
366 | */ | 366 | */ |
367 | static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, | 367 | static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, |
368 | struct vm_area_struct *vma, int zoneid, unsigned long start, | 368 | struct vm_area_struct *vma, struct zone *zone, |
369 | unsigned long end) | 369 | unsigned long start, unsigned long end) |
370 | { | 370 | { |
371 | pte_t *pte; | 371 | pte_t *pte; |
372 | spinlock_t *ptl; | 372 | spinlock_t *ptl; |
@@ -394,7 +394,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, | |||
394 | * Break if page could not be obtained or the page's node+zone does not | 394 | * Break if page could not be obtained or the page's node+zone does not |
395 | * match | 395 | * match |
396 | */ | 396 | */ |
397 | if (!page || page_zone_id(page) != zoneid) | 397 | if (!page || page_zone(page) != zone) |
398 | break; | 398 | break; |
399 | 399 | ||
400 | /* | 400 | /* |
@@ -446,7 +446,6 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, | |||
446 | unsigned long page_increm; | 446 | unsigned long page_increm; |
447 | struct pagevec pvec; | 447 | struct pagevec pvec; |
448 | struct zone *zone; | 448 | struct zone *zone; |
449 | int zoneid; | ||
450 | 449 | ||
451 | pagevec_init(&pvec, 0); | 450 | pagevec_init(&pvec, 0); |
452 | /* | 451 | /* |
@@ -481,7 +480,6 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, | |||
481 | */ | 480 | */ |
482 | pagevec_add(&pvec, page); | 481 | pagevec_add(&pvec, page); |
483 | zone = page_zone(page); | 482 | zone = page_zone(page); |
484 | zoneid = page_zone_id(page); | ||
485 | 483 | ||
486 | /* | 484 | /* |
487 | * Try to fill the rest of pagevec using fast | 485 | * Try to fill the rest of pagevec using fast |
@@ -490,7 +488,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, | |||
490 | * pagevec. | 488 | * pagevec. |
491 | */ | 489 | */ |
492 | start = __munlock_pagevec_fill(&pvec, vma, | 490 | start = __munlock_pagevec_fill(&pvec, vma, |
493 | zoneid, start, end); | 491 | zone, start, end); |
494 | __munlock_pagevec(&pvec, zone); | 492 | __munlock_pagevec(&pvec, zone); |
495 | goto next; | 493 | goto next; |
496 | } | 494 | } |