author		Geliang Tang <geliangtang@163.com>	2016-01-14 18:20:30 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-14 19:00:49 -0500
commit		a16601c5458eb702f26cd48b9e8e1a9471700e72 (patch)
tree		d45488fe89564ec88182597bbaafbc133b020e00 /mm/page_alloc.c
parent		6ac0206bc0d13381e3ede3594bc0a3f8cd1d8ec9 (diff)
mm/page_alloc.c: use list_{first,last}_entry instead of list_entry
To make the intention clearer, use list_{first,last}_entry instead of
list_entry.
Signed-off-by: Geliang Tang <geliangtang@163.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
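
For context, the helpers this patch switches to are thin wrappers in include/linux/list.h. A rough sketch of their definitions, paraphrased from kernels of this era (not part of the patch itself):

	/* container_of() turns a pointer to a member into a pointer
	 * to the enclosing structure. */
	#define list_entry(ptr, type, member) \
		container_of(ptr, type, member)

	/* First/last element of a list known to be non-empty. */
	#define list_first_entry(ptr, type, member) \
		list_entry((ptr)->next, type, member)

	#define list_last_entry(ptr, type, member) \
		list_entry((ptr)->prev, type, member)

	/* First element, or NULL when the list is empty. */
	#define list_first_entry_or_null(ptr, type, member) \
		(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)

So list_entry(list->next, struct page, lru) and list_first_entry(list, struct page, lru) are equivalent; the named variant simply states the intent. list_first_entry_or_null() additionally folds the emptiness check into the lookup, which is what lets the hunks below drop their separate list_empty()/continue tests.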
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	23
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fbff97d7b298..b9747aa0fb59 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -805,7 +805,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			do {
 				int mt;	/* migratetype of the to-be-freed page */
 
-				page = list_entry(list->prev, struct page, lru);
+				page = list_last_entry(list, struct page, lru);
 				/* must delete as __free_one_page list manipulates */
 				list_del(&page->lru);
 
@@ -1410,11 +1410,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 	/* Find a page of the appropriate size in the preferred list */
 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
 		area = &(zone->free_area[current_order]);
-		if (list_empty(&area->free_list[migratetype]))
-			continue;
-
-		page = list_entry(area->free_list[migratetype].next,
-							struct page, lru);
+		page = list_first_entry_or_null(&area->free_list[migratetype],
+							struct page, lru);
+		if (!page)
+			continue;
 		list_del(&page->lru);
 		rmv_page_order(page);
 		area->nr_free--;
@@ -1693,12 +1692,12 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
 		for (order = 0; order < MAX_ORDER; order++) {
 			struct free_area *area = &(zone->free_area[order]);
 
-			if (list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
+			page = list_first_entry_or_null(
+					&area->free_list[MIGRATE_HIGHATOMIC],
+					struct page, lru);
+			if (!page)
 				continue;
 
-			page = list_entry(area->free_list[MIGRATE_HIGHATOMIC].next,
-					struct page, lru);
-
 			/*
 			 * It should never happen but changes to locking could
 			 * inadvertently allow a per-cpu drain to add pages
@@ -1746,7 +1745,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 		if (fallback_mt == -1)
 			continue;
 
-		page = list_entry(area->free_list[fallback_mt].next,
+		page = list_first_entry(&area->free_list[fallback_mt],
 						struct page, lru);
 		if (can_steal)
 			steal_suitable_fallback(zone, page, start_migratetype);
@@ -2205,9 +2204,9 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 		}
 
 		if (cold)
-			page = list_entry(list->prev, struct page, lru);
+			page = list_last_entry(list, struct page, lru);
 		else
-			page = list_entry(list->next, struct page, lru);
+			page = list_first_entry(list, struct page, lru);
 
 		list_del(&page->lru);
 		pcp->count--;