author    Michal Nazarewicz <mina86@mina86.com>  2012-01-11 09:16:11 -0500
committer Marek Szyprowski <m.szyprowski@samsung.com>  2012-05-21 09:09:26 -0400
commit    5f63b720b62925ef3c6a85473dcd547b0fd90616 (patch)
tree      0c91073fbea0800e186aa3793c51cd3a214d1f82
parent    76e10d158efb6d4516018846f60c2ab5501900bc (diff)
mm: page_alloc: remove trailing whitespace
Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
 mm/page_alloc.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 918330f71dba..6fb46c1589b9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -513,10 +513,10 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  * free pages of length of (1 << order) and marked with _mapcount -2. Page's
  * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
- * other. That is, if we allocate a small block, and both were 
+ * other. That is, if we allocate a small block, and both were
  * free, the remainder of the region must be split into blocks.
  * If a block is freed, and its buddy is also free, then this
- * triggers coalescing into a block of larger size. 
+ * triggers coalescing into a block of larger size.
  *
  * -- wli
  */
@@ -1061,17 +1061,17 @@ retry_reserve:
 	return page;
 }
 
-/* 
+/*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency. Add them to the supplied list.
  * Returns the number of new pages which were placed at *list.
  */
-static int rmqueue_bulk(struct zone *zone, unsigned int order, 
+static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, int cold)
 {
-	int i; 
-	
+	int i;
+
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype);
@@ -4301,7 +4301,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 	init_waitqueue_head(&pgdat->kswapd_wait);
 	pgdat->kswapd_max_order = 0;
 	pgdat_page_cgroup_init(pgdat);
-	
+
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize, memmap_pages;
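
For readers skimming the context lines above: the comment touched in the first hunk describes the buddy allocator's split/coalesce invariant. Below is a minimal user-space sketch of the buddy-pair arithmetic that invariant relies on, assuming the usual XOR pairing of page indices; buddy_index() is an illustrative name here, not a kernel API, and this is not the kernel's implementation.

#include <stdio.h>

/*
 * A free block of (1 << order) pages has a unique "buddy" of the same
 * size, found by flipping bit `order` of the block's starting page index.
 * Allocating a smaller block out of a free pair splits along this pairing;
 * freeing a block whose buddy is also free coalesces back up one order.
 */
static unsigned long buddy_index(unsigned long page_idx, unsigned int order)
{
	/* The buddy of a block at a given order is that block with bit
	 * `order` of its page index flipped. */
	return page_idx ^ (1UL << order);
}

int main(void)
{
	/* Page 8 at order 2 (a 4-page block spanning pages 8..11) pairs
	 * with the block spanning pages 12..15. */
	printf("buddy of page 8 at order 2: %lu\n", buddy_index(8, 2)); /* 12 */

	/* If block 12 is freed while its buddy 8 is already free, they
	 * coalesce into one order-3 block; the combined block starts at
	 * the index with bit `order` cleared. */
	printf("combined order-3 block starts at: %lu\n",
	       8UL & ~(1UL << 2)); /* 8 */
	return 0;
}

Compiling and running this prints 12 and 8, matching the split/coalesce behavior the page_is_buddy() comment sketches in prose.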