author	Pintu Kumar <pintu.k@samsung.com>	2013-09-11 17:20:34 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-11 18:57:18 -0400
commit	b8af29418a0269d2c12f563add54a95cc19471fb (patch)
tree	8cccaa5fb857707420440ef04c2fb6fd1ea8c96d /mm
parent	ebc2a1a69111eadfeda8487e577f1a5d42ef0dae (diff)
mm/page_alloc.c: fix coding style and spelling
Fix all errors reported by checkpatch and some small spelling mistakes.

Signed-off-by: Pintu Kumar <pintu.k@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
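These are the warnings checkpatch emits when run over the whole file rather than over a patch; a typical invocation (assuming a kernel source tree) is:

    ./scripts/checkpatch.pl -f mm/page_alloc.c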
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	45
1 file changed, 24 insertions, 21 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6cf157637df3..2ca3e9bd739c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -721,7 +721,8 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 		return false;
 
 	if (!PageHighMem(page)) {
-		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+		debug_check_no_locks_freed(page_address(page),
+					   PAGE_SIZE << order);
 		debug_check_no_obj_freed(page_address(page),
 					   PAGE_SIZE << order);
 	}
@@ -885,7 +886,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 						int migratetype)
 {
 	unsigned int current_order;
-	struct free_area * area;
+	struct free_area *area;
 	struct page *page;
 
 	/* Find a page of the appropriate size in the preferred list */
@@ -1011,7 +1012,7 @@ static void change_pageblock_range(struct page *pageblock_page,
 static inline struct page *
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 {
-	struct free_area * area;
+	struct free_area *area;
 	int current_order;
 	struct page *page;
 	int migratetype, i;
@@ -3104,7 +3105,7 @@ void show_free_areas(unsigned int filter)
 	}
 
 	for_each_populated_zone(zone) {
- 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
+		unsigned long nr[MAX_ORDER], flags, order, total = 0;
 		unsigned char types[MAX_ORDER];
 
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
@@ -3416,11 +3417,11 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
 static int default_zonelist_order(void)
 {
 	int nid, zone_type;
-	unsigned long low_kmem_size,total_size;
+	unsigned long low_kmem_size, total_size;
 	struct zone *z;
 	int average_size;
 	/*
 	 * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
 	 * If they are really small and used heavily, the system can fall
 	 * into OOM very easily.
 	 * This function detect ZONE_DMA/DMA32 size and configures zone order.
@@ -3452,9 +3453,9 @@ static int default_zonelist_order(void)
 		return ZONELIST_ORDER_NODE;
 	/*
 	 * look into each node's config.
- 	 * If there is a node whose DMA/DMA32 memory is very big area on
- 	 * local memory, NODE_ORDER may be suitable.
- 	 */
+	 * If there is a node whose DMA/DMA32 memory is very big area on
+	 * local memory, NODE_ORDER may be suitable.
+	 */
 	average_size = total_size /
 			(nodes_weight(node_states[N_MEMORY]) + 1);
 	for_each_online_node(nid) {
@@ -4180,7 +4181,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 	if (!zone->wait_table)
 		return -ENOMEM;
 
-	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
+	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
 		init_waitqueue_head(zone->wait_table + i);
 
 	return 0;
@@ -4930,7 +4931,7 @@ static unsigned long __init early_calculate_totalpages(void)
 		if (pages)
 			node_set_state(nid, N_MEMORY);
 	}
- 	return totalpages;
+	return totalpages;
 }
 
 /*
@@ -5047,7 +5048,7 @@ restart:
 			/*
 			 * Some kernelcore has been met, update counts and
 			 * break if the kernelcore for this node has been
-			 * satisified
+			 * satisfied
 			 */
 			required_kernelcore -= min(required_kernelcore,
 							size_pages);
@@ -5061,7 +5062,7 @@ restart:
 	 * If there is still required_kernelcore, we do another pass with one
 	 * less node in the count. This will push zone_movable_pfn[nid] further
 	 * along on the nodes that still have memory until kernelcore is
-	 * satisified
+	 * satisfied
 	 */
 	usable_nodes--;
 	if (usable_nodes && required_kernelcore > usable_nodes)
@@ -5286,8 +5287,10 @@ void __init mem_init_print_info(const char *str)
 	 * 3) .rodata.* may be embedded into .text or .data sections.
 	 */
 #define adj_init_size(start, end, size, pos, adj) \
-	if (start <= pos && pos < end && size > adj) \
-		size -= adj;
+	do { \
+		if (start <= pos && pos < end && size > adj) \
+			size -= adj; \
+	} while (0)
 
 	adj_init_size(__init_begin, __init_end, init_data_size,
 		     _sinittext, init_code_size);
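The adj_init_size() change above applies the standard do { } while (0) idiom for multi-statement macros, which checkpatch recommends: a macro that expands to a bare if-statement parses wrongly when the caller follows it with an else. A minimal standalone sketch of the pitfall (hypothetical ADJ_* names, not part of the patch):

    #include <stdio.h>

    /* Bare form: the macro is an if-statement, so a caller's trailing
     * semicolon plus "else" no longer parses as intended. */
    #define ADJ_BARE(size, adj) \
    	if ((size) > (adj)) \
    		(size) -= (adj);

    /* Patched form: do { } while (0) makes the expansion one statement
     * that composes safely with if/else and still requires a ";". */
    #define ADJ_SAFE(size, adj) \
    	do { \
    		if ((size) > (adj)) \
    			(size) -= (adj); \
    	} while (0)

    int main(void)
    {
    	long size = 100, adj = 10;

    	if (size)
    		ADJ_SAFE(size, adj);	/* with ADJ_BARE this else fails to compile */
    	else
    		printf("size was zero\n");

    	printf("size = %ld\n", size);	/* prints: size = 90 */
    	return 0;
    }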
@@ -5570,7 +5573,7 @@ static void __meminit setup_per_zone_inactive_ratio(void)
 * we want it large (64MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
- * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
+ *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
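As a worked example of the formula in the comment above (illustration only, not part of the patch): for lowmem_kbytes = 16384, i.e. 16 MB of lowmem, min_free_kbytes = 4 * sqrt(16384) = 4 * 128 = 512, and equivalently sqrt(16384 * 16) = sqrt(262144) = 512, matching the 16MB entry in the table this comment goes on to list in the source.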
@@ -5614,11 +5617,11 @@ int __meminit init_per_zone_wmark_min(void)
 module_init(init_per_zone_wmark_min)
 
 /*
- * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
- *	that we can call two helper functions whenever min_free_kbytes
- *	changes.
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+ *	that we can call two helper functions whenever min_free_kbytes
+ *	changes.
 */
 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
 	proc_dointvec(table, write, buffer, length, ppos);
@@ -5682,8 +5685,8 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
 
 /*
  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
- * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist
- * can have before it gets flushed back to buddy allocator.
+ * cpu. It is the fraction of total pages in each zone that a hot per cpu
+ * pagelist can have before it gets flushed back to buddy allocator.
 */
 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
@@ -5901,7 +5904,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 * This function checks whether pageblock includes unmovable pages or not.
 * If @count is not zero, it is okay to include less @count unmovable pages
 *
- * PageLRU check wihtout isolation or lru_lock could race so that
+ * PageLRU check without isolation or lru_lock could race so that
 * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
 * expect this function should be exact.