Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	23
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8a52ba9fe693..4b33878e9488 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -127,7 +127,6 @@ static int bad_range(struct zone *zone, struct page *page)
 
 	return 0;
 }
-
 #else
 static inline int bad_range(struct zone *zone, struct page *page)
 {
@@ -218,12 +217,12 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 {
 	int i;
 
-	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
+	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
 	/*
 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
 	 */
-	BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
+	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
 	for (i = 0; i < (1 << order); i++)
 		clear_highpage(page + i);
 }
@@ -347,8 +346,8 @@ static inline void __free_one_page(struct page *page,
 
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-	BUG_ON(page_idx & (order_size - 1));
-	BUG_ON(bad_range(zone, page));
+	VM_BUG_ON(page_idx & (order_size - 1));
+	VM_BUG_ON(bad_range(zone, page));
 
 	zone->free_pages += order_size;
 	while (order < MAX_ORDER-1) {
@@ -421,7 +420,7 @@ static void free_pages_bulk(struct zone *zone, int count,
 	while (count--) {
 		struct page *page;
 
-		BUG_ON(list_empty(list));
+		VM_BUG_ON(list_empty(list));
 		page = list_entry(list->prev, struct page, lru);
 		/* have to delete it as __free_one_page list manipulates */
 		list_del(&page->lru);
@@ -512,7 +511,7 @@ static inline void expand(struct zone *zone, struct page *page,
 		area--;
 		high--;
 		size >>= 1;
-		BUG_ON(bad_range(zone, &page[size]));
+		VM_BUG_ON(bad_range(zone, &page[size]));
 		list_add(&page[size].lru, &area->free_list);
 		area->nr_free++;
 		set_page_order(&page[size], high);
@@ -761,8 +760,8 @@ void split_page(struct page *page, unsigned int order)
 {
 	int i;
 
-	BUG_ON(PageCompound(page));
-	BUG_ON(!page_count(page));
+	VM_BUG_ON(PageCompound(page));
+	VM_BUG_ON(!page_count(page));
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
 }
@@ -809,7 +808,7 @@ again:
 	local_irq_restore(flags);
 	put_cpu();
 
-	BUG_ON(bad_range(zone, page));
+	VM_BUG_ON(bad_range(zone, page));
 	if (prep_new_page(page, order, gfp_flags))
 		goto again;
 	return page;
@@ -1083,7 +1082,7 @@ fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
 	 * get_zeroed_page() returns a 32-bit address, which cannot represent
 	 * a highmem page
 	 */
-	BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
+	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
 
 	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
 	if (page)
@@ -1116,7 +1115,7 @@ EXPORT_SYMBOL(__free_pages);
 fastcall void free_pages(unsigned long addr, unsigned int order)
 {
 	if (addr != 0) {
-		BUG_ON(!virt_addr_valid((void *)addr));
+		VM_BUG_ON(!virt_addr_valid((void *)addr));
 		__free_pages(virt_to_page((void *)addr), order);
 	}
 }
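
The diff replaces hard BUG_ON() assertions in the page allocator with VM_BUG_ON(), a check that is only compiled in when the kernel is built with CONFIG_DEBUG_VM, so the sanity checks keep their full effect on debug builds while costing nothing in the allocator fast paths of production kernels. As a rough sketch (the definition is assumed to live in include/linux/mm.h and the exact form may differ between kernel versions), the macro reduces to:

/* Sketch of the assumed VM_BUG_ON() definition, not copied from this tree. */
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond)	BUG_ON(cond)	/* debug build: behaves exactly like BUG_ON() */
#else
#define VM_BUG_ON(cond)	do { } while (0)	/* production build: no code, condition not evaluated */
#endif

Under this definition the condition is not evaluated at all when CONFIG_DEBUG_VM is disabled, which is why only checks whose expressions have no required side effects, such as the bad_range(), list_empty(), and page_count() tests touched here, are suitable for this conversion.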