Diffstat (limited to 'mm')
-rw-r--r-- | mm/Kconfig          |  4
-rw-r--r-- | mm/filemap.c        |  4
-rw-r--r-- | mm/hugetlb.c        |  8
-rw-r--r-- | mm/nommu.c          |  2
-rw-r--r-- | mm/page-writeback.c |  2
-rw-r--r-- | mm/page_alloc.c     | 53
-rw-r--r-- | mm/slab.c           | 13
-rw-r--r-- | mm/util.c           |  6
-rw-r--r-- | mm/vmalloc.c        |  6
9 files changed, 54 insertions, 44 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 5d88489ef2de..db7c55de92cd 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -92,7 +92,7 @@ config HAVE_MEMORY_PRESENT
92 | 92 | ||
93 | # | 93 | # |
94 | # SPARSEMEM_EXTREME (which is the default) does some bootmem | 94 | # SPARSEMEM_EXTREME (which is the default) does some bootmem |
95 | # allocations when memory_present() is called. If this can not | 95 | # allocations when memory_present() is called. If this cannot |
96 | # be done on your architecture, select this option. However, | 96 | # be done on your architecture, select this option. However, |
97 | # statically allocating the mem_section[] array can potentially | 97 | # statically allocating the mem_section[] array can potentially |
98 | # consume vast quantities of .bss, so be careful. | 98 | # consume vast quantities of .bss, so be careful. |
@@ -104,7 +104,7 @@ config SPARSEMEM_STATIC
104 | def_bool n | 104 | def_bool n |
105 | 105 | ||
106 | # | 106 | # |
107 | # Architectecture platforms which require a two level mem_section in SPARSEMEM | 107 | # Architecture platforms which require a two level mem_section in SPARSEMEM |
108 | # must select this option. This is usually for architecture platforms with | 108 | # must select this option. This is usually for architecture platforms with |
109 | # an extremely sparse physical address space. | 109 | # an extremely sparse physical address space. |
110 | # | 110 | # |
diff --git a/mm/filemap.c b/mm/filemap.c
index fef7d879ddf5..3464b681f844 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1139,11 +1139,11 @@ success:
1139 | } | 1139 | } |
1140 | 1140 | ||
1141 | /** | 1141 | /** |
1142 | * __generic_file_aio_read - generic filesystem read routine | 1142 | * generic_file_aio_read - generic filesystem read routine |
1143 | * @iocb: kernel I/O control block | 1143 | * @iocb: kernel I/O control block |
1144 | * @iov: io vector request | 1144 | * @iov: io vector request |
1145 | * @nr_segs: number of segments in the iovec | 1145 | * @nr_segs: number of segments in the iovec |
1146 | * @ppos: current file position | 1146 | * @pos: current file position |
1147 | * | 1147 | * |
1148 | * This is the "read()" routine for all filesystems | 1148 | * This is the "read()" routine for all filesystems |
1149 | * that can use the page cache directly. | 1149 | * that can use the page cache directly. |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c7d03dbf73d..1d709ff528e1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -364,6 +364,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
364 | pte_t *ptep; | 364 | pte_t *ptep; |
365 | pte_t pte; | 365 | pte_t pte; |
366 | struct page *page; | 366 | struct page *page; |
367 | struct page *tmp; | ||
368 | LIST_HEAD(page_list); | ||
367 | 369 | ||
368 | WARN_ON(!is_vm_hugetlb_page(vma)); | 370 | WARN_ON(!is_vm_hugetlb_page(vma)); |
369 | BUG_ON(start & ~HPAGE_MASK); | 371 | BUG_ON(start & ~HPAGE_MASK); |
@@ -384,12 +386,16 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
384 | continue; | 386 | continue; |
385 | 387 | ||
386 | page = pte_page(pte); | 388 | page = pte_page(pte); |
387 | put_page(page); | 389 | list_add(&page->lru, &page_list); |
388 | add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE)); | 390 | add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE)); |
389 | } | 391 | } |
390 | 392 | ||
391 | spin_unlock(&mm->page_table_lock); | 393 | spin_unlock(&mm->page_table_lock); |
392 | flush_tlb_range(vma, start, end); | 394 | flush_tlb_range(vma, start, end); |
395 | list_for_each_entry_safe(page, tmp, &page_list, lru) { | ||
396 | list_del(&page->lru); | ||
397 | put_page(page); | ||
398 | } | ||
393 | } | 399 | } |
394 | 400 | ||
395 | static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, | 401 | static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, |
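Note: the mm/hugetlb.c hunk above stops dropping page references while other CPUs may still hold stale TLB entries for the huge pages being unmapped. Condensed for illustration only (not the verbatim kernel source), the resulting flow in unmap_hugepage_range() is:

	struct page *page, *tmp;
	LIST_HEAD(page_list);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		/* ... look up and clear the huge PTE, skipping holes ... */
		page = pte_page(pte);
		list_add(&page->lru, &page_list);	/* defer the release */
		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, start, end);	/* stale translations are now gone */

	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);		/* safe: no CPU can still reach the page */
	}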
diff --git a/mm/nommu.c b/mm/nommu.c
index 365019599df8..8bdde9508f3b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -221,7 +221,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
221 | * Allocate enough pages to cover @size from the page level | 221 | * Allocate enough pages to cover @size from the page level |
222 | * allocator and map them into continguos kernel virtual space. | 222 | * allocator and map them into continguos kernel virtual space. |
223 | * | 223 | * |
224 | * For tight cotrol over page level allocator and protection flags | 224 | * For tight control over page level allocator and protection flags |
225 | * use __vmalloc() instead. | 225 | * use __vmalloc() instead. |
226 | */ | 226 | */ |
227 | void *vmalloc(unsigned long size) | 227 | void *vmalloc(unsigned long size) |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c0d4ce144dec..a0f339057449 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1,5 +1,5 @@
1 | /* | 1 | /* |
2 | * mm/page-writeback.c. | 2 | * mm/page-writeback.c |
3 | * | 3 | * |
4 | * Copyright (C) 2002, Linus Torvalds. | 4 | * Copyright (C) 2002, Linus Torvalds. |
5 | * | 5 | * |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4f59d90b81e6..a8c003e7b3d5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -900,7 +900,8 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
900 | int classzone_idx, int alloc_flags) | 900 | int classzone_idx, int alloc_flags) |
901 | { | 901 | { |
902 | /* free_pages my go negative - that's OK */ | 902 | /* free_pages my go negative - that's OK */ |
903 | long min = mark, free_pages = z->free_pages - (1 << order) + 1; | 903 | unsigned long min = mark; |
904 | long free_pages = z->free_pages - (1 << order) + 1; | ||
904 | int o; | 905 | int o; |
905 | 906 | ||
906 | if (alloc_flags & ALLOC_HIGH) | 907 | if (alloc_flags & ALLOC_HIGH) |
@@ -2050,8 +2051,8 @@ int __init early_pfn_to_nid(unsigned long pfn)
2050 | 2051 | ||
2051 | /** | 2052 | /** |
2052 | * free_bootmem_with_active_regions - Call free_bootmem_node for each active range | 2053 | * free_bootmem_with_active_regions - Call free_bootmem_node for each active range |
2053 | * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed | 2054 | * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. |
2054 | * @max_low_pfn: The highest PFN that till be passed to free_bootmem_node | 2055 | * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node |
2055 | * | 2056 | * |
2056 | * If an architecture guarantees that all ranges registered with | 2057 | * If an architecture guarantees that all ranges registered with |
2057 | * add_active_ranges() contain no holes and may be freed, this | 2058 | * add_active_ranges() contain no holes and may be freed, this |
@@ -2081,11 +2082,11 @@ void __init free_bootmem_with_active_regions(int nid,
2081 | 2082 | ||
2082 | /** | 2083 | /** |
2083 | * sparse_memory_present_with_active_regions - Call memory_present for each active range | 2084 | * sparse_memory_present_with_active_regions - Call memory_present for each active range |
2084 | * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used | 2085 | * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. |
2085 | * | 2086 | * |
2086 | * If an architecture guarantees that all ranges registered with | 2087 | * If an architecture guarantees that all ranges registered with |
2087 | * add_active_ranges() contain no holes and may be freed, this | 2088 | * add_active_ranges() contain no holes and may be freed, this |
2088 | * this function may be used instead of calling memory_present() manually. | 2089 | * function may be used instead of calling memory_present() manually. |
2089 | */ | 2090 | */ |
2090 | void __init sparse_memory_present_with_active_regions(int nid) | 2091 | void __init sparse_memory_present_with_active_regions(int nid) |
2091 | { | 2092 | { |
@@ -2155,14 +2156,14 @@ static void __init account_node_boundary(unsigned int nid,
2155 | 2156 | ||
2156 | /** | 2157 | /** |
2157 | * get_pfn_range_for_nid - Return the start and end page frames for a node | 2158 | * get_pfn_range_for_nid - Return the start and end page frames for a node |
2158 | * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned | 2159 | * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. |
2159 | * @start_pfn: Passed by reference. On return, it will have the node start_pfn | 2160 | * @start_pfn: Passed by reference. On return, it will have the node start_pfn. |
2160 | * @end_pfn: Passed by reference. On return, it will have the node end_pfn | 2161 | * @end_pfn: Passed by reference. On return, it will have the node end_pfn. |
2161 | * | 2162 | * |
2162 | * It returns the start and end page frame of a node based on information | 2163 | * It returns the start and end page frame of a node based on information |
2163 | * provided by an arch calling add_active_range(). If called for a node | 2164 | * provided by an arch calling add_active_range(). If called for a node |
2164 | * with no available memory, a warning is printed and the start and end | 2165 | * with no available memory, a warning is printed and the start and end |
2165 | * PFNs will be 0 | 2166 | * PFNs will be 0. |
2166 | */ | 2167 | */ |
2167 | void __init get_pfn_range_for_nid(unsigned int nid, | 2168 | void __init get_pfn_range_for_nid(unsigned int nid, |
2168 | unsigned long *start_pfn, unsigned long *end_pfn) | 2169 | unsigned long *start_pfn, unsigned long *end_pfn) |
@@ -2215,7 +2216,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
2215 | 2216 | ||
2216 | /* | 2217 | /* |
2217 | * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, | 2218 | * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, |
2218 | * then all holes in the requested range will be accounted for | 2219 | * then all holes in the requested range will be accounted for. |
2219 | */ | 2220 | */ |
2220 | unsigned long __init __absent_pages_in_range(int nid, | 2221 | unsigned long __init __absent_pages_in_range(int nid, |
2221 | unsigned long range_start_pfn, | 2222 | unsigned long range_start_pfn, |
@@ -2268,7 +2269,7 @@ unsigned long __init __absent_pages_in_range(int nid,
2268 | * @start_pfn: The start PFN to start searching for holes | 2269 | * @start_pfn: The start PFN to start searching for holes |
2269 | * @end_pfn: The end PFN to stop searching for holes | 2270 | * @end_pfn: The end PFN to stop searching for holes |
2270 | * | 2271 | * |
2271 | * It returns the number of pages frames in memory holes within a range | 2272 | * It returns the number of pages frames in memory holes within a range. |
2272 | */ | 2273 | */ |
2273 | unsigned long __init absent_pages_in_range(unsigned long start_pfn, | 2274 | unsigned long __init absent_pages_in_range(unsigned long start_pfn, |
2274 | unsigned long end_pfn) | 2275 | unsigned long end_pfn) |
@@ -2582,11 +2583,12 @@ void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
2582 | 2583 | ||
2583 | /** | 2584 | /** |
2584 | * remove_all_active_ranges - Remove all currently registered regions | 2585 | * remove_all_active_ranges - Remove all currently registered regions |
2586 | * | ||
2585 | * During discovery, it may be found that a table like SRAT is invalid | 2587 | * During discovery, it may be found that a table like SRAT is invalid |
2586 | * and an alternative discovery method must be used. This function removes | 2588 | * and an alternative discovery method must be used. This function removes |
2587 | * all currently registered regions. | 2589 | * all currently registered regions. |
2588 | */ | 2590 | */ |
2589 | void __init remove_all_active_ranges() | 2591 | void __init remove_all_active_ranges(void) |
2590 | { | 2592 | { |
2591 | memset(early_node_map, 0, sizeof(early_node_map)); | 2593 | memset(early_node_map, 0, sizeof(early_node_map)); |
2592 | nr_nodemap_entries = 0; | 2594 | nr_nodemap_entries = 0; |
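Note: the "(void)" added to remove_all_active_ranges() above is not just style. In C (before C23), an empty parameter list leaves a function without a prototype, so the compiler cannot check its calls. A small stand-alone illustration with hypothetical functions:

	#include <stdio.h>

	void f() { puts("f"); }		/* empty parens: unprototyped, arguments unchecked */
	void g(void) { puts("g"); }	/* (void): real prototype, takes no arguments */

	int main(void)
	{
		f(1, 2, 3);		/* compiles despite the bogus arguments */
		/* g(1, 2, 3); */	/* would be rejected at compile time */
		g();
		return 0;
	}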
@@ -2636,7 +2638,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
2636 | * find_min_pfn_with_active_regions - Find the minimum PFN registered | 2638 | * find_min_pfn_with_active_regions - Find the minimum PFN registered |
2637 | * | 2639 | * |
2638 | * It returns the minimum PFN based on information provided via | 2640 | * It returns the minimum PFN based on information provided via |
2639 | * add_active_range() | 2641 | * add_active_range(). |
2640 | */ | 2642 | */ |
2641 | unsigned long __init find_min_pfn_with_active_regions(void) | 2643 | unsigned long __init find_min_pfn_with_active_regions(void) |
2642 | { | 2644 | { |
@@ -2647,7 +2649,7 @@ unsigned long __init find_min_pfn_with_active_regions(void)
2647 | * find_max_pfn_with_active_regions - Find the maximum PFN registered | 2649 | * find_max_pfn_with_active_regions - Find the maximum PFN registered |
2648 | * | 2650 | * |
2649 | * It returns the maximum PFN based on information provided via | 2651 | * It returns the maximum PFN based on information provided via |
2650 | * add_active_range() | 2652 | * add_active_range(). |
2651 | */ | 2653 | */ |
2652 | unsigned long __init find_max_pfn_with_active_regions(void) | 2654 | unsigned long __init find_max_pfn_with_active_regions(void) |
2653 | { | 2655 | { |
@@ -2662,10 +2664,7 @@ unsigned long __init find_max_pfn_with_active_regions(void)
2662 | 2664 | ||
2663 | /** | 2665 | /** |
2664 | * free_area_init_nodes - Initialise all pg_data_t and zone data | 2666 | * free_area_init_nodes - Initialise all pg_data_t and zone data |
2665 | * @arch_max_dma_pfn: The maximum PFN usable for ZONE_DMA | 2667 | * @max_zone_pfn: an array of max PFNs for each zone |
2666 | * @arch_max_dma32_pfn: The maximum PFN usable for ZONE_DMA32 | ||
2667 | * @arch_max_low_pfn: The maximum PFN usable for ZONE_NORMAL | ||
2668 | * @arch_max_high_pfn: The maximum PFN usable for ZONE_HIGHMEM | ||
2669 | * | 2668 | * |
2670 | * This will call free_area_init_node() for each active node in the system. | 2669 | * This will call free_area_init_node() for each active node in the system. |
2671 | * Using the page ranges provided by add_active_range(), the size of each | 2670 | * Using the page ranges provided by add_active_range(), the size of each |
@@ -2723,14 +2722,15 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
2723 | #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ | 2722 | #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ |
2724 | 2723 | ||
2725 | /** | 2724 | /** |
2726 | * set_dma_reserve - Account the specified number of pages reserved in ZONE_DMA | 2725 | * set_dma_reserve - set the specified number of pages reserved in the first zone |
2727 | * @new_dma_reserve - The number of pages to mark reserved | 2726 | * @new_dma_reserve: The number of pages to mark reserved |
2728 | * | 2727 | * |
2729 | * The per-cpu batchsize and zone watermarks are determined by present_pages. | 2728 | * The per-cpu batchsize and zone watermarks are determined by present_pages. |
2730 | * In the DMA zone, a significant percentage may be consumed by kernel image | 2729 | * In the DMA zone, a significant percentage may be consumed by kernel image |
2731 | * and other unfreeable allocations which can skew the watermarks badly. This | 2730 | * and other unfreeable allocations which can skew the watermarks badly. This |
2732 | * function may optionally be used to account for unfreeable pages in | 2731 | * function may optionally be used to account for unfreeable pages in the |
2733 | * ZONE_DMA. The effect will be lower watermarks and smaller per-cpu batchsize | 2732 | * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and |
2733 | * smaller per-cpu batchsize. | ||
2734 | */ | 2734 | */ |
2735 | void __init set_dma_reserve(unsigned long new_dma_reserve) | 2735 | void __init set_dma_reserve(unsigned long new_dma_reserve) |
2736 | { | 2736 | { |
@@ -2843,10 +2843,11 @@ static void setup_per_zone_lowmem_reserve(void)
2843 | calculate_totalreserve_pages(); | 2843 | calculate_totalreserve_pages(); |
2844 | } | 2844 | } |
2845 | 2845 | ||
2846 | /* | 2846 | /** |
2847 | * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures | 2847 | * setup_per_zone_pages_min - called when min_free_kbytes changes. |
2848 | * that the pages_{min,low,high} values for each zone are set correctly | 2848 | * |
2849 | * with respect to min_free_kbytes. | 2849 | * Ensures that the pages_{min,low,high} values for each zone are set correctly |
2850 | * with respect to min_free_kbytes. | ||
2850 | */ | 2851 | */ |
2851 | void setup_per_zone_pages_min(void) | 2852 | void setup_per_zone_pages_min(void) |
2852 | { | 2853 | { |
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3488,22 +3488,25 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3488 | } | 3488 | } |
3489 | 3489 | ||
3490 | 3490 | ||
3491 | #ifdef CONFIG_DEBUG_SLAB | ||
3491 | void *__kmalloc(size_t size, gfp_t flags) | 3492 | void *__kmalloc(size_t size, gfp_t flags) |
3492 | { | 3493 | { |
3493 | #ifndef CONFIG_DEBUG_SLAB | ||
3494 | return __do_kmalloc(size, flags, NULL); | ||
3495 | #else | ||
3496 | return __do_kmalloc(size, flags, __builtin_return_address(0)); | 3494 | return __do_kmalloc(size, flags, __builtin_return_address(0)); |
3497 | #endif | ||
3498 | } | 3495 | } |
3499 | EXPORT_SYMBOL(__kmalloc); | 3496 | EXPORT_SYMBOL(__kmalloc); |
3500 | 3497 | ||
3501 | #ifdef CONFIG_DEBUG_SLAB | ||
3502 | void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) | 3498 | void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) |
3503 | { | 3499 | { |
3504 | return __do_kmalloc(size, flags, caller); | 3500 | return __do_kmalloc(size, flags, caller); |
3505 | } | 3501 | } |
3506 | EXPORT_SYMBOL(__kmalloc_track_caller); | 3502 | EXPORT_SYMBOL(__kmalloc_track_caller); |
3503 | |||
3504 | #else | ||
3505 | void *__kmalloc(size_t size, gfp_t flags) | ||
3506 | { | ||
3507 | return __do_kmalloc(size, flags, NULL); | ||
3508 | } | ||
3509 | EXPORT_SYMBOL(__kmalloc); | ||
3507 | #endif | 3510 | #endif |
3508 | 3511 | ||
3509 | /** | 3512 | /** |
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -11,7 +11,7 @@
11 | */ | 11 | */ |
12 | void *__kzalloc(size_t size, gfp_t flags) | 12 | void *__kzalloc(size_t size, gfp_t flags) |
13 | { | 13 | { |
14 | void *ret = ____kmalloc(size, flags); | 14 | void *ret = kmalloc_track_caller(size, flags); |
15 | if (ret) | 15 | if (ret) |
16 | memset(ret, 0, size); | 16 | memset(ret, 0, size); |
17 | return ret; | 17 | return ret; |
@@ -33,7 +33,7 @@ char *kstrdup(const char *s, gfp_t gfp)
33 | return NULL; | 33 | return NULL; |
34 | 34 | ||
35 | len = strlen(s) + 1; | 35 | len = strlen(s) + 1; |
36 | buf = ____kmalloc(len, gfp); | 36 | buf = kmalloc_track_caller(len, gfp); |
37 | if (buf) | 37 | if (buf) |
38 | memcpy(buf, s, len); | 38 | memcpy(buf, s, len); |
39 | return buf; | 39 | return buf; |
@@ -51,7 +51,7 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
51 | { | 51 | { |
52 | void *p; | 52 | void *p; |
53 | 53 | ||
54 | p = ____kmalloc(len, gfp); | 54 | p = kmalloc_track_caller(len, gfp); |
55 | if (p) | 55 | if (p) |
56 | memcpy(p, src, len); | 56 | memcpy(p, src, len); |
57 | return p; | 57 | return p; |
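Note: the three mm/util.c call sites above switch from ____kmalloc() to kmalloc_track_caller() so that, with CONFIG_DEBUG_SLAB, allocations made via __kzalloc()/kstrdup()/kmemdup() are attributed to their real callers rather than to these wrappers. The helper is assumed to be defined roughly as sketched below, pairing with the __kmalloc_track_caller() kept under CONFIG_DEBUG_SLAB in the slab.c hunk; the exact header definition is not part of this diff:

	#ifdef CONFIG_DEBUG_SLAB
	/* record the wrapper's caller as the allocation site */
	#define kmalloc_track_caller(size, flags) \
		__kmalloc_track_caller(size, flags, __builtin_return_address(0))
	#else
	/* without slab debugging there is nothing to track */
	#define kmalloc_track_caller(size, flags) \
		__kmalloc(size, flags)
	#endif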
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1ac191ce5641..750ab6ed13fc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -503,7 +503,7 @@ EXPORT_SYMBOL(__vmalloc);
503 | * Allocate enough pages to cover @size from the page level | 503 | * Allocate enough pages to cover @size from the page level |
504 | * allocator and map them into contiguous kernel virtual space. | 504 | * allocator and map them into contiguous kernel virtual space. |
505 | * | 505 | * |
506 | * For tight cotrol over page level allocator and protection flags | 506 | * For tight control over page level allocator and protection flags |
507 | * use __vmalloc() instead. | 507 | * use __vmalloc() instead. |
508 | */ | 508 | */ |
509 | void *vmalloc(unsigned long size) | 509 | void *vmalloc(unsigned long size) |
@@ -542,7 +542,7 @@ EXPORT_SYMBOL(vmalloc_user);
542 | * Allocate enough pages to cover @size from the page level | 542 | * Allocate enough pages to cover @size from the page level |
543 | * allocator and map them into contiguous kernel virtual space. | 543 | * allocator and map them into contiguous kernel virtual space. |
544 | * | 544 | * |
545 | * For tight cotrol over page level allocator and protection flags | 545 | * For tight control over page level allocator and protection flags |
546 | * use __vmalloc() instead. | 546 | * use __vmalloc() instead. |
547 | */ | 547 | */ |
548 | void *vmalloc_node(unsigned long size, int node) | 548 | void *vmalloc_node(unsigned long size, int node) |
@@ -563,7 +563,7 @@ EXPORT_SYMBOL(vmalloc_node);
563 | * the page level allocator and map them into contiguous and | 563 | * the page level allocator and map them into contiguous and |
564 | * executable kernel virtual space. | 564 | * executable kernel virtual space. |
565 | * | 565 | * |
566 | * For tight cotrol over page level allocator and protection flags | 566 | * For tight control over page level allocator and protection flags |
567 | * use __vmalloc() instead. | 567 | * use __vmalloc() instead. |
568 | */ | 568 | */ |
569 | 569 | ||
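Note: the vmalloc.c comments corrected above describe the intended split in the API: vmalloc()/vmalloc_node() for the common case, __vmalloc() when the caller needs to pick the gfp mask and protection bits itself. A hedged usage sketch follows; the size, flags and PAGE_KERNEL_EXEC protection are illustrative assumptions, and __vmalloc() takes (size, gfp_mask, prot) in this kernel generation:

	#include <linux/init.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	static int __init vmalloc_example(void)
	{
		void *buf = vmalloc(64 * 1024);		/* GFP_KERNEL, PAGE_KERNEL */
		void *code = __vmalloc(64 * 1024,	/* caller-chosen gfp and prot */
				       GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);

		if (buf)
			vfree(buf);
		if (code)
			vfree(code);
		return 0;
	}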