Diffstat (limited to 'mm')
 mm/filemap.c    | 10
 mm/hugetlb.c    |  8
 mm/page_alloc.c | 53
 mm/readahead.c  |  1
 mm/slab.c       | 13
 mm/util.c       |  6
 6 files changed, 52 insertions(+), 39 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index ec469235985d..3464b681f844 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1139,11 +1139,11 @@ success:
 }
 
 /**
- * __generic_file_aio_read - generic filesystem read routine
+ * generic_file_aio_read - generic filesystem read routine
  * @iocb: kernel I/O control block
  * @iov: io vector request
  * @nr_segs: number of segments in the iovec
- * @ppos: current file position
+ * @pos: current file position
  *
  * This is the "read()" routine for all filesystems
  * that can use the page cache directly.
@@ -1198,8 +1198,10 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 			if (retval > 0)
 				*ppos = pos + retval;
 		}
-		file_accessed(filp);
-		goto out;
+		if (likely(retval != 0)) {
+			file_accessed(filp);
+			goto out;
+		}
 	}
 
 	retval = 0;
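
The second filemap.c hunk changes the O_DIRECT path to fall through to the buffered read when the direct read returns 0, rather than taking the early exit and reporting a spurious EOF; file_accessed() and the goto now only run when the fast path made progress (data or a real error). A minimal userspace sketch of that "short-circuit only on progress" shape, with try_fast_read() and slow_read() as invented stand-ins for the kernel paths:

#include <stdio.h>

/* Hypothetical fast path: bytes read, 0 for "no progress", <0 on error. */
static long try_fast_read(char *buf, long len)
{
	(void)buf;
	(void)len;
	return 0;	/* pretend the fast path could not make progress */
}

/* Hypothetical fallback path. */
static long slow_read(char *buf, long len)
{
	for (long i = 0; i < len; i++)
		buf[i] = 'x';
	return len;
}

static long two_tier_read(char *buf, long len)
{
	long ret = try_fast_read(buf, len);

	/*
	 * Like the patched generic_file_aio_read(): return early only when
	 * the fast path did something.  On 0, fall through to the slow path
	 * instead of reporting a spurious end-of-file.
	 */
	if (ret != 0)
		return ret;

	return slow_read(buf, len);
}

int main(void)
{
	char buf[8];
	printf("read %ld bytes\n", two_tier_read(buf, sizeof(buf)));
	return 0;
}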
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c7d03dbf73d..1d709ff528e1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -364,6 +364,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	pte_t *ptep;
 	pte_t pte;
 	struct page *page;
+	struct page *tmp;
+	LIST_HEAD(page_list);
 
 	WARN_ON(!is_vm_hugetlb_page(vma));
 	BUG_ON(start & ~HPAGE_MASK);
@@ -384,12 +386,16 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			continue;
 
 		page = pte_page(pte);
-		put_page(page);
+		list_add(&page->lru, &page_list);
 		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
 	}
 
 	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, start, end);
+	list_for_each_entry_safe(page, tmp, &page_list, lru) {
+		list_del(&page->lru);
+		put_page(page);
+	}
 }
 
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
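
The hugetlb.c hunks stop calling put_page() under mm->page_table_lock: pages are collected on a local page_list and only released after the lock is dropped and flush_tlb_range() has run, so a page cannot be freed and reused while stale TLB entries might still map it. A rough userspace sketch of the same collect-under-lock, free-after-unlock pattern, using a pthread mutex in place of the spinlock (all names invented):

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	/* payload ... */
};

static struct node *table;	/* shared structure */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void teardown(void)
{
	struct node *pending = NULL;

	pthread_mutex_lock(&table_lock);
	/* Unlink everything, but defer the actual free. */
	while (table) {
		struct node *n = table;
		table = n->next;
		n->next = pending;
		pending = n;
	}
	pthread_mutex_unlock(&table_lock);

	/*
	 * Analogous to the list_for_each_entry_safe() loop after
	 * flush_tlb_range(): release memory only once nothing can still
	 * reach it, and without holding the lock while freeing.
	 */
	while (pending) {
		struct node *n = pending;
		pending = n->next;
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			break;
		n->next = table;
		table = n;
	}
	teardown();
	return 0;
}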
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4f59d90b81e6..a8c003e7b3d5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -900,7 +900,8 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		      int classzone_idx, int alloc_flags)
 {
 	/* free_pages my go negative - that's OK */
-	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
+	unsigned long min = mark;
+	long free_pages = z->free_pages - (1 << order) + 1;
 	int o;
 
 	if (alloc_flags & ALLOC_HIGH)
@@ -2050,8 +2051,8 @@ int __init early_pfn_to_nid(unsigned long pfn)
 
 /**
  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
- * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed
- * @max_low_pfn: The highest PFN that till be passed to free_bootmem_node
+ * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
+ * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
  *
  * If an architecture guarantees that all ranges registered with
  * add_active_ranges() contain no holes and may be freed, this
@@ -2081,11 +2082,11 @@ void __init free_bootmem_with_active_regions(int nid,
 
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
- * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used
+ * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
  *
  * If an architecture guarantees that all ranges registered with
  * add_active_ranges() contain no holes and may be freed, this
- * this function may be used instead of calling memory_present() manually.
+ * function may be used instead of calling memory_present() manually.
  */
 void __init sparse_memory_present_with_active_regions(int nid)
 {
@@ -2155,14 +2156,14 @@ static void __init account_node_boundary(unsigned int nid,
 
 /**
  * get_pfn_range_for_nid - Return the start and end page frames for a node
- * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned
- * @start_pfn: Passed by reference. On return, it will have the node start_pfn
- * @end_pfn: Passed by reference. On return, it will have the node end_pfn
+ * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
+ * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
+ * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
  *
  * It returns the start and end page frame of a node based on information
  * provided by an arch calling add_active_range(). If called for a node
  * with no available memory, a warning is printed and the start and end
- * PFNs will be 0
+ * PFNs will be 0.
  */
 void __init get_pfn_range_for_nid(unsigned int nid,
 			unsigned long *start_pfn, unsigned long *end_pfn)
@@ -2215,7 +2216,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
 
 /*
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
- * then all holes in the requested range will be accounted for
+ * then all holes in the requested range will be accounted for.
  */
 unsigned long __init __absent_pages_in_range(int nid,
 				unsigned long range_start_pfn,
@@ -2268,7 +2269,7 @@ unsigned long __init __absent_pages_in_range(int nid,
  * @start_pfn: The start PFN to start searching for holes
  * @end_pfn: The end PFN to stop searching for holes
  *
- * It returns the number of pages frames in memory holes within a range
+ * It returns the number of pages frames in memory holes within a range.
  */
 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
 						unsigned long end_pfn)
@@ -2582,11 +2583,12 @@ void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
 
 /**
  * remove_all_active_ranges - Remove all currently registered regions
+ *
  * During discovery, it may be found that a table like SRAT is invalid
  * and an alternative discovery method must be used. This function removes
  * all currently registered regions.
  */
-void __init remove_all_active_ranges()
+void __init remove_all_active_ranges(void)
 {
 	memset(early_node_map, 0, sizeof(early_node_map));
 	nr_nodemap_entries = 0;
@@ -2636,7 +2638,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
  * find_min_pfn_with_active_regions - Find the minimum PFN registered
  *
  * It returns the minimum PFN based on information provided via
- * add_active_range()
+ * add_active_range().
  */
 unsigned long __init find_min_pfn_with_active_regions(void)
 {
@@ -2647,7 +2649,7 @@ unsigned long __init find_min_pfn_with_active_regions(void)
  * find_max_pfn_with_active_regions - Find the maximum PFN registered
  *
  * It returns the maximum PFN based on information provided via
- * add_active_range()
+ * add_active_range().
  */
 unsigned long __init find_max_pfn_with_active_regions(void)
 {
@@ -2662,10 +2664,7 @@ unsigned long __init find_max_pfn_with_active_regions(void)
 
 /**
  * free_area_init_nodes - Initialise all pg_data_t and zone data
- * @arch_max_dma_pfn: The maximum PFN usable for ZONE_DMA
- * @arch_max_dma32_pfn: The maximum PFN usable for ZONE_DMA32
- * @arch_max_low_pfn: The maximum PFN usable for ZONE_NORMAL
- * @arch_max_high_pfn: The maximum PFN usable for ZONE_HIGHMEM
+ * @max_zone_pfn: an array of max PFNs for each zone
  *
  * This will call free_area_init_node() for each active node in the system.
  * Using the page ranges provided by add_active_range(), the size of each
@@ -2723,14 +2722,15 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
 /**
- * set_dma_reserve - Account the specified number of pages reserved in ZONE_DMA
- * @new_dma_reserve - The number of pages to mark reserved
+ * set_dma_reserve - set the specified number of pages reserved in the first zone
+ * @new_dma_reserve: The number of pages to mark reserved
  *
  * The per-cpu batchsize and zone watermarks are determined by present_pages.
  * In the DMA zone, a significant percentage may be consumed by kernel image
  * and other unfreeable allocations which can skew the watermarks badly. This
- * function may optionally be used to account for unfreeable pages in
- * ZONE_DMA. The effect will be lower watermarks and smaller per-cpu batchsize
+ * function may optionally be used to account for unfreeable pages in the
+ * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
+ * smaller per-cpu batchsize.
  */
 void __init set_dma_reserve(unsigned long new_dma_reserve)
 {
@@ -2843,10 +2843,11 @@ static void setup_per_zone_lowmem_reserve(void)
 	calculate_totalreserve_pages();
 }
 
-/*
- * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures
- *	that the pages_{min,low,high} values for each zone are set correctly
- *	with respect to min_free_kbytes.
+/**
+ * setup_per_zone_pages_min - called when min_free_kbytes changes.
+ *
+ * Ensures that the pages_{min,low,high} values for each zone are set correctly
+ * with respect to min_free_kbytes.
  */
 void setup_per_zone_pages_min(void)
 {
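
Most of the page_alloc.c hunks are kernel-doc fixes (including the () to (void) change in remove_all_active_ranges(), which in C declares "no parameters" rather than "unspecified parameters"). The first hunk splits the declaration so that min becomes unsigned long while free_pages stays a signed long: z->free_pages - (1 << order) + 1 can legitimately go below zero, and in unsigned arithmetic it would wrap to a huge value and falsely pass the check. A toy illustration of the wrap hazard (watermark_ok() below is not the kernel function):

#include <stdio.h>

/* Toy watermark check: "enough free pages left after this allocation?" */
static int watermark_ok(long free_pages_in_zone, int order, unsigned long min)
{
	/* free_pages may go negative - that's OK (mirrors the kernel comment) */
	long free_pages = free_pages_in_zone - (1L << order) + 1;

	/* Cast min for the comparison so a negative free_pages stays negative. */
	return free_pages > (long)min;
}

int main(void)
{
	/* 4 free pages, order-3 allocation (8 pages): free_pages = 4-8+1 = -3 */
	printf("signed:   %s\n", watermark_ok(4, 3, 2) ? "ok" : "fail");

	/* The same computation in unsigned arithmetic wraps instead: */
	unsigned long wrapped = 4UL - (1UL << 3) + 1UL;
	printf("unsigned: wraps to %lu (would falsely pass)\n", wrapped);
	return 0;
}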
diff --git a/mm/readahead.c b/mm/readahead.c
index aa7ec424656a..1ba736ac0367 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -38,6 +38,7 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 	ra->ra_pages = mapping->backing_dev_info->ra_pages;
 	ra->prev_page = -1;
 }
+EXPORT_SYMBOL_GPL(file_ra_state_init);
 
 /*
  * Return max readahead size for this inode in number-of-pages.
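
The readahead.c hunk exports file_ra_state_init() to GPL modules. A hypothetical module could then re-initialise a file's readahead state itself, roughly as sketched below; the surrounding module is invented, and only the exported call and its signature come from the patch:

#include <linux/module.h>
#include <linux/fs.h>

/* Hypothetical helper in an out-of-tree GPL module. */
static int reset_ra(struct file *filp)
{
	/*
	 * file_ra_state_init() is now callable from GPL modules: it
	 * re-reads ra_pages from the backing device and resets prev_page.
	 */
	file_ra_state_init(&filp->f_ra, filp->f_mapping);
	return 0;
}

MODULE_LICENSE("GPL");	/* required: the symbol is EXPORT_SYMBOL_GPL */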
diff --git a/mm/slab.c b/mm/slab.c
index f3514351aed8..e9a63b5a7fb9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3487,22 +3487,25 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 }
 
 
+#ifdef CONFIG_DEBUG_SLAB
 void *__kmalloc(size_t size, gfp_t flags)
 {
-#ifndef CONFIG_DEBUG_SLAB
-	return __do_kmalloc(size, flags, NULL);
-#else
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
-#endif
 }
 EXPORT_SYMBOL(__kmalloc);
 
-#ifdef CONFIG_DEBUG_SLAB
 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
 {
 	return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
+
+#else
+void *__kmalloc(size_t size, gfp_t flags)
+{
+	return __do_kmalloc(size, flags, NULL);
+}
+EXPORT_SYMBOL(__kmalloc);
 #endif
 
 /**
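
The slab.c hunk lifts the CONFIG_DEBUG_SLAB conditional out of the body of __kmalloc() and instead selects between two whole function definitions, so the debug build defines __kmalloc() and __kmalloc_track_caller() side by side. The shape of the transformation, reduced to a compilable toy (DEBUG_ALLOC and the my_* names are made up; build with -DDEBUG_ALLOC for the tracked variants):

#include <stdio.h>
#include <stdlib.h>

static void *do_alloc(size_t size, void *caller)
{
	if (caller)
		fprintf(stderr, "alloc(%zu) from %p\n", size, caller);
	return malloc(size);
}

#ifdef DEBUG_ALLOC
/* Debug build: every allocation records its call site. */
void *my_alloc(size_t size)
{
	return do_alloc(size, __builtin_return_address(0));
}

void *my_alloc_track_caller(size_t size, void *caller)
{
	return do_alloc(size, caller);
}

#else
/* Non-debug build: one plain definition, no per-line #ifdef soup. */
void *my_alloc(size_t size)
{
	return do_alloc(size, NULL);
}
#endif

int main(void)
{
	free(my_alloc(32));
	return 0;
}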
diff --git a/mm/util.c b/mm/util.c
index e14fa84ef39a..ace2aea69f1a 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -11,7 +11,7 @@
  */
 void *__kzalloc(size_t size, gfp_t flags)
 {
-	void *ret = ____kmalloc(size, flags);
+	void *ret = kmalloc_track_caller(size, flags);
 	if (ret)
 		memset(ret, 0, size);
 	return ret;
@@ -33,7 +33,7 @@ char *kstrdup(const char *s, gfp_t gfp)
 		return NULL;
 
 	len = strlen(s) + 1;
-	buf = ____kmalloc(len, gfp);
+	buf = kmalloc_track_caller(len, gfp);
 	if (buf)
 		memcpy(buf, s, len);
 	return buf;
@@ -51,7 +51,7 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
 {
 	void *p;
 
-	p = ____kmalloc(len, gfp);
+	p = kmalloc_track_caller(len, gfp);
 	if (p)
 		memcpy(p, src, len);
 	return p;
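
The util.c hunks switch __kzalloc(), kstrdup() and kmemdup() from ____kmalloc() to kmalloc_track_caller(), so slab debugging attributes these allocations to whoever called the wrapper instead of to the wrapper itself. The trick is that kmalloc_track_caller() is a macro, so __builtin_return_address(0) is evaluated in the wrapper's own frame. A userspace sketch of the pattern (all names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *alloc_track(size_t size, void *caller)
{
	void *p = malloc(size);
	/* In the kernel, the debug allocator stores this for leak reports. */
	fprintf(stderr, "%zu bytes for call site %p\n", size, caller);
	return p;
}

/*
 * Must be a macro: __builtin_return_address(0) then evaluates in the
 * frame of whoever uses it, just like kmalloc_track_caller().
 */
#define malloc_track_caller(size) \
	alloc_track((size), __builtin_return_address(0))

char *my_strdup(const char *s)
{
	size_t len = strlen(s) + 1;
	/* Attributed to my_strdup's caller, not to my_strdup itself. */
	char *buf = malloc_track_caller(len);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}

int main(void)
{
	free(my_strdup("hello"));
	return 0;
}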