Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c         | 10
-rw-r--r--  mm/filemap_xip.c     |  2
-rw-r--r--  mm/mlock.c           |  7
-rw-r--r--  mm/page-writeback.c  | 15
-rw-r--r--  mm/page_alloc.c      | 27
-rw-r--r--  mm/page_io.c         |  2
-rw-r--r--  mm/slab.c            |  1
-rw-r--r--  mm/slob.c            |  1
-rw-r--r--  mm/slub.c            |  1
-rw-r--r--  mm/swapfile.c        |  4
-rw-r--r--  mm/util.c            | 20
-rw-r--r--  mm/vmalloc.c         | 13
-rw-r--r--  mm/vmscan.c          | 28
13 files changed, 93 insertions(+), 38 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 23acefe51808..60fd56772cc6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1816,14 +1816,14 @@ EXPORT_SYMBOL(file_remove_suid);
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
 {
-       size_t copied = 0, left = 0;
+       size_t copied = 0, left = 0, total = bytes;
 
        while (bytes) {
                char __user *buf = iov->iov_base + base;
                int copy = min(bytes, iov->iov_len - base);
 
                base = 0;
-               left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+               left = __copy_from_user_inatomic_nocache(vaddr, buf, copy, total);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
@@ -1851,8 +1851,9 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
+
                left = __copy_from_user_inatomic_nocache(kaddr + offset,
-                                               buf, bytes);
+                                               buf, bytes, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1880,7 +1881,8 @@ size_t iov_iter_copy_from_user(struct page *page,
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+
+               left = __copy_from_user_nocache(kaddr + offset, buf, bytes, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
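Note: the new fourth argument threads the size of the whole transfer down to the arch copy routine, which can then judge whether non-temporal (cache-bypassing) stores pay off for the transfer as a whole; in the single-segment fast paths above the segment is the entire transfer, hence the doubled `bytes, bytes`. A rough sketch of a generic fallback for an architecture with no nocache implementation (the guard macro and body are illustrative, not lifted from the real headers):

/* Hypothetical generic fallback: accept 'total' but ignore it and
 * fall back to an ordinary cached copy. */
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
                                  unsigned long n, unsigned long total)
{
        return __copy_from_user_inatomic(to, from, n);
}
#endif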
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 0c04615651b7..bf54f8a2cf1d 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -354,7 +354,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
                        break;
 
                copied = bytes -
-                       __copy_from_user_nocache(xip_mem + offset, buf, bytes);
+                       __copy_from_user_nocache(xip_mem + offset, buf, bytes, bytes);
 
                if (likely(copied > 0)) {
                        status = copied;
diff --git a/mm/mlock.c b/mm/mlock.c
index 037161d61b4e..cbe9e0581b75 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -660,7 +660,7 @@ void *alloc_locked_buffer(size_t size)
        return buffer;
 }
 
-void free_locked_buffer(void *buffer, size_t size)
+void release_locked_buffer(void *buffer, size_t size)
 {
        unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
@@ -670,6 +670,11 @@ void free_locked_buffer(void *buffer, size_t size)
        current->mm->locked_vm -= pgsz;
 
        up_write(&current->mm->mmap_sem);
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+       release_locked_buffer(buffer, size);
 
        kfree(buffer);
 }
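Note: factoring release_locked_buffer() out of free_locked_buffer() separates the locked_vm accounting rollback from freeing the memory, so a caller can undo the accounting while keeping the buffer alive. A hypothetical caller, purely to illustrate the split:

/* Illustrative only: stop charging the buffer to locked_vm but keep it;
 * the caller is then left holding ordinary kmalloc memory. */
static void *detach_locked_buffer(void *buffer, size_t size)
{
        release_locked_buffer(buffer, size);
        return buffer;
}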
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6106a5c7ed44..74dc57c74349 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -240,7 +240,7 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 
-static inline void task_dirty_inc(struct task_struct *tsk)
+void task_dirty_inc(struct task_struct *tsk)
 {
        prop_inc_single(&vm_dirties, &tsk->dirties);
 }
@@ -1079,7 +1079,7 @@ continue_unlock:
                pagevec_release(&pvec);
                cond_resched();
        }
-       if (!cycled) {
+       if (!cycled && !done) {
                /*
                 * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
@@ -1230,6 +1230,7 @@ int __set_page_dirty_nobuffers(struct page *page)
                        __inc_zone_page_state(page, NR_FILE_DIRTY);
                        __inc_bdi_stat(mapping->backing_dev_info,
                                        BDI_RECLAIMABLE);
+                       task_dirty_inc(current);
                        task_io_account_write(PAGE_CACHE_SIZE);
                }
                radix_tree_tag_set(&mapping->page_tree,
@@ -1262,7 +1263,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-static int __set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
 
@@ -1280,14 +1281,6 @@ static int __set_page_dirty(struct page *page)
        }
        return 0;
 }
-
-int set_page_dirty(struct page *page)
-{
-       int ret = __set_page_dirty(page);
-       if (ret)
-               task_dirty_inc(current);
-       return ret;
-}
 EXPORT_SYMBOL(set_page_dirty);
 
 /*
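Note: per-task dirty accounting moves out of the removed set_page_dirty() wrapper and into __set_page_dirty_nobuffers() itself, and task_dirty_inc() loses `static inline` so other dirtying paths (presumably the buffer_head path in fs/buffer.c, which is outside this mm/ diffstat) can mirror the change. The point of the move is that code dirtying pages via the helper directly never went through the wrapper, so the task was never charged:

/* Illustrative: a direct call like this used to bypass task_dirty_inc()
 * entirely; after this patch it accounts the current task as well. */
static void example_dirty_page(struct page *page)
{
        __set_page_dirty_nobuffers(page);
}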
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5675b3073854..5c44ed49ca93 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2989,7 +2989,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
  * was used and there are no special requirements, this is a convenient
  * alternative
  */
-int __meminit early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
        int i;
 
@@ -3000,10 +3000,33 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
                if (start_pfn <= pfn && pfn < end_pfn)
                        return early_node_map[i].nid;
        }
+       /* This is a memory hole */
+       return -1;
+}
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+int __meminit early_pfn_to_nid(unsigned long pfn)
+{
+       int nid;
 
+       nid = __early_pfn_to_nid(pfn);
+       if (nid >= 0)
+               return nid;
+       /* just returns 0 */
        return 0;
 }
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
+{
+       int nid;
+
+       nid = __early_pfn_to_nid(pfn);
+       if (nid >= 0 && nid != node)
+               return false;
+       return true;
+}
+#endif
 
 /* Basic iterator support to walk early_node_map[] */
 #define for_each_active_range_index_in_nid(i, nid) \
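Note: the lookup is split so that __early_pfn_to_nid() can report a memory hole as -1 while early_pfn_to_nid() keeps its old "default to node 0" contract, and early_pfn_in_nid() deliberately treats holes as belonging to every node so iteration does not skip them. A sketch of the intended caller pattern (a memmap_init_zone-style loop; the surrounding variables are assumed):

/* Skip pfns that provably belong to another node; pfns in holes
 * (__early_pfn_to_nid() < 0) are kept, since no node claims them. */
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
        if (!early_pfn_in_nid(pfn, nid))
                continue;
        /* ... initialise the struct page for this pfn ... */
}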
diff --git a/mm/page_io.c b/mm/page_io.c
index dc6ce0afbded..3023c475e041 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -111,7 +111,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
                goto out;
        }
        if (wbc->sync_mode == WB_SYNC_ALL)
-               rw |= (1 << BIO_RW_SYNC);
+               rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
        count_vm_event(PSWPOUT);
        set_page_writeback(page);
        unlock_page(page);
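Note: the block layer split the old composite BIO_RW_SYNC bit into BIO_RW_SYNCIO and BIO_RW_UNPLUG, so a synchronous swap-out now has to request both behaviours explicitly; the pair is assumed to be the exact equivalent of the old flag. An illustrative helper:

/* Illustrative: the request flags for a synchronous, eagerly
 * unplugged write, spelled in the post-split flag vocabulary. */
static int sync_write_rw_flags(void)
{
        return WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
}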
diff --git a/mm/slab.c b/mm/slab.c
index ddc41f337d58..4d00855629c4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4457,3 +4457,4 @@ size_t ksize(const void *objp)
 
        return obj_size(virt_to_cache(objp));
 }
+EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
index bf7e8fc3aed8..52bc8a2bd9ef 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -521,6 +521,7 @@ size_t ksize(const void *block)
        } else
                return sp->page.private;
 }
+EXPORT_SYMBOL(ksize);
 
 struct kmem_cache {
        unsigned int size, align;
diff --git a/mm/slub.c b/mm/slub.c
index bdc9abb08a23..0280eee6cf37 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2736,6 +2736,7 @@ size_t ksize(const void *object)
         */
        return s->size;
 }
+EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
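Note: all three allocators (slab, slob, slub) now export ksize(), so modules can query the usable size of an allocation, which is generally larger than what was asked for. A minimal illustrative use from module code:

/* Illustrative: kmalloc rounds sizes up; ksize() reports what you
 * actually got and may therefore legitimately use. */
static void show_usable_size(void)
{
        void *p = kmalloc(30, GFP_KERNEL);

        if (p) {
                pr_info("asked for 30 bytes, got %zu usable\n", ksize(p));
                kfree(p);
        }
}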
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7e6304dfafab..312fafe0ab6e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -635,7 +635,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
 
        if (!bdev) {
                if (bdev_p)
-                       *bdev_p = sis->bdev;
+                       *bdev_p = bdget(sis->bdev->bd_dev);
 
                spin_unlock(&swap_lock);
                return i;
@@ -647,7 +647,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
                                        struct swap_extent, list);
                if (se->start_block == offset) {
                        if (bdev_p)
-                               *bdev_p = sis->bdev;
+                               *bdev_p = bdget(sis->bdev->bd_dev);
 
                        spin_unlock(&swap_lock);
                        bdput(bdev);
diff --git a/mm/util.c b/mm/util.c
index cb00b748ce47..37eaccdf3054 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -129,6 +129,26 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 }
 EXPORT_SYMBOL(krealloc);
 
+/**
+ * kzfree - like kfree but zero memory
+ * @p: object to free memory of
+ *
+ * The memory of the object @p points to is zeroed before freed.
+ * If @p is %NULL, kzfree() does nothing.
+ */
+void kzfree(const void *p)
+{
+       size_t ks;
+       void *mem = (void *)p;
+
+       if (unlikely(ZERO_OR_NULL_PTR(mem)))
+               return;
+       ks = ksize(mem);
+       memset(mem, 0, ks);
+       kfree(mem);
+}
+EXPORT_SYMBOL(kzfree);
+
 /*
  * strndup_user - duplicate an existing string from user space
  * @s: The string to duplicate
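Note: kzfree() zeroes the whole usable allocation (ksize(p) bytes, which can exceed the size originally requested) before handing it back, which is the behaviour wanted when scrubbing secrets. A typical, illustrative use:

/* Illustrative: scrub key material on the free path. */
struct example_ctx {
        u8 key[32];
};

static void example_ctx_destroy(struct example_ctx *ctx)
{
        kzfree(ctx);    /* memset(ctx, 0, ksize(ctx)), then kfree() */
}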
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 75f49d312e8c..7774c6328970 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1012,6 +1012,8 @@ void __init vmalloc_init(void)
 void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
        unsigned long end = addr + size;
+
+       flush_cache_vunmap(addr, end);
        vunmap_page_range(addr, end);
        flush_tlb_kernel_range(addr, end);
 }
@@ -1106,6 +1108,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
+struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+                                      unsigned long start, unsigned long end,
+                                      void *caller)
+{
+       return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+                                 caller);
+}
+
 /**
  * get_vm_area - reserve a contiguous kernel virtual area
  * @size: size of the area
@@ -1249,6 +1259,7 @@ EXPORT_SYMBOL(vfree);
 void vunmap(const void *addr)
 {
        BUG_ON(in_interrupt());
+       might_sleep();
        __vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
@@ -1268,6 +1279,8 @@ void *vmap(struct page **pages, unsigned int count,
 {
        struct vm_struct *area;
 
+       might_sleep();
+
        if (count > num_physpages)
                return NULL;
 
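Note: three separate changes land here: unmap_kernel_range() gains the flush_cache_vunmap() that virtually-tagged caches require before a mapping disappears; __get_vm_area_caller() lets a wrapper attribute the reserved area to its own caller rather than to the wrapper itself; and vmap()/vunmap() gain might_sleep() so atomic-context callers are flagged even on runs where nothing actually sleeps. A sketch of the _caller variant's intended use (the wrapper name is made up):

/* Illustrative wrapper: charge the area to whoever called us. */
static struct vm_struct *example_get_area(unsigned long size)
{
        return __get_vm_area_caller(size, VM_IOREMAP,
                                    VMALLOC_START, VMALLOC_END,
                                    __builtin_return_address(0));
}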
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9a27c44aa327..6177e3bcd66b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2057,31 +2057,31 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
                                int pass, struct scan_control *sc)
 {
        struct zone *zone;
-       unsigned long nr_to_scan, ret = 0;
-       enum lru_list l;
+       unsigned long ret = 0;
 
        for_each_zone(zone) {
+               enum lru_list l;
 
                if (!populated_zone(zone))
                        continue;
-
                if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
                for_each_evictable_lru(l) {
+                       enum zone_stat_item ls = NR_LRU_BASE + l;
+                       unsigned long lru_pages = zone_page_state(zone, ls);
+
                        /* For pass = 0, we don't shrink the active list */
-                       if (pass == 0 &&
-                               (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
+                       if (pass == 0 && (l == LRU_ACTIVE_ANON ||
+                                               l == LRU_ACTIVE_FILE))
                                continue;
 
-                       zone->lru[l].nr_scan +=
-                               (zone_page_state(zone, NR_LRU_BASE + l)
-                                                               >> prio) + 1;
+                       zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
                        if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+                               unsigned long nr_to_scan;
+
                                zone->lru[l].nr_scan = 0;
-                               nr_to_scan = min(nr_pages,
-                                       zone_page_state(zone,
-                                                       NR_LRU_BASE + l));
+                               nr_to_scan = min(nr_pages, lru_pages);
                                ret += shrink_list(l, nr_to_scan, zone,
                                                                sc, prio);
                                if (ret >= nr_pages)
@@ -2089,7 +2089,6 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
                        }
                }
        }
-
        return ret;
 }
 
@@ -2112,7 +2111,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                .may_swap = 0,
                .swap_cluster_max = nr_pages,
                .may_writepage = 1,
-               .swappiness = vm_swappiness,
                .isolate_pages = isolate_pages_global,
        };
 
@@ -2146,10 +2144,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                int prio;
 
                /* Force reclaiming mapped pages in the passes #3 and #4 */
-               if (pass > 2) {
+               if (pass > 2)
                        sc.may_swap = 1;
-                       sc.swappiness = 100;
-               }
 
                for (prio = DEF_PRIORITY; prio >= 0; prio--) {
                        unsigned long nr_to_scan = nr_pages - ret;
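Note: shrink_all_zones() is mostly tidied here: lru_pages is read once per LRU, nr_to_scan moves into the innermost scope, and the pass-0 skip now names LRU_ACTIVE_ANON explicitly (the old `l == LRU_ACTIVE` comparison only matched the active-anon list through a coincidence of enum values). shrink_all_memory() also stops overriding swappiness, leaving swap aggressiveness to the plain may_swap logic. The per-pass scan budget itself is unchanged; as a worked illustration:

/* Illustrative: the scan budget one LRU list accumulates per pass.
 * At prio == DEF_PRIORITY (12) this is lru_pages/4096 + 1; each
 * lower prio doubles the share, reaching all pages at prio 0. */
static unsigned long scan_share(unsigned long lru_pages, int prio)
{
        return (lru_pages >> prio) + 1;
}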