Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	 2
-rw-r--r--	mm/memory.c	17
-rw-r--r--	mm/mmap.c	63
-rw-r--r--	mm/mremap.c	 7
-rw-r--r--	mm/nommu.c	 3
-rw-r--r--	mm/rmap.c	 8
-rw-r--r--	mm/swapfile.c	 2
-rw-r--r--	mm/vmalloc.c	33
8 files changed, 79 insertions, 56 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 47263ac3e4ea..1d33fec7bac6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1004,7 +1004,7 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 	if (pos < size) {
 		retval = generic_file_direct_IO(READ, iocb,
 					iov, pos, nr_segs);
-		if (retval >= 0 && !is_sync_kiocb(iocb))
+		if (retval > 0 && !is_sync_kiocb(iocb))
 			retval = -EIOCBQUEUED;
 		if (retval > 0)
 			*ppos = pos + retval;
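
The one-character change above matters when a direct read completes with zero bytes: under the old "retval >= 0" test such a read on an asynchronous kiocb was still turned into -EIOCBQUEUED, telling the caller to expect a completion that never comes. Requiring retval > 0 presumably limits the conversion to reads that actually have I/O outstanding. A minimal sketch of the resulting logic, using only the names visible in the hunk (everything around it is assumed context):

	retval = generic_file_direct_IO(READ, iocb, iov, pos, nr_segs);
	if (retval > 0 && !is_sync_kiocb(iocb))
		retval = -EIOCBQUEUED;	/* async: result arrives via aio completion */
	if (retval > 0)
		*ppos = pos + retval;	/* sync: advance the file position now */
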
diff --git a/mm/memory.c b/mm/memory.c
index 6bad4c4064e7..d209f745db7f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1701,12 +1701,13 @@ static int do_swap_page(struct mm_struct * mm,
 	spin_lock(&mm->page_table_lock);
 	page_table = pte_offset_map(pmd, address);
 	if (unlikely(!pte_same(*page_table, orig_pte))) {
-		pte_unmap(page_table);
-		spin_unlock(&mm->page_table_lock);
-		unlock_page(page);
-		page_cache_release(page);
 		ret = VM_FAULT_MINOR;
-		goto out;
+		goto out_nomap;
+	}
+
+	if (unlikely(!PageUptodate(page))) {
+		ret = VM_FAULT_SIGBUS;
+		goto out_nomap;
 	}
 
 	/* The page isn't present yet, go ahead with the fault. */
@@ -1741,6 +1742,12 @@ static int do_swap_page(struct mm_struct * mm,
 	spin_unlock(&mm->page_table_lock);
 out:
 	return ret;
+out_nomap:
+	pte_unmap(page_table);
+	spin_unlock(&mm->page_table_lock);
+	unlock_page(page);
+	page_cache_release(page);
+	goto out;
 }
 
 /*
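
Taken together, the two memory.c hunks replace the open-coded cleanup in the pte_same() race path with a shared out_nomap label and add a check that the swapped-in page is actually PageUptodate(); a page whose swap-in I/O failed now raises VM_FAULT_SIGBUS instead of being mapped with stale contents. A sketch of the resulting error handling in do_swap_page(), assembled only from the hunks above (all other context is assumed):

	if (unlikely(!pte_same(*page_table, orig_pte))) {
		ret = VM_FAULT_MINOR;	/* lost the race; the fault was handled elsewhere */
		goto out_nomap;
	}

	if (unlikely(!PageUptodate(page))) {
		ret = VM_FAULT_SIGBUS;	/* swap-in I/O did not complete successfully */
		goto out_nomap;
	}
	/* ... establish the pte as before ... */
out:
	return ret;
out_nomap:
	pte_unmap(page_table);
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	page_cache_release(page);
	goto out;
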
diff --git a/mm/mmap.c b/mm/mmap.c
index 01f9793591f6..de54acd9942f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1244,7 +1244,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	addr = mm->free_area_cache;
 
 	/* make sure it can fit in the remaining address space */
-	if (addr >= len) {
+	if (addr > len) {
 		vma = find_vma(mm, addr-len);
 		if (!vma || addr <= vma->vm_start)
 			/* remember the address as a hint for next time */
@@ -1266,7 +1266,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 		/* try just below the current vma->vm_start */
 		addr = vma->vm_start-len;
-	} while (len <= vma->vm_start);
+	} while (len < vma->vm_start);
 
 	/*
 	 * A failed mmap() very likely causes application failure,
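
The two arch_get_unmapped_area_topdown() hunks above both make their comparison strict; judging purely from the arithmetic, this stops the top-down search from ever proposing a mapping that starts at address zero, since addr == len (or vma->vm_start == len) would make the candidate start addr - len equal to 0. A self-contained toy program with hypothetical values:

#include <stdio.h>

int main(void)
{
	/* hypothetical values: the cached hint exactly equals the request length */
	unsigned long len  = 0x10000;
	unsigned long addr = 0x10000;	/* plays the role of mm->free_area_cache */

	if (addr >= len)		/* old test: passes, candidate start is 0 */
		printf("old candidate: %#lx\n", addr - len);
	if (addr > len)			/* new test: the zero candidate is rejected */
		printf("new candidate: %#lx\n", addr - len);
	return 0;
}
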
@@ -1302,37 +1302,40 @@ unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
 {
-	if (flags & MAP_FIXED) {
-		unsigned long ret;
+	unsigned long ret;
 
-		if (addr > TASK_SIZE - len)
-			return -ENOMEM;
-		if (addr & ~PAGE_MASK)
-			return -EINVAL;
-		if (file && is_file_hugepages(file)) {
-			/*
-			 * Check if the given range is hugepage aligned, and
-			 * can be made suitable for hugepages.
-			 */
-			ret = prepare_hugepage_range(addr, len);
-		} else {
-			/*
-			 * Ensure that a normal request is not falling in a
-			 * reserved hugepage range. For some archs like IA-64,
-			 * there is a separate region for hugepages.
-			 */
-			ret = is_hugepage_only_range(current->mm, addr, len);
-		}
-		if (ret)
-			return -EINVAL;
-		return addr;
-	}
+	if (!(flags & MAP_FIXED)) {
+		unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-	if (file && file->f_op && file->f_op->get_unmapped_area)
-		return file->f_op->get_unmapped_area(file, addr, len,
-						pgoff, flags);
+		get_area = current->mm->get_unmapped_area;
+		if (file && file->f_op && file->f_op->get_unmapped_area)
+			get_area = file->f_op->get_unmapped_area;
+		addr = get_area(file, addr, len, pgoff, flags);
+		if (IS_ERR_VALUE(addr))
+			return addr;
+	}
 
-	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+	if (addr > TASK_SIZE - len)
+		return -ENOMEM;
+	if (addr & ~PAGE_MASK)
+		return -EINVAL;
+	if (file && is_file_hugepages(file)) {
+		/*
+		 * Check if the given range is hugepage aligned, and
+		 * can be made suitable for hugepages.
+		 */
+		ret = prepare_hugepage_range(addr, len);
+	} else {
+		/*
+		 * Ensure that a normal request is not falling in a
+		 * reserved hugepage range. For some archs like IA-64,
+		 * there is a separate region for hugepages.
+		 */
+		ret = is_hugepage_only_range(current->mm, addr, len);
+	}
+	if (ret)
+		return -EINVAL;
+	return addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
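
The get_unmapped_area() rewrite above routes every request through the same sanity checks. In the old code a non-MAP_FIXED request returned whatever file->f_op->get_unmapped_area or current->mm->get_unmapped_area produced, bypassing the TASK_SIZE, page-alignment and hugepage-range tests that only MAP_FIXED addresses received; now the chosen address falls through to those checks in both cases. A hedged sketch of how a driver would plug into the new dispatch; the mydrv_* names are invented for illustration and are not part of the patch:

/* Hypothetical driver hook: for a non-MAP_FIXED mmap of this file,
 * get_unmapped_area() calls this via file->f_op->get_unmapped_area and
 * then applies the common TASK_SIZE / alignment / hugepage checks to
 * whatever address it returns. */
static unsigned long mydrv_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	/* defer to the process's default placement policy */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}

static struct file_operations mydrv_fops = {
	.get_unmapped_area	= mydrv_get_unmapped_area,
};
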
diff --git a/mm/mremap.c b/mm/mremap.c
index 0dd7ace94e51..ec7238a78f36 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -224,6 +224,12 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		split = 1;
 	}
 
+	/*
+	 * if we failed to move page tables we still do total_vm increment
+	 * since do_munmap() will decrement it by old_len == new_len
+	 */
+	mm->total_vm += new_len >> PAGE_SHIFT;
+
 	if (do_munmap(mm, old_addr, old_len) < 0) {
 		/* OOM: unable to split vma, just get accounts right */
 		vm_unacct_memory(excess >> PAGE_SHIFT);
@@ -237,7 +243,6 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		vma->vm_next->vm_flags |= VM_ACCOUNT;
 	}
 
-	mm->total_vm += new_len >> PAGE_SHIFT;
 	__vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
 		mm->locked_vm += new_len >> PAGE_SHIFT;
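
The mremap.c change moves the total_vm increment, it does not change its size: by bumping the counter before do_munmap(), the failure case described in the new comment (the page tables could not be moved, so old_len == new_len and the old range is unmapped again) ends with a net change of zero. A toy model of the bookkeeping with made-up page counts:

#include <assert.h>

int main(void)
{
	unsigned long total_vm = 100;			/* arbitrary starting value, in pages */
	unsigned long old_pages = 16, new_pages = 16;	/* failed move: old_len == new_len */

	total_vm += new_pages;		/* now done before do_munmap() */
	total_vm -= old_pages;		/* do_munmap() decrements by the unmapped size */
	assert(total_vm == 100);	/* net effect is zero, as the comment explains */
	return 0;
}
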
diff --git a/mm/nommu.c b/mm/nommu.c
index b293ec1cc4e6..c53e9c8f6b4a 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -150,7 +150,8 @@ void vfree(void *addr)
 	kfree(addr);
 }
 
-void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
+void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask,
+			pgprot_t prot)
 {
 	/*
 	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
diff --git a/mm/rmap.c b/mm/rmap.c
index 378de234c12b..9827409eb7c7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -586,7 +586,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
 		dec_mm_counter(mm, anon_rss);
 	}
 
-	inc_mm_counter(mm, rss);
+	dec_mm_counter(mm, rss);
 	page_remove_rmap(page);
 	page_cache_release(page);
 
@@ -626,7 +626,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte, *original_pte;
 	pte_t pteval;
 	struct page *page;
 	unsigned long address;
@@ -658,7 +658,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	if (!pmd_present(*pmd))
 		goto out_unlock;
 
-	for (pte = pte_offset_map(pmd, address);
+	for (original_pte = pte = pte_offset_map(pmd, address);
 			address < end; pte++, address += PAGE_SIZE) {
 
 		if (!pte_present(*pte))
@@ -694,7 +694,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
 		(*mapcount)--;
 	}
 
-	pte_unmap(pte);
+	pte_unmap(original_pte);
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
 }
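
In the rmap.c hunks, the first corrects the direction of the rss accounting when a mapping is torn down (the counter must be decremented, like anon_rss just above it), and the remaining three make try_to_unmap_cluster() hand pte_unmap() the pointer that pte_offset_map() actually returned. That matters on highmem configurations, where pte_offset_map() can take a temporary kmap_atomic mapping of the page-table page; the loop advances pte, so unmapping the iterator would release the wrong address. The pattern the hunks establish, restated with only the names they use:

	pte_t *pte, *original_pte;

	for (original_pte = pte = pte_offset_map(pmd, address);
			address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		/* ... unmap this pte and update the counters ... */
	}
	pte_unmap(original_pte);	/* undo the mapping taken at loop entry */
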
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a60e0075d55b..da48405cd9a3 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -79,7 +79,7 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
 		WARN_ON(page_count(page) <= 1);
 
 		bdi = bdev->bd_inode->i_mapping->backing_dev_info;
-		bdi->unplug_io_fn(bdi, page);
+		blk_run_backing_dev(bdi, page);
 	}
 	up_read(&swap_unplug_sem);
 }
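
The swap_unplug_io_fn() hunk swaps a direct indirect call for the block layer's blk_run_backing_dev() helper, which in kernels of this vintage is likely a small inline that also checks that an unplug function is present before calling it. Treat the body below as an assumption rather than a quote; only the name and argument list are taken from the hunk itself:

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
					struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}
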
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2bd83e5c2bbf..8ff16a1eee6a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -248,31 +248,20 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
-/**
- * remove_vm_area  -  find and remove a contingous kernel virtual area
- *
- * @addr: base address
- *
- * Search for the kernel VM area starting at @addr, and remove it.
- * This function returns the found VM area, but using it is NOT safe
- * on SMP machines.
- */
-struct vm_struct *remove_vm_area(void *addr)
+/* Caller must hold vmlist_lock */
+struct vm_struct *__remove_vm_area(void *addr)
 {
 	struct vm_struct **p, *tmp;
 
-	write_lock(&vmlist_lock);
 	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
 		if (tmp->addr == addr)
 			goto found;
 	}
-	write_unlock(&vmlist_lock);
 	return NULL;
 
 found:
 	unmap_vm_area(tmp);
 	*p = tmp->next;
-	write_unlock(&vmlist_lock);
 
 	/*
 	 * Remove the guard page.
@@ -281,6 +270,24 @@ found:
 	return tmp;
 }
 
+/**
+ * remove_vm_area  -  find and remove a contingous kernel virtual area
+ *
+ * @addr: base address
+ *
+ * Search for the kernel VM area starting at @addr, and remove it.
+ * This function returns the found VM area, but using it is NOT safe
+ * on SMP machines, except for its size or flags.
+ */
+struct vm_struct *remove_vm_area(void *addr)
+{
+	struct vm_struct *v;
+	write_lock(&vmlist_lock);
+	v = __remove_vm_area(addr);
+	write_unlock(&vmlist_lock);
+	return v;
+}
+
 void __vunmap(void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
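
The vmalloc.c change is a pure split: the list walk and unlink move into __remove_vm_area(), which is documented as requiring vmlist_lock to be held by the caller, while remove_vm_area() keeps its old take-the-lock behaviour as a thin wrapper. A hypothetical caller showing why the lock-free variant is useful, namely that other vmlist work can share the same write_lock as the unlink (the helper name is invented for illustration):

/* Hypothetical helper: unlink an area and read a field from it under a
 * single vmlist_lock hold, using the new __remove_vm_area() directly. */
static struct vm_struct *remove_and_get_size(void *addr, unsigned long *sizep)
{
	struct vm_struct *area;

	write_lock(&vmlist_lock);
	area = __remove_vm_area(addr);	/* caller holds vmlist_lock, as required */
	if (area)
		*sizep = area->size;
	write_unlock(&vmlist_lock);
	return area;
}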