Diffstat (limited to 'mm/memory.c')
 -rw-r--r--   mm/memory.c | 77
 1 file changed, 61 insertions, 16 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 109e9866237e..92a3ebd8d795 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -49,6 +49,7 @@
 #include <linux/module.h>
 #include <linux/delayacct.h>
 #include <linux/init.h>
+#include <linux/writeback.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -1226,7 +1227,12 @@ out:
 	return retval;
 }
 
-/*
+/**
+ * vm_insert_page - insert single page into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @page: source kernel page
+ *
  * This allows drivers to insert individual pages they've allocated
  * into a user vma.
  *
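For context, a minimal sketch of how a driver might use the call documented above; the mydrv_* names and the single-page mapping are assumptions for illustration, not part of this patch.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver mmap handler built on vm_insert_page();
 * mydrv_page is assumed to have been set up with alloc_page(). */
static struct page *mydrv_page;

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	/* vm_insert_page() takes its own reference on the page. */
	return vm_insert_page(vma, vma->vm_start, mydrv_page);
}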
@@ -1318,7 +1324,16 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 	return 0;
 }
 
-/* Note: this is only safe if the mm semaphore is held when called. */
+/**
+ * remap_pfn_range - remap kernel memory to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @prot: page protection flags for this mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		    unsigned long pfn, unsigned long size, pgprot_t prot)
 {
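A comparable sketch for remap_pfn_range(), mapping a physically contiguous device buffer; mydrv_buf_phys and MYDRV_BUF_SIZE are illustrative placeholders. A driver's ->mmap is invoked with the mm semaphore held for writing, which satisfies the locking note above.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical mmap handler for a physically contiguous device buffer. */
static int mydrv_pfn_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > MYDRV_BUF_SIZE)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start,
			       mydrv_buf_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}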
@@ -1458,14 +1473,29 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
-	int reuse, ret = VM_FAULT_MINOR;
+	int reuse = 0, ret = VM_FAULT_MINOR;
+	struct page *dirty_page = NULL;
 
 	old_page = vm_normal_page(vma, address, orig_pte);
 	if (!old_page)
 		goto gotten;
 
-	if (unlikely((vma->vm_flags & (VM_SHARED|VM_WRITE)) ==
-				(VM_SHARED|VM_WRITE))) {
+	/*
+	 * Take out anonymous pages first, anonymous shared vmas are
+	 * not dirty accountable.
+	 */
+	if (PageAnon(old_page)) {
+		if (!TestSetPageLocked(old_page)) {
+			reuse = can_share_swap_page(old_page);
+			unlock_page(old_page);
+		}
+	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+					(VM_WRITE|VM_SHARED))) {
+		/*
+		 * Only catch write-faults on shared writable pages,
+		 * read-only shared pages can get COWed by
+		 * get_user_pages(.write=1, .force=1).
+		 */
 		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 			/*
 			 * Notify the address space that the page is about to
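The ->page_mkwrite() hook checked in this hunk is what lets a mapping provider intervene before a shared read-only page is made writable. A hedged sketch of an implementer follows; myfs_reserve_block() is a made-up helper, and the hook signature shown matches the one this kernel uses (vma plus page). A negative return causes the fault to fail with SIGBUS.

#include <linux/fs.h>
#include <linux/mm.h>

/* Sketch: reserve backing store before the kernel makes a shared page
 * writable; myfs_reserve_block() is a made-up helper. */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	if (myfs_reserve_block(vma->vm_file, page->index) < 0)
		return -ENOSPC;	/* write fault fails with SIGBUS */
	return 0;
}

static struct vm_operations_struct myfs_vm_ops = {
	.nopage		= filemap_nopage,
	.page_mkwrite	= myfs_page_mkwrite,
};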
@@ -1494,13 +1524,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			if (!pte_same(*page_table, orig_pte))
 				goto unlock;
 		}
-
+		dirty_page = old_page;
+		get_page(dirty_page);
 		reuse = 1;
-	} else if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
-		reuse = can_share_swap_page(old_page);
-		unlock_page(old_page);
-	} else {
-		reuse = 0;
 	}
 
 	if (reuse) {
@@ -1566,6 +1592,10 @@ gotten:
 		page_cache_release(old_page);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
+	if (dirty_page) {
+		set_page_dirty_balance(dirty_page);
+		put_page(dirty_page);
+	}
 	return ret;
 oom:
 	if (old_page)
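set_page_dirty_balance() is the helper this path (and the <linux/writeback.h> include above) exists for; it is not shown in this diff. As a rough sketch of what it is expected to do, it marks the page dirty and, if the page was newly dirtied, throttles the writer against the dirty limits. The get_page()/put_page() pair keeps the page pinned so this possibly sleeping work runs only after pte_unmap_unlock().

#include <linux/mm.h>
#include <linux/writeback.h>

/* Rough sketch of the helper (the real one lives in mm/page-writeback.c). */
void set_page_dirty_balance(struct page *page)
{
	if (set_page_dirty(page)) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}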
@@ -1785,9 +1815,10 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-/*
- * Handle all mappings that got truncated by a "truncate()"
- * system call.
+/**
+ * vmtruncate - unmap mappings "freed" by truncate() syscall
+ * @inode: inode of the file used
+ * @offset: file offset to start truncating
  *
  * NOTE! We have to be ready to update the memory sharing
  * between the file and the memory map for a potential last
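A typical caller, sketched for illustration only (the myfs_* name is an assumption): a filesystem's ->setattr unmaps and truncates the page cache via vmtruncate() before it releases the underlying blocks.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical filesystem ->setattr handling only a size change. */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;

	if (attr->ia_valid & ATTR_SIZE) {
		int error = vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}
	return 0;
}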
@@ -1856,11 +1887,16 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 }
 EXPORT_UNUSED_SYMBOL(vmtruncate_range);  /* June 2006 */
 
-/*
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @addr: address to start
+ * @vma: user vma this addresses belong to
+ *
  * Primitive swap readahead code. We simply read an aligned block of
  * (1 << page_cluster) entries in the swap area. This method is chosen
  * because it doesn't cost us any seek time. We also make sure to queue
  * the 'original' request together with the readahead ones...
  *
  * This has been extended to use the NUMA policies from the mm triggering
  * the readahead.
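The alignment the comment describes amounts to rounding the faulting swap offset down to a multiple of the cluster size. A small illustrative helper (not kernel code) makes the arithmetic concrete: with page_cluster == 3 the cluster is 8 entries, so a fault on swap offset 21 starts readahead at offset 16 and covers offsets 16..23.

/* Illustrative only: first offset of the readahead window that contains
 * @offset for a (1 << page_cluster) sized cluster. */
static unsigned long readahead_window_start(unsigned long offset,
					    unsigned int page_cluster)
{
	return offset & ~((1UL << page_cluster) - 1);	/* 21 -> 16 when page_cluster == 3 */
}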
@@ -2098,6 +2134,7 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned int sequence = 0;
 	int ret = VM_FAULT_MINOR;
 	int anon = 0;
+	struct page *dirty_page = NULL;
 
 	pte_unmap(page_table);
 	BUG_ON(vma->vm_flags & VM_PFNMAP);
@@ -2192,6 +2229,10 @@ retry:
 		} else {
 			inc_mm_counter(mm, file_rss);
 			page_add_file_rmap(new_page);
+			if (write_access) {
+				dirty_page = new_page;
+				get_page(dirty_page);
+			}
 		}
 	} else {
 		/* One of our sibling threads was faster, back out. */
@@ -2204,6 +2245,10 @@ retry:
 	lazy_mmu_prot_update(entry);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
+	if (dirty_page) {
+		set_page_dirty_balance(dirty_page);
+		put_page(dirty_page);
+	}
 	return ret;
 oom:
 	page_cache_release(new_page);
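Taken together, the do_wp_page() and do_no_page() changes mean that dirtying a MAP_SHARED file mapping is now accounted and can be throttled much like buffered write(2). A small userspace illustration of the affected workload (an assumed scenario, not part of the patch):

/* Writes through a shared file mapping now hit the dirty-page limits too. */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64UL << 20;	/* 64 MiB scratch file */
	int fd = open("scratch", O_RDWR | O_CREAT | O_TRUNC, 0600);
	char *p;

	if (fd < 0 || ftruncate(fd, len) < 0)
		return 1;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Each first write to a page takes a write fault; the fault path can
	 * now block the process in dirty-page balancing instead of letting
	 * it dirty page cache without bound. */
	memset(p, 0xaa, len);

	munmap(p, len);
	close(fd);
	return 0;
}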