 mm/memory.c | 34 +++++++++++++++++++++++++++-------
 1 file changed, 27 insertions(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 65962534b4ed..92a3ebd8d795 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1227,7 +1227,12 @@ out:
 	return retval;
 }
 
-/*
+/**
+ * vm_insert_page - insert single page into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @page: source kernel page
+ *
  * This allows drivers to insert individual pages they've allocated
  * into a user vma.
  *
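The kernel-doc added above documents vm_insert_page(), which lets a driver map pages it has allocated into a user vma. A minimal sketch of the usual call site, a driver's mmap() handler, follows; the mydrv_mmap() name and the pre-allocated mydrv_page are illustrative assumptions, not part of this patch.

/*
 * Illustrative sketch: hand one driver-allocated kernel page to
 * userspace from an mmap() handler.  "mydrv_page" is a hypothetical
 * page obtained earlier, e.g. with alloc_page(GFP_KERNEL).
 */
static struct page *mydrv_page;

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start < PAGE_SIZE)
		return -EINVAL;

	/* Insert the page at the start of the user mapping. */
	return vm_insert_page(vma, vma->vm_start, mydrv_page);
}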
@@ -1319,7 +1324,16 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 	return 0;
 }
 
-/* Note: this is only safe if the mm semaphore is held when called. */
+/**
+ * remap_pfn_range - remap kernel memory to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @prot: page protection flags for this mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		    unsigned long pfn, unsigned long size, pgprot_t prot)
 {
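remap_pfn_range() is the companion interface for mapping a physically contiguous region by pfn rather than by struct page. A hedged sketch of a typical caller is below; mydrv_buf_phys (the buffer's physical address) and mydrv_mmap() are assumed names for illustration. The locking note in the new comment is satisfied here because the mm semaphore is already held when a driver's ->mmap method is invoked.

/*
 * Illustrative sketch: map a physically contiguous buffer that lives
 * at physical address "mydrv_buf_phys" (hypothetical) into the vma.
 */
static unsigned long mydrv_buf_phys;

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start,
			       mydrv_buf_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}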
@@ -1801,9 +1815,10 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-/*
- * Handle all mappings that got truncated by a "truncate()"
- * system call.
+/**
+ * vmtruncate - unmap mappings "freed" by truncate() syscall
+ * @inode: inode of the file used
+ * @offset: file offset to start truncating
  *
  * NOTE! We have to be ready to update the memory sharing
  * between the file and the memory map for a potential last
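vmtruncate() is typically reached from a filesystem's setattr path when ATTR_SIZE shrinks a file, so that user mappings beyond the new size are unmapped. A rough sketch of that call pattern, using a hypothetical myfs_setattr() for illustration only:

/*
 * Illustrative sketch: on a size-changing setattr, let vmtruncate()
 * update i_size and drop user mappings past the new end of file.
 */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		error = vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}
	return 0;
}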
@@ -1872,11 +1887,16 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 }
 EXPORT_UNUSED_SYMBOL(vmtruncate_range); /* June 2006 */
 
-/*
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @addr: address to start
+ * @vma: user vma this addresses belong to
+ *
  * Primitive swap readahead code. We simply read an aligned block of
  * (1 << page_cluster) entries in the swap area. This method is chosen
  * because it doesn't cost us any seek time. We also make sure to queue
  * the 'original' request together with the readahead ones...
  *
  * This has been extended to use the NUMA policies from the mm triggering
  * the readahead.
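The alignment the comment describes can be made concrete with a small helper. This is not the kernel's code, just a sketch of the arithmetic using a hypothetical readahead_block_start(): with the default page_cluster of 3, a fault on swap offset 42 reads the aligned 8-entry block covering offsets 40..47, so the original entry is brought in together with its readahead neighbours.

/* Illustrative sketch of the (1 << page_cluster) block alignment. */
static unsigned long readahead_block_start(unsigned long offset,
					   unsigned int page_cluster)
{
	unsigned long nr = 1UL << page_cluster;	/* entries per block */

	return offset & ~(nr - 1);		/* round down to block start */
}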