-rw-r--r--  mm/filemap.c      20
-rw-r--r--  mm/fremap.c        2
-rw-r--r--  mm/pagewalk.c     10
-rw-r--r--  mm/slab.c          5
-rw-r--r--  mm/swap.c          5
-rw-r--r--  mm/swap_state.c    2
-rw-r--r--  mm/vmalloc.c       6
7 files changed, 33 insertions, 17 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index df343d1e6345..07e9d9258b48 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -343,7 +343,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 EXPORT_SYMBOL(sync_page_range);
 
 /**
- * sync_page_range_nolock
+ * sync_page_range_nolock - write & wait on all pages in the passed range without locking
  * @inode: target inode
  * @mapping: target address_space
  * @pos: beginning offset in pages to write
@@ -611,7 +611,10 @@ int __lock_page_killable(struct page *page)
 					sync_page_killable, TASK_KILLABLE);
 }
 
-/*
+/**
+ * __lock_page_nosync - get a lock on the page, without calling sync_page()
+ * @page: the page to lock
+ *
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
@@ -1538,9 +1541,20 @@ repeat:
 	return page;
 }
 
-/*
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
  * Same as read_cache_page, but don't wait for page to become unlocked
  * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
  */
 struct page *read_cache_page_async(struct address_space *mapping,
 				pgoff_t index,
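
For context, the function documented above is typically reached from a filesystem's page cache lookup path. A minimal sketch of a caller, assuming the common ->readpage-as-filler pattern; the helper name and error handling are illustrative, not part of this patch:

	#include <linux/fs.h>
	#include <linux/pagemap.h>
	#include <linux/err.h>

	/*
	 * Sketch: find page 'index' of 'mapping', starting a read if it is not
	 * already uptodate, without waiting for the read to finish here.
	 * Passing ->readpage as the filler is a common pattern in fs code.
	 */
	static struct page *peek_page(struct address_space *mapping,
				      pgoff_t index, struct file *file)
	{
		struct page *page;

		page = read_cache_page_async(mapping, index,
					     (filler_t *)mapping->a_ops->readpage,
					     file);
		if (IS_ERR(page))
			return page;	/* e.g. -ENOMEM, or -EIO from the filler */

		/* caller must still check PageUptodate() before trusting contents */
		return page;
	}
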
diff --git a/mm/fremap.c b/mm/fremap.c
index 69a37c2bdf81..07a9c82ce1a3 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -113,7 +113,7 @@ static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
  * mmap()/mremap() it does not create any new vmas. The new mappings are
  * also safe across swapout.
  *
- * NOTE: the 'prot' parameter right now is ignored (but must be zero),
+ * NOTE: the @prot parameter right now is ignored (but must be zero),
  * and the vma's default protection is used. Arbitrary protections
  * might be implemented in the future.
  */
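
The @prot note above documents a user-visible constraint of remap_file_pages(2). A hedged userspace sketch of that constraint, with an invented helper and a file assumed to be at least four pages long:

	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Sketch: rearrange pages of an existing MAP_SHARED mapping in place,
	 * without creating any new vmas. */
	int show_file_page_three_first(int fd)
	{
		long psz = sysconf(_SC_PAGESIZE);
		char *win = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
				 MAP_SHARED, fd, 0);

		if (win == MAP_FAILED)
			return -1;

		/* prot must be zero; the vma's existing protection is used */
		if (remap_file_pages(win, psz, 0, 3, 0)) {
			perror("remap_file_pages");
			return -1;
		}
		return 0;	/* win[0..psz) now shows file page 3 */
	}
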
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index b4f27d22da91..1cf1417ef8b7 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -77,11 +77,11 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 
 /**
  * walk_page_range - walk a memory map's page tables with a callback
- * @mm - memory map to walk
- * @addr - starting address
- * @end - ending address
- * @walk - set of callbacks to invoke for each level of the tree
- * @private - private data passed to the callback function
+ * @mm: memory map to walk
+ * @addr: starting address
+ * @end: ending address
+ * @walk: set of callbacks to invoke for each level of the tree
+ * @private: private data passed to the callback function
  *
  * Recursively walk the page table for the memory area in a VMA,
  * calling supplied callbacks. Callbacks are called in-order (first
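
A minimal sketch of a walker built on the callbacks documented above, assuming the signatures in this tree (each callback receives the private pointer as its last argument); the pte-counting callback is invented for illustration:

	#include <linux/mm.h>

	/* Hypothetical pte-level callback: count present ptes in the walked range. */
	static int count_pte(pte_t *pte, unsigned long addr, unsigned long end,
			     void *private)
	{
		unsigned long *present = private;

		if (pte_present(*pte))
			(*present)++;
		return 0;
	}

	static const struct mm_walk count_walk = {
		.pte_entry = count_pte,
	};

	/* Walk one vma and return how many of its ptes are currently present. */
	static unsigned long count_present_ptes(struct vm_area_struct *vma)
	{
		unsigned long present = 0;

		walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
				&count_walk, &present);
		return present;
	}
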
diff --git a/mm/slab.c b/mm/slab.c
index e6c698f55674..bb4070e1079f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3624,12 +3624,11 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 /**
- * kmem_ptr_validate - check if an untrusted pointer might
- * be a slab entry.
+ * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
  * @ptr: pointer to validate
  *
- * This verifies that the untrusted pointer looks sane:
+ * This verifies that the untrusted pointer looks sane;
  * it is _not_ a guarantee that the pointer is actually
  * part of the slab cache in question, but it at least
  * validates that the pointer can be dereferenced and
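
A hedged sketch of the usage pattern that comment warns about, with a hypothetical cache and object type; the check filters obviously bad pointers but does not prove cache membership:

	#include <linux/slab.h>

	struct my_obj;	/* hypothetical object type cached in 'my_cache' */

	/* Sketch: accept an untrusted pointer only if it at least looks like a
	 * dereferenceable slab entry of this cache.  This is a sanity filter,
	 * not proof that the object really belongs to the cache. */
	static struct my_obj *my_obj_from_hint(struct kmem_cache *my_cache,
					       void *hint)
	{
		if (!kmem_ptr_validate(my_cache, hint))
			return NULL;

		return hint;	/* caller must still validate the object's contents */
	}
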
diff --git a/mm/swap.c b/mm/swap.c
index d4ec59aa5c46..aa1139ccf3a7 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -78,12 +78,11 @@ void put_page(struct page *page)
 EXPORT_SYMBOL(put_page);
 
 /**
- * put_pages_list(): release a list of pages
+ * put_pages_list() - release a list of pages
+ * @pages: list of pages threaded on page->lru
  *
  * Release a list of pages which are strung together on page.lru. Currently
  * used by read_cache_pages() and related error recovery code.
- *
- * @pages: list of pages threaded on page->lru
  */
 void put_pages_list(struct list_head *pages)
 {
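
A short sketch of the pattern the reordered kernel-doc describes: pages threaded on page->lru and released with one call. The allocation loop is illustrative only; real callers such as read_cache_pages() are handed such a list:

	#include <linux/mm.h>
	#include <linux/gfp.h>
	#include <linux/list.h>

	/* Sketch: collect freshly allocated pages on a private list, then drop
	 * every reference at once. */
	static void drop_page_batch(void)
	{
		LIST_HEAD(pages);
		int i;

		for (i = 0; i < 8; i++) {
			struct page *page = alloc_page(GFP_KERNEL);

			if (!page)
				break;
			list_add(&page->lru, &pages);
		}

		/* drops one reference per page and leaves 'pages' empty */
		put_pages_list(&pages);
	}
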
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ec42f01a8d02..50757ee3f9f3 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -115,6 +115,7 @@ void __delete_from_swap_cache(struct page *page)
 /**
  * add_to_swap - allocate swap space for a page
  * @page: page we want to move to swap
+ * @gfp_mask: memory allocation flags
  *
  * Allocate swap space for the page and add the page to the
  * swap cache. Caller needs to hold the page lock.
@@ -315,6 +316,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 /**
  * swapin_readahead - swap in pages in hope we need them soon
  * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
  * @vma: user vma this address belongs to
  * @addr: target address for mempolicy
  *
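
Both comments above belong to helpers on the swap-in path. A condensed, hedged sketch of how a fault handler might use them, loosely modelled on do_swap_page() with locking and error handling omitted:

	#include <linux/mm.h>
	#include <linux/swap.h>

	/* Sketch: bring the page for 'entry' into the swap cache, reading ahead
	 * around the faulting address. */
	static struct page *fetch_swap_page(swp_entry_t entry,
					    struct vm_area_struct *vma,
					    unsigned long address)
	{
		struct page *page = lookup_swap_cache(entry);

		if (!page)
			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
						vma, address);
		return page;	/* NULL if the read could not be started */
	}
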
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 950c0be9ca81..ecf91f8034bf 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -757,7 +757,8 @@ finished:
  * @vma: vma to cover (map full range of vma)
  * @addr: vmalloc memory
  * @pgoff: number of pages into addr before first page to map
- * @returns: 0 for success, -Exxx on failure
+ *
+ * Returns: 0 for success, -Exxx on failure
  *
  * This function checks that addr is a valid vmalloc'ed area, and
  * that it is big enough to cover the vma. Will return failure if
@@ -829,7 +830,8 @@ static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 /**
  * alloc_vm_area - allocate a range of kernel address space
  * @size: size of the area
- * @returns: NULL on failure, vm_struct on success
+ *
+ * Returns: NULL on failure, vm_struct on success
  *
  * This function reserves a range of kernel address space, and
  * allocates pagetables to map that range. No actual mappings
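
Both vmalloc.c hunks promote the return-value note to a Returns: line. remap_vmalloc_range() is typically called from a driver's ->mmap handler; a minimal sketch, assuming a buffer allocated elsewhere with vmalloc_user() (the driver names are hypothetical):

	#include <linux/vmalloc.h>
	#include <linux/mm.h>
	#include <linux/fs.h>

	/* Hypothetical driver state: a vmalloc_user() buffer exposed to userspace. */
	static void *mydrv_buf;

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* Returns 0 on success, -Exxx if the area is invalid or too small
		 * to cover the whole vma. */
		return remap_vmalloc_range(vma, mydrv_buf, 0 /* pgoff into buffer */);
	}
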