-rw-r--r--   mm/bootmem.c             2
-rw-r--r--   mm/maccess.c             2
-rw-r--r--   mm/memcontrol.c          2
-rw-r--r--   mm/process_vm_access.c   2
-rw-r--r--   mm/swap.c                4
-rw-r--r--   mm/z3fold.c              4
-rw-r--r--   mm/zbud.c                4
-rw-r--r--   mm/zpool.c              20
8 files changed, 20 insertions, 20 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 6aef64254203..9e197987b67d 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -410,7 +410,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 
 /**
  * free_bootmem - mark a page range as usable
- * @addr: starting physical address of the range
+ * @physaddr: starting physical address of the range
  * @size: size of the range in bytes
  *
  * Partial pages will be considered reserved and left as they are.
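
As a brief illustration of the interface documented here, a minimal hedged sketch of returning an early-boot reservation might look as follows; the physical address, size and helper name are illustrative only, and the call must happen while the bootmem allocator is still active.

#include <linux/bootmem.h>

/* Hedged sketch: hand a no-longer-needed early reservation back to bootmem. */
static void __init example_release_early_range(void)
{
        unsigned long physaddr = 0x1000000;     /* hypothetical reservation */
        unsigned long size = 4 * PAGE_SIZE;

        /* Only whole pages become usable again; partial pages stay reserved. */
        free_bootmem(physaddr, size);
}
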
diff --git a/mm/maccess.c b/mm/maccess.c
index 78f9274dd49d..ec00be51a24f 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_write);
  * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
  * @dst: Destination address, in kernel space. This buffer must be at
  *       least @count bytes long.
- * @src: Unsafe address.
+ * @unsafe_addr: Unsafe address.
  * @count: Maximum number of bytes to copy, including the trailing NUL.
  *
  * Copies a NUL-terminated string from unsafe address to kernel buffer.
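
To show how the renamed @unsafe_addr parameter is used, here is a minimal hedged sketch of copying a string that may fault, e.g. from a probe or tracing context; the buffer size and caller are hypothetical.

#include <linux/uaccess.h>
#include <linux/printk.h>

/* Hedged sketch: copy a possibly-faulting kernel string into a local buffer. */
static void example_copy_unsafe_string(const void *unsafe_addr)
{
        char buf[64];
        long ret;

        ret = strncpy_from_unsafe(buf, unsafe_addr, sizeof(buf));
        if (ret < 0)
                return;         /* -EFAULT: the address was not accessible */

        pr_debug("copied: %s\n", buf);
}
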
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3793e22977c0..13b35ffa021e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -917,7 +917,7 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
 /**
  * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
  * @page: the page
- * @zone: zone of the page
+ * @pgdat: pgdat of the page
  *
  * This function is only safe when following the LRU page isolation
  * and putback protocol: the LRU lock must be held, and the page must
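
A minimal hedged sketch of the protocol spelled out above, assuming the node-based LRU locking of this kernel generation (page_pgdat() and pgdat->lru_lock); the wrapper function is illustrative.

#include <linux/memcontrol.h>
#include <linux/mm.h>

/* Hedged sketch: resolve a page's lruvec under the documented LRU protocol. */
static void example_touch_page_lru(struct page *page)
{
        struct pglist_data *pgdat = page_pgdat(page);
        struct lruvec *lruvec;

        spin_lock_irq(&pgdat->lru_lock);
        lruvec = mem_cgroup_page_lruvec(page, pgdat);
        /* ... isolate from or put back onto lruvec's lists here ... */
        spin_unlock_irq(&pgdat->lru_lock);
}
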
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 16424b9ae424..f24c297dba6f 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -25,7 +25,7 @@
 /**
  * process_vm_rw_pages - read/write pages from task specified
  * @pages: array of pointers to pages we want to copy
- * @start_offset: offset in page to start copying from/to
+ * @offset: offset in page to start copying from/to
  * @len: number of bytes to copy
  * @iter: where to copy to/from locally
  * @vm_write: 0 means copy from, 1 means copy to
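
process_vm_rw_pages() is an internal helper behind the process_vm_readv()/process_vm_writev() syscalls. As a hedged userspace-side sketch (the pid, remote address and wrapper are illustrative), the copy-from path (@vm_write == 0) is reached roughly like this:

#include <sys/types.h>
#include <sys/uio.h>

/* Hedged sketch: read @len bytes from another process's address space. */
ssize_t read_remote(pid_t pid, void *remote_addr, void *local_buf, size_t len)
{
        struct iovec local  = { .iov_base = local_buf,   .iov_len = len };
        struct iovec remote = { .iov_base = remote_addr, .iov_len = len };

        /* One local and one remote iovec; the flags argument is unused (0). */
        return process_vm_readv(pid, &local, 1, &remote, 1, 0);
}
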
diff --git a/mm/swap.c b/mm/swap.c
index 10568b1548d4..567a7b96e41d 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -913,11 +913,11 @@ EXPORT_SYMBOL(__pagevec_lru_add);
  * @pvec: Where the resulting entries are placed
  * @mapping: The address_space to search
  * @start: The starting entry index
- * @nr_entries: The maximum number of entries
+ * @nr_pages: The maximum number of pages
  * @indices: The cache indices corresponding to the entries in @pvec
  *
  * pagevec_lookup_entries() will search for and return a group of up
- * to @nr_entries pages and shadow entries in the mapping. All
+ * to @nr_pages pages and shadow entries in the mapping. All
  * entries are placed in @pvec. pagevec_lookup_entries() takes a
  * reference against actual pages in @pvec.
  *
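
A hedged sketch of the usual calling pattern, modelled on the truncate-style loops of the same era; helpers such as pagevec_remove_exceptionals() and radix_tree_exceptional_entry() are assumed from that kernel generation.

#include <linux/pagevec.h>
#include <linux/radix-tree.h>

/* Hedged sketch: walk both real pages and shadow entries of a mapping. */
static void example_scan_entries(struct address_space *mapping)
{
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        pgoff_t index = 0;
        int i;

        pagevec_init(&pvec);
        while (pagevec_lookup_entries(&pvec, mapping, index,
                                      PAGEVEC_SIZE, indices)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (radix_tree_exceptional_entry(page))
                                continue;       /* shadow entry, no page ref */
                        /* ... work on the real, referenced page ... */
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);         /* drop the references taken */
                index++;
        }
}
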
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 39e19125d6a0..d589d318727f 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -769,7 +769,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 /**
  * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
  * @pool: pool from which a page will attempt to be evicted
- * @retires: number of pages on the LRU list for which eviction will
+ * @retries: number of pages on the LRU list for which eviction will
  *           be attempted before failing
  *
  * z3fold reclaim is different from normal system reclaim in that it is done
@@ -779,7 +779,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
  * z3fold and the user, however.
  *
  * To avoid these, this is how z3fold_reclaim_page() should be called:
-
+ *
  * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
  * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
  * call the user-defined eviction handler with the pool and handle as
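
The sequence above can be pictured with a hedged sketch; in-tree these entry points are reached through the zpool layer rather than called directly, and the handler name and retry count below are illustrative only.

#include <linux/printk.h>

/* Hedged sketch: the shape of the user-defined eviction handler. */
static int example_evict(struct z3fold_pool *pool, unsigned long handle)
{
        /*
         * Called back by z3fold_reclaim_page() for each live allocation
         * in the page being reclaimed: write the data back, release the
         * handle, and return 0; a non-zero return aborts the eviction.
         */
        return 0;
}

/* Hedged sketch: attempt reclaim, trying up to eight LRU candidates. */
static void example_reclaim(struct z3fold_pool *pool)
{
        if (z3fold_reclaim_page(pool, 8))
                pr_debug("z3fold: no page could be reclaimed\n");
}
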
diff --git a/mm/zbud.c b/mm/zbud.c
index b42322e50f63..28458f7d1e84 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -466,7 +466,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
 /**
  * zbud_reclaim_page() - evicts allocations from a pool page and frees it
  * @pool: pool from which a page will attempt to be evicted
- * @retires: number of pages on the LRU list for which eviction will
+ * @retries: number of pages on the LRU list for which eviction will
  *           be attempted before failing
  *
  * zbud reclaim is different from normal system reclaim in that the reclaim is
@@ -476,7 +476,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
  * the user, however.
  *
  * To avoid these, this is how zbud_reclaim_page() should be called:
-
+ *
  * The user detects a page should be reclaimed and calls zbud_reclaim_page().
  * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call
  * the user-defined eviction handler with the pool and handle as arguments.
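
The same convention for zbud, as a hedged sketch assuming the historical <linux/zbud.h> interface; the ops instance would be passed to zbud_create_pool() so the pool can call the handler back, and the handler name and retry count are illustrative.

#include <linux/zbud.h>
#include <linux/printk.h>

/* Hedged sketch: eviction handler invoked by zbud_reclaim_page(). */
static int example_evict(struct zbud_pool *pool, unsigned long handle)
{
        /* Write back the data behind @handle, release it, and return 0. */
        return 0;
}

static const struct zbud_ops example_ops = {
        .evict = example_evict,
};

/* Hedged sketch: try up to eight LRU pages before reporting failure. */
static void example_reclaim(struct zbud_pool *pool)
{
        if (zbud_reclaim_page(pool, 8))
                pr_debug("zbud: reclaim did not free a page\n");
}
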
diff --git a/mm/zpool.c b/mm/zpool.c
index be67bcffb9ef..f8cb83e7699b 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -201,7 +201,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
 
 /**
  * zpool_destroy_pool() - Destroy a zpool
- * @pool: The zpool to destroy.
+ * @zpool: The zpool to destroy.
  *
  * Implementations must guarantee this to be thread-safe,
  * however only when destroying different pools. The same
@@ -224,7 +224,7 @@ void zpool_destroy_pool(struct zpool *zpool)
 
 /**
  * zpool_get_type() - Get the type of the zpool
- * @pool: The zpool to check
+ * @zpool: The zpool to check
  *
  * This returns the type of the pool.
  *
@@ -239,7 +239,7 @@ const char *zpool_get_type(struct zpool *zpool)
 
 /**
  * zpool_malloc() - Allocate memory
- * @pool: The zpool to allocate from.
+ * @zpool: The zpool to allocate from.
  * @size: The amount of memory to allocate.
  * @gfp: The GFP flags to use when allocating memory.
  * @handle: Pointer to the handle to set
@@ -261,7 +261,7 @@ int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
 
 /**
  * zpool_free() - Free previously allocated memory
- * @pool: The zpool that allocated the memory.
+ * @zpool: The zpool that allocated the memory.
  * @handle: The handle to the memory to free.
  *
  * This frees previously allocated memory. This does not guarantee
@@ -280,7 +280,7 @@ void zpool_free(struct zpool *zpool, unsigned long handle)
 
 /**
  * zpool_shrink() - Shrink the pool size
- * @pool: The zpool to shrink.
+ * @zpool: The zpool to shrink.
  * @pages: The number of pages to shrink the pool.
  * @reclaimed: The number of pages successfully evicted.
  *
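
A hedged sketch of driving reclaim through this entry point; the eviction handler would have been supplied via the ops argument of zpool_create_pool(), and the handler name and page count below are illustrative.

#include <linux/zpool.h>
#include <linux/printk.h>

/* Hedged sketch: handler the backend calls back for each entry it evicts. */
static int example_evict(struct zpool *pool, unsigned long handle)
{
        /* Write back and free the allocation behind @handle, then return 0. */
        return 0;
}

static const struct zpool_ops example_ops = {
        .evict = example_evict,         /* passed to zpool_create_pool() */
};

/* Hedged sketch: ask the backend to evict up to four pages. */
static void example_shrink(struct zpool *pool)
{
        unsigned int reclaimed = 0;

        if (zpool_shrink(pool, 4, &reclaimed))
                pr_debug("zpool: backend could not reclaim\n");
        else
                pr_debug("zpool: reclaimed %u pages\n", reclaimed);
}
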
@@ -304,11 +304,11 @@ int zpool_shrink(struct zpool *zpool, unsigned int pages,
 
 /**
  * zpool_map_handle() - Map a previously allocated handle into memory
- * @pool: The zpool that the handle was allocated from
+ * @zpool: The zpool that the handle was allocated from
  * @handle: The handle to map
- * @mm: How the memory should be mapped
+ * @mapmode: How the memory should be mapped
  *
- * This maps a previously allocated handle into memory. The @mm
+ * This maps a previously allocated handle into memory. The @mapmode
  * param indicates to the implementation how the memory will be
  * used, i.e. read-only, write-only, read-write. If the
  * implementation does not support it, the memory will be treated
@@ -332,7 +332,7 @@ void *zpool_map_handle(struct zpool *zpool, unsigned long handle,
 
 /**
  * zpool_unmap_handle() - Unmap a previously mapped handle
- * @pool: The zpool that the handle was allocated from
+ * @zpool: The zpool that the handle was allocated from
  * @handle: The handle to unmap
  *
  * This unmaps a previously mapped handle. Any locks or other
@@ -347,7 +347,7 @@ void zpool_unmap_handle(struct zpool *zpool, unsigned long handle)
 
 /**
  * zpool_get_total_size() - The total size of the pool
- * @pool: The zpool to check
+ * @zpool: The zpool to check
  *
  * This returns the total size in bytes of the pool.
  *
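
Putting the zpool calls touched by this patch together, a hedged end-to-end sketch might look as follows; the backend name "zbud", the allocation size, the fill pattern and the error handling are illustrative only.

#include <linux/zpool.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/printk.h>

/* Hedged sketch: allocate, map, use, and release one zpool entry. */
static int example_zpool_roundtrip(void)
{
        struct zpool *pool;
        unsigned long handle;
        char *buf;

        pool = zpool_create_pool("zbud", "example", GFP_KERNEL, NULL);
        if (!pool)
                return -ENOMEM;

        if (zpool_malloc(pool, 128, GFP_KERNEL, &handle)) {
                zpool_destroy_pool(pool);
                return -ENOMEM;
        }

        /* Map read-write, fill the allocation, and unmap promptly. */
        buf = zpool_map_handle(pool, handle, ZPOOL_MM_RW);
        memset(buf, 0x5a, 128);
        zpool_unmap_handle(pool, handle);

        pr_debug("pool type %s now holds %llu bytes\n",
                 zpool_get_type(pool), zpool_get_total_size(pool));

        zpool_free(pool, handle);
        zpool_destroy_pool(pool);
        return 0;
}
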