author		Mike Rapoport <rppt@linux.vnet.ibm.com>		2018-04-05 19:24:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-06 00:36:27 -0400
commit		e8b098fc5747a7c871f113c9eb65453cc2d86e6f (patch)
tree		1ea298e22974f4d2eb9d36cd47efca403893b470
parent		002843de36e18bd5be6f5bb858c0de18b6447a64 (diff)
mm: kernel-doc: add missing parameter descriptions
Link: http://lkml.kernel.org/r/1519585191-10180-4-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/cma.c		|  5 +++++
-rw-r--r--	mm/compaction.c		|  1 +
-rw-r--r--	mm/kmemleak.c		| 10 ++++++++++
-rw-r--r--	mm/memory_hotplug.c	|  6 ++++++
-rw-r--r--	mm/oom_kill.c		|  2 ++
-rw-r--r--	mm/pagewalk.c		|  3 +++
-rw-r--r--	mm/rmap.c		|  1 +
-rw-r--r--	mm/zsmalloc.c		|  2 ++
8 files changed, 30 insertions(+), 0 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -165,6 +165,9 @@ core_initcall(cma_init_reserved_areas);
  * @base: Base address of the reserved area
  * @size: Size of the reserved area (in bytes),
  * @order_per_bit: Order of pages represented by one bit on bitmap.
+ * @name: The name of the area. If this parameter is NULL, the name of
+ *	  the area will be set to "cmaN", where N is a running counter of
+ *	  used areas.
  * @res_cma: Pointer to store the created cma region.
  *
  * This function creates custom contiguous area from already reserved memory.
@@ -227,6 +230,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  * @alignment: Alignment for the CMA area, should be power of 2 or zero
  * @order_per_bit: Order of pages represented by one bit on bitmap.
  * @fixed: hint about where to place the reserved area
+ * @name: The name of the area. See function cma_init_reserved_mem()
  * @res_cma: Pointer to store the created cma region.
  *
  * This function reserves memory from early allocator. It should be
@@ -390,6 +394,7 @@ static inline void cma_debug_show_areas(struct cma *cma) { }
  * @cma: Contiguous memory region for which the allocation is performed.
  * @count: Requested number of pages.
  * @align: Requested alignment of pages (in PAGE_SIZE order).
+ * @gfp_mask: GFP mask to use during compaction
  *
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
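
To make the new @name and @gfp_mask parameters concrete, here is a minimal sketch (not part of this patch) that declares a named CMA area at early boot and later allocates from it; the area name, sizes, and alignment are illustrative assumptions.

```c
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

static struct cma *demo_cma;	/* hypothetical CMA area */

static int __init demo_cma_reserve(void)
{
	/*
	 * base = 0 and fixed = false let the early allocator choose the
	 * placement; passing NULL instead of "demo" would auto-name the
	 * area "cmaN" as described above.
	 */
	return cma_declare_contiguous(0, SZ_16M, 0, SZ_1M, 0, false,
				      "demo", &demo_cma);
}

static struct page *demo_cma_alloc(void)
{
	/* gfp_mask is used while compacting/migrating pages out of the way. */
	return cma_alloc(demo_cma, 16, 0, GFP_KERNEL);
}
```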
diff --git a/mm/compaction.c b/mm/compaction.c
index a68230ab451d..88d01a50a015 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -576,6 +576,7 @@ isolate_fail:
 
 /**
  * isolate_freepages_range() - isolate free pages.
+ * @cc: Compaction control structure.
  * @start_pfn: The first PFN to start isolating.
  * @end_pfn: The one-past-last PFN.
  *
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 8029501dc65c..9a085d525bbc 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1187,6 +1187,11 @@ EXPORT_SYMBOL(kmemleak_no_scan);
 /**
  * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
  *			 address argument
+ * @phys:	physical address of the object
+ * @size:	size of the object
+ * @min_count:	minimum number of references to this object.
+ *		See kmemleak_alloc()
+ * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
  */
 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
 			       gfp_t gfp)
@@ -1199,6 +1204,9 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
 /**
  * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
  *			     physical address argument
+ * @phys:	physical address of the beginning of or inside an object. This
+ *		also represents the start of the range to be freed
+ * @size:	size to be unregistered
  */
 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
 {
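
A short, hedged sketch of the kmemleak_*_phys() calls documented above: an early-boot physical range is registered for leak tracking, then part of it is unregistered. The memblock allocation and sizes are assumed setup, not taken from this patch.

```c
#include <linux/kmemleak.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

static void __init demo_phys_tracking(void)
{
	/* Assumed setup: a 1 MiB physically addressed boot-time buffer. */
	phys_addr_t base = memblock_alloc(SZ_1M, SZ_4K);

	/*
	 * min_count == 0: track the block so pointers inside it are
	 * followed during scanning, but never report it as a leak itself;
	 * @gfp only covers kmemleak's own metadata allocation.
	 */
	kmemleak_alloc_phys(base, SZ_1M, 0, GFP_KERNEL);

	/* Hand back the upper half and unregister just that part. */
	kmemleak_free_part_phys(base + SZ_512K, SZ_512K);
}
```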
@@ -1210,6 +1218,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
 /**
  * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
  *			    address argument
+ * @phys:	physical address of the object
  */
 void __ref kmemleak_not_leak_phys(phys_addr_t phys)
 {
@@ -1221,6 +1230,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
 /**
  * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
  *			  address argument
+ * @phys:	physical address of the object
  */
 void __ref kmemleak_ignore_phys(phys_addr_t phys)
 {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6a9ba14e18ed..cc6dfa5832ca 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -541,6 +541,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms,
  * @zone: zone from which pages need to be removed
  * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
  * @nr_pages: number of pages to remove (must be multiple of section size)
+ * @altmap: alternative device page map or %NULL if default memmap is used
  *
  * Generic helper function to remove section mappings and sysfs entries
  * for the section of the memory we are removing. Caller needs to make
@@ -1044,6 +1045,7 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 
 /**
  * try_online_node - online a node if offlined
+ * @nid: the node ID
  *
  * called by cpu_up() to online a node without onlined memory.
  */
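
As the kernel-doc says, the caller is the CPU bring-up path; below is a simplified, hedged rendering of that call site. The wrapper function is hypothetical and only illustrates where @nid comes from.

```c
#include <linux/memory_hotplug.h>
#include <linux/topology.h>

/* Hypothetical wrapper modeled on the cpu_up() path. */
static int demo_bring_up_cpu(unsigned int cpu)
{
	/* Online the CPU's node first, in case it has no onlined memory. */
	int ret = try_online_node(cpu_to_node(cpu));

	if (ret)
		return ret;
	/* ... continue with the actual CPU onlining ... */
	return 0;
}
```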
@@ -1804,6 +1806,7 @@ static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
 
 /**
  * try_offline_node
+ * @nid: the node ID
  *
  * Offline a node if all memory sections and cpus of the node are removed.
  *
@@ -1847,6 +1850,9 @@ EXPORT_SYMBOL(try_offline_node);
 
 /**
  * remove_memory
+ * @nid: the node ID
+ * @start: physical address of the region to remove
+ * @size: size of the region to remove
  *
  * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
  * and online/offline operations before this call, as required by
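
A minimal sketch of the locking contract spelled out in the NOTE above; the node ID, base address, and region size are assumptions chosen for illustration.

```c
#include <linux/device.h>
#include <linux/memory_hotplug.h>
#include <linux/sizes.h>

static void demo_remove_region(void)
{
	/* Assumed values: a section-aligned 128 MiB region on node 0. */
	int nid = 0;
	u64 start = 0x100000000ULL;
	u64 size = SZ_128M;

	lock_device_hotplug();		/* serialize against online/offline */
	remove_memory(nid, start, size);
	unlock_device_hotplug();
}
```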
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f2e7dfb81eee..82a92ad67af3 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -185,6 +185,8 @@ static bool is_dump_unreclaim_slabs(void)
  * oom_badness - heuristic function to determine which candidate task to kill
  * @p: task struct of which task we should calculate
  * @totalpages: total present RAM allowed for page allocation
+ * @memcg: task's memory controller, if constrained
+ * @nodemask: nodemask passed to page allocator for mempolicy ooms
  *
  * The heuristic for determining which task to kill is made to be as simple and
  * predictable as possible. The goal is to return the highest value for the
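
For orientation, here is a simplified sketch of how a selection loop consumes these parameters. It condenses the select_bad_process() logic and is not the kernel's exact code; the function name is hypothetical.

```c
#include <linux/oom.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>

/* Hedged sketch: score every task and remember the worst offender. */
static struct task_struct *demo_pick_victim(void)
{
	unsigned long totalpages = totalram_pages + total_swap_pages;
	unsigned long points, chosen_points = 0;
	struct task_struct *p, *chosen = NULL;

	rcu_read_lock();
	for_each_process(p) {
		/* NULL @memcg/@nodemask: a global, unconstrained OOM. */
		points = oom_badness(p, NULL, NULL, totalpages);
		if (points > chosen_points) {
			chosen_points = points;
			chosen = p;
		}
	}
	rcu_read_unlock();
	return chosen;
}
```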
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 8d2da5dec1e0..c3084ff2569d 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -258,6 +258,9 @@ static int __walk_page_range(unsigned long start, unsigned long end,
 
 /**
  * walk_page_range - walk page table with caller specific callbacks
+ * @start: start address of the virtual address range
+ * @end: end address of the virtual address range
+ * @walk: mm_walk structure defining the callbacks and the target address space
  *
  * Recursively walk the page table tree of the process represented by @walk->mm
  * within the virtual address range [@start, @end). During walking, we can do
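
To ground the three new parameter descriptions, here is a hedged example walker that counts populated PTEs in a range, using the mm_walk API as it exists at this point in the tree; the callback and function names are illustrative.

```c
#include <linux/mm.h>

/* Illustrative pte_entry callback: count PTEs that are not pte_none(). */
static int demo_count_pte(pte_t *pte, unsigned long addr,
			  unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (!pte_none(*pte))
		(*count)++;
	return 0;
}

static unsigned long demo_count_present(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= demo_count_pte,
		.mm		= mm,		/* the target address space */
		.private	= &count,	/* passed through to callbacks */
	};

	down_read(&mm->mmap_sem);	/* the walk requires mmap_sem held */
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);
	return count;
}
```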
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1171,6 +1171,7 @@ void page_add_new_anon_rmap(struct page *page,
 /**
  * page_add_file_rmap - add pte mapping to a file page
  * @page: the page to add the mapping to
+ * @compound: charge the page as compound or small page
  *
  * The caller needs to hold the pte lock.
  */
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5a532ebedc44..61cb05dc950c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -851,6 +851,7 @@ static struct page *get_next_page(struct page *page)
 
 /**
  * obj_to_location - get (<page>, <obj_idx>) from encoded object value
+ * @obj: the encoded object value
  * @page: page object resides in zspage
  * @obj_idx: object index
  */
@@ -1301,6 +1302,7 @@ EXPORT_SYMBOL_GPL(zs_get_total_pages);
  * zs_map_object - get address of allocated object from handle.
  * @pool: pool from which the object was allocated
  * @handle: handle returned from zs_malloc
+ * @mm: mapping mode to use
  *
  * Before using an object allocated from zs_malloc, it must be mapped using
  * this function. When done with the object, it must be unmapped using
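
Finally, a hedged sketch of the map/unmap contract with the documented @mm mode; pool creation, the source buffer, and the function name are assumed setup rather than code from this patch.

```c
#include <linux/string.h>
#include <linux/zsmalloc.h>

static int demo_store(struct zs_pool *pool, const void *buf, size_t len)
{
	unsigned long handle = zs_malloc(pool, len, GFP_KERNEL);
	void *dst;

	if (!handle)
		return -ENOMEM;

	/* ZS_MM_WO: write-only mapping mode, since we only fill the object. */
	dst = zs_map_object(pool, handle, ZS_MM_WO);
	memcpy(dst, buf, len);
	zs_unmap_object(pool, handle);	/* unmap before anything that may sleep */
	return 0;
}
```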