author		Mike Rapoport <rppt@linux.ibm.com>	2019-03-12 02:30:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-12 13:04:02 -0400
commit		a2974133b7e0a31c71fabe86aad42a61db4f01ed (patch)
tree		b2fa78a6f869e328d8c2c605db7bb164a4c8e0ae
parent		c9a688a3e918c4eb4f3916ff99a6dae8995af41b (diff)
mm: memblock: update comments and kernel-doc
* Remove comments mentioning bootmem
* Extend "DOC: memblock overview"
* Add kernel-doc comments for several more functions

[akpm@linux-foundation.org: fix copy-n-paste error]
Link: http://lkml.kernel.org/r/1549626347-25461-1-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/memblock.c	60
1 file changed, 43 insertions(+), 17 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index dfe90bc210d9..e7665cf914b1 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -76,8 +76,19 @@
  * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
  * performs such an assignment directly.
  *
- * Once memblock is setup the memory can be allocated using either
- * memblock or bootmem APIs.
+ * Once memblock is setup the memory can be allocated using one of the
+ * API variants:
+ *
+ * * :c:func:`memblock_phys_alloc*` - these functions return the
+ *   **physical** address of the allocated memory
+ * * :c:func:`memblock_alloc*` - these functions return the **virtual**
+ *   address of the allocated memory.
+ *
+ * Note that both API variants use implicit assumptions about allowed
+ * memory ranges and the fallback methods. Consult the documentation
+ * of :c:func:`memblock_alloc_internal` and
+ * :c:func:`memblock_alloc_range_nid` functions for a more elaborate
+ * description.
  *
  * As the system boot progresses, the architecture specific
  * :c:func:`mem_init` function frees all the memory to the buddy page
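
As a quick illustration of the two variants described above: a minimal
sketch of an early-boot caller (the wrapper function and panic message
are hypothetical; memblock_alloc() and memblock_phys_alloc() are the
real entry points):

static void __init early_alloc_example(void)
{
	/* memblock_alloc() returns a zeroed block by its VIRTUAL address. */
	void *table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	/* memblock_phys_alloc() returns the PHYSICAL address instead,
	 * e.g. for handing the range to firmware or a device. */
	phys_addr_t pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!table || !pa)
		panic("%s: early allocation failed\n", __func__);
}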
@@ -435,17 +446,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 	else
 		in_slab = &memblock_reserved_in_slab;
 
-	/* Try to find some space for it.
-	 *
-	 * WARNING: We assume that either slab_is_available() and we use it or
-	 * we use MEMBLOCK for allocations. That means that this is unsafe to
-	 * use when bootmem is currently active (unless bootmem itself is
-	 * implemented on top of MEMBLOCK which isn't the case yet)
-	 *
-	 * This should however not be an issue for now, as we currently only
-	 * call into MEMBLOCK while it's still active, or much later when slab
-	 * is active for memory hotplug operations
-	 */
+	/* Try to find some space for it */
 	if (use_slab) {
 		new_array = kmalloc(new_size, GFP_KERNEL);
 		addr = new_array ? __pa(new_array) : 0;
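
The behavior the deleted WARNING described still lives in the code that
follows it: once slab is up, the doubled array comes from kmalloc();
before that, memblock carves the array out of the memory it itself
tracks. A condensed sketch of that fallback (simplified from
memblock_double_array(); error handling and the reserved-region
exclusion are omitted):

	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* Pre-slab: find room for the bigger array in memblock itself. */
		addr = memblock_find_in_range(0, memblock.current_limit,
					      new_alloc_size, PAGE_SIZE);
		new_array = addr ? __va(addr) : NULL;
	}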
@@ -989,7 +990,7 @@ static bool should_skip_region(struct memblock_region *m, int nid, int flags)
 }
 
 /**
- * __next__mem_range - next function for for_each_free_mem_range() etc.
+ * __next_mem_range - next function for for_each_free_mem_range() etc.
  * @idx: pointer to u64 loop variable
  * @nid: node selector, %NUMA_NO_NODE for all nodes
  * @flags: pick from blocks based on memory attributes
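
For reference, the iterator that __next_mem_range() backs is typically
used like this (a minimal sketch; the pr_info() is illustrative only):

	phys_addr_t start, end;
	u64 i;

	/* Visit every free (memory && !reserved) range on any node. */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start, &end, NULL)
		pr_info("free range: %pa..%pa\n", &start, &end);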
@@ -1335,6 +1336,18 @@ done:
 	return found;
 }
 
+/**
+ * memblock_phys_alloc_range - allocate a memory block inside specified range
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @start: the lower bound of the memory region to allocate (physical address)
+ * @end: the upper bound of the memory region to allocate (physical address)
+ *
+ * Allocate @size bytes in the range between @start and @end.
+ *
+ * Return: physical address of the allocated memory block on success,
+ * %0 on failure.
+ */
 phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
 					     phys_addr_t align,
 					     phys_addr_t start,
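
A hypothetical caller of the function documented above, asking for 1 MiB
of physical memory below 4 GiB (buffer name and panic message are
illustrative):

	phys_addr_t buf = memblock_phys_alloc_range(SZ_1M, SZ_1M, 0, SZ_4G);

	if (!buf)
		panic("cannot allocate bounce buffer below 4GiB\n");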
@@ -1343,6 +1356,19 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
 	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
 }
 
+/**
+ * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Allocates memory block from the specified NUMA node. If the node
+ * has no available memory, attempts to allocate from any node in the
+ * system.
+ *
+ * Return: physical address of the allocated memory block on success,
+ * %0 on failure.
+ */
 phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
 	return memblock_alloc_range_nid(size, align, 0,
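
And a sketch of a NUMA-aware caller, in the style of arch code that
places per-node data on its home node (nd_size and nid come from the
hypothetical caller's context):

	/* Prefer the node's own memory; falls back to any node if empty. */
	phys_addr_t nd_pa = memblock_phys_alloc_try_nid(nd_size,
							SMP_CACHE_BYTES, nid);
	if (!nd_pa)
		panic("cannot allocate node data on node %d\n", nid);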
@@ -1469,13 +1495,13 @@ void * __init memblock_alloc_try_nid(
 }
 
 /**
- * __memblock_free_late - free bootmem block pages directly to buddy allocator
+ * __memblock_free_late - free pages directly to buddy allocator
  * @base: phys starting address of the boot memory block
  * @size: size of the boot memory block in bytes
  *
- * This is only useful when the bootmem allocator has already been torn
+ * This is only useful when the memblock allocator has already been torn
  * down, but we are still initializing the system. Pages are released directly
- * to the buddy allocator, no bootmem metadata is updated because it is gone.
+ * to the buddy allocator.
  */
 void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
 {
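
Finally, a usage sketch for __memblock_free_late(): returning an early
reservation straight to the buddy allocator late in boot (the variables
are hypothetical; real callers free memory that is only known to be
unneeded after memblock teardown):

	/* Give a no-longer-needed early buffer back to the page allocator. */
	__memblock_free_late(early_buf_pa, early_buf_size);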