diff options
author | Tejun Heo <tj@kernel.org> | 2009-08-14 02:00:50 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2009-08-14 02:00:50 -0400 |
commit | 1d9d32572163b30be81dbe1409dfa7ea9763d0e8 (patch) | |
tree | a9ba62cffda9f77637ac509331cf6367075fd2c4 /mm/percpu.c | |
parent | 9a7737691e90d3cce0e5248f91826c50e5aa3fcf (diff) |
percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
Now that all actual first chunk allocation and copying happen in the
first chunk allocators and helpers, there's no reason for
pcpu_setup_first_chunk() to try to determine @dyn_size automatically.
The only remaining user is the page first chunk allocator. Make it determine
dyn_size like other allocators and make @dyn_size mandatory for
pcpu_setup_first_chunk().
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'mm/percpu.c')
-rw-r--r-- | mm/percpu.c | 39 |
1 file changed, 19 insertions, 20 deletions
diff --git a/mm/percpu.c b/mm/percpu.c index e2ac58a39bb2..287f59cc5fb9 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -1235,7 +1235,7 @@ EXPORT_SYMBOL_GPL(free_percpu); | |||
1235 | * pcpu_setup_first_chunk - initialize the first percpu chunk | 1235 | * pcpu_setup_first_chunk - initialize the first percpu chunk |
1236 | * @static_size: the size of static percpu area in bytes | 1236 | * @static_size: the size of static percpu area in bytes |
1237 | * @reserved_size: the size of reserved percpu area in bytes, 0 for none | 1237 | * @reserved_size: the size of reserved percpu area in bytes, 0 for none |
1238 | * @dyn_size: free size for dynamic allocation in bytes, -1 for auto | 1238 | * @dyn_size: free size for dynamic allocation in bytes |
1239 | * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE | 1239 | * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE |
1240 | * @base_addr: mapped address | 1240 | * @base_addr: mapped address |
1241 | * @unit_map: cpu -> unit map, NULL for sequential mapping | 1241 | * @unit_map: cpu -> unit map, NULL for sequential mapping |
@@ -1252,10 +1252,9 @@ EXPORT_SYMBOL_GPL(free_percpu); | |||
1252 | * limited offset range for symbol relocations to guarantee module | 1252 | * limited offset range for symbol relocations to guarantee module |
1253 | * percpu symbols fall inside the relocatable range. | 1253 | * percpu symbols fall inside the relocatable range. |
1254 | * | 1254 | * |
1255 | * @dyn_size, if non-negative, determines the number of bytes | 1255 | * @dyn_size determines the number of bytes available for dynamic |
1256 | * available for dynamic allocation in the first chunk. Specifying | 1256 | * allocation in the first chunk. The area between @static_size + |
1257 | * non-negative value makes percpu leave alone the area beyond | 1257 | * @reserved_size + @dyn_size and @unit_size is unused. |
1258 | * @static_size + @reserved_size + @dyn_size. | ||
1259 | * | 1258 | * |
1260 | * @unit_size specifies unit size and must be aligned to PAGE_SIZE and | 1259 | * @unit_size specifies unit size and must be aligned to PAGE_SIZE and |
1261 | * equal to or larger than @static_size + @reserved_size + if | 1260 | * equal to or larger than @static_size + @reserved_size + if |
@@ -1276,13 +1275,12 @@ EXPORT_SYMBOL_GPL(free_percpu); | |||
1276 | * percpu access. | 1275 | * percpu access. |
1277 | */ | 1276 | */ |
1278 | size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size, | 1277 | size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size, |
1279 | ssize_t dyn_size, size_t unit_size, | 1278 | size_t dyn_size, size_t unit_size, |
1280 | void *base_addr, const int *unit_map) | 1279 | void *base_addr, const int *unit_map) |
1281 | { | 1280 | { |
1282 | static struct vm_struct first_vm; | 1281 | static struct vm_struct first_vm; |
1283 | static int smap[2], dmap[2]; | 1282 | static int smap[2], dmap[2]; |
1284 | size_t size_sum = static_size + reserved_size + | 1283 | size_t size_sum = static_size + reserved_size + dyn_size; |
1285 | (dyn_size >= 0 ? dyn_size : 0); | ||
1286 | struct pcpu_chunk *schunk, *dchunk = NULL; | 1284 | struct pcpu_chunk *schunk, *dchunk = NULL; |
1287 | unsigned int cpu, tcpu; | 1285 | unsigned int cpu, tcpu; |
1288 | int i; | 1286 | int i; |
@@ -1345,9 +1343,6 @@ size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size, | |||
1345 | pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + | 1343 | pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + |
1346 | BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); | 1344 | BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); |
1347 | 1345 | ||
1348 | if (dyn_size < 0) | ||
1349 | dyn_size = pcpu_unit_size - static_size - reserved_size; | ||
1350 | |||
1351 | first_vm.flags = VM_ALLOC; | 1346 | first_vm.flags = VM_ALLOC; |
1352 | first_vm.size = pcpu_chunk_size; | 1347 | first_vm.size = pcpu_chunk_size; |
1353 | first_vm.addr = base_addr; | 1348 | first_vm.addr = base_addr; |
@@ -1557,6 +1552,8 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size, | |||
1557 | { | 1552 | { |
1558 | static struct vm_struct vm; | 1553 | static struct vm_struct vm; |
1559 | const size_t static_size = __per_cpu_end - __per_cpu_start; | 1554 | const size_t static_size = __per_cpu_end - __per_cpu_start; |
1555 | ssize_t dyn_size = -1; | ||
1556 | size_t size_sum, unit_size; | ||
1560 | char psize_str[16]; | 1557 | char psize_str[16]; |
1561 | int unit_pages; | 1558 | int unit_pages; |
1562 | size_t pages_size; | 1559 | size_t pages_size; |
@@ -1567,8 +1564,9 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size, | |||
1567 | 1564 | ||
1568 | snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); | 1565 | snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); |
1569 | 1566 | ||
1570 | unit_pages = PFN_UP(max_t(size_t, static_size + reserved_size, | 1567 | size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size); |
1571 | PCPU_MIN_UNIT_SIZE)); | 1568 | unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); |
1569 | unit_pages = unit_size >> PAGE_SHIFT; | ||
1572 | 1570 | ||
1573 | /* unaligned allocations can't be freed, round up to page size */ | 1571 | /* unaligned allocations can't be freed, round up to page size */ |
1574 | pages_size = PFN_ALIGN(unit_pages * nr_cpu_ids * sizeof(pages[0])); | 1572 | pages_size = PFN_ALIGN(unit_pages * nr_cpu_ids * sizeof(pages[0])); |
@@ -1591,12 +1589,12 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size, | |||
1591 | 1589 | ||
1592 | /* allocate vm area, map the pages and copy static data */ | 1590 | /* allocate vm area, map the pages and copy static data */ |
1593 | vm.flags = VM_ALLOC; | 1591 | vm.flags = VM_ALLOC; |
1594 | vm.size = nr_cpu_ids * unit_pages << PAGE_SHIFT; | 1592 | vm.size = nr_cpu_ids * unit_size; |
1595 | vm_area_register_early(&vm, PAGE_SIZE); | 1593 | vm_area_register_early(&vm, PAGE_SIZE); |
1596 | 1594 | ||
1597 | for_each_possible_cpu(cpu) { | 1595 | for_each_possible_cpu(cpu) { |
1598 | unsigned long unit_addr = (unsigned long)vm.addr + | 1596 | unsigned long unit_addr = |
1599 | (cpu * unit_pages << PAGE_SHIFT); | 1597 | (unsigned long)vm.addr + cpu * unit_size; |
1600 | 1598 | ||
1601 | for (i = 0; i < unit_pages; i++) | 1599 | for (i = 0; i < unit_pages; i++) |
1602 | populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); | 1600 | populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); |
@@ -1620,11 +1618,12 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size, | |||
1620 | } | 1618 | } |
1621 | 1619 | ||
1622 | /* we're ready, commit */ | 1620 | /* we're ready, commit */ |
1623 | pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu\n", | 1621 | pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n", |
1624 | unit_pages, psize_str, vm.addr, static_size, reserved_size); | 1622 | unit_pages, psize_str, vm.addr, static_size, reserved_size, |
1623 | dyn_size); | ||
1625 | 1624 | ||
1626 | ret = pcpu_setup_first_chunk(static_size, reserved_size, -1, | 1625 | ret = pcpu_setup_first_chunk(static_size, reserved_size, dyn_size, |
1627 | unit_pages << PAGE_SHIFT, vm.addr, NULL); | 1626 | unit_size, vm.addr, NULL); |
1628 | goto out_free_ar; | 1627 | goto out_free_ar; |
1629 | 1628 | ||
1630 | enomem: | 1629 | enomem: |