aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2009-03-06 00:33:59 -0500
committerTejun Heo <tj@kernel.org>2009-03-06 00:33:59 -0500
commitcafe8816b217b98dc3f268d3b77445da498beb4f (patch)
tree7ebbb4e9fc2c4f6ec0035170d4736aecc00d2501
parent61ace7fa2fff9c4b6641c506b6b3f1a9394a1b11 (diff)
percpu: use negative for auto for pcpu_setup_first_chunk() arguments
Impact: argument semantic cleanup In pcpu_setup_first_chunk(), zero @unit_size and @dyn_size meant auto-sizing. It's okay for @unit_size as 0 doesn't make sense but 0 dynamic reserve size is valid. Also, if arch @dyn_size is calculated from other parameters, it might end up passing in 0 @dyn_size and malfunction when the size is automatically adjusted. This patch makes both @unit_size and @dyn_size ssize_t and uses -1 for auto sizing. Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--arch/x86/kernel/setup_percpu.c2
-rw-r--r--include/linux/percpu.h5
-rw-r--r--mm/percpu.c46
3 files changed, 29 insertions, 24 deletions
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index c29f301d3885..ef3a2cd3fe64 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -344,7 +344,7 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
344 pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n", 344 pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
345 pcpu4k_nr_static_pages, static_size); 345 pcpu4k_nr_static_pages, static_size);
346 346
347 ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL, 347 ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, -1, -1, NULL,
348 pcpu4k_populate_pte); 348 pcpu4k_populate_pte);
349 goto out_free_ar; 349 goto out_free_ar;
350 350
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index a0b4ea2a3354..a96fc53bbd62 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -117,8 +117,9 @@ typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
117typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr); 117typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
118 118
119extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, 119extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
120 size_t static_size, size_t unit_size, 120 size_t static_size,
121 size_t dyn_size, void *base_addr, 121 ssize_t unit_size, ssize_t dyn_size,
122 void *base_addr,
122 pcpu_populate_pte_fn_t populate_pte_fn); 123 pcpu_populate_pte_fn_t populate_pte_fn);
123 124
124/* 125/*
diff --git a/mm/percpu.c b/mm/percpu.c
index 503ccad091af..a84cf9977faf 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -824,8 +824,8 @@ EXPORT_SYMBOL_GPL(free_percpu);
824 * pcpu_setup_first_chunk - initialize the first percpu chunk 824 * pcpu_setup_first_chunk - initialize the first percpu chunk
825 * @get_page_fn: callback to fetch page pointer 825 * @get_page_fn: callback to fetch page pointer
826 * @static_size: the size of static percpu area in bytes 826 * @static_size: the size of static percpu area in bytes
827 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, 0 for auto 827 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
828 * @dyn_size: free size for dynamic allocation in bytes, 0 for auto 828 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
829 * @base_addr: mapped address, NULL for auto 829 * @base_addr: mapped address, NULL for auto
830 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary 830 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
831 * 831 *
@@ -842,13 +842,14 @@ EXPORT_SYMBOL_GPL(free_percpu);
842 * indicates end of pages for the cpu. Note that @get_page_fn() must 842 * indicates end of pages for the cpu. Note that @get_page_fn() must
843 * return the same number of pages for all cpus. 843 * return the same number of pages for all cpus.
844 * 844 *
845 * @unit_size, if non-zero, determines unit size and must be aligned 845 * @unit_size, if non-negative, specifies unit size and must be
846 * to PAGE_SIZE and equal to or larger than @static_size + @dyn_size. 846 * aligned to PAGE_SIZE and equal to or larger than @static_size +
847 * @dyn_size.
847 * 848 *
848 * @dyn_size determines the number of free bytes after the static 849 * @dyn_size, if non-negative, limits the number of bytes available
849 * area in the first chunk. If zero, whatever left is available. 850 * for dynamic allocation in the first chunk. Specifying non-negative
850 * Specifying non-zero value make percpu leave the area after 851 * value make percpu leave alone the area beyond @static_size +
851 * @static_size + @dyn_size alone. 852 * @dyn_size.
852 * 853 *
853 * Non-null @base_addr means that the caller already allocated virtual 854 * Non-null @base_addr means that the caller already allocated virtual
854 * region for the first chunk and mapped it. percpu must not mess 855 * region for the first chunk and mapped it. percpu must not mess
@@ -863,8 +864,9 @@ EXPORT_SYMBOL_GPL(free_percpu);
863 * percpu access. 864 * percpu access.
864 */ 865 */
865size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, 866size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
866 size_t static_size, size_t unit_size, 867 size_t static_size,
867 size_t dyn_size, void *base_addr, 868 ssize_t unit_size, ssize_t dyn_size,
869 void *base_addr,
868 pcpu_populate_pte_fn_t populate_pte_fn) 870 pcpu_populate_pte_fn_t populate_pte_fn)
869{ 871{
870 static struct vm_struct first_vm; 872 static struct vm_struct first_vm;
@@ -877,13 +879,17 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
877 /* santiy checks */ 879 /* santiy checks */
878 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC); 880 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC);
879 BUG_ON(!static_size); 881 BUG_ON(!static_size);
880 BUG_ON(!unit_size && dyn_size); 882 if (unit_size >= 0) {
881 BUG_ON(unit_size && unit_size < static_size + dyn_size); 883 BUG_ON(unit_size < static_size +
882 BUG_ON(unit_size & ~PAGE_MASK); 884 (dyn_size >= 0 ? dyn_size : 0));
883 BUG_ON(base_addr && !unit_size); 885 BUG_ON(unit_size & ~PAGE_MASK);
886 } else {
887 BUG_ON(dyn_size >= 0);
888 BUG_ON(base_addr);
889 }
884 BUG_ON(base_addr && populate_pte_fn); 890 BUG_ON(base_addr && populate_pte_fn);
885 891
886 if (unit_size) 892 if (unit_size >= 0)
887 pcpu_unit_pages = unit_size >> PAGE_SHIFT; 893 pcpu_unit_pages = unit_size >> PAGE_SHIFT;
888 else 894 else
889 pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT, 895 pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
@@ -894,6 +900,9 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
894 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) 900 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
895 + num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *); 901 + num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);
896 902
903 if (dyn_size < 0)
904 dyn_size = pcpu_unit_size - static_size;
905
897 /* 906 /*
898 * Allocate chunk slots. The additional last slot is for 907 * Allocate chunk slots. The additional last slot is for
899 * empty chunks. 908 * empty chunks.
@@ -909,12 +918,7 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
909 schunk->vm = &first_vm; 918 schunk->vm = &first_vm;
910 schunk->map = smap; 919 schunk->map = smap;
911 schunk->map_alloc = ARRAY_SIZE(smap); 920 schunk->map_alloc = ARRAY_SIZE(smap);
912 921 schunk->free_size = dyn_size;
913 if (dyn_size)
914 schunk->free_size = dyn_size;
915 else
916 schunk->free_size = pcpu_unit_size - static_size;
917
918 schunk->contig_hint = schunk->free_size; 922 schunk->contig_hint = schunk->free_size;
919 923
920 schunk->map[schunk->map_used++] = -static_size; 924 schunk->map[schunk->map_used++] = -static_size;