-rw-r--r--  include/linux/percpu.h |  2
-rw-r--r--  mm/percpu.c            | 58
2 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 2d34b038fe70..a0b4ea2a3354 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -118,7 +118,7 @@ typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
 
 extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 				size_t static_size, size_t unit_size,
-				size_t free_size, void *base_addr,
+				size_t dyn_size, void *base_addr,
 				pcpu_populate_pte_fn_t populate_pte_fn);
 
 /*
diff --git a/mm/percpu.c b/mm/percpu.c
index 3d0f5456827c..9531590e6b69 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -831,7 +831,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
  * @get_page_fn: callback to fetch page pointer
  * @static_size: the size of static percpu area in bytes
  * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, 0 for auto
- * @free_size: free size in bytes, 0 for auto
+ * @dyn_size: free size for dynamic allocation in bytes, 0 for auto
  * @base_addr: mapped address, NULL for auto
  * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
  *
@@ -849,12 +849,12 @@ EXPORT_SYMBOL_GPL(free_percpu);
  * return the same number of pages for all cpus.
  *
  * @unit_size, if non-zero, determines unit size and must be aligned
- * to PAGE_SIZE and equal to or larger than @static_size + @free_size.
+ * to PAGE_SIZE and equal to or larger than @static_size + @dyn_size.
  *
- * @free_size determines the number of free bytes after the static
+ * @dyn_size determines the number of free bytes after the static
  * area in the first chunk. If zero, whatever left is available.
  * Specifying non-zero value make percpu leave the area after
- * @static_size + @free_size alone.
+ * @static_size + @dyn_size alone.
  *
  * Non-null @base_addr means that the caller already allocated virtual
  * region for the first chunk and mapped it. percpu must not mess
@@ -870,19 +870,19 @@ EXPORT_SYMBOL_GPL(free_percpu);
  */
 size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 				     size_t static_size, size_t unit_size,
-				     size_t free_size, void *base_addr,
+				     size_t dyn_size, void *base_addr,
 				     pcpu_populate_pte_fn_t populate_pte_fn)
 {
-	static struct vm_struct static_vm;
-	struct pcpu_chunk *static_chunk;
+	static struct vm_struct first_vm;
+	struct pcpu_chunk *schunk;
 	unsigned int cpu;
 	int nr_pages;
 	int err, i;
 
 	/* santiy checks */
 	BUG_ON(!static_size);
-	BUG_ON(!unit_size && free_size);
-	BUG_ON(unit_size && unit_size < static_size + free_size);
+	BUG_ON(!unit_size && dyn_size);
+	BUG_ON(unit_size && unit_size < static_size + dyn_size);
 	BUG_ON(unit_size & ~PAGE_MASK);
 	BUG_ON(base_addr && !unit_size);
 	BUG_ON(base_addr && populate_pte_fn);
@@ -908,24 +908,24 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 	for (i = 0; i < pcpu_nr_slots; i++)
 		INIT_LIST_HEAD(&pcpu_slot[i]);
 
-	/* init static_chunk */
-	static_chunk = alloc_bootmem(pcpu_chunk_struct_size);
-	INIT_LIST_HEAD(&static_chunk->list);
-	static_chunk->vm = &static_vm;
+	/* init static chunk */
+	schunk = alloc_bootmem(pcpu_chunk_struct_size);
+	INIT_LIST_HEAD(&schunk->list);
+	schunk->vm = &first_vm;
 
-	if (free_size)
-		static_chunk->free_size = free_size;
+	if (dyn_size)
+		schunk->free_size = dyn_size;
 	else
-		static_chunk->free_size = pcpu_unit_size - pcpu_static_size;
+		schunk->free_size = pcpu_unit_size - pcpu_static_size;
 
-	static_chunk->contig_hint = static_chunk->free_size;
+	schunk->contig_hint = schunk->free_size;
 
 	/* allocate vm address */
-	static_vm.flags = VM_ALLOC;
-	static_vm.size = pcpu_chunk_size;
+	first_vm.flags = VM_ALLOC;
+	first_vm.size = pcpu_chunk_size;
 
 	if (!base_addr)
-		vm_area_register_early(&static_vm, PAGE_SIZE);
+		vm_area_register_early(&first_vm, PAGE_SIZE);
 	else {
 		/*
 		 * Pages already mapped. No need to remap into
@@ -933,8 +933,8 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 		 * be mapped or unmapped by percpu and is marked
 		 * immutable.
 		 */
-		static_vm.addr = base_addr;
-		static_chunk->immutable = true;
+		first_vm.addr = base_addr;
+		schunk->immutable = true;
 	}
 
 	/* assign pages */
@@ -945,7 +945,7 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 
 			if (!page)
 				break;
-			*pcpu_chunk_pagep(static_chunk, cpu, i) = page;
+			*pcpu_chunk_pagep(schunk, cpu, i) = page;
 		}
 
 		BUG_ON(i < PFN_UP(pcpu_static_size));
@@ -960,20 +960,20 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 	if (populate_pte_fn) {
 		for_each_possible_cpu(cpu)
 			for (i = 0; i < nr_pages; i++)
-				populate_pte_fn(pcpu_chunk_addr(static_chunk,
+				populate_pte_fn(pcpu_chunk_addr(schunk,
 								cpu, i));
 
-		err = pcpu_map(static_chunk, 0, nr_pages);
+		err = pcpu_map(schunk, 0, nr_pages);
 		if (err)
 			panic("failed to setup static percpu area, err=%d\n",
 			      err);
 	}
 
-	/* link static_chunk in */
-	pcpu_chunk_relocate(static_chunk, -1);
-	pcpu_chunk_addr_insert(static_chunk);
+	/* link the first chunk in */
+	pcpu_chunk_relocate(schunk, -1);
+	pcpu_chunk_addr_insert(schunk);
 
 	/* we're done */
-	pcpu_base_addr = (void *)pcpu_chunk_addr(static_chunk, 0, 0);
+	pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
 	return pcpu_unit_size;
 }