Diffstat (limited to 'mm/percpu.c')
-rw-r--r-- | mm/percpu.c | 1318 |
1 file changed, 1028 insertions, 290 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index 5fe37842e0ea..3f9f182f9b44 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -8,12 +8,13 @@ | |||
8 | * | 8 | * |
9 | * This is percpu allocator which can handle both static and dynamic | 9 | * This is percpu allocator which can handle both static and dynamic |
10 | * areas. Percpu areas are allocated in chunks in vmalloc area. Each | 10 | * areas. Percpu areas are allocated in chunks in vmalloc area. Each |
11 | * chunk consists of nr_cpu_ids units and the first chunk is used | 11 | * chunk consists of a boot-time determined number of units and the |
12 | * for static percpu variables in the kernel image (special boot time | 12 | * first chunk is used for static percpu variables in the kernel image |
13 | * alloc/init handling necessary as these areas need to be brought up | 13 | * (special boot time alloc/init handling necessary as these areas |
14 | * before allocation services are running). Unit grows as necessary | 14 | * need to be brought up before allocation services are running). |
15 | * and all units grow or shrink in unison. When a chunk is filled up, | 15 | * Unit grows as necessary and all units grow or shrink in unison. |
16 | * another chunk is allocated. ie. in vmalloc area | 16 | * When a chunk is filled up, another chunk is allocated. ie. in |
17 | * vmalloc area | ||
17 | * | 18 | * |
18 | * c0 c1 c2 | 19 | * c0 c1 c2 |
19 | * ------------------- ------------------- ------------ | 20 | * ------------------- ------------------- ------------ |
@@ -22,11 +23,13 @@ | |||
22 | * | 23 | * |
23 | * Allocation is done in offset-size areas of single unit space. Ie, | 24 | * Allocation is done in offset-size areas of single unit space. Ie, |
24 | * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0, | 25 | * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0, |
25 | * c1:u1, c1:u2 and c1:u3. Percpu access can be done by configuring | 26 | * c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly to |
26 | * percpu base registers pcpu_unit_size apart. | 27 | * cpus. On NUMA, the mapping can be non-linear and even sparse. |
28 | * Percpu access can be done by configuring percpu base registers | ||
29 | * according to cpu to unit mapping and pcpu_unit_size. | ||
27 | * | 30 | * |
28 | * There are usually many small percpu allocations, many of them as | 31 | * There are usually many small percpu allocations, many of them being |
29 | * small as 4 bytes. The allocator organizes chunks into lists | 32 | * as small as 4 bytes. The allocator organizes chunks into lists |
30 | * according to free size and tries to allocate from the fullest one. | 33 | * according to free size and tries to allocate from the fullest one. |
31 | * Each chunk keeps the maximum contiguous area size hint which is | 34 | * Each chunk keeps the maximum contiguous area size hint which is |
32 | * guaranteed to be equal to or larger than the maximum contiguous | 35 | * guaranteed to be equal to or larger than the maximum contiguous |
@@ -43,7 +46,7 @@ | |||
43 | * | 46 | * |
44 | * To use this allocator, arch code should do the following. | 47 | * To use this allocator, arch code should do the following. |
45 | * | 48 | * |
46 | * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA | 49 | * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA |
47 | * | 50 | * |
48 | * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate | 51 | * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate |
49 | * regular address to percpu pointer and back if they need to be | 52 | * regular address to percpu pointer and back if they need to be |
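To make the layout described in the header comment above concrete, here is a minimal userspace sketch (not kernel code; NR_CPUS, UNIT_SIZE and the map contents are invented stand-ins for nr_cpu_ids, pcpu_unit_size and pcpu_unit_map) of how a chunk-relative offset plus a cpu's unit selects that cpu's copy of a percpu area:

/* Illustrative model only: a chunk is NR_CPUS units laid out back to back,
 * and a cpu reaches its copy through its entry in the unit map. */
#include <stdio.h>

#define NR_CPUS   4
#define UNIT_SIZE 0x4000UL			/* stand-in for pcpu_unit_size */

static const int unit_map[NR_CPUS] = { 0, 1, 3, 2 };	/* may be non-linear on NUMA */

static void *per_cpu_addr(void *chunk_base, unsigned int cpu, unsigned long off)
{
	/* each cpu's copy lives in its own unit inside the chunk */
	return (char *)chunk_base + unit_map[cpu] * UNIT_SIZE + off;
}

int main(void)
{
	static char chunk[NR_CPUS * UNIT_SIZE];	/* stands in for one vmalloc chunk */
	unsigned long off = 0x180;		/* offset of an allocated area */
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u -> unit %d -> %p\n",
		       cpu, unit_map[cpu], per_cpu_addr(chunk, cpu, off));
	return 0;
}

On UMA the map would simply be the identity; the point of the indirection is that NUMA configurations can reorder units or leave holes, as the comment above notes.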
@@ -56,6 +59,7 @@ | |||
56 | #include <linux/bitmap.h> | 59 | #include <linux/bitmap.h> |
57 | #include <linux/bootmem.h> | 60 | #include <linux/bootmem.h> |
58 | #include <linux/list.h> | 61 | #include <linux/list.h> |
62 | #include <linux/log2.h> | ||
59 | #include <linux/mm.h> | 63 | #include <linux/mm.h> |
60 | #include <linux/module.h> | 64 | #include <linux/module.h> |
61 | #include <linux/mutex.h> | 65 | #include <linux/mutex.h> |
@@ -94,20 +98,27 @@ struct pcpu_chunk { | |||
94 | int map_alloc; /* # of map entries allocated */ | 98 | int map_alloc; /* # of map entries allocated */ |
95 | int *map; /* allocation map */ | 99 | int *map; /* allocation map */ |
96 | bool immutable; /* no [de]population allowed */ | 100 | bool immutable; /* no [de]population allowed */ |
97 | struct page **page; /* points to page array */ | 101 | unsigned long populated[]; /* populated bitmap */ |
98 | struct page *page_ar[]; /* #cpus * UNIT_PAGES */ | ||
99 | }; | 102 | }; |
100 | 103 | ||
101 | static int pcpu_unit_pages __read_mostly; | 104 | static int pcpu_unit_pages __read_mostly; |
102 | static int pcpu_unit_size __read_mostly; | 105 | static int pcpu_unit_size __read_mostly; |
106 | static int pcpu_nr_units __read_mostly; | ||
103 | static int pcpu_chunk_size __read_mostly; | 107 | static int pcpu_chunk_size __read_mostly; |
104 | static int pcpu_nr_slots __read_mostly; | 108 | static int pcpu_nr_slots __read_mostly; |
105 | static size_t pcpu_chunk_struct_size __read_mostly; | 109 | static size_t pcpu_chunk_struct_size __read_mostly; |
106 | 110 | ||
111 | /* cpus with the lowest and highest unit numbers */ | ||
112 | static unsigned int pcpu_first_unit_cpu __read_mostly; | ||
113 | static unsigned int pcpu_last_unit_cpu __read_mostly; | ||
114 | |||
107 | /* the address of the first chunk which starts with the kernel static area */ | 115 | /* the address of the first chunk which starts with the kernel static area */ |
108 | void *pcpu_base_addr __read_mostly; | 116 | void *pcpu_base_addr __read_mostly; |
109 | EXPORT_SYMBOL_GPL(pcpu_base_addr); | 117 | EXPORT_SYMBOL_GPL(pcpu_base_addr); |
110 | 118 | ||
119 | /* cpu -> unit map */ | ||
120 | const int *pcpu_unit_map __read_mostly; | ||
121 | |||
111 | /* | 122 | /* |
112 | * The first chunk which always exists. Note that unlike other | 123 | * The first chunk which always exists. Note that unlike other |
113 | * chunks, this one can be allocated and mapped in several different | 124 | * chunks, this one can be allocated and mapped in several different |
@@ -129,9 +140,9 @@ static int pcpu_reserved_chunk_limit; | |||
129 | * Synchronization rules. | 140 | * Synchronization rules. |
130 | * | 141 | * |
131 | * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former | 142 | * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former |
132 | * protects allocation/reclaim paths, chunks and chunk->page arrays. | 143 | * protects allocation/reclaim paths, chunks, populated bitmap and |
133 | * The latter is a spinlock and protects the index data structures - | 144 | * vmalloc mapping. The latter is a spinlock and protects the index |
134 | * chunk slots, chunks and area maps in chunks. | 145 | * data structures - chunk slots, chunks and area maps in chunks. |
135 | * | 146 | * |
136 | * During allocation, pcpu_alloc_mutex is kept locked all the time and | 147 | * During allocation, pcpu_alloc_mutex is kept locked all the time and |
137 | * pcpu_lock is grabbed and released as necessary. All actual memory | 148 | * pcpu_lock is grabbed and released as necessary. All actual memory |
@@ -178,13 +189,7 @@ static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) | |||
178 | 189 | ||
179 | static int pcpu_page_idx(unsigned int cpu, int page_idx) | 190 | static int pcpu_page_idx(unsigned int cpu, int page_idx) |
180 | { | 191 | { |
181 | return cpu * pcpu_unit_pages + page_idx; | 192 | return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; |
182 | } | ||
183 | |||
184 | static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk, | ||
185 | unsigned int cpu, int page_idx) | ||
186 | { | ||
187 | return &chunk->page[pcpu_page_idx(cpu, page_idx)]; | ||
188 | } | 193 | } |
189 | 194 | ||
190 | static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, | 195 | static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, |
@@ -194,10 +199,13 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, | |||
194 | (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT); | 199 | (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT); |
195 | } | 200 | } |
196 | 201 | ||
197 | static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk, | 202 | static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, |
198 | int page_idx) | 203 | unsigned int cpu, int page_idx) |
199 | { | 204 | { |
200 | return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL; | 205 | /* must not be used on pre-mapped chunk */ |
206 | WARN_ON(chunk->immutable); | ||
207 | |||
208 | return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); | ||
201 | } | 209 | } |
202 | 210 | ||
203 | /* set the pointer to a chunk in a page struct */ | 211 | /* set the pointer to a chunk in a page struct */ |
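The helpers in the hunk above reduce page bookkeeping to index arithmetic: pcpu_page_idx() turns a (cpu, page) pair into unit_map[cpu] * pcpu_unit_pages + page_idx, and pcpu_chunk_addr() shifts that index by PAGE_SHIFT past the chunk base. A hypothetical userspace rendition of the same arithmetic, with made-up constants standing in for the kernel globals:

#include <stdio.h>

#define PAGE_SHIFT	12
#define UNIT_PAGES	8			/* stand-in for pcpu_unit_pages */

static const int unit_map[] = { 0, 1, 2, 3 };	/* stand-in for pcpu_unit_map */

static int page_idx(unsigned int cpu, int pg)
{
	return unit_map[cpu] * UNIT_PAGES + pg;	/* mirrors pcpu_page_idx() */
}

static unsigned long chunk_addr(unsigned long base, unsigned int cpu, int pg)
{
	/* mirrors pcpu_chunk_addr(): base plus the page index in page units */
	return base + ((unsigned long)page_idx(cpu, pg) << PAGE_SHIFT);
}

int main(void)
{
	unsigned long base = 0xffffc90000000000UL;	/* pretend vmalloc base */

	printf("cpu2, page 3 -> idx %d, addr %#lx\n",
	       page_idx(2, 3), chunk_addr(base, 2, 3));
	return 0;
}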
@@ -212,6 +220,34 @@ static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) | |||
212 | return (struct pcpu_chunk *)page->index; | 220 | return (struct pcpu_chunk *)page->index; |
213 | } | 221 | } |
214 | 222 | ||
223 | static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end) | ||
224 | { | ||
225 | *rs = find_next_zero_bit(chunk->populated, end, *rs); | ||
226 | *re = find_next_bit(chunk->populated, end, *rs + 1); | ||
227 | } | ||
228 | |||
229 | static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end) | ||
230 | { | ||
231 | *rs = find_next_bit(chunk->populated, end, *rs); | ||
232 | *re = find_next_zero_bit(chunk->populated, end, *rs + 1); | ||
233 | } | ||
234 | |||
235 | /* | ||
236 | * (Un)populated page region iterators. Iterate over (un)populated | ||
237 | * page regions between @start and @end in @chunk. @rs and @re should | ||
238 | * be integer variables and will be set to start and end page index of | ||
239 | * the current region. | ||
240 | */ | ||
241 | #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \ | ||
242 | for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \ | ||
243 | (rs) < (re); \ | ||
244 | (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end))) | ||
245 | |||
246 | #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \ | ||
247 | for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \ | ||
248 | (rs) < (re); \ | ||
249 | (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end))) | ||
250 | |||
215 | /** | 251 | /** |
216 | * pcpu_mem_alloc - allocate memory | 252 | * pcpu_mem_alloc - allocate memory |
217 | * @size: bytes to allocate | 253 | * @size: bytes to allocate |
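The pcpu_next_unpop()/pcpu_next_pop() helpers and the two iteration macros above walk maximal runs of clear or set bits in chunk->populated. The following standalone sketch models the same idea with a naive bit scan instead of the kernel's find_next_bit()/find_next_zero_bit(); the bitmap value and page count are arbitrary:

#include <stdio.h>

#define NR_PAGES 11
/* toy "populated" bitmap: bit i set means page i is populated (pages 2,3,6,7,8) */
static const unsigned populated = 0x1CCu;

static int next_bit(unsigned map, int from, int val)
{
	/* naive stand-in for find_next_bit()/find_next_zero_bit() */
	while (from < NR_PAGES && (int)((map >> from) & 1) != val)
		from++;
	return from;				/* >= NR_PAGES means "no more" */
}

int main(void)
{
	int rs, re;

	/* walk populated page regions the way pcpu_for_each_pop_region() does */
	rs = next_bit(populated, 0, 1);
	re = next_bit(populated, rs + 1, 0);
	while (rs < NR_PAGES) {
		printf("populated region: pages [%d,%d)\n", rs, re);
		rs = next_bit(populated, re + 1, 1);
		re = next_bit(populated, rs + 1, 0);
	}
	return 0;
}

This prints [2,4) and [6,9), i.e. each contiguous populated run exactly once, which is what lets the populate/depopulate paths below operate region by region.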
@@ -290,13 +326,21 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) | |||
290 | void *first_start = pcpu_first_chunk->vm->addr; | 326 | void *first_start = pcpu_first_chunk->vm->addr; |
291 | 327 | ||
292 | /* is it in the first chunk? */ | 328 | /* is it in the first chunk? */ |
293 | if (addr >= first_start && addr < first_start + pcpu_chunk_size) { | 329 | if (addr >= first_start && addr < first_start + pcpu_unit_size) { |
294 | /* is it in the reserved area? */ | 330 | /* is it in the reserved area? */ |
295 | if (addr < first_start + pcpu_reserved_chunk_limit) | 331 | if (addr < first_start + pcpu_reserved_chunk_limit) |
296 | return pcpu_reserved_chunk; | 332 | return pcpu_reserved_chunk; |
297 | return pcpu_first_chunk; | 333 | return pcpu_first_chunk; |
298 | } | 334 | } |
299 | 335 | ||
336 | /* | ||
337 | * The address is relative to unit0 which might be unused and | ||
338 | * thus unmapped. Offset the address to the unit space of the | ||
339 | * current processor before looking it up in the vmalloc | ||
340 | * space. Note that any possible cpu id can be used here, so | ||
341 | * there's no need to worry about preemption or cpu hotplug. | ||
342 | */ | ||
343 | addr += pcpu_unit_map[smp_processor_id()] * pcpu_unit_size; | ||
300 | return pcpu_get_page_chunk(vmalloc_to_page(addr)); | 344 | return pcpu_get_page_chunk(vmalloc_to_page(addr)); |
301 | } | 345 | } |
302 | 346 | ||
@@ -545,125 +589,327 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) | |||
545 | } | 589 | } |
546 | 590 | ||
547 | /** | 591 | /** |
548 | * pcpu_unmap - unmap pages out of a pcpu_chunk | 592 | * pcpu_get_pages_and_bitmap - get temp pages array and bitmap |
549 | * @chunk: chunk of interest | 593 | * @chunk: chunk of interest |
550 | * @page_start: page index of the first page to unmap | 594 | * @bitmapp: output parameter for bitmap |
551 | * @page_end: page index of the last page to unmap + 1 | 595 | * @may_alloc: may allocate the array |
552 | * @flush_tlb: whether to flush tlb or not | ||
553 | * | 596 | * |
554 | * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. | 597 | * Returns pointer to array of pointers to struct page and bitmap, |
555 | * If @flush is true, vcache is flushed before unmapping and tlb | 598 | * both of which can be indexed with pcpu_page_idx(). The returned |
556 | * after. | 599 | * array is cleared to zero and *@bitmapp is copied from |
600 | * @chunk->populated. Note that there is only one array and bitmap | ||
601 | * and access exclusion is the caller's responsibility. | ||
602 | * | ||
603 | * CONTEXT: | ||
604 | * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc. | ||
605 | * Otherwise, don't care. | ||
606 | * | ||
607 | * RETURNS: | ||
608 | * Pointer to temp pages array on success, NULL on failure. | ||
557 | */ | 609 | */ |
558 | static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end, | 610 | static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk, |
559 | bool flush_tlb) | 611 | unsigned long **bitmapp, |
612 | bool may_alloc) | ||
560 | { | 613 | { |
561 | unsigned int last = nr_cpu_ids - 1; | 614 | static struct page **pages; |
562 | unsigned int cpu; | 615 | static unsigned long *bitmap; |
616 | size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); | ||
617 | size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) * | ||
618 | sizeof(unsigned long); | ||
619 | |||
620 | if (!pages || !bitmap) { | ||
621 | if (may_alloc && !pages) | ||
622 | pages = pcpu_mem_alloc(pages_size); | ||
623 | if (may_alloc && !bitmap) | ||
624 | bitmap = pcpu_mem_alloc(bitmap_size); | ||
625 | if (!pages || !bitmap) | ||
626 | return NULL; | ||
627 | } | ||
563 | 628 | ||
564 | /* unmap must not be done on immutable chunk */ | 629 | memset(pages, 0, pages_size); |
565 | WARN_ON(chunk->immutable); | 630 | bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages); |
566 | 631 | ||
567 | /* | 632 | *bitmapp = bitmap; |
568 | * Each flushing trial can be very expensive, issue flush on | 633 | return pages; |
569 | * the whole region at once rather than doing it for each cpu. | 634 | } |
570 | * This could be an overkill but is more scalable. | ||
571 | */ | ||
572 | flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start), | ||
573 | pcpu_chunk_addr(chunk, last, page_end)); | ||
574 | 635 | ||
575 | for_each_possible_cpu(cpu) | 636 | /** |
576 | unmap_kernel_range_noflush( | 637 | * pcpu_free_pages - free pages which were allocated for @chunk |
577 | pcpu_chunk_addr(chunk, cpu, page_start), | 638 | * @chunk: chunk pages were allocated for |
578 | (page_end - page_start) << PAGE_SHIFT); | 639 | * @pages: array of pages to be freed, indexed by pcpu_page_idx() |
579 | 640 | * @populated: populated bitmap | |
580 | /* ditto as flush_cache_vunmap() */ | 641 | * @page_start: page index of the first page to be freed |
581 | if (flush_tlb) | 642 | * @page_end: page index of the last page to be freed + 1 |
582 | flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start), | 643 | * |
583 | pcpu_chunk_addr(chunk, last, page_end)); | 644 | * Free pages [@page_start and @page_end) in @pages for all units. |
645 | * The pages were allocated for @chunk. | ||
646 | */ | ||
647 | static void pcpu_free_pages(struct pcpu_chunk *chunk, | ||
648 | struct page **pages, unsigned long *populated, | ||
649 | int page_start, int page_end) | ||
650 | { | ||
651 | unsigned int cpu; | ||
652 | int i; | ||
653 | |||
654 | for_each_possible_cpu(cpu) { | ||
655 | for (i = page_start; i < page_end; i++) { | ||
656 | struct page *page = pages[pcpu_page_idx(cpu, i)]; | ||
657 | |||
658 | if (page) | ||
659 | __free_page(page); | ||
660 | } | ||
661 | } | ||
584 | } | 662 | } |
585 | 663 | ||
586 | /** | 664 | /** |
587 | * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk | 665 | * pcpu_alloc_pages - allocates pages for @chunk |
588 | * @chunk: chunk to depopulate | 666 | * @chunk: target chunk |
589 | * @off: offset to the area to depopulate | 667 | * @pages: array to put the allocated pages into, indexed by pcpu_page_idx() |
590 | * @size: size of the area to depopulate in bytes | 668 | * @populated: populated bitmap |
591 | * @flush: whether to flush cache and tlb or not | 669 | * @page_start: page index of the first page to be allocated |
592 | * | 670 | * @page_end: page index of the last page to be allocated + 1 |
593 | * For each cpu, depopulate and unmap pages [@page_start,@page_end) | 671 | * |
594 | * from @chunk. If @flush is true, vcache is flushed before unmapping | 672 | * Allocate pages [@page_start,@page_end) into @pages for all units. |
595 | * and tlb after. | 673 | * The allocation is for @chunk. Percpu core doesn't care about the |
596 | * | 674 | * content of @pages and will pass it verbatim to pcpu_map_pages(). |
597 | * CONTEXT: | ||
598 | * pcpu_alloc_mutex. | ||
599 | */ | 675 | */ |
600 | static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size, | 676 | static int pcpu_alloc_pages(struct pcpu_chunk *chunk, |
601 | bool flush) | 677 | struct page **pages, unsigned long *populated, |
678 | int page_start, int page_end) | ||
602 | { | 679 | { |
603 | int page_start = PFN_DOWN(off); | 680 | const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; |
604 | int page_end = PFN_UP(off + size); | ||
605 | int unmap_start = -1; | ||
606 | int uninitialized_var(unmap_end); | ||
607 | unsigned int cpu; | 681 | unsigned int cpu; |
608 | int i; | 682 | int i; |
609 | 683 | ||
610 | for (i = page_start; i < page_end; i++) { | 684 | for_each_possible_cpu(cpu) { |
611 | for_each_possible_cpu(cpu) { | 685 | for (i = page_start; i < page_end; i++) { |
612 | struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i); | 686 | struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; |
687 | |||
688 | *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); | ||
689 | if (!*pagep) { | ||
690 | pcpu_free_pages(chunk, pages, populated, | ||
691 | page_start, page_end); | ||
692 | return -ENOMEM; | ||
693 | } | ||
694 | } | ||
695 | } | ||
696 | return 0; | ||
697 | } | ||
613 | 698 | ||
614 | if (!*pagep) | 699 | /** |
615 | continue; | 700 | * pcpu_pre_unmap_flush - flush cache prior to unmapping |
701 | * @chunk: chunk the regions to be flushed belong to | ||
702 | * @page_start: page index of the first page to be flushed | ||
703 | * @page_end: page index of the last page to be flushed + 1 | ||
704 | * | ||
705 | * Pages in [@page_start,@page_end) of @chunk are about to be | ||
706 | * unmapped. Flush cache. As each flushing trial can be very | ||
707 | * expensive, issue flush on the whole region at once rather than | ||
708 | * doing it for each cpu. This could be an overkill but is more | ||
709 | * scalable. | ||
710 | */ | ||
711 | static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, | ||
712 | int page_start, int page_end) | ||
713 | { | ||
714 | flush_cache_vunmap( | ||
715 | pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), | ||
716 | pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); | ||
717 | } | ||
616 | 718 | ||
617 | __free_page(*pagep); | 719 | static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) |
720 | { | ||
721 | unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT); | ||
722 | } | ||
618 | 723 | ||
619 | /* | 724 | /** |
620 | * If it's partial depopulation, it might get | 725 | * pcpu_unmap_pages - unmap pages out of a pcpu_chunk |
621 | * populated or depopulated again. Mark the | 726 | * @chunk: chunk of interest |
622 | * page gone. | 727 | * @pages: pages array which can be used to pass information to free |
623 | */ | 728 | * @populated: populated bitmap |
624 | *pagep = NULL; | 729 | * @page_start: page index of the first page to unmap |
730 | * @page_end: page index of the last page to unmap + 1 | ||
731 | * | ||
732 | * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. | ||
733 | * Corresponding elements in @pages were cleared by the caller and can | ||
734 | * be used to carry information to pcpu_free_pages() which will be | ||
735 | * called after all unmaps are finished. The caller should call | ||
736 | * proper pre/post flush functions. | ||
737 | */ | ||
738 | static void pcpu_unmap_pages(struct pcpu_chunk *chunk, | ||
739 | struct page **pages, unsigned long *populated, | ||
740 | int page_start, int page_end) | ||
741 | { | ||
742 | unsigned int cpu; | ||
743 | int i; | ||
625 | 744 | ||
626 | unmap_start = unmap_start < 0 ? i : unmap_start; | 745 | for_each_possible_cpu(cpu) { |
627 | unmap_end = i + 1; | 746 | for (i = page_start; i < page_end; i++) { |
747 | struct page *page; | ||
748 | |||
749 | page = pcpu_chunk_page(chunk, cpu, i); | ||
750 | WARN_ON(!page); | ||
751 | pages[pcpu_page_idx(cpu, i)] = page; | ||
628 | } | 752 | } |
753 | __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start), | ||
754 | page_end - page_start); | ||
629 | } | 755 | } |
630 | 756 | ||
631 | if (unmap_start >= 0) | 757 | for (i = page_start; i < page_end; i++) |
632 | pcpu_unmap(chunk, unmap_start, unmap_end, flush); | 758 | __clear_bit(i, populated); |
633 | } | 759 | } |
634 | 760 | ||
635 | /** | 761 | /** |
636 | * pcpu_map - map pages into a pcpu_chunk | 762 | * pcpu_post_unmap_tlb_flush - flush TLB after unmapping |
763 | * @chunk: pcpu_chunk the regions to be flushed belong to | ||
764 | * @page_start: page index of the first page to be flushed | ||
765 | * @page_end: page index of the last page to be flushed + 1 | ||
766 | * | ||
767 | * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush | ||
768 | * TLB for the regions. This can be skipped if the area is to be | ||
769 | * returned to vmalloc as vmalloc will handle TLB flushing lazily. | ||
770 | * | ||
771 | * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once | ||
772 | * for the whole region. | ||
773 | */ | ||
774 | static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, | ||
775 | int page_start, int page_end) | ||
776 | { | ||
777 | flush_tlb_kernel_range( | ||
778 | pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), | ||
779 | pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); | ||
780 | } | ||
781 | |||
782 | static int __pcpu_map_pages(unsigned long addr, struct page **pages, | ||
783 | int nr_pages) | ||
784 | { | ||
785 | return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT, | ||
786 | PAGE_KERNEL, pages); | ||
787 | } | ||
788 | |||
789 | /** | ||
790 | * pcpu_map_pages - map pages into a pcpu_chunk | ||
637 | * @chunk: chunk of interest | 791 | * @chunk: chunk of interest |
792 | * @pages: pages array containing pages to be mapped | ||
793 | * @populated: populated bitmap | ||
638 | * @page_start: page index of the first page to map | 794 | * @page_start: page index of the first page to map |
639 | * @page_end: page index of the last page to map + 1 | 795 | * @page_end: page index of the last page to map + 1 |
640 | * | 796 | * |
641 | * For each cpu, map pages [@page_start,@page_end) into @chunk. | 797 | * For each cpu, map pages [@page_start,@page_end) into @chunk. The |
642 | * vcache is flushed afterwards. | 798 | * caller is responsible for calling pcpu_post_map_flush() after all |
799 | * mappings are complete. | ||
800 | * | ||
801 | * This function is responsible for setting corresponding bits in | ||
802 | * @chunk->populated bitmap and whatever is necessary for reverse | ||
803 | * lookup (addr -> chunk). | ||
643 | */ | 804 | */ |
644 | static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end) | 805 | static int pcpu_map_pages(struct pcpu_chunk *chunk, |
806 | struct page **pages, unsigned long *populated, | ||
807 | int page_start, int page_end) | ||
645 | { | 808 | { |
646 | unsigned int last = nr_cpu_ids - 1; | 809 | unsigned int cpu, tcpu; |
647 | unsigned int cpu; | 810 | int i, err; |
648 | int err; | ||
649 | |||
650 | /* map must not be done on immutable chunk */ | ||
651 | WARN_ON(chunk->immutable); | ||
652 | 811 | ||
653 | for_each_possible_cpu(cpu) { | 812 | for_each_possible_cpu(cpu) { |
654 | err = map_kernel_range_noflush( | 813 | err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start), |
655 | pcpu_chunk_addr(chunk, cpu, page_start), | 814 | &pages[pcpu_page_idx(cpu, page_start)], |
656 | (page_end - page_start) << PAGE_SHIFT, | 815 | page_end - page_start); |
657 | PAGE_KERNEL, | ||
658 | pcpu_chunk_pagep(chunk, cpu, page_start)); | ||
659 | if (err < 0) | 816 | if (err < 0) |
660 | return err; | 817 | goto err; |
818 | } | ||
819 | |||
820 | /* mapping successful, link chunk and mark populated */ | ||
821 | for (i = page_start; i < page_end; i++) { | ||
822 | for_each_possible_cpu(cpu) | ||
823 | pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)], | ||
824 | chunk); | ||
825 | __set_bit(i, populated); | ||
661 | } | 826 | } |
662 | 827 | ||
663 | /* flush at once, please read comments in pcpu_unmap() */ | ||
664 | flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start), | ||
665 | pcpu_chunk_addr(chunk, last, page_end)); | ||
666 | return 0; | 828 | return 0; |
829 | |||
830 | err: | ||
831 | for_each_possible_cpu(tcpu) { | ||
832 | if (tcpu == cpu) | ||
833 | break; | ||
834 | __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), | ||
835 | page_end - page_start); | ||
836 | } | ||
837 | return err; | ||
838 | } | ||
839 | |||
840 | /** | ||
841 | * pcpu_post_map_flush - flush cache after mapping | ||
842 | * @chunk: pcpu_chunk the regions to be flushed belong to | ||
843 | * @page_start: page index of the first page to be flushed | ||
844 | * @page_end: page index of the last page to be flushed + 1 | ||
845 | * | ||
846 | * Pages [@page_start,@page_end) of @chunk have been mapped. Flush | ||
847 | * cache. | ||
848 | * | ||
849 | * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once | ||
850 | * for the whole region. | ||
851 | */ | ||
852 | static void pcpu_post_map_flush(struct pcpu_chunk *chunk, | ||
853 | int page_start, int page_end) | ||
854 | { | ||
855 | flush_cache_vmap( | ||
856 | pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), | ||
857 | pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); | ||
858 | } | ||
859 | |||
860 | /** | ||
861 | * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk | ||
862 | * @chunk: chunk to depopulate | ||
863 | * @off: offset to the area to depopulate | ||
864 | * @size: size of the area to depopulate in bytes | ||
865 | * @flush: whether to flush cache and tlb or not | ||
866 | * | ||
867 | * For each cpu, depopulate and unmap pages [@page_start,@page_end) | ||
868 | * from @chunk. If @flush is true, vcache is flushed before unmapping | ||
869 | * and tlb after. | ||
870 | * | ||
871 | * CONTEXT: | ||
872 | * pcpu_alloc_mutex. | ||
873 | */ | ||
874 | static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size) | ||
875 | { | ||
876 | int page_start = PFN_DOWN(off); | ||
877 | int page_end = PFN_UP(off + size); | ||
878 | struct page **pages; | ||
879 | unsigned long *populated; | ||
880 | int rs, re; | ||
881 | |||
882 | /* quick path, check whether it's empty already */ | ||
883 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { | ||
884 | if (rs == page_start && re == page_end) | ||
885 | return; | ||
886 | break; | ||
887 | } | ||
888 | |||
889 | /* immutable chunks can't be depopulated */ | ||
890 | WARN_ON(chunk->immutable); | ||
891 | |||
892 | /* | ||
893 | * If control reaches here, there must have been at least one | ||
894 | * successful population attempt so the temp pages array must | ||
895 | * be available now. | ||
896 | */ | ||
897 | pages = pcpu_get_pages_and_bitmap(chunk, &populated, false); | ||
898 | BUG_ON(!pages); | ||
899 | |||
900 | /* unmap and free */ | ||
901 | pcpu_pre_unmap_flush(chunk, page_start, page_end); | ||
902 | |||
903 | pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) | ||
904 | pcpu_unmap_pages(chunk, pages, populated, rs, re); | ||
905 | |||
906 | /* no need to flush tlb, vmalloc will handle it lazily */ | ||
907 | |||
908 | pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) | ||
909 | pcpu_free_pages(chunk, pages, populated, rs, re); | ||
910 | |||
911 | /* commit new bitmap */ | ||
912 | bitmap_copy(chunk->populated, populated, pcpu_unit_pages); | ||
667 | } | 913 | } |
668 | 914 | ||
669 | /** | 915 | /** |
@@ -680,50 +926,60 @@ static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end) | |||
680 | */ | 926 | */ |
681 | static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) | 927 | static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) |
682 | { | 928 | { |
683 | const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; | ||
684 | int page_start = PFN_DOWN(off); | 929 | int page_start = PFN_DOWN(off); |
685 | int page_end = PFN_UP(off + size); | 930 | int page_end = PFN_UP(off + size); |
686 | int map_start = -1; | 931 | int free_end = page_start, unmap_end = page_start; |
687 | int uninitialized_var(map_end); | 932 | struct page **pages; |
933 | unsigned long *populated; | ||
688 | unsigned int cpu; | 934 | unsigned int cpu; |
689 | int i; | 935 | int rs, re, rc; |
690 | 936 | ||
691 | for (i = page_start; i < page_end; i++) { | 937 | /* quick path, check whether all pages are already there */ |
692 | if (pcpu_chunk_page_occupied(chunk, i)) { | 938 | pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) { |
693 | if (map_start >= 0) { | 939 | if (rs == page_start && re == page_end) |
694 | if (pcpu_map(chunk, map_start, map_end)) | 940 | goto clear; |
695 | goto err; | 941 | break; |
696 | map_start = -1; | 942 | } |
697 | } | ||
698 | continue; | ||
699 | } | ||
700 | 943 | ||
701 | map_start = map_start < 0 ? i : map_start; | 944 | /* need to allocate and map pages, this chunk can't be immutable */ |
702 | map_end = i + 1; | 945 | WARN_ON(chunk->immutable); |
703 | 946 | ||
704 | for_each_possible_cpu(cpu) { | 947 | pages = pcpu_get_pages_and_bitmap(chunk, &populated, true); |
705 | struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i); | 948 | if (!pages) |
949 | return -ENOMEM; | ||
706 | 950 | ||
707 | *pagep = alloc_pages_node(cpu_to_node(cpu), | 951 | /* alloc and map */ |
708 | alloc_mask, 0); | 952 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { |
709 | if (!*pagep) | 953 | rc = pcpu_alloc_pages(chunk, pages, populated, rs, re); |
710 | goto err; | 954 | if (rc) |
711 | pcpu_set_page_chunk(*pagep, chunk); | 955 | goto err_free; |
712 | } | 956 | free_end = re; |
713 | } | 957 | } |
714 | 958 | ||
715 | if (map_start >= 0 && pcpu_map(chunk, map_start, map_end)) | 959 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { |
716 | goto err; | 960 | rc = pcpu_map_pages(chunk, pages, populated, rs, re); |
961 | if (rc) | ||
962 | goto err_unmap; | ||
963 | unmap_end = re; | ||
964 | } | ||
965 | pcpu_post_map_flush(chunk, page_start, page_end); | ||
717 | 966 | ||
967 | /* commit new bitmap */ | ||
968 | bitmap_copy(chunk->populated, populated, pcpu_unit_pages); | ||
969 | clear: | ||
718 | for_each_possible_cpu(cpu) | 970 | for_each_possible_cpu(cpu) |
719 | memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0, | 971 | memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); |
720 | size); | ||
721 | |||
722 | return 0; | 972 | return 0; |
723 | err: | 973 | |
724 | /* likely under heavy memory pressure, give memory back */ | 974 | err_unmap: |
725 | pcpu_depopulate_chunk(chunk, off, size, true); | 975 | pcpu_pre_unmap_flush(chunk, page_start, unmap_end); |
726 | return -ENOMEM; | 976 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end) |
977 | pcpu_unmap_pages(chunk, pages, populated, rs, re); | ||
978 | pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end); | ||
979 | err_free: | ||
980 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end) | ||
981 | pcpu_free_pages(chunk, pages, populated, rs, re); | ||
982 | return rc; | ||
727 | } | 983 | } |
728 | 984 | ||
729 | static void free_pcpu_chunk(struct pcpu_chunk *chunk) | 985 | static void free_pcpu_chunk(struct pcpu_chunk *chunk) |
@@ -747,7 +1003,6 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void) | |||
747 | chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); | 1003 | chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); |
748 | chunk->map_alloc = PCPU_DFL_MAP_ALLOC; | 1004 | chunk->map_alloc = PCPU_DFL_MAP_ALLOC; |
749 | chunk->map[chunk->map_used++] = pcpu_unit_size; | 1005 | chunk->map[chunk->map_used++] = pcpu_unit_size; |
750 | chunk->page = chunk->page_ar; | ||
751 | 1006 | ||
752 | chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC); | 1007 | chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC); |
753 | if (!chunk->vm) { | 1008 | if (!chunk->vm) { |
@@ -847,6 +1102,7 @@ area_found: | |||
847 | 1102 | ||
848 | mutex_unlock(&pcpu_alloc_mutex); | 1103 | mutex_unlock(&pcpu_alloc_mutex); |
849 | 1104 | ||
1105 | /* return address relative to unit0 */ | ||
850 | return __addr_to_pcpu_ptr(chunk->vm->addr + off); | 1106 | return __addr_to_pcpu_ptr(chunk->vm->addr + off); |
851 | 1107 | ||
852 | fail_unlock: | 1108 | fail_unlock: |
@@ -928,7 +1184,7 @@ static void pcpu_reclaim(struct work_struct *work) | |||
928 | mutex_unlock(&pcpu_alloc_mutex); | 1184 | mutex_unlock(&pcpu_alloc_mutex); |
929 | 1185 | ||
930 | list_for_each_entry_safe(chunk, next, &todo, list) { | 1186 | list_for_each_entry_safe(chunk, next, &todo, list) { |
931 | pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false); | 1187 | pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size); |
932 | free_pcpu_chunk(chunk); | 1188 | free_pcpu_chunk(chunk); |
933 | } | 1189 | } |
934 | } | 1190 | } |
@@ -976,26 +1232,16 @@ EXPORT_SYMBOL_GPL(free_percpu); | |||
976 | 1232 | ||
977 | /** | 1233 | /** |
978 | * pcpu_setup_first_chunk - initialize the first percpu chunk | 1234 | * pcpu_setup_first_chunk - initialize the first percpu chunk |
979 | * @get_page_fn: callback to fetch page pointer | ||
980 | * @static_size: the size of static percpu area in bytes | 1235 | * @static_size: the size of static percpu area in bytes |
981 | * @reserved_size: the size of reserved percpu area in bytes | 1236 | * @reserved_size: the size of reserved percpu area in bytes, 0 for none |
982 | * @dyn_size: free size for dynamic allocation in bytes, -1 for auto | 1237 | * @dyn_size: free size for dynamic allocation in bytes, -1 for auto |
983 | * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto | 1238 | * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE |
984 | * @base_addr: mapped address, NULL for auto | 1239 | * @base_addr: mapped address |
985 | * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary | 1240 | * @unit_map: cpu -> unit map, NULL for sequential mapping |
986 | * | 1241 | * |
987 | * Initialize the first percpu chunk which contains the kernel static | 1242 | * Initialize the first percpu chunk which contains the kernel static |
988 | * percpu area. This function is to be called from arch percpu area | 1243 | * percpu area. This function is to be called from arch percpu area |
989 | * setup path. The first two parameters are mandatory. The rest are | 1244 | * setup path. |
990 | * optional. | ||
991 | * | ||
992 | * @get_page_fn() should return pointer to percpu page given cpu | ||
993 | * number and page number. It should at least return enough pages to | ||
994 | * cover the static area. The returned pages for static area should | ||
995 | * have been initialized with valid data. If @unit_size is specified, | ||
996 | * it can also return pages after the static area. NULL return | ||
997 | * indicates end of pages for the cpu. Note that @get_page_fn() must | ||
998 | * return the same number of pages for all cpus. | ||
999 | * | 1245 | * |
1000 | * @reserved_size, if non-zero, specifies the amount of bytes to | 1246 | * @reserved_size, if non-zero, specifies the amount of bytes to |
1001 | * reserve after the static area in the first chunk. This reserves | 1247 | * reserve after the static area in the first chunk. This reserves |
@@ -1010,17 +1256,12 @@ EXPORT_SYMBOL_GPL(free_percpu); | |||
1010 | * non-negative value makes percpu leave alone the area beyond | 1256 | * non-negative value makes percpu leave alone the area beyond |
1011 | * @static_size + @reserved_size + @dyn_size. | 1257 | * @static_size + @reserved_size + @dyn_size. |
1012 | * | 1258 | * |
1013 | * @unit_size, if non-negative, specifies unit size and must be | 1259 | * @unit_size specifies unit size and must be aligned to PAGE_SIZE and |
1014 | * aligned to PAGE_SIZE and equal to or larger than @static_size + | 1260 | * equal to or larger than @static_size + @reserved_size + if |
1015 | * @reserved_size + if non-negative, @dyn_size. | 1261 | * non-negative, @dyn_size. |
1016 | * | ||
1017 | * Non-null @base_addr means that the caller already allocated virtual | ||
1018 | * region for the first chunk and mapped it. percpu must not mess | ||
1019 | * with the chunk. Note that @base_addr with 0 @unit_size or non-NULL | ||
1020 | * @populate_pte_fn doesn't make any sense. | ||
1021 | * | 1262 | * |
1022 | * @populate_pte_fn is used to populate the pagetable. NULL means the | 1263 | * The caller should have mapped the first chunk at @base_addr and |
1023 | * caller already populated the pagetable. | 1264 | * copied static data to each unit. |
1024 | * | 1265 | * |
1025 | * If the first chunk ends up with both reserved and dynamic areas, it | 1266 | * If the first chunk ends up with both reserved and dynamic areas, it |
1026 | * is served by two chunks - one to serve the core static and reserved | 1267 | * is served by two chunks - one to serve the core static and reserved |
@@ -1033,47 +1274,83 @@ EXPORT_SYMBOL_GPL(free_percpu); | |||
1033 | * The determined pcpu_unit_size which can be used to initialize | 1274 | * The determined pcpu_unit_size which can be used to initialize |
1034 | * percpu access. | 1275 | * percpu access. |
1035 | */ | 1276 | */ |
1036 | size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, | 1277 | size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size, |
1037 | size_t static_size, size_t reserved_size, | 1278 | ssize_t dyn_size, size_t unit_size, |
1038 | ssize_t dyn_size, ssize_t unit_size, | 1279 | void *base_addr, const int *unit_map) |
1039 | void *base_addr, | ||
1040 | pcpu_populate_pte_fn_t populate_pte_fn) | ||
1041 | { | 1280 | { |
1042 | static struct vm_struct first_vm; | 1281 | static struct vm_struct first_vm; |
1043 | static int smap[2], dmap[2]; | 1282 | static int smap[2], dmap[2]; |
1044 | size_t size_sum = static_size + reserved_size + | 1283 | size_t size_sum = static_size + reserved_size + |
1045 | (dyn_size >= 0 ? dyn_size : 0); | 1284 | (dyn_size >= 0 ? dyn_size : 0); |
1046 | struct pcpu_chunk *schunk, *dchunk = NULL; | 1285 | struct pcpu_chunk *schunk, *dchunk = NULL; |
1047 | unsigned int cpu; | 1286 | unsigned int cpu, tcpu; |
1048 | int nr_pages; | 1287 | int i; |
1049 | int err, i; | ||
1050 | 1288 | ||
1051 | /* santiy checks */ | 1289 | /* sanity checks */ |
1052 | BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || | 1290 | BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || |
1053 | ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); | 1291 | ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); |
1054 | BUG_ON(!static_size); | 1292 | BUG_ON(!static_size); |
1055 | if (unit_size >= 0) { | 1293 | BUG_ON(!base_addr); |
1056 | BUG_ON(unit_size < size_sum); | 1294 | BUG_ON(unit_size < size_sum); |
1057 | BUG_ON(unit_size & ~PAGE_MASK); | 1295 | BUG_ON(unit_size & ~PAGE_MASK); |
1058 | BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE); | 1296 | BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE); |
1059 | } else | 1297 | |
1060 | BUG_ON(base_addr); | 1298 | /* determine number of units and verify and initialize pcpu_unit_map */ |
1061 | BUG_ON(base_addr && populate_pte_fn); | 1299 | if (unit_map) { |
1062 | 1300 | int first_unit = INT_MAX, last_unit = INT_MIN; | |
1063 | if (unit_size >= 0) | 1301 | |
1064 | pcpu_unit_pages = unit_size >> PAGE_SHIFT; | 1302 | for_each_possible_cpu(cpu) { |
1065 | else | 1303 | int unit = unit_map[cpu]; |
1066 | pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT, | 1304 | |
1067 | PFN_UP(size_sum)); | 1305 | BUG_ON(unit < 0); |
1306 | for_each_possible_cpu(tcpu) { | ||
1307 | if (tcpu == cpu) | ||
1308 | break; | ||
1309 | /* the mapping should be one-to-one */ | ||
1310 | BUG_ON(unit_map[tcpu] == unit); | ||
1311 | } | ||
1312 | |||
1313 | if (unit < first_unit) { | ||
1314 | pcpu_first_unit_cpu = cpu; | ||
1315 | first_unit = unit; | ||
1316 | } | ||
1317 | if (unit > last_unit) { | ||
1318 | pcpu_last_unit_cpu = cpu; | ||
1319 | last_unit = unit; | ||
1320 | } | ||
1321 | } | ||
1322 | pcpu_nr_units = last_unit + 1; | ||
1323 | pcpu_unit_map = unit_map; | ||
1324 | } else { | ||
1325 | int *identity_map; | ||
1326 | |||
1327 | /* #units == #cpus, identity mapped */ | ||
1328 | identity_map = alloc_bootmem(nr_cpu_ids * | ||
1329 | sizeof(identity_map[0])); | ||
1068 | 1330 | ||
1331 | for_each_possible_cpu(cpu) | ||
1332 | identity_map[cpu] = cpu; | ||
1333 | |||
1334 | pcpu_first_unit_cpu = 0; | ||
1335 | pcpu_last_unit_cpu = pcpu_nr_units - 1; | ||
1336 | pcpu_nr_units = nr_cpu_ids; | ||
1337 | pcpu_unit_map = identity_map; | ||
1338 | } | ||
1339 | |||
1340 | /* determine basic parameters */ | ||
1341 | pcpu_unit_pages = unit_size >> PAGE_SHIFT; | ||
1069 | pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; | 1342 | pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; |
1070 | pcpu_chunk_size = nr_cpu_ids * pcpu_unit_size; | 1343 | pcpu_chunk_size = pcpu_nr_units * pcpu_unit_size; |
1071 | pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) | 1344 | pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + |
1072 | + nr_cpu_ids * pcpu_unit_pages * sizeof(struct page *); | 1345 | BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); |
1073 | 1346 | ||
1074 | if (dyn_size < 0) | 1347 | if (dyn_size < 0) |
1075 | dyn_size = pcpu_unit_size - static_size - reserved_size; | 1348 | dyn_size = pcpu_unit_size - static_size - reserved_size; |
1076 | 1349 | ||
1350 | first_vm.flags = VM_ALLOC; | ||
1351 | first_vm.size = pcpu_chunk_size; | ||
1352 | first_vm.addr = base_addr; | ||
1353 | |||
1077 | /* | 1354 | /* |
1078 | * Allocate chunk slots. The additional last slot is for | 1355 | * Allocate chunk slots. The additional last slot is for |
1079 | * empty chunks. | 1356 | * empty chunks. |
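For a caller-supplied unit_map, the setup code above insists the cpu -> unit mapping is one-to-one, rejects negative units, and records which cpus own the lowest and highest units (pcpu_first_unit_cpu/pcpu_last_unit_cpu). A standalone userspace sketch of that validation with an arbitrary example map:

#include <stdio.h>
#include <limits.h>

static const int unit_map[] = { 0, 2, 3, 5 };	/* example cpu -> unit map */
#define NR_CPUS ((int)(sizeof(unit_map) / sizeof(unit_map[0])))

int main(void)
{
	int first_unit = INT_MAX, last_unit = INT_MIN;
	int first_cpu = -1, last_cpu = -1;
	int cpu, tcpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int unit = unit_map[cpu];

		if (unit < 0)
			return 1;		/* like BUG_ON(unit < 0) */
		for (tcpu = 0; tcpu < cpu; tcpu++)
			if (unit_map[tcpu] == unit)
				return 1;	/* mapping must be one-to-one */

		if (unit < first_unit) {
			first_unit = unit;
			first_cpu = cpu;
		}
		if (unit > last_unit) {
			last_unit = unit;
			last_cpu = cpu;
		}
	}

	/* number of units is last_unit + 1; units in between may simply be unused */
	printf("units: %d, first unit on cpu%d, last unit on cpu%d\n",
	       last_unit + 1, first_cpu, last_cpu);
	return 0;
}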
@@ -1095,7 +1372,8 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, | |||
1095 | schunk->vm = &first_vm; | 1372 | schunk->vm = &first_vm; |
1096 | schunk->map = smap; | 1373 | schunk->map = smap; |
1097 | schunk->map_alloc = ARRAY_SIZE(smap); | 1374 | schunk->map_alloc = ARRAY_SIZE(smap); |
1098 | schunk->page = schunk->page_ar; | 1375 | schunk->immutable = true; |
1376 | bitmap_fill(schunk->populated, pcpu_unit_pages); | ||
1099 | 1377 | ||
1100 | if (reserved_size) { | 1378 | if (reserved_size) { |
1101 | schunk->free_size = reserved_size; | 1379 | schunk->free_size = reserved_size; |
@@ -1113,93 +1391,39 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, | |||
1113 | 1391 | ||
1114 | /* init dynamic chunk if necessary */ | 1392 | /* init dynamic chunk if necessary */ |
1115 | if (dyn_size) { | 1393 | if (dyn_size) { |
1116 | dchunk = alloc_bootmem(sizeof(struct pcpu_chunk)); | 1394 | dchunk = alloc_bootmem(pcpu_chunk_struct_size); |
1117 | INIT_LIST_HEAD(&dchunk->list); | 1395 | INIT_LIST_HEAD(&dchunk->list); |
1118 | dchunk->vm = &first_vm; | 1396 | dchunk->vm = &first_vm; |
1119 | dchunk->map = dmap; | 1397 | dchunk->map = dmap; |
1120 | dchunk->map_alloc = ARRAY_SIZE(dmap); | 1398 | dchunk->map_alloc = ARRAY_SIZE(dmap); |
1121 | dchunk->page = schunk->page_ar; /* share page map with schunk */ | 1399 | dchunk->immutable = true; |
1400 | bitmap_fill(dchunk->populated, pcpu_unit_pages); | ||
1122 | 1401 | ||
1123 | dchunk->contig_hint = dchunk->free_size = dyn_size; | 1402 | dchunk->contig_hint = dchunk->free_size = dyn_size; |
1124 | dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit; | 1403 | dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit; |
1125 | dchunk->map[dchunk->map_used++] = dchunk->free_size; | 1404 | dchunk->map[dchunk->map_used++] = dchunk->free_size; |
1126 | } | 1405 | } |
1127 | 1406 | ||
1128 | /* allocate vm address */ | ||
1129 | first_vm.flags = VM_ALLOC; | ||
1130 | first_vm.size = pcpu_chunk_size; | ||
1131 | |||
1132 | if (!base_addr) | ||
1133 | vm_area_register_early(&first_vm, PAGE_SIZE); | ||
1134 | else { | ||
1135 | /* | ||
1136 | * Pages already mapped. No need to remap into | ||
1137 | * vmalloc area. In this case the first chunks can't | ||
1138 | * be mapped or unmapped by percpu and are marked | ||
1139 | * immutable. | ||
1140 | */ | ||
1141 | first_vm.addr = base_addr; | ||
1142 | schunk->immutable = true; | ||
1143 | if (dchunk) | ||
1144 | dchunk->immutable = true; | ||
1145 | } | ||
1146 | |||
1147 | /* assign pages */ | ||
1148 | nr_pages = -1; | ||
1149 | for_each_possible_cpu(cpu) { | ||
1150 | for (i = 0; i < pcpu_unit_pages; i++) { | ||
1151 | struct page *page = get_page_fn(cpu, i); | ||
1152 | |||
1153 | if (!page) | ||
1154 | break; | ||
1155 | *pcpu_chunk_pagep(schunk, cpu, i) = page; | ||
1156 | } | ||
1157 | |||
1158 | BUG_ON(i < PFN_UP(static_size)); | ||
1159 | |||
1160 | if (nr_pages < 0) | ||
1161 | nr_pages = i; | ||
1162 | else | ||
1163 | BUG_ON(nr_pages != i); | ||
1164 | } | ||
1165 | |||
1166 | /* map them */ | ||
1167 | if (populate_pte_fn) { | ||
1168 | for_each_possible_cpu(cpu) | ||
1169 | for (i = 0; i < nr_pages; i++) | ||
1170 | populate_pte_fn(pcpu_chunk_addr(schunk, | ||
1171 | cpu, i)); | ||
1172 | |||
1173 | err = pcpu_map(schunk, 0, nr_pages); | ||
1174 | if (err) | ||
1175 | panic("failed to setup static percpu area, err=%d\n", | ||
1176 | err); | ||
1177 | } | ||
1178 | |||
1179 | /* link the first chunk in */ | 1407 | /* link the first chunk in */ |
1180 | pcpu_first_chunk = dchunk ?: schunk; | 1408 | pcpu_first_chunk = dchunk ?: schunk; |
1181 | pcpu_chunk_relocate(pcpu_first_chunk, -1); | 1409 | pcpu_chunk_relocate(pcpu_first_chunk, -1); |
1182 | 1410 | ||
1183 | /* we're done */ | 1411 | /* we're done */ |
1184 | pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0); | 1412 | pcpu_base_addr = schunk->vm->addr; |
1185 | return pcpu_unit_size; | 1413 | return pcpu_unit_size; |
1186 | } | 1414 | } |
1187 | 1415 | ||
1188 | /* | 1416 | static size_t pcpu_calc_fc_sizes(size_t static_size, size_t reserved_size, |
1189 | * Embedding first chunk setup helper. | 1417 | ssize_t *dyn_sizep) |
1190 | */ | ||
1191 | static void *pcpue_ptr __initdata; | ||
1192 | static size_t pcpue_size __initdata; | ||
1193 | static size_t pcpue_unit_size __initdata; | ||
1194 | |||
1195 | static struct page * __init pcpue_get_page(unsigned int cpu, int pageno) | ||
1196 | { | 1418 | { |
1197 | size_t off = (size_t)pageno << PAGE_SHIFT; | 1419 | size_t size_sum; |
1198 | 1420 | ||
1199 | if (off >= pcpue_size) | 1421 | size_sum = PFN_ALIGN(static_size + reserved_size + |
1200 | return NULL; | 1422 | (*dyn_sizep >= 0 ? *dyn_sizep : 0)); |
1423 | if (*dyn_sizep != 0) | ||
1424 | *dyn_sizep = size_sum - static_size - reserved_size; | ||
1201 | 1425 | ||
1202 | return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off); | 1426 | return size_sum; |
1203 | } | 1427 | } |
1204 | 1428 | ||
1205 | /** | 1429 | /** |
@@ -1207,7 +1431,6 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno) | |||
1207 | * @static_size: the size of static percpu area in bytes | 1431 | * @static_size: the size of static percpu area in bytes |
1208 | * @reserved_size: the size of reserved percpu area in bytes | 1432 | * @reserved_size: the size of reserved percpu area in bytes |
1209 | * @dyn_size: free size for dynamic allocation in bytes, -1 for auto | 1433 | * @dyn_size: free size for dynamic allocation in bytes, -1 for auto |
1210 | * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto | ||
1211 | * | 1434 | * |
1212 | * This is a helper to ease setting up embedded first percpu chunk and | 1435 | * This is a helper to ease setting up embedded first percpu chunk and |
1213 | * can be called where pcpu_setup_first_chunk() is expected. | 1436 | * can be called where pcpu_setup_first_chunk() is expected. |
@@ -1219,9 +1442,9 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno) | |||
1219 | * page size. | 1442 | * page size. |
1220 | * | 1443 | * |
1221 | * When @dyn_size is positive, dynamic area might be larger than | 1444 | * When @dyn_size is positive, dynamic area might be larger than |
1222 | * specified to fill page alignment. Also, when @dyn_size is auto, | 1445 | * specified to fill page alignment. When @dyn_size is auto, |
1223 | * @dyn_size does not fill the whole first chunk but only what's | 1446 | * @dyn_size is just big enough to fill page alignment after static |
1224 | * necessary for page alignment after static and reserved areas. | 1447 | * and reserved areas. |
1225 | * | 1448 | * |
1226 | * If the needed size is smaller than the minimum or specified unit | 1449 | * If the needed size is smaller than the minimum or specified unit |
1227 | * size, the leftover is returned to the bootmem allocator. | 1450 | * size, the leftover is returned to the bootmem allocator. |
@@ -1231,28 +1454,21 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno) | |||
1231 | * percpu access on success, -errno on failure. | 1454 | * percpu access on success, -errno on failure. |
1232 | */ | 1455 | */ |
1233 | ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, | 1456 | ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, |
1234 | ssize_t dyn_size, ssize_t unit_size) | 1457 | ssize_t dyn_size) |
1235 | { | 1458 | { |
1236 | size_t chunk_size; | 1459 | size_t size_sum, unit_size, chunk_size; |
1460 | void *base; | ||
1237 | unsigned int cpu; | 1461 | unsigned int cpu; |
1238 | 1462 | ||
1239 | /* determine parameters and allocate */ | 1463 | /* determine parameters and allocate */ |
1240 | pcpue_size = PFN_ALIGN(static_size + reserved_size + | 1464 | size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size); |
1241 | (dyn_size >= 0 ? dyn_size : 0)); | 1465 | |
1242 | if (dyn_size != 0) | 1466 | unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); |
1243 | dyn_size = pcpue_size - static_size - reserved_size; | 1467 | chunk_size = unit_size * nr_cpu_ids; |
1244 | 1468 | ||
1245 | if (unit_size >= 0) { | 1469 | base = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE, |
1246 | BUG_ON(unit_size < pcpue_size); | 1470 | __pa(MAX_DMA_ADDRESS)); |
1247 | pcpue_unit_size = unit_size; | 1471 | if (!base) { |
1248 | } else | ||
1249 | pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE); | ||
1250 | |||
1251 | chunk_size = pcpue_unit_size * nr_cpu_ids; | ||
1252 | |||
1253 | pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE, | ||
1254 | __pa(MAX_DMA_ADDRESS)); | ||
1255 | if (!pcpue_ptr) { | ||
1256 | pr_warning("PERCPU: failed to allocate %zu bytes for " | 1472 | pr_warning("PERCPU: failed to allocate %zu bytes for " |
1257 | "embedding\n", chunk_size); | 1473 | "embedding\n", chunk_size); |
1258 | return -ENOMEM; | 1474 | return -ENOMEM; |
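The sizing in pcpu_embed_first_chunk() above works out as: size_sum is the page-aligned static + reserved + dynamic total, unit_size is that or PCPU_MIN_UNIT_SIZE, whichever is larger, one bootmem block of unit_size * nr_cpu_ids is allocated, and each possible cpu hands back its unused tail (unit_size - size_sum). A small worked example of that arithmetic in plain C; all sizes are invented and 32k is only an assumed stand-in for PCPU_MIN_UNIT_SIZE:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PFN_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define MIN_UNIT_SIZE	(32UL << 10)	/* assumed value for PCPU_MIN_UNIT_SIZE */

int main(void)
{
	unsigned long static_size = 12288;	/* example numbers only */
	unsigned long reserved_size = 4096;
	unsigned long dyn_size = 8192;
	unsigned long nr_cpu_ids = 16;

	unsigned long size_sum = PFN_ALIGN(static_size + reserved_size + dyn_size);
	unsigned long unit_size = size_sum > MIN_UNIT_SIZE ? size_sum : MIN_UNIT_SIZE;
	unsigned long chunk_size = unit_size * nr_cpu_ids;

	printf("size_sum   %lu\n", size_sum);
	printf("unit_size  %lu\n", unit_size);
	printf("chunk_size %lu (one bootmem allocation)\n", chunk_size);
	printf("returned per possible cpu: %lu\n", unit_size - size_sum);
	return 0;
}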
@@ -1260,21 +1476,543 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, | |||
1260 | 1476 | ||
1261 | /* return the leftover and copy */ | 1477 | /* return the leftover and copy */ |
1262 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) { | 1478 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) { |
1263 | void *ptr = pcpue_ptr + cpu * pcpue_unit_size; | 1479 | void *ptr = base + cpu * unit_size; |
1264 | 1480 | ||
1265 | if (cpu_possible(cpu)) { | 1481 | if (cpu_possible(cpu)) { |
1266 | free_bootmem(__pa(ptr + pcpue_size), | 1482 | free_bootmem(__pa(ptr + size_sum), |
1267 | pcpue_unit_size - pcpue_size); | 1483 | unit_size - size_sum); |
1268 | memcpy(ptr, __per_cpu_load, static_size); | 1484 | memcpy(ptr, __per_cpu_load, static_size); |
1269 | } else | 1485 | } else |
1270 | free_bootmem(__pa(ptr), pcpue_unit_size); | 1486 | free_bootmem(__pa(ptr), unit_size); |
1271 | } | 1487 | } |
1272 | 1488 | ||
1273 | /* we're ready, commit */ | 1489 | /* we're ready, commit */ |
1274 | pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", | 1490 | pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", |
1275 | pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size); | 1491 | size_sum >> PAGE_SHIFT, base, static_size); |
1492 | |||
1493 | return pcpu_setup_first_chunk(static_size, reserved_size, dyn_size, | ||
1494 | unit_size, base, NULL); | ||
1495 | } | ||
1496 | |||
1497 | /** | ||
1498 | * pcpu_4k_first_chunk - map the first chunk using PAGE_SIZE pages | ||
1499 | * @static_size: the size of static percpu area in bytes | ||
1500 | * @reserved_size: the size of reserved percpu area in bytes | ||
1501 | * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE | ||
1502 | * @free_fn: function to free percpu page, always called with PAGE_SIZE | ||
1503 | * @populate_pte_fn: function to populate pte | ||
1504 | * | ||
1505 | * This is a helper to ease setting up embedded first percpu chunk and | ||
1506 | * can be called where pcpu_setup_first_chunk() is expected. | ||
1507 | * | ||
1508 | * This is the basic allocator. Static percpu area is allocated | ||
1509 | * page-by-page into vmalloc area. | ||
1510 | * | ||
1511 | * RETURNS: | ||
1512 | * The determined pcpu_unit_size which can be used to initialize | ||
1513 | * percpu access on success, -errno on failure. | ||
1514 | */ | ||
1515 | ssize_t __init pcpu_4k_first_chunk(size_t static_size, size_t reserved_size, | ||
1516 | pcpu_fc_alloc_fn_t alloc_fn, | ||
1517 | pcpu_fc_free_fn_t free_fn, | ||
1518 | pcpu_fc_populate_pte_fn_t populate_pte_fn) | ||
1519 | { | ||
1520 | static struct vm_struct vm; | ||
1521 | int unit_pages; | ||
1522 | size_t pages_size; | ||
1523 | struct page **pages; | ||
1524 | unsigned int cpu; | ||
1525 | int i, j; | ||
1526 | ssize_t ret; | ||
1527 | |||
1528 | unit_pages = PFN_UP(max_t(size_t, static_size + reserved_size, | ||
1529 | PCPU_MIN_UNIT_SIZE)); | ||
1530 | |||
1531 | /* unaligned allocations can't be freed, round up to page size */ | ||
1532 | pages_size = PFN_ALIGN(unit_pages * nr_cpu_ids * sizeof(pages[0])); | ||
1533 | pages = alloc_bootmem(pages_size); | ||
1534 | |||
1535 | /* allocate pages */ | ||
1536 | j = 0; | ||
1537 | for_each_possible_cpu(cpu) | ||
1538 | for (i = 0; i < unit_pages; i++) { | ||
1539 | void *ptr; | ||
1540 | |||
1541 | ptr = alloc_fn(cpu, PAGE_SIZE); | ||
1542 | if (!ptr) { | ||
1543 | pr_warning("PERCPU: failed to allocate " | ||
1544 | "4k page for cpu%u\n", cpu); | ||
1545 | goto enomem; | ||
1546 | } | ||
1547 | pages[j++] = virt_to_page(ptr); | ||
1548 | } | ||
1549 | |||
1550 | /* allocate vm area, map the pages and copy static data */ | ||
1551 | vm.flags = VM_ALLOC; | ||
1552 | vm.size = nr_cpu_ids * unit_pages << PAGE_SHIFT; | ||
1553 | vm_area_register_early(&vm, PAGE_SIZE); | ||
1554 | |||
1555 | for_each_possible_cpu(cpu) { | ||
1556 | unsigned long unit_addr = (unsigned long)vm.addr + | ||
1557 | (cpu * unit_pages << PAGE_SHIFT); | ||
1558 | |||
1559 | for (i = 0; i < unit_pages; i++) | ||
1560 | populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); | ||
1561 | |||
1562 | /* pte already populated, the following shouldn't fail */ | ||
1563 | ret = __pcpu_map_pages(unit_addr, &pages[cpu * unit_pages], | ||
1564 | unit_pages); | ||
1565 | if (ret < 0) | ||
1566 | panic("failed to map percpu area, err=%zd\n", ret); | ||
1567 | |||
1568 | /* | ||
1569 | * FIXME: Archs with virtual cache should flush local | ||
1570 | * cache for the linear mapping here - something | ||
1571 | * equivalent to flush_cache_vmap() on the local cpu. | ||
1572 | * flush_cache_vmap() can't be used as most supporting | ||
1573 | * data structures are not set up yet. | ||
1574 | */ | ||
1575 | |||
1576 | /* copy static data */ | ||
1577 | memcpy((void *)unit_addr, __per_cpu_load, static_size); | ||
1578 | } | ||
1579 | |||
1580 | /* we're ready, commit */ | ||
1581 | pr_info("PERCPU: %d 4k pages per cpu, static data %zu bytes\n", | ||
1582 | unit_pages, static_size); | ||
1583 | |||
1584 | ret = pcpu_setup_first_chunk(static_size, reserved_size, -1, | ||
1585 | unit_pages << PAGE_SHIFT, vm.addr, NULL); | ||
1586 | goto out_free_ar; | ||
1587 | |||
1588 | enomem: | ||
1589 | while (--j >= 0) | ||
1590 | free_fn(page_address(pages[j]), PAGE_SIZE); | ||
1591 | ret = -ENOMEM; | ||
1592 | out_free_ar: | ||
1593 | free_bootmem(__pa(pages), pages_size); | ||
1594 | return ret; | ||
1595 | } | ||
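
A minimal user-space sketch of the bookkeeping above, for readers who want to see the layout concretely: in this helper every possible CPU gets its own unit of unit_pages PAGE_SIZE pages, the flat pages[] array is indexed as pages[cpu * unit_pages + i], and each CPU's unit starts at vm.addr + cpu * unit_pages * PAGE_SIZE. The PCPU_MIN_UNIT_SIZE value, the sizes and the CPU count are assumptions chosen for illustration.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE		((size_t)4096)
#define PCPU_MIN_UNIT_SIZE	((size_t)(32 << 10))	/* assumed value for illustration */

static size_t pfn_up(size_t size)
{
	return (size + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	size_t static_size = (size_t)44 << 10;	/* assumed static percpu data */
	size_t reserved_size = (size_t)8 << 10;	/* assumed module reserve */
	size_t need = static_size + reserved_size;
	size_t unit_pages = pfn_up(need > PCPU_MIN_UNIT_SIZE ? need : PCPU_MIN_UNIT_SIZE);
	unsigned int nr_cpus = 2, cpu;		/* assumed two possible cpus */

	printf("unit_pages = %zu\n", unit_pages);
	for (cpu = 0; cpu < nr_cpus; cpu++)
		printf("cpu%u uses pages[%zu..%zu], unit_addr = vm.addr + %zuK\n",
		       cpu, cpu * unit_pages, (cpu + 1) * unit_pages - 1,
		       cpu * unit_pages * PAGE_SIZE >> 10);
	return 0;
}

For 44K of static data and an 8K reserve this prints unit_pages = 13, with cpu1's pages starting at pages[13] and its unit mapped 52K into the vm area, mirroring the unit_addr computation in the mapping loop above.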
1596 | |||
1597 | /* | ||
1598 | * Large page remapping first chunk setup helper | ||
1599 | */ | ||
1600 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
1601 | |||
1602 | /** | ||
1603 | * pcpu_lpage_build_unit_map - build unit_map for large page remapping | ||
1604 | * @static_size: the size of static percpu area in bytes | ||
1605 | * @reserved_size: the size of reserved percpu area in bytes | ||
1606 | * @dyn_sizep: in/out parameter for dynamic size, -1 for auto | ||
1607 | * @unit_sizep: out parameter for unit size | ||
1608 | * @unit_map: unit_map to be filled | ||
1609 | * @cpu_distance_fn: callback to determine distance between cpus | ||
1610 | * | ||
1611 | * This function builds the cpu -> unit map and determines other parameters | ||
1612 | * considering needed percpu size, large page size and distances | ||
1613 | * between CPUs in NUMA. | ||
1614 | * | ||
1615 | * CPUs which are LOCAL_DISTANCE apart both ways are grouped together and | ||
1616 | * may share units in the same large page. The returned configuration | ||
1617 | * is guaranteed to have CPUs on different nodes on different large | ||
1618 | * pages and >=75% usage of allocated virtual address space. | ||
1619 | * | ||
1620 | * RETURNS: | ||
1621 | * On success, fills in @unit_map, sets *@dyn_sizep, *@unit_sizep and | ||
1622 | * returns the number of units to be allocated. -errno on failure. | ||
1623 | */ | ||
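
A tiny user-space sketch of the cpu -> unit assignment this function performs at its tail. The proximity grouping (two NUMA nodes holding cpus {0,1,2} and {3,4,5}) and best_upa = 4 are assumed values, chosen to match the worked example after the function body below.

#include <stdio.h>

int main(void)
{
	int group_map[6] = { 0, 0, 0, 1, 1, 1 };	/* cpu -> proximity group (assumed) */
	int best_upa = 4;				/* units per large page (assumed) */
	int unit_map[6], unit = 0, group, cpu;

	for (group = 0; group < 2; group++) {
		for (cpu = 0; cpu < 6; cpu++)
			if (group_map[cpu] == group)
				unit_map[cpu] = unit++;
		/* keep groups in separate large pages: round up to best_upa */
		unit = (unit + best_upa - 1) / best_upa * best_upa;
	}

	for (cpu = 0; cpu < 6; cpu++)
		printf("cpu%d -> unit %d\n", cpu, unit_map[cpu]);
	printf("nr_units = %d (units 3 and 7 stay unused)\n", unit);
	return 0;
}

The second group starts on a fresh large page, so units 3 and 7 become holes; 6 of the 8 returned units carry CPUs, which meets the >=75% usage guarantee stated above.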
1624 | int __init pcpu_lpage_build_unit_map(size_t static_size, size_t reserved_size, | ||
1625 | ssize_t *dyn_sizep, size_t *unit_sizep, | ||
1626 | size_t lpage_size, int *unit_map, | ||
1627 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn) | ||
1628 | { | ||
1629 | static int group_map[NR_CPUS] __initdata; | ||
1630 | static int group_cnt[NR_CPUS] __initdata; | ||
1631 | int group_cnt_max = 0; | ||
1632 | size_t size_sum, min_unit_size, alloc_size; | ||
1633 | int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ | ||
1634 | int last_allocs; | ||
1635 | unsigned int cpu, tcpu; | ||
1636 | int group, unit; | ||
1637 | |||
1638 | /* | ||
1639 | * Determine min_unit_size, alloc_size and max_upa such that | ||
1640 | * alloc_size is a multiple of lpage_size and is the smallest | ||
1641 | * which can accommodate 4k aligned segments that are equal to | ||
1642 | * or larger than min_unit_size. | ||
1643 | */ | ||
1644 | size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, dyn_sizep); | ||
1645 | min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); | ||
1646 | |||
1647 | alloc_size = roundup(min_unit_size, lpage_size); | ||
1648 | upa = alloc_size / min_unit_size; | ||
1649 | while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | ||
1650 | upa--; | ||
1651 | max_upa = upa; | ||
1652 | |||
1653 | /* group cpus according to their proximity */ | ||
1654 | for_each_possible_cpu(cpu) { | ||
1655 | group = 0; | ||
1656 | next_group: | ||
1657 | for_each_possible_cpu(tcpu) { | ||
1658 | if (cpu == tcpu) | ||
1659 | break; | ||
1660 | if (group_map[tcpu] == group && | ||
1661 | (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || | ||
1662 | cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { | ||
1663 | group++; | ||
1664 | goto next_group; | ||
1665 | } | ||
1666 | } | ||
1667 | group_map[cpu] = group; | ||
1668 | group_cnt[group]++; | ||
1669 | group_cnt_max = max(group_cnt_max, group_cnt[group]); | ||
1670 | } | ||
1671 | |||
1672 | /* | ||
1673 | * Expand unit size until address space usage goes over 75% | ||
1674 | * and then as much as possible without using more address | ||
1675 | * space. | ||
1676 | */ | ||
1677 | last_allocs = INT_MAX; | ||
1678 | for (upa = max_upa; upa; upa--) { | ||
1679 | int allocs = 0, wasted = 0; | ||
1680 | |||
1681 | if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | ||
1682 | continue; | ||
1683 | |||
1684 | for (group = 0; group_cnt[group]; group++) { | ||
1685 | int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); | ||
1686 | allocs += this_allocs; | ||
1687 | wasted += this_allocs * upa - group_cnt[group]; | ||
1688 | } | ||
1689 | |||
1690 | /* | ||
1691 | * Don't accept if wastage is over 25%. The | ||
1692 | * greater-than comparison ensures upa==1 always | ||
1693 | * passes the following check. | ||
1694 | */ | ||
1695 | if (wasted > num_possible_cpus() / 3) | ||
1696 | continue; | ||
1697 | |||
1698 | /* and then don't consume more memory */ | ||
1699 | if (allocs > last_allocs) | ||
1700 | break; | ||
1701 | last_allocs = allocs; | ||
1702 | best_upa = upa; | ||
1703 | } | ||
1704 | *unit_sizep = alloc_size / best_upa; | ||
1276 | 1705 | ||
1277 | return pcpu_setup_first_chunk(pcpue_get_page, static_size, | 1706 | /* assign units to cpus accordingly */ |
1278 | reserved_size, dyn_size, | 1707 | unit = 0; |
1279 | pcpue_unit_size, pcpue_ptr, NULL); | 1708 | for (group = 0; group_cnt[group]; group++) { |
1709 | for_each_possible_cpu(cpu) | ||
1710 | if (group_map[cpu] == group) | ||
1711 | unit_map[cpu] = unit++; | ||
1712 | unit = roundup(unit, best_upa); | ||
1713 | } | ||
1714 | |||
1715 | return unit; /* unit contains aligned number of units */ | ||
1716 | } | ||
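
Below is a small stand-alone C model of the upa selection loop above, with the kernel helpers replaced by local equivalents. The 2MB large page size, the 64K size_sum and the two groups of three CPUs are assumptions; for these inputs the loop settles on best_upa = 4, i.e. a 512K unit, with exactly 75% of the allocated address space in use.

#include <stdio.h>
#include <stddef.h>
#include <limits.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static size_t roundup_sz(size_t x, size_t to)
{
	return (x + to - 1) / to * to;
}

int main(void)
{
	size_t lpage_size = 2UL << 20;		/* assumed 2MB large pages */
	size_t min_unit_size = 64UL << 10;	/* assumed size_sum, >= PCPU_MIN_UNIT_SIZE */
	int group_cnt[] = { 3, 3 };		/* two NUMA groups of three CPUs (assumed) */
	int nr_groups = 2, nr_cpus = 6;
	size_t alloc_size = roundup_sz(min_unit_size, lpage_size);
	int upa = alloc_size / min_unit_size;
	int max_upa, best_upa = 1, last_allocs = INT_MAX;

	/* largest upa whose unit still divides alloc_size and stays page aligned */
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0, g;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (g = 0; g < nr_groups; g++) {
			int this_allocs = (group_cnt[g] + upa - 1) / upa;

			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[g];
		}

		if (wasted > nr_cpus / 3)	/* more than 25% of the space wasted */
			continue;
		if (allocs > last_allocs)	/* don't consume more memory */
			break;
		last_allocs = allocs;
		best_upa = upa;
	}

	printf("alloc_size=%zuK max_upa=%d best_upa=%d unit_size=%zuK\n",
	       alloc_size >> 10, max_upa, best_upa, (alloc_size / best_upa) >> 10);
	return 0;
}

It prints alloc_size=2048K max_upa=32 best_upa=4 unit_size=512K: upa values above 4 waste too much of each 2MB allocation for three-CPU groups, while upa=2 would double the number of large pages and is therefore rejected.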
1717 | |||
1718 | struct pcpul_ent { | ||
1719 | void *ptr; | ||
1720 | void *map_addr; | ||
1721 | }; | ||
1722 | |||
1723 | static size_t pcpul_size; | ||
1724 | static size_t pcpul_lpage_size; | ||
1725 | static int pcpul_nr_lpages; | ||
1726 | static struct pcpul_ent *pcpul_map; | ||
1727 | |||
1728 | static bool __init pcpul_unit_to_cpu(int unit, const int *unit_map, | ||
1729 | unsigned int *cpup) | ||
1730 | { | ||
1731 | unsigned int cpu; | ||
1732 | |||
1733 | for_each_possible_cpu(cpu) | ||
1734 | if (unit_map[cpu] == unit) { | ||
1735 | if (cpup) | ||
1736 | *cpup = cpu; | ||
1737 | return true; | ||
1738 | } | ||
1739 | |||
1740 | return false; | ||
1741 | } | ||
1742 | |||
1743 | static void __init pcpul_lpage_dump_cfg(const char *lvl, size_t static_size, | ||
1744 | size_t reserved_size, size_t dyn_size, | ||
1745 | size_t unit_size, size_t lpage_size, | ||
1746 | const int *unit_map, int nr_units) | ||
1747 | { | ||
1748 | int width = 1, v = nr_units; | ||
1749 | char empty_str[] = "--------"; | ||
1750 | int upl, lpl; /* units per lpage, lpage per line */ | ||
1751 | unsigned int cpu; | ||
1752 | int lpage, unit; | ||
1753 | |||
1754 | while (v /= 10) | ||
1755 | width++; | ||
1756 | empty_str[min_t(int, width, sizeof(empty_str) - 1)] = '\0'; | ||
1757 | |||
1758 | upl = max_t(int, lpage_size / unit_size, 1); | ||
1759 | lpl = rounddown_pow_of_two(max_t(int, 60 / (upl * (width + 1) + 2), 1)); | ||
1760 | |||
1761 | printk("%spcpu-lpage: sta/res/dyn=%zu/%zu/%zu unit=%zu lpage=%zu", lvl, | ||
1762 | static_size, reserved_size, dyn_size, unit_size, lpage_size); | ||
1763 | |||
1764 | for (lpage = 0, unit = 0; unit < nr_units; unit++) { | ||
1765 | if (!(unit % upl)) { | ||
1766 | if (!(lpage++ % lpl)) { | ||
1767 | printk("\n"); | ||
1768 | printk("%spcpu-lpage: ", lvl); | ||
1769 | } else | ||
1770 | printk("| "); | ||
1771 | } | ||
1772 | if (pcpul_unit_to_cpu(unit, unit_map, &cpu)) | ||
1773 | printk("%0*d ", width, cpu); | ||
1774 | else | ||
1775 | printk("%s ", empty_str); | ||
1776 | } | ||
1777 | printk("\n"); | ||
1778 | } | ||
1779 | |||
1780 | /** | ||
1781 | * pcpu_lpage_first_chunk - remap the first percpu chunk using large page | ||
1782 | * @static_size: the size of static percpu area in bytes | ||
1783 | * @reserved_size: the size of reserved percpu area in bytes | ||
1784 | * @dyn_size: free size for dynamic allocation in bytes | ||
1785 | * @unit_size: unit size in bytes | ||
1786 | * @lpage_size: the size of a large page | ||
1787 | * @unit_map: cpu -> unit mapping | ||
1788 | * @nr_units: the number of units | ||
1789 | * @alloc_fn: function to allocate percpu lpage, always called with lpage_size | ||
1790 | * @free_fn: function to free percpu memory, @size <= lpage_size | ||
1791 | * @map_fn: function to map percpu lpage, always called with lpage_size | ||
1792 | * | ||
1793 | * This allocator uses large pages to build and map the first chunk. | ||
1794 | * Unlike other helpers, the caller should always specify @dyn_size | ||
1795 | * and @unit_size. These parameters along with @unit_map and | ||
1796 | * @nr_units can be determined using pcpu_lpage_build_unit_map(). | ||
1797 | * This two-stage initialization allows arch code to evaluate the | ||
1798 | * parameters before committing to them. | ||
1799 | * | ||
1800 | * Large pages are allocated as directed by @unit_map and other | ||
1801 | * parameters and mapped to vmalloc space. Unused holes are returned | ||
1802 | * to the page allocator. Note that these holes end up being actively | ||
1803 | * mapped twice - once in the kernel linear mapping and once in the | ||
1804 | * vmalloc area for the first percpu chunk. Depending on the | ||
1805 | * architecture, this might cause problems when changing page attributes | ||
1806 | * of the returned area. These double-mapped areas can be detected | ||
1807 | * using pcpu_lpage_remapped(). | ||
1808 | * | ||
1809 | * RETURNS: | ||
1810 | * The determined pcpu_unit_size which can be used to initialize | ||
1811 | * percpu access on success, -errno on failure. | ||
1812 | */ | ||
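
A rough, non-buildable outline of how an arch's setup_per_cpu_areas() might drive this two-stage sequence. my_cpu_distance(), my_alloc_lpage(), my_free() and my_map() are hypothetical arch helpers matching the pcpu_fc_*_fn_t callback types, and the 2MB large page size is an assumption; only the pcpu_lpage_*() calls and their argument orders come from this file.

static int my_unit_map[NR_CPUS] __initdata;	/* hypothetical */

void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	size_t lpage_size = 2UL << 20;	/* assumed 2MB large pages */
	ssize_t dyn_size = -1;		/* let the allocator size the dynamic area */
	size_t unit_size;
	ssize_t unit;
	int nr_units;

	/* stage 1: compute the geometry without committing to anything */
	nr_units = pcpu_lpage_build_unit_map(static_size,
					     PERCPU_MODULE_RESERVE, &dyn_size,
					     &unit_size, lpage_size,
					     my_unit_map, my_cpu_distance);
	if (nr_units < 0)
		return;		/* fall back to the embed or 4k helpers */

	/* stage 2: commit - allocate, remap and hand over to the allocator */
	unit = pcpu_lpage_first_chunk(static_size, PERCPU_MODULE_RESERVE,
				      dyn_size, unit_size, lpage_size,
				      my_unit_map, nr_units, my_alloc_lpage,
				      my_free, my_map);
	/* on success, use unit to fill __per_cpu_offset[] as in the generic
	 * setup_per_cpu_areas() at the end of this file */
}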
1813 | ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size, | ||
1814 | size_t dyn_size, size_t unit_size, | ||
1815 | size_t lpage_size, const int *unit_map, | ||
1816 | int nr_units, | ||
1817 | pcpu_fc_alloc_fn_t alloc_fn, | ||
1818 | pcpu_fc_free_fn_t free_fn, | ||
1819 | pcpu_fc_map_fn_t map_fn) | ||
1820 | { | ||
1821 | static struct vm_struct vm; | ||
1822 | size_t chunk_size = unit_size * nr_units; | ||
1823 | size_t map_size; | ||
1824 | unsigned int cpu; | ||
1825 | ssize_t ret; | ||
1826 | int i, j, unit; | ||
1827 | |||
1828 | pcpul_lpage_dump_cfg(KERN_DEBUG, static_size, reserved_size, dyn_size, | ||
1829 | unit_size, lpage_size, unit_map, nr_units); | ||
1830 | |||
1831 | BUG_ON(chunk_size % lpage_size); | ||
1832 | |||
1833 | pcpul_size = static_size + reserved_size + dyn_size; | ||
1834 | pcpul_lpage_size = lpage_size; | ||
1835 | pcpul_nr_lpages = chunk_size / lpage_size; | ||
1836 | |||
1837 | /* allocate pointer array and alloc large pages */ | ||
1838 | map_size = pcpul_nr_lpages * sizeof(pcpul_map[0]); | ||
1839 | pcpul_map = alloc_bootmem(map_size); | ||
1840 | |||
1841 | /* allocate all pages */ | ||
1842 | for (i = 0; i < pcpul_nr_lpages; i++) { | ||
1843 | size_t offset = i * lpage_size; | ||
1844 | int first_unit = offset / unit_size; | ||
1845 | int last_unit = (offset + lpage_size - 1) / unit_size; | ||
1846 | void *ptr; | ||
1847 | |||
1848 | /* find out which cpu is mapped to this unit */ | ||
1849 | for (unit = first_unit; unit <= last_unit; unit++) | ||
1850 | if (pcpul_unit_to_cpu(unit, unit_map, &cpu)) | ||
1851 | goto found; | ||
1852 | continue; | ||
1853 | found: | ||
1854 | ptr = alloc_fn(cpu, lpage_size); | ||
1855 | if (!ptr) { | ||
1856 | pr_warning("PERCPU: failed to allocate large page " | ||
1857 | "for cpu%u\n", cpu); | ||
1858 | goto enomem; | ||
1859 | } | ||
1860 | |||
1861 | pcpul_map[i].ptr = ptr; | ||
1862 | } | ||
1863 | |||
1864 | /* return unused holes */ | ||
1865 | for (unit = 0; unit < nr_units; unit++) { | ||
1866 | size_t start = unit * unit_size; | ||
1867 | size_t end = start + unit_size; | ||
1868 | size_t off, next; | ||
1869 | |||
1870 | /* don't free used part of occupied unit */ | ||
1871 | if (pcpul_unit_to_cpu(unit, unit_map, NULL)) | ||
1872 | start += pcpul_size; | ||
1873 | |||
1874 | /* a unit can span more than one large page, punch the holes */ | ||
1875 | for (off = start; off < end; off = next) { | ||
1876 | void *ptr = pcpul_map[off / lpage_size].ptr; | ||
1877 | next = min(roundup(off + 1, lpage_size), end); | ||
1878 | if (ptr) | ||
1879 | free_fn(ptr + off % lpage_size, next - off); | ||
1880 | } | ||
1881 | } | ||
1882 | |||
1883 | /* allocate address, map and copy */ | ||
1884 | vm.flags = VM_ALLOC; | ||
1885 | vm.size = chunk_size; | ||
1886 | vm_area_register_early(&vm, unit_size); | ||
1887 | |||
1888 | for (i = 0; i < pcpul_nr_lpages; i++) { | ||
1889 | if (!pcpul_map[i].ptr) | ||
1890 | continue; | ||
1891 | pcpul_map[i].map_addr = vm.addr + i * lpage_size; | ||
1892 | map_fn(pcpul_map[i].ptr, lpage_size, pcpul_map[i].map_addr); | ||
1893 | } | ||
1894 | |||
1895 | for_each_possible_cpu(cpu) | ||
1896 | memcpy(vm.addr + unit_map[cpu] * unit_size, __per_cpu_load, | ||
1897 | static_size); | ||
1898 | |||
1899 | /* we're ready, commit */ | ||
1900 | pr_info("PERCPU: Remapped at %p with large pages, static data " | ||
1901 | "%zu bytes\n", vm.addr, static_size); | ||
1902 | |||
1903 | ret = pcpu_setup_first_chunk(static_size, reserved_size, dyn_size, | ||
1904 | unit_size, vm.addr, unit_map); | ||
1905 | |||
1906 | /* | ||
1907 | * Sort pcpul_map array for pcpu_lpage_remapped(). Unmapped | ||
1908 | * lpages are pushed to the end and trimmed. | ||
1909 | */ | ||
1910 | for (i = 0; i < pcpul_nr_lpages - 1; i++) | ||
1911 | for (j = i + 1; j < pcpul_nr_lpages; j++) { | ||
1912 | struct pcpul_ent tmp; | ||
1913 | |||
1914 | if (!pcpul_map[j].ptr) | ||
1915 | continue; | ||
1916 | if (pcpul_map[i].ptr && | ||
1917 | pcpul_map[i].ptr < pcpul_map[j].ptr) | ||
1918 | continue; | ||
1919 | |||
1920 | tmp = pcpul_map[i]; | ||
1921 | pcpul_map[i] = pcpul_map[j]; | ||
1922 | pcpul_map[j] = tmp; | ||
1923 | } | ||
1924 | |||
1925 | while (pcpul_nr_lpages && !pcpul_map[pcpul_nr_lpages - 1].ptr) | ||
1926 | pcpul_nr_lpages--; | ||
1927 | |||
1928 | return ret; | ||
1929 | |||
1930 | enomem: | ||
1931 | for (i = 0; i < pcpul_nr_lpages; i++) | ||
1932 | if (pcpul_map[i].ptr) | ||
1933 | free_fn(pcpul_map[i].ptr, lpage_size); | ||
1934 | free_bootmem(__pa(pcpul_map), map_size); | ||
1935 | return -ENOMEM; | ||
1936 | } | ||
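
A user-space model of the "return unused holes" arithmetic above, reusing the hypothetical geometry from the earlier sketches: 512K units, 2MB large pages, units 3 and 7 unused, and a size_sum (static + reserved + dyn) of 64K. Both large pages are populated in that layout, so the NULL check on pcpul_map[].ptr is omitted here.

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static unsigned long roundup_ul(unsigned long x, unsigned long to)
{
	return (x + to - 1) / to * to;
}

int main(void)
{
	unsigned long unit_size = 512UL << 10;		/* from the earlier sketch */
	unsigned long lpage_size = 2UL << 20;		/* assumed 2MB large pages */
	unsigned long pcpul_size = 64UL << 10;		/* assumed static + reserved + dyn */
	int unit_used[8] = { 1, 1, 1, 0, 1, 1, 1, 0 };	/* units 3 and 7 are holes */
	int unit;

	for (unit = 0; unit < 8; unit++) {
		unsigned long start = unit * unit_size;
		unsigned long end = start + unit_size;
		unsigned long off, next;

		if (unit_used[unit])		/* don't free the used head of the unit */
			start += pcpul_size;

		for (off = start; off < end; off = next) {
			next = min_ul(roundup_ul(off + 1, lpage_size), end);
			printf("unit %d: return [%luK, %luK) to the page allocator\n",
			       unit, off >> 10, next >> 10);
		}
	}
	return 0;
}

Each occupied unit keeps its first 64K and gives the remaining 448K back, while the two hole units return their full 512K; the lpage-boundary stepping only matters when a unit (or its free tail) crosses a large page boundary.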
1937 | |||
1938 | /** | ||
1939 | * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area | ||
1940 | * @kaddr: the kernel address in question | ||
1941 | * | ||
1942 | * Determine whether @kaddr falls in the pcpul recycled area. This is | ||
1943 | * used by pageattr to detect VM aliases and break up the pcpu large | ||
1944 | * page mapping such that the same physical page is not mapped under | ||
1945 | * different attributes. | ||
1946 | * | ||
1947 | * The recycled area is always at the tail of a partially used large | ||
1948 | * page. | ||
1949 | * | ||
1950 | * RETURNS: | ||
1951 | * Address of corresponding remapped pcpu address if match is found; | ||
1952 | * otherwise, NULL. | ||
1953 | */ | ||
1954 | void *pcpu_lpage_remapped(void *kaddr) | ||
1955 | { | ||
1956 | unsigned long lpage_mask = pcpul_lpage_size - 1; | ||
1957 | void *lpage_addr = (void *)((unsigned long)kaddr & ~lpage_mask); | ||
1958 | unsigned long offset = (unsigned long)kaddr & lpage_mask; | ||
1959 | int left = 0, right = pcpul_nr_lpages - 1; | ||
1960 | int pos; | ||
1961 | |||
1962 | /* pcpul in use at all? */ | ||
1963 | if (!pcpul_map) | ||
1964 | return NULL; | ||
1965 | |||
1966 | /* okay, perform binary search */ | ||
1967 | while (left <= right) { | ||
1968 | pos = (left + right) / 2; | ||
1969 | |||
1970 | if (pcpul_map[pos].ptr < lpage_addr) | ||
1971 | left = pos + 1; | ||
1972 | else if (pcpul_map[pos].ptr > lpage_addr) | ||
1973 | right = pos - 1; | ||
1974 | else | ||
1975 | return pcpul_map[pos].map_addr + offset; | ||
1976 | } | ||
1977 | |||
1978 | return NULL; | ||
1979 | } | ||
1980 | #endif | ||
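
A stand-alone model of the lookup performed by pcpu_lpage_remapped(): because pcpul_map is kept sorted by the large pages' linear-mapping addresses, a binary search on the lpage-aligned address recovers the matching vmalloc alias. All addresses below are made up.

#include <stdio.h>

struct ent {
	unsigned long ptr;		/* linear-mapping address of the lpage */
	unsigned long map_addr;		/* where it is mapped in the percpu chunk */
};

int main(void)
{
	unsigned long lpage_size = 2UL << 20;		/* assumed 2MB */
	struct ent map[] = {				/* hypothetical, sorted by ptr */
		{ 0x40000000UL, 0xf0000000UL },
		{ 0x40400000UL, 0xf0200000UL },
		{ 0x40a00000UL, 0xf0400000UL },
	};
	int left = 0, right = 2, pos;
	unsigned long kaddr = 0x40412345UL;
	unsigned long lpage_addr = kaddr & ~(lpage_size - 1);
	unsigned long offset = kaddr & (lpage_size - 1);

	while (left <= right) {
		pos = (left + right) / 2;

		if (map[pos].ptr < lpage_addr)
			left = pos + 1;
		else if (map[pos].ptr > lpage_addr)
			right = pos - 1;
		else {
			printf("%#lx is an alias of %#lx\n",
			       kaddr, map[pos].map_addr + offset);
			return 0;
		}
	}
	printf("%#lx is not in a recycled lpage area\n", kaddr);
	return 0;
}

For the made-up input this reports 0x40412345 as an alias of 0xf0212345, which is what pageattr needs in order to change both mappings consistently.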
1981 | |||
1982 | /* | ||
1983 | * Generic percpu area setup. | ||
1984 | * | ||
1985 | * The embedding helper is used because its behavior closely resembles | ||
1986 | * the original non-dynamic generic percpu area setup. This is | ||
1987 | * important because many archs have addressing restrictions and might | ||
1988 | * fail if the percpu area is located far away from the previous | ||
1989 | * location. As an added bonus, in non-NUMA cases, embedding is | ||
1990 | * generally a good idea TLB-wise because the percpu area can piggyback | ||
1991 | * on the physical linear memory mapping which uses large page | ||
1992 | * mappings on applicable archs. | ||
1993 | */ | ||
1994 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | ||
1995 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; | ||
1996 | EXPORT_SYMBOL(__per_cpu_offset); | ||
1997 | |||
1998 | void __init setup_per_cpu_areas(void) | ||
1999 | { | ||
2000 | size_t static_size = __per_cpu_end - __per_cpu_start; | ||
2001 | ssize_t unit_size; | ||
2002 | unsigned long delta; | ||
2003 | unsigned int cpu; | ||
2004 | |||
2005 | /* | ||
2006 | * Always reserve area for module percpu variables. That's | ||
2007 | * what the legacy allocator did. | ||
2008 | */ | ||
2009 | unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE, | ||
2010 | PERCPU_DYNAMIC_RESERVE); | ||
2011 | if (unit_size < 0) | ||
2012 | panic("Failed to initialize percpu areas."); | ||
2013 | |||
2014 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; | ||
2015 | for_each_possible_cpu(cpu) | ||
2016 | __per_cpu_offset[cpu] = delta + cpu * unit_size; | ||
1280 | } | 2017 | } |
2018 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ | ||
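
To make the offset arithmetic concrete, here is a small user-space sketch with made-up addresses: a static percpu variable linked at &var inside [__per_cpu_start, __per_cpu_end) is reached for cpu N at &var + __per_cpu_offset[N], i.e. &var + delta + N * unit_size, exactly as set up in the loop above.

#include <stdio.h>

int main(void)
{
	unsigned long per_cpu_start  = 0xc1000000UL;	/* assumed __per_cpu_start */
	unsigned long pcpu_base_addr = 0xf8000000UL;	/* assumed first chunk base */
	unsigned long unit_size      = 0x20000UL;	/* assumed 128K unit */
	unsigned long var            = 0xc1000040UL;	/* some static percpu variable */
	unsigned long delta = pcpu_base_addr - per_cpu_start;
	unsigned int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu%u copy of var lives at %#lx\n",
		       cpu, var + delta + cpu * unit_size);
	return 0;
}

With these numbers cpu0's copy sits at 0xf8000040 and each further cpu's copy is one unit_size (128K) higher, which is the layout the embedding helper produces when units map linearly to cpus.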