Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig           |   5
-rw-r--r--  mm/backing-dev.c     |   8
-rw-r--r--  mm/bootmem.c         |  24
-rw-r--r--  mm/highmem.c         |  17
-rw-r--r--  mm/ksm.c             |   1
-rw-r--r--  mm/memory_hotplug.c  |  24
-rw-r--r--  mm/migrate.c         |   2
-rw-r--r--  mm/mmap.c            |   4
-rw-r--r--  mm/page_alloc.c      |   4
-rw-r--r--  mm/percpu.c          | 121
10 files changed, 147 insertions(+), 63 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index fd3386242cf0..44cf6f0a3a6d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -128,12 +128,9 @@ config SPARSEMEM_VMEMMAP
 config MEMORY_HOTPLUG
 	bool "Allow for memory hot-add"
 	depends on SPARSEMEM || X86_64_ACPI_NUMA
-	depends on HOTPLUG && !(HIBERNATION && !S390) && ARCH_ENABLE_MEMORY_HOTPLUG
+	depends on HOTPLUG && ARCH_ENABLE_MEMORY_HOTPLUG
 	depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390)
 
-comment "Memory hotplug is currently incompatible with Software Suspend"
-	depends on SPARSEMEM && HOTPLUG && HIBERNATION && !S390
-
 config MEMORY_HOTPLUG_SPARSE
 	def_bool y
 	depends on SPARSEMEM && MEMORY_HOTPLUG
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 11aee09dd2a6..67a33a5a1a93 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -604,10 +604,14 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 
 	/*
 	 * Finally, kill the kernel threads. We don't need to be RCU
-	 * safe anymore, since the bdi is gone from visibility.
+	 * safe anymore, since the bdi is gone from visibility. Force
+	 * unfreeze of the thread before calling kthread_stop(), otherwise
+	 * it would never exit if it is currently stuck in the refrigerator.
 	 */
-	list_for_each_entry(wb, &bdi->wb_list, list)
+	list_for_each_entry(wb, &bdi->wb_list, list) {
+		wb->task->flags &= ~PF_FROZEN;
 		kthread_stop(wb->task);
+	}
 }
 
 /*
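
The hunk above must clear PF_FROZEN before kthread_stop() because a thread parked in the refrigerator never observes the stop request, so the stopping side would block forever. A standalone userspace sketch of the same wake-before-join discipline, with a condition variable standing in for the refrigerator (all names hypothetical, not kernel code):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool frozen = true;		/* analogue of PF_FROZEN */
static bool should_stop = false;	/* analogue of kthread_should_stop() */

static void *worker(void *unused)
{
	pthread_mutex_lock(&lock);
	while (frozen && !should_stop)	/* parked in the "refrigerator" */
		pthread_cond_wait(&wake, &lock);
	pthread_mutex_unlock(&lock);
	return NULL;			/* exits only once unfrozen or stopped */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	pthread_mutex_lock(&lock);
	frozen = false;			/* the "flags &= ~PF_FROZEN" step */
	should_stop = true;		/* then request the stop */
	pthread_cond_broadcast(&wake);	/* and wake the sleeper */
	pthread_mutex_unlock(&lock);

	return pthread_join(t, NULL);	/* the kthread_stop() analogue */
}

Skipping the unfreeze step before the join reproduces the hang the kernel fix addresses: the worker sleeps forever and the join never returns.
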
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 555d5d2731c6..d1dc23cc7f10 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -143,6 +143,30 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
 	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
 }
 
+/*
+ * free_bootmem_late - free bootmem pages directly to page allocator
+ * @addr: starting address of the range
+ * @size: size of the range in bytes
+ *
+ * This is only useful when the bootmem allocator has already been torn
+ * down, but we are still initializing the system.  Pages are given directly
+ * to the page allocator, no bootmem metadata is updated because it is gone.
+ */
+void __init free_bootmem_late(unsigned long addr, unsigned long size)
+{
+	unsigned long cursor, end;
+
+	kmemleak_free_part(__va(addr), size);
+
+	cursor = PFN_UP(addr);
+	end = PFN_DOWN(addr + size);
+
+	for (; cursor < end; cursor++) {
+		__free_pages_bootmem(pfn_to_page(cursor), 0);
+		totalram_pages++;
+	}
+}
+
 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 {
 	int aligned;
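
Note how free_bootmem_late() rounds the start up (PFN_UP) and the end down (PFN_DOWN), so only pages that lie wholly inside [addr, addr + size) are handed to the page allocator. A standalone sketch of that rounding arithmetic, using macro definitions that should match <linux/pfn.h> and assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long addr = 0x1800;	/* range starts mid-page */
	unsigned long size = 0x3000;	/* and ends mid-page, at 0x4800 */
	unsigned long cursor = PFN_UP(addr);		/* pfn 2 */
	unsigned long end = PFN_DOWN(addr + size);	/* pfn 4 */

	/* pfns 2 and 3 are wholly contained; both partial pages are skipped */
	for (; cursor < end; cursor++)
		printf("would free pfn %lu\n", cursor);
	return 0;
}

Rounding the other way would free pages that are only partially covered by the range, handing live data back to the allocator.
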
diff --git a/mm/highmem.c b/mm/highmem.c
index 25878cc49daa..9c1e627f282e 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -426,16 +426,21 @@ void __init page_address_init(void)
 
 void debug_kmap_atomic(enum km_type type)
 {
-	static unsigned warn_count = 10;
+	static int warn_count = 10;
 
-	if (unlikely(warn_count == 0))
+	if (unlikely(warn_count < 0))
 		return;
 
 	if (unlikely(in_interrupt())) {
-		if (in_irq()) {
+		if (in_nmi()) {
+			if (type != KM_NMI && type != KM_NMI_PTE) {
+				WARN_ON(1);
+				warn_count--;
+			}
+		} else if (in_irq()) {
 			if (type != KM_IRQ0 && type != KM_IRQ1 &&
 			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
-			    type != KM_BOUNCE_READ) {
+			    type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
 				WARN_ON(1);
 				warn_count--;
 			}
@@ -452,7 +457,9 @@ void debug_kmap_atomic(enum km_type type)
 	}
 
 	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-	    type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
+	    type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
+	    type == KM_IRQ_PTE || type == KM_NMI ||
+	    type == KM_NMI_PTE ) {
 		if (!irqs_disabled()) {
 			WARN_ON(1);
 			warn_count--;
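
Besides adding the NMI cases, the hunk switches warn_count from unsigned to int and gates on "< 0" instead of "== 0", presumably because the unlocked decrements can race past zero: an unsigned counter then wraps to a huge value and the "== 0" gate never fires again. A standalone demonstration of the wrap:

#include <stdio.h>

int main(void)
{
	unsigned int u = 0;
	int s = 0;

	u--;	/* wraps to UINT_MAX (4294967295) */
	s--;	/* simply becomes -1 */

	printf("unsigned after one extra decrement: %u\n", u);
	printf("signed   after one extra decrement: %d\n", s);
	printf("\"== 0\" gate still closed for unsigned? %s\n",
	       u == 0 ? "yes" : "no");
	printf("\"< 0\" gate closed for signed? %s\n",
	       s < 0 ? "yes" : "no");
	return 0;
}

With the signed counter, any decrement past zero keeps the value negative, so the "< 0" check reliably silences further warnings.
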
diff --git a/mm/ksm.c b/mm/ksm.c
index bef1af4f77e3..5575f8628fef 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1012,6 +1012,7 @@ static struct rmap_item *unstable_tree_search_insert(struct page *page,
 		struct rmap_item *tree_rmap_item;
 		int ret;
 
+		cond_resched();
 		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
 		page2[0] = get_mergeable_page(tree_rmap_item);
 		if (!page2[0])
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 821dee596377..2047465cd27c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -26,6 +26,7 @@
 #include <linux/migrate.h>
 #include <linux/page-isolation.h>
 #include <linux/pfn.h>
+#include <linux/suspend.h>
 
 #include <asm/tlbflush.h>
 
@@ -447,7 +448,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 
-static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
+/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 {
 	struct pglist_data *pgdat;
 	unsigned long zones_size[MAX_NR_ZONES] = {0};
@@ -484,14 +486,18 @@ int __ref add_memory(int nid, u64 start, u64 size)
 	struct resource *res;
 	int ret;
 
+	lock_system_sleep();
+
 	res = register_memory_resource(start, size);
+	ret = -EEXIST;
 	if (!res)
-		return -EEXIST;
+		goto out;
 
 	if (!node_online(nid)) {
 		pgdat = hotadd_new_pgdat(nid, start);
+		ret = -ENOMEM;
 		if (!pgdat)
-			return -ENOMEM;
+			goto out;
 		new_pgdat = 1;
 	}
 
@@ -514,7 +520,8 @@ int __ref add_memory(int nid, u64 start, u64 size)
 		BUG_ON(ret);
 	}
 
-	return ret;
+	goto out;
+
 error:
 	/* rollback pgdat allocation and others */
 	if (new_pgdat)
@@ -522,6 +529,8 @@ error:
 	if (res)
 		release_memory_resource(res);
 
+out:
+	unlock_system_sleep();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(add_memory);
@@ -758,6 +767,8 @@ int offline_pages(unsigned long start_pfn,
 	if (!test_pages_in_a_zone(start_pfn, end_pfn))
 		return -EINVAL;
 
+	lock_system_sleep();
+
 	zone = page_zone(pfn_to_page(start_pfn));
 	node = zone_to_nid(zone);
 	nr_pages = end_pfn - start_pfn;
@@ -765,7 +776,7 @@ int offline_pages(unsigned long start_pfn,
 	/* set above range as isolated */
 	ret = start_isolate_page_range(start_pfn, end_pfn);
 	if (ret)
-		return ret;
+		goto out;
 
 	arg.start_pfn = start_pfn;
 	arg.nr_pages = nr_pages;
@@ -843,6 +854,7 @@ repeat:
 	writeback_set_ratelimit();
 
 	memory_notify(MEM_OFFLINE, &arg);
+	unlock_system_sleep();
 	return 0;
 
 failed_removal:
@@ -852,6 +864,8 @@ failed_removal:
 	/* pushback to free area */
 	undo_isolate_page_range(start_pfn, end_pfn);
 
+out:
+	unlock_system_sleep();
 	return ret;
 }
 
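
Both add_memory() and offline_pages() now take lock_system_sleep() on entry, making hotplug mutually exclusive with hibernation at run time; that is what lets the Kconfig hunk above drop the build-time exclusion. The restructure is the classic single-exit locking pattern: once the lock is taken up front, every early "return" must become "goto out" so the unlock is never skipped. A minimal userspace sketch of the same shape (all names hypothetical):

#include <pthread.h>
#include <errno.h>
#include <stdbool.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static bool reserve_resource(void) { return true; }	/* stand-ins */
static bool build_node(void) { return true; }

static int hypothetical_add(void)
{
	int ret;

	pthread_mutex_lock(&state_lock);	/* like lock_system_sleep() */

	ret = -EEXIST;
	if (!reserve_resource())
		goto out;			/* not "return -EEXIST;" */

	ret = -ENOMEM;
	if (!build_node())
		goto out;			/* not "return -ENOMEM;" */

	ret = 0;
out:
	pthread_mutex_unlock(&state_lock);	/* single unlock site */
	return ret;
}

int main(void)
{
	return hypothetical_add();
}

Setting the error code before each test, as the kernel hunk does, keeps the failure paths down to a bare "goto out".
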
diff --git a/mm/migrate.c b/mm/migrate.c
index 1a4bf4813780..7dbcb22316d2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -602,7 +602,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	struct page *newpage = get_new_page(page, private, &result);
 	int rcu_locked = 0;
 	int charge = 0;
-	struct mem_cgroup *mem;
+	struct mem_cgroup *mem = NULL;
 
 	if (!newpage)
 		return -ENOMEM;
diff --git a/mm/mmap.c b/mm/mmap.c
index 73f5e4b64010..292ddc3cef9c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -20,7 +20,6 @@
 #include <linux/fs.h>
 #include <linux/personality.h>
 #include <linux/security.h>
-#include <linux/ima.h>
 #include <linux/hugetlb.h>
 #include <linux/profile.h>
 #include <linux/module.h>
@@ -1061,9 +1060,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
 	if (error)
 		return error;
-	error = ima_file_mmap(file, prot);
-	if (error)
-		return error;
 
 	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cdcedf661616..2bc2ac63f41e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1769,7 +1769,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(rt_task(p)))
+	} else if (unlikely(rt_task(p)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
@@ -1817,9 +1817,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
 		goto nopage;
 
+restart:
 	wake_all_kswapd(order, zonelist, high_zoneidx);
 
-restart:
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
 	 * reclaim. Now things get more complex, so set up alloc_flags according
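
Two separate fixes here. The "&& !in_interrupt()" test matters because in interrupt context p is merely whichever task happened to be interrupted, so its realtime priority says nothing about the allocation and should not earn it ALLOC_HARDER. Moving the restart: label above wake_all_kswapd() means background reclaim is re-kicked on every pass through the slow path, not only on entry; otherwise kswapd may have gone back to sleep while the allocator is still looping. A loose, standalone sketch of that control-flow change (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

static int attempts;

static void kick_background_worker(void)
{
	printf("kicking background worker (attempt %d)\n", attempts + 1);
}

static bool try_alloc(void)
{
	return ++attempts >= 3;	/* pretend success on the third pass */
}

int main(void)
{
restart:				/* label above the kick ... */
	kick_background_worker();	/* ... so every retry kicks again */
	if (!try_alloc())
		goto restart;
	return 0;
}

With the label below the kick, as in the old code, the worker would be nudged once and then left alone no matter how many times the loop restarted.
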
diff --git a/mm/percpu.c b/mm/percpu.c
index d90797160c2a..5adfc268b408 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -355,62 +355,86 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 }
 
 /**
- * pcpu_extend_area_map - extend area map for allocation
- * @chunk: target chunk
+ * pcpu_need_to_extend - determine whether chunk area map needs to be extended
+ * @chunk: chunk of interest
  *
- * Extend area map of @chunk so that it can accomodate an allocation.
- * A single allocation can split an area into three areas, so this
- * function makes sure that @chunk->map has at least two extra slots.
+ * Determine whether area map of @chunk needs to be extended to
+ * accommodate a new allocation.
  *
  * CONTEXT:
- * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
- * if area map is extended.
+ * pcpu_lock.
  *
  * RETURNS:
- * 0 if noop, 1 if successfully extended, -errno on failure.
+ * New target map allocation length if extension is necessary, 0
+ * otherwise.
  */
-static int pcpu_extend_area_map(struct pcpu_chunk *chunk, unsigned long *flags)
+static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 {
 	int new_alloc;
-	int *new;
-	size_t size;
 
-	/* has enough? */
 	if (chunk->map_alloc >= chunk->map_used + 2)
 		return 0;
 
-	spin_unlock_irqrestore(&pcpu_lock, *flags);
-
 	new_alloc = PCPU_DFL_MAP_ALLOC;
 	while (new_alloc < chunk->map_used + 2)
 		new_alloc *= 2;
 
-	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
-	if (!new) {
-		spin_lock_irqsave(&pcpu_lock, *flags);
+	return new_alloc;
+}
+
+/**
+ * pcpu_extend_area_map - extend area map of a chunk
+ * @chunk: chunk of interest
+ * @new_alloc: new target allocation length of the area map
+ *
+ * Extend area map of @chunk to have @new_alloc entries.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+{
+	int *old = NULL, *new = NULL;
+	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+	unsigned long flags;
+
+	new = pcpu_mem_alloc(new_size);
+	if (!new)
 		return -ENOMEM;
-	}
 
-	/*
-	 * Acquire pcpu_lock and switch to new area map.  Only free
-	 * could have happened inbetween, so map_used couldn't have
-	 * grown.
-	 */
-	spin_lock_irqsave(&pcpu_lock, *flags);
-	BUG_ON(new_alloc < chunk->map_used + 2);
+	/* acquire pcpu_lock and switch to new area map */
+	spin_lock_irqsave(&pcpu_lock, flags);
+
+	if (new_alloc <= chunk->map_alloc)
+		goto out_unlock;
 
-	size = chunk->map_alloc * sizeof(chunk->map[0]);
-	memcpy(new, chunk->map, size);
+	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
+	memcpy(new, chunk->map, old_size);
 
 	/*
 	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
 	 * one of the first chunks and still using static map.
 	 */
 	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
-		pcpu_mem_free(chunk->map, size);
+		old = chunk->map;
 
 	chunk->map_alloc = new_alloc;
 	chunk->map = new;
+	new = NULL;
+
+out_unlock:
+	spin_unlock_irqrestore(&pcpu_lock, flags);
+
+	/*
+	 * pcpu_mem_free() might end up calling vfree() which uses
+	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
+	 */
+	pcpu_mem_free(old, old_size);
+	pcpu_mem_free(new, new_size);
+
 	return 0;
 }
 
@@ -1049,7 +1073,7 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
 	const char *err;
-	int slot, off;
+	int slot, off, new_alloc;
 	unsigned long flags;
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
@@ -1064,14 +1088,25 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 	/* serve reserved allocations from the reserved chunk if available */
 	if (reserved && pcpu_reserved_chunk) {
 		chunk = pcpu_reserved_chunk;
-		if (size > chunk->contig_hint ||
-		    pcpu_extend_area_map(chunk, &flags) < 0) {
-			err = "failed to extend area map of reserved chunk";
+
+		if (size > chunk->contig_hint) {
+			err = "alloc from reserved chunk failed";
 			goto fail_unlock;
 		}
+
+		while ((new_alloc = pcpu_need_to_extend(chunk))) {
+			spin_unlock_irqrestore(&pcpu_lock, flags);
+			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
+				err = "failed to extend area map of reserved chunk";
+				goto fail_unlock_mutex;
+			}
+			spin_lock_irqsave(&pcpu_lock, flags);
+		}
+
 		off = pcpu_alloc_area(chunk, size, align);
 		if (off >= 0)
 			goto area_found;
+
 		err = "alloc from reserved chunk failed";
 		goto fail_unlock;
 	}
@@ -1083,14 +1118,20 @@ restart:
 		if (size > chunk->contig_hint)
 			continue;
 
-		switch (pcpu_extend_area_map(chunk, &flags)) {
-		case 0:
-			break;
-		case 1:
-			goto restart;	/* pcpu_lock dropped, restart */
-		default:
-			err = "failed to extend area map";
-			goto fail_unlock;
+		new_alloc = pcpu_need_to_extend(chunk);
+		if (new_alloc) {
+			spin_unlock_irqrestore(&pcpu_lock, flags);
+			if (pcpu_extend_area_map(chunk,
+						 new_alloc) < 0) {
+				err = "failed to extend area map";
+				goto fail_unlock_mutex;
+			}
+			spin_lock_irqsave(&pcpu_lock, flags);
+			/*
+			 * pcpu_lock has been dropped, need to
+			 * restart cpu_slot list walking.
+			 */
+			goto restart;
 		}
 
 		off = pcpu_alloc_area(chunk, size, align);
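
The percpu restructure splits the old lock-juggling helper in two: pcpu_need_to_extend() only inspects the chunk under pcpu_lock, while pcpu_extend_area_map() does the GFP_KERNEL allocation with the lock dropped, re-checks after relocking in case another CPU extended the map first, and defers both frees until after unlock because vfree() cannot be called under the IRQ-safe pcpu_lock. A userspace sketch of the same grow-and-swap discipline, with a mutex standing in for pcpu_lock (all names hypothetical):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static int *map;
static size_t map_alloc;	/* current capacity, in entries */

static int extend_map(size_t new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);

	new = malloc(new_size);		/* allocate outside the lock */
	if (!new)
		return -1;

	pthread_mutex_lock(&map_lock);

	if (new_alloc <= map_alloc)	/* someone else already extended */
		goto out_unlock;

	old_size = map_alloc * sizeof(map[0]);
	memcpy(new, map, old_size);
	old = map;			/* remember, but don't free yet */
	map = new;
	map_alloc = new_alloc;
	new = NULL;

out_unlock:
	pthread_mutex_unlock(&map_lock);

	free(old);	/* freeing may be expensive; do it unlocked */
	free(new);	/* non-NULL only if our buffer lost the race */
	return 0;
}

int main(void)
{
	map = malloc(4 * sizeof(int));	/* seed a small initial map */
	map_alloc = 4;
	return extend_map(16);
}

Losing the race is handled by simply discarding the freshly allocated buffer after unlock, which is exactly what the "new_alloc <= chunk->map_alloc" early-out in the kernel hunk does.
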