author	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-07 13:06:46 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-07 13:06:46 -0500
commit	8d521d94dacd2bcca5ef2054b6d9d9986da81423 (patch)
tree	8932d023b4c3ed09c4bbbe54c726f7800754f33f /mm
parent	bdfa15f1a357bb90ab715e326e86cc546b282f49 (diff)
parent	1b046b445c0f856c3c1eed38a348bd87cc2dc730 (diff)
Merge branch 'for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu
Pull percpu updates from Dennis Zhou:
 "There are 2 minor changes to the percpu allocator this merge window:

   - for loop condition that could be out of bounds on multi-socket UP

   - cosmetic removal of pcpu_group_offsets[0] in UP code as it is 0

  There has been an interest in having better alignment with percpu
  allocations. This has caused a performance regression in at least one
  reported workload. I have a series out which adds scan hints to the
  allocator as well as some other performance oriented changes. I hope
  to have this queued for v5.2 soon"

* 'for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu:
  percpu: km: no need to consider pcpu_group_offsets[0]
  percpu: use nr_groups as check condition
Diffstat (limited to 'mm')
-rw-r--r--	mm/percpu-km.c	2
-rw-r--r--	mm/percpu.c	2
2 files changed, 2 insertions, 2 deletions
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index 0f643dc2dc65..b68d5df14731 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -67,7 +67,7 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
 		pcpu_set_page_chunk(nth_page(pages, i), chunk);
 
 	chunk->data = pages;
-	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
+	chunk->base_addr = page_address(pages);
 
 	spin_lock_irqsave(&pcpu_lock, flags);
 	pcpu_chunk_populated(chunk, 0, nr_pages, false);
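As the pull message notes, this removal is purely cosmetic: in the UP configurations that use the km backend there is a single percpu group whose offset, pcpu_group_offsets[0], is 0, so the subtraction never changed the result. A minimal standalone illustration of that reasoning (not kernel code; all names here are hypothetical):

#include <assert.h>
#include <stdint.h>

/*
 * Hypothetical single-group layout, mirroring the UP case described in
 * the pull message: the one and only group starts at offset 0.
 */
static const uintptr_t group_offsets[1] = { 0 };

static void *chunk_base(void *page_addr)
{
	/* Subtracting a zero offset cannot change the address ... */
	void *with_offset = (char *)page_addr - group_offsets[0];
	/* ... so the offset-free form is equivalent, just clearer. */
	void *without_offset = page_addr;

	assert(with_offset == without_offset);
	return without_offset;
}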
diff --git a/mm/percpu.c b/mm/percpu.c
index db86282fd024..c5c750781628 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2384,7 +2384,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 	ai->atom_size = atom_size;
 	ai->alloc_size = alloc_size;
 
-	for (group = 0, unit = 0; group_cnt[group]; group++) {
+	for (group = 0, unit = 0; group < nr_groups; group++) {
 		struct pcpu_group_info *gi = &ai->groups[group];
 
 		/*
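The pull message describes the old condition as one "that could be out of bounds on multi-socket UP": group_cnt[] has one slot per group, so a sentinel-style test like group_cnt[group] reads one entry past the end when every group is populated, whereas bounding the walk on nr_groups never does. A minimal sketch of the two loop shapes (not the kernel code; the function and names below are hypothetical):

#include <stddef.h>

/*
 * group_cnt points at exactly nr_groups entries, all of which may be
 * non-zero, so there is no zero sentinel to stop on.
 */
static int count_units(const int *group_cnt, size_t nr_groups)
{
	size_t group;
	int units = 0;

	/*
	 * Sentinel style:  for (group = 0; group_cnt[group]; group++)
	 * would dereference group_cnt[nr_groups], which is past the end
	 * of the array.  Bounding on the element count avoids that read.
	 */
	for (group = 0; group < nr_groups; group++)
		units += group_cnt[group];

	return units;
}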