Diffstat (limited to 'mm')
-rw-r--r--  mm/Makefile          |  2
-rw-r--r--  mm/allocpercpu.c     | 28
-rw-r--r--  mm/kmemleak-test.c   |  6
-rw-r--r--  mm/page-writeback.c  |  5
-rw-r--r--  mm/percpu.c          | 40
-rw-r--r--  mm/quicklist.c       |  2
-rw-r--r--  mm/slub.c            |  4
7 files changed, 77 insertions, 10 deletions
diff --git a/mm/Makefile b/mm/Makefile
index 5e0bd6426693..c77c6487552f 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -33,7 +33,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
-ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
 obj-$(CONFIG_SMP) += percpu.o
 else
 obj-$(CONFIG_SMP) += allocpercpu.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index dfdee6a47359..df34ceae0c67 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -5,6 +5,8 @@
  */
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/bootmem.h>
+#include <asm/sections.h>
 
 #ifndef cache_line_size
 #define cache_line_size()	L1_CACHE_BYTES
@@ -147,3 +149,29 @@ void free_percpu(void *__pdata)
 	kfree(__percpu_disguise(__pdata));
 }
 EXPORT_SYMBOL_GPL(free_percpu);
+
+/*
+ * Generic percpu area setup.
+ */
+#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+
+EXPORT_SYMBOL(__per_cpu_offset);
+
+void __init setup_per_cpu_areas(void)
+{
+	unsigned long size, i;
+	char *ptr;
+	unsigned long nr_possible_cpus = num_possible_cpus();
+
+	/* Copy section for each CPU (we discard the original) */
+	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
+	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
+
+	for_each_possible_cpu(i) {
+		__per_cpu_offset[i] = ptr - __per_cpu_start;
+		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+		ptr += size;
+	}
+}
+#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
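The legacy setup_per_cpu_areas() added above copies the static percpu section once per possible CPU and records, for each CPU, the byte offset from the original section to that CPU's copy; accessors then reach a CPU's instance by adding its offset to the variable's link-time address. Below is a minimal userspace sketch of that idea only, not kernel code; NR_CPUS, pcpu_section, pcpu_offset and my_per_cpu are invented names for the illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4	/* hypothetical CPU count for this sketch */

/* Models the compile-time static percpu section and one variable in it. */
static struct { long counter; } pcpu_section;

/* Models __per_cpu_offset[]: byte distance to each CPU's copy. */
static unsigned long pcpu_offset[NR_CPUS];

/* Models per_cpu(var, cpu): shift the variable's section address by the
 * target CPU's byte offset and dereference the result. */
#define my_per_cpu(var, cpu) \
	(*(typeof(&(var)))((char *)&(var) + pcpu_offset[cpu]))

int main(void)
{
	size_t size = sizeof(pcpu_section);
	char *base = malloc(size * NR_CPUS);	/* stands in for alloc_bootmem_pages() */
	char *ptr = base;
	int cpu;

	/* Copy the section once per CPU and remember each copy's offset. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		pcpu_offset[cpu] = (unsigned long)ptr - (unsigned long)&pcpu_section;
		memcpy(ptr, &pcpu_section, size);
		ptr += size;
	}

	my_per_cpu(pcpu_section.counter, 2) = 42;	/* touches CPU 2's copy only */
	printf("cpu2=%ld cpu0=%ld\n",
	       my_per_cpu(pcpu_section.counter, 2),
	       my_per_cpu(pcpu_section.counter, 0));

	free(base);
	return 0;
}

In the kernel the same role is played by __per_cpu_offset[] together with the per_cpu() and __get_cpu_var() accessors.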
diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c
index d5292fc6f523..177a5169bbde 100644
--- a/mm/kmemleak-test.c
+++ b/mm/kmemleak-test.c
@@ -36,7 +36,7 @@ struct test_node {
 };
 
 static LIST_HEAD(test_list);
-static DEFINE_PER_CPU(void *, test_pointer);
+static DEFINE_PER_CPU(void *, kmemleak_test_pointer);
 
 /*
  * Some very simple testing. This function needs to be extended for
@@ -86,9 +86,9 @@ static int __init kmemleak_test_init(void)
 	}
 
 	for_each_possible_cpu(i) {
-		per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL);
+		per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
 		pr_info("kmemleak: kmalloc(129) = %p\n",
-			per_cpu(test_pointer, i));
+			per_cpu(kmemleak_test_pointer, i));
 	}
 
 	return 0;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7687879253b9..3c7f5e1afe5f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -610,6 +610,8 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 	}
 }
 
+static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
@@ -627,7 +629,6 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 					unsigned long nr_pages_dirtied)
 {
-	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
 	unsigned long ratelimit;
 	unsigned long *p;
 
@@ -640,7 +641,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 	 * tasks in balance_dirty_pages(). Period.
 	 */
 	preempt_disable();
-	p = &__get_cpu_var(ratelimits);
+	p = &__get_cpu_var(bdp_ratelimits);
 	*p += nr_pages_dirtied;
 	if (unlikely(*p >= ratelimit)) {
 		*p = 0;
diff --git a/mm/percpu.c b/mm/percpu.c
index b70f2acd8853..b14984566f5a 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -43,7 +43,7 @@
  *
  * To use this allocator, arch code should do the followings.
  *
- * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+ * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
  *
  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  *   regular address to percpu pointer and back if they need to be
@@ -1275,3 +1275,41 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 				     reserved_size, dyn_size,
 				     pcpue_unit_size, pcpue_ptr, NULL);
 }
+
+/*
+ * Generic percpu area setup.
+ *
+ * The embedding helper is used because its behavior closely resembles
+ * the original non-dynamic generic percpu area setup.  This is
+ * important because many archs have addressing restrictions and might
+ * fail if the percpu area is located far away from the previous
+ * location.  As an added bonus, in non-NUMA cases, embedding is
+ * generally a good idea TLB-wise because percpu area can piggy back
+ * on the physical linear memory mapping which uses large page
+ * mappings on applicable archs.
+ */
+#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+void __init setup_per_cpu_areas(void)
+{
+	size_t static_size = __per_cpu_end - __per_cpu_start;
+	ssize_t unit_size;
+	unsigned long delta;
+	unsigned int cpu;
+
+	/*
+	 * Always reserve area for module percpu variables.  That's
+	 * what the legacy allocator did.
+	 */
+	unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
+					   PERCPU_DYNAMIC_RESERVE, -1);
+	if (unit_size < 0)
+		panic("Failed to initialize percpu areas.");
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset[cpu] = delta + cpu * unit_size;
+}
+#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
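The new generic setup_per_cpu_areas() above derives every CPU's offset from a single delta (the distance from __per_cpu_start to pcpu_base_addr) plus cpu * unit_size, because the embedded first chunk lays the per-CPU units out contiguously. A small standalone sketch of that arithmetic follows; the addresses and unit size are invented purely for illustration, while in the kernel pcpu_base_addr and the unit size come from pcpu_embed_first_chunk().

#include <stdio.h>

int main(void)
{
	/* All values are hypothetical, chosen only to make the layout visible. */
	unsigned long per_cpu_start = 0xc0500000UL;	/* link address of the static percpu section */
	unsigned long base_addr     = 0xc1000000UL;	/* pcpu_base_addr after embedding */
	unsigned long unit_size     = 0x8000UL;		/* one CPU's unit (32 KiB here) */
	unsigned long delta = base_addr - per_cpu_start;
	unsigned int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu%u: offset = 0x%08lx, unit starts at 0x%08lx\n",
		       cpu, delta + cpu * unit_size,
		       per_cpu_start + delta + cpu * unit_size);
	return 0;
}

Each CPU's unit thus begins unit_size bytes after the previous one, which is what lets a single embedded chunk back all of the static percpu variables.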
diff --git a/mm/quicklist.c b/mm/quicklist.c
index e66d07d1b4ff..6eedf7e473d1 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -19,7 +19,7 @@
 #include <linux/module.h>
 #include <linux/quicklist.h>
 
-DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);
 
 #define FRACTION_OF_NODE_MEM	16
 
diff --git a/mm/slub.c b/mm/slub.c
index 819f056b39c6..ffc895cc3a68 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2092,8 +2092,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
  */
 #define NR_KMEM_CACHE_CPU 100
 
-static DEFINE_PER_CPU(struct kmem_cache_cpu,
-		      kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
+static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
+		      kmem_cache_cpu);
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
 static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);