Diffstat (limited to 'mm')
-rw-r--r--  mm/Makefile        |   4
-rw-r--r--  mm/allocpercpu.c   | 177
-rw-r--r--  mm/percpu.c        |  24
-rw-r--r--  mm/slab.c          |  18
-rw-r--r--  mm/vmalloc.c       |   4
-rw-r--r--  mm/vmstat.c        |   7
6 files changed, 36 insertions(+), 198 deletions(-)
diff --git a/mm/Makefile b/mm/Makefile
index ebf849042ed3..82131d0f8d85 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -34,11 +34,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
-ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
 obj-$(CONFIG_SMP) += percpu.o
-else
-obj-$(CONFIG_SMP) += allocpercpu.o
-endif
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
deleted file mode 100644
index df34ceae0c67..000000000000
--- a/mm/allocpercpu.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * linux/mm/allocpercpu.c
- *
- * Separated from slab.c August 11, 2006 Christoph Lameter
- */
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/bootmem.h>
-#include <asm/sections.h>
-
-#ifndef cache_line_size
-#define cache_line_size()	L1_CACHE_BYTES
-#endif
-
-/**
- * percpu_depopulate - depopulate per-cpu data for given cpu
- * @__pdata: per-cpu data to depopulate
- * @cpu: depopulate per-cpu data for this cpu
- *
- * Depopulating per-cpu data for a cpu going offline would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- */
-static void percpu_depopulate(void *__pdata, int cpu)
-{
-	struct percpu_data *pdata = __percpu_disguise(__pdata);
-
-	kfree(pdata->ptrs[cpu]);
-	pdata->ptrs[cpu] = NULL;
-}
-
-/**
- * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
- * @__pdata: per-cpu data to depopulate
- * @mask: depopulate per-cpu data for cpu's selected through mask bits
- */
-static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
-{
-	int cpu;
-	for_each_cpu_mask_nr(cpu, *mask)
-		percpu_depopulate(__pdata, cpu);
-}
-
-#define percpu_depopulate_mask(__pdata, mask) \
-	__percpu_depopulate_mask((__pdata), &(mask))
-
-/**
- * percpu_populate - populate per-cpu data for given cpu
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @cpu: populate per-data for this cpu
- *
- * Populating per-cpu data for a cpu coming online would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- * Per-cpu object is populated with zeroed buffer.
- */
-static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
-{
-	struct percpu_data *pdata = __percpu_disguise(__pdata);
-	int node = cpu_to_node(cpu);
-
-	/*
-	 * We should make sure each CPU gets private memory.
-	 */
-	size = roundup(size, cache_line_size());
-
-	BUG_ON(pdata->ptrs[cpu]);
-	if (node_online(node))
-		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
-	else
-		pdata->ptrs[cpu] = kzalloc(size, gfp);
-	return pdata->ptrs[cpu];
-}
-
-/**
- * percpu_populate_mask - populate per-cpu data for more cpu's
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-cpu data for cpu's selected through mask bits
- *
- * Per-cpu objects are populated with zeroed buffers.
- */
-static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
-				  cpumask_t *mask)
-{
-	cpumask_t populated;
-	int cpu;
-
-	cpus_clear(populated);
-	for_each_cpu_mask_nr(cpu, *mask)
-		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
-			__percpu_depopulate_mask(__pdata, &populated);
-			return -ENOMEM;
-		} else
-			cpu_set(cpu, populated);
-	return 0;
-}
-
-#define percpu_populate_mask(__pdata, size, gfp, mask) \
-	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
-
-/**
- * alloc_percpu - initial setup of per-cpu data
- * @size: size of per-cpu object
- * @align: alignment
- *
- * Allocate dynamic percpu area. Percpu objects are populated with
- * zeroed buffers.
- */
-void *__alloc_percpu(size_t size, size_t align)
-{
-	/*
-	 * We allocate whole cache lines to avoid false sharing
-	 */
-	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
-	void *pdata = kzalloc(sz, GFP_KERNEL);
-	void *__pdata = __percpu_disguise(pdata);
-
-	/*
-	 * Can't easily make larger alignment work with kmalloc. WARN
-	 * on it. Larger alignment should only be used for module
-	 * percpu sections on SMP for which this path isn't used.
-	 */
-	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-
-	if (unlikely(!pdata))
-		return NULL;
-	if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
-					   &cpu_possible_map)))
-		return __pdata;
-	kfree(pdata);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu);
-
-/**
- * free_percpu - final cleanup of per-cpu data
- * @__pdata: object to clean up
- *
- * We simply clean up any per-cpu object left. No need for the client to
- * track and specify through a bis mask which per-cpu objects are to free.
- */
-void free_percpu(void *__pdata)
-{
-	if (unlikely(!__pdata))
-		return;
-	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
-	kfree(__percpu_disguise(__pdata));
-}
-EXPORT_SYMBOL_GPL(free_percpu);
-
-/*
- * Generic percpu area setup.
- */
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-void __init setup_per_cpu_areas(void)
-{
-	unsigned long size, i;
-	char *ptr;
-	unsigned long nr_possible_cpus = num_possible_cpus();
-
-	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
-	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
-	for_each_possible_cpu(i) {
-		__per_cpu_offset[i] = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-		ptr += size;
-	}
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
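
For context on the file deleted above: mm/allocpercpu.c relied on struct percpu_data and __percpu_disguise(), which lived in include/linux/percpu.h rather than in this file. The sketch below reconstructs those legacy definitions from memory, so details may differ from the exact tree; legacy_per_cpu_ptr is a made-up name standing in for the old per_cpu_ptr() behavior.

/* Sketch of the legacy representation assumed by mm/allocpercpu.c. */
struct percpu_data {
	void *ptrs[1];			/* really nr_cpu_ids entries */
};

/*
 * Hide the pointer array behind a bit-flipped pointer so callers never
 * dereference it directly; applying the macro twice recovers the original.
 */
#define __percpu_disguise(pdata) \
	((struct percpu_data *)~(unsigned long)(pdata))

/*
 * The old per_cpu_ptr() resolved roughly to this: one kmalloc'd,
 * cache-line-aligned buffer per possible CPU.
 */
#define legacy_per_cpu_ptr(ptr, cpu) \
	(__percpu_disguise(ptr)->ptrs[(cpu)])
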
diff --git a/mm/percpu.c b/mm/percpu.c
index 5adfc268b408..442010cc91c6 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -46,8 +46,6 @@
  *
  * To use this allocator, arch code should do the followings.
  *
- * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
- *
  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  *   regular address to percpu pointer and back if they need to be
  *   different from the default
@@ -74,6 +72,7 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
+#include <asm/io.h>
 
 #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
 #define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
@@ -1302,6 +1301,27 @@ void free_percpu(void *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);
 
+/**
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
+ *
+ * Given @addr which is dereferenceable address obtained via one of
+ * percpu access macros, this function translates it into its physical
+ * address. The caller is responsible for ensuring @addr stays valid
+ * until this function finishes.
+ *
+ * RETURNS:
+ * The physical address for @addr.
+ */
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+	if ((unsigned long)addr < VMALLOC_START ||
+	    (unsigned long)addr >= VMALLOC_END)
+		return __pa(addr);
+	else
+		return page_to_phys(vmalloc_to_page(addr));
+}
+
 static inline size_t pcpu_calc_fc_sizes(size_t static_size,
 					size_t reserved_size,
 					ssize_t *dyn_sizep)
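
The per_cpu_ptr_to_phys() helper added above returns the physical address backing a percpu address: addresses outside the vmalloc range translate via __pa(), while vmalloc-mapped percpu chunks go through vmalloc_to_page(). Below is a minimal usage sketch, not taken from this commit; the stats variable and the stats_setup()/stats_phys() helpers are made up for illustration, while alloc_percpu(), per_cpu_ptr() and per_cpu_ptr_to_phys() are the real interfaces.

#include <linux/percpu.h>

static u64 *stats;	/* dynamically allocated per-CPU counter */

static int stats_setup(void)
{
	stats = alloc_percpu(u64);
	return stats ? 0 : -ENOMEM;
}

/*
 * Hand one CPU's buffer to something that needs a physical address
 * (firmware, a device, a dump tool, ...).
 */
static phys_addr_t stats_phys(int cpu)
{
	return per_cpu_ptr_to_phys(per_cpu_ptr(stats, cpu));
}
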
diff --git a/mm/slab.c b/mm/slab.c
index a6c9166996a9..29b09599af7c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -697,7 +697,7 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -838,7 +838,7 @@ __setup("noaliencache", noaliencache_setup);
  * objects freed on different nodes from which they were allocated) and the
  * flushing of remote pcps by calling drain_node_pages.
  */
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 
 static void init_reap_node(int cpu)
 {
@@ -848,17 +848,17 @@ static void init_reap_node(int cpu)
 	if (node == MAX_NUMNODES)
 		node = first_node(node_online_map);
 
-	per_cpu(reap_node, cpu) = node;
+	per_cpu(slab_reap_node, cpu) = node;
 }
 
 static void next_reap_node(void)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	node = next_node(node, node_online_map);
 	if (unlikely(node >= MAX_NUMNODES))
 		node = first_node(node_online_map);
-	__get_cpu_var(reap_node) = node;
+	__get_cpu_var(slab_reap_node) = node;
 }
 
 #else
@@ -875,7 +875,7 @@ static void next_reap_node(void)
  */
 static void __cpuinit start_cpu_timer(int cpu)
 {
-	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
@@ -1039,7 +1039,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
@@ -1300,9 +1300,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		 * anything expensive but will only modify reap_work
 		 * and reschedule the timer.
 		 */
-		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+		cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
 		/* Now the cache_reaper is guaranteed to be not running. */
-		per_cpu(reap_work, cpu).work.func = NULL;
+		per_cpu(slab_reap_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0f551a4a44cd..9b08d790df6f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -761,7 +761,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	spin_lock(&vbq->lock);
 	list_add(&vb->free_list, &vbq->free);
 	spin_unlock(&vbq->lock);
-	put_cpu_var(vmap_cpu_blocks);
+	put_cpu_var(vmap_block_queue);
 
 	return vb;
 }
@@ -826,7 +826,7 @@ again:
 		}
 		spin_unlock(&vb->lock);
 	}
-	put_cpu_var(vmap_cpu_blocks);
+	put_cpu_var(vmap_block_queue);
 	rcu_read_unlock();
 
 	if (!addr) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c81321f9feec..dad2327e4580 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -883,11 +883,10 @@ static void vmstat_update(struct work_struct *w)
 
 static void __cpuinit start_cpu_timer(int cpu)
 {
-	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 
-	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
-	schedule_delayed_work_on(cpu, vmstat_work,
-		__round_jiffies_relative(HZ, cpu));
+	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 }
 
 /*