aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorPaul Gortmaker <paul.gortmaker@windriver.com>2013-06-19 14:53:51 -0400
committerPaul Gortmaker <paul.gortmaker@windriver.com>2013-07-14 19:36:59 -0400
commit0db0628d90125193280eabb501c94feaf48fa9ab (patch)
tree0e0ef0c4eac101d25a3bd125c4a9200ac4d294c0 /mm
parent49fb4c6290c70c418a5c25eee996d6b55ea132d6 (diff)
kernel: delete __cpuinit usage from all core kernel files
The __cpuinit type of throwaway sections might have made sense some time ago when RAM was more constrained, but now the savings do not offset the cost and complications. For example, the fix in commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time") is a good example of the nasty type of bugs that can be created with improper use of the various __init prefixes.

After a discussion on LKML[1] it was decided that cpuinit should go the way of devinit and be phased out. Once all the users are gone, we can then finally remove the macros themselves from linux/init.h.

This removes all the uses of the __cpuinit macros from C files in the core kernel directories (kernel, init, lib, mm, and include) that don't really have a specific maintainer.

[1] https://lkml.org/lkml/2013/5/20/589

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Diffstat (limited to 'mm')
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/slab.c10
-rw-r--r--mm/slub.c4
-rw-r--r--mm/vmstat.c6
5 files changed, 13 insertions, 13 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d12ca6f3c293..00a7a664b9c1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2522,7 +2522,7 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2522 spin_unlock(&memcg->pcp_counter_lock); 2522 spin_unlock(&memcg->pcp_counter_lock);
2523} 2523}
2524 2524
2525static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb, 2525static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
2526 unsigned long action, 2526 unsigned long action,
2527 void *hcpu) 2527 void *hcpu)
2528{ 2528{
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4514ad7415c3..3f0c895c71fe 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1619,7 +1619,7 @@ void writeback_set_ratelimit(void)
1619 ratelimit_pages = 16; 1619 ratelimit_pages = 16;
1620} 1620}
1621 1621
1622static int __cpuinit 1622static int
1623ratelimit_handler(struct notifier_block *self, unsigned long action, 1623ratelimit_handler(struct notifier_block *self, unsigned long action,
1624 void *hcpu) 1624 void *hcpu)
1625{ 1625{
@@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
1634 } 1634 }
1635} 1635}
1636 1636
1637static struct notifier_block __cpuinitdata ratelimit_nb = { 1637static struct notifier_block ratelimit_nb = {
1638 .notifier_call = ratelimit_handler, 1638 .notifier_call = ratelimit_handler,
1639 .next = NULL, 1639 .next = NULL,
1640}; 1640};
diff --git a/mm/slab.c b/mm/slab.c
index 35cb0c861508..2580db062df9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -787,7 +787,7 @@ static void next_reap_node(void)
787 * the CPUs getting into lockstep and contending for the global cache chain 787 * the CPUs getting into lockstep and contending for the global cache chain
788 * lock. 788 * lock.
789 */ 789 */
790static void __cpuinit start_cpu_timer(int cpu) 790static void start_cpu_timer(int cpu)
791{ 791{
792 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); 792 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
793 793
@@ -1186,7 +1186,7 @@ static inline int slabs_tofree(struct kmem_cache *cachep,
1186 return (n->free_objects + cachep->num - 1) / cachep->num; 1186 return (n->free_objects + cachep->num - 1) / cachep->num;
1187} 1187}
1188 1188
1189static void __cpuinit cpuup_canceled(long cpu) 1189static void cpuup_canceled(long cpu)
1190{ 1190{
1191 struct kmem_cache *cachep; 1191 struct kmem_cache *cachep;
1192 struct kmem_cache_node *n = NULL; 1192 struct kmem_cache_node *n = NULL;
@@ -1251,7 +1251,7 @@ free_array_cache:
1251 } 1251 }
1252} 1252}
1253 1253
1254static int __cpuinit cpuup_prepare(long cpu) 1254static int cpuup_prepare(long cpu)
1255{ 1255{
1256 struct kmem_cache *cachep; 1256 struct kmem_cache *cachep;
1257 struct kmem_cache_node *n = NULL; 1257 struct kmem_cache_node *n = NULL;
@@ -1334,7 +1334,7 @@ bad:
1334 return -ENOMEM; 1334 return -ENOMEM;
1335} 1335}
1336 1336
1337static int __cpuinit cpuup_callback(struct notifier_block *nfb, 1337static int cpuup_callback(struct notifier_block *nfb,
1338 unsigned long action, void *hcpu) 1338 unsigned long action, void *hcpu)
1339{ 1339{
1340 long cpu = (long)hcpu; 1340 long cpu = (long)hcpu;
@@ -1390,7 +1390,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1390 return notifier_from_errno(err); 1390 return notifier_from_errno(err);
1391} 1391}
1392 1392
1393static struct notifier_block __cpuinitdata cpucache_notifier = { 1393static struct notifier_block cpucache_notifier = {
1394 &cpuup_callback, NULL, 0 1394 &cpuup_callback, NULL, 0
1395}; 1395};
1396 1396
diff --git a/mm/slub.c b/mm/slub.c
index 3b482c863002..2b02d666bf63 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3773,7 +3773,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
3773 * Use the cpu notifier to insure that the cpu slabs are flushed when 3773 * Use the cpu notifier to insure that the cpu slabs are flushed when
3774 * necessary. 3774 * necessary.
3775 */ 3775 */
3776static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, 3776static int slab_cpuup_callback(struct notifier_block *nfb,
3777 unsigned long action, void *hcpu) 3777 unsigned long action, void *hcpu)
3778{ 3778{
3779 long cpu = (long)hcpu; 3779 long cpu = (long)hcpu;
@@ -3799,7 +3799,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3799 return NOTIFY_OK; 3799 return NOTIFY_OK;
3800} 3800}
3801 3801
3802static struct notifier_block __cpuinitdata slab_notifier = { 3802static struct notifier_block slab_notifier = {
3803 .notifier_call = slab_cpuup_callback 3803 .notifier_call = slab_cpuup_callback
3804}; 3804};
3805 3805
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f42745e65780..20c2ef4458fa 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1182,7 +1182,7 @@ static void vmstat_update(struct work_struct *w)
1182 round_jiffies_relative(sysctl_stat_interval)); 1182 round_jiffies_relative(sysctl_stat_interval));
1183} 1183}
1184 1184
1185static void __cpuinit start_cpu_timer(int cpu) 1185static void start_cpu_timer(int cpu)
1186{ 1186{
1187 struct delayed_work *work = &per_cpu(vmstat_work, cpu); 1187 struct delayed_work *work = &per_cpu(vmstat_work, cpu);
1188 1188
@@ -1194,7 +1194,7 @@ static void __cpuinit start_cpu_timer(int cpu)
1194 * Use the cpu notifier to insure that the thresholds are recalculated 1194 * Use the cpu notifier to insure that the thresholds are recalculated
1195 * when necessary. 1195 * when necessary.
1196 */ 1196 */
1197static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, 1197static int vmstat_cpuup_callback(struct notifier_block *nfb,
1198 unsigned long action, 1198 unsigned long action,
1199 void *hcpu) 1199 void *hcpu)
1200{ 1200{
@@ -1226,7 +1226,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
1226 return NOTIFY_OK; 1226 return NOTIFY_OK;
1227} 1227}
1228 1228
1229static struct notifier_block __cpuinitdata vmstat_notifier = 1229static struct notifier_block vmstat_notifier =
1230 { &vmstat_cpuup_callback, NULL, 0 }; 1230 { &vmstat_cpuup_callback, NULL, 0 };
1231#endif 1231#endif
1232 1232