summary refs log tree commit diff stats
path: root/drivers/md/raid5.c
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2016-08-18 08:57:24 -0400
committerThomas Gleixner <tglx@linutronix.de>2016-09-06 12:30:23 -0400
commit29c6d1bbd7a2cd88a197ea7cef171f616e198526 (patch)
tree030b89a1b988e71879538aac339e49cf4500b829 /drivers/md/raid5.c
parent84a3f4db039e7c4bfe8ae9bebdebdf2a4e09bf86 (diff)
md/raid5: Convert to hotplug state machine
Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Neil Brown <neilb@suse.com>
Cc: linux-raid@vger.kernel.org
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160818125731.27256-10-bigeasy@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--drivers/md/raid5.c84
1 files changed, 29 insertions, 55 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8912407a4dd0..aae8064fd9e6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6330,22 +6330,20 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
6330 return 0; 6330 return 0;
6331} 6331}
6332 6332
6333static void raid5_free_percpu(struct r5conf *conf) 6333static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node)
6334{ 6334{
6335 unsigned long cpu; 6335 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
6336
6337 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6338 return 0;
6339}
6336 6340
6341static void raid5_free_percpu(struct r5conf *conf)
6342{
6337 if (!conf->percpu) 6343 if (!conf->percpu)
6338 return; 6344 return;
6339 6345
6340#ifdef CONFIG_HOTPLUG_CPU 6346 cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
6341 unregister_cpu_notifier(&conf->cpu_notify);
6342#endif
6343
6344 get_online_cpus();
6345 for_each_possible_cpu(cpu)
6346 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6347 put_online_cpus();
6348
6349 free_percpu(conf->percpu); 6347 free_percpu(conf->percpu);
6350} 6348}
6351 6349
@@ -6364,64 +6362,28 @@ static void free_conf(struct r5conf *conf)
6364 kfree(conf); 6362 kfree(conf);
6365} 6363}
6366 6364
6367#ifdef CONFIG_HOTPLUG_CPU 6365static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
6368static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
6369 void *hcpu)
6370{ 6366{
6371 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); 6367 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
6372 long cpu = (long)hcpu;
6373 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 6368 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
6374 6369
6375 switch (action) { 6370 if (alloc_scratch_buffer(conf, percpu)) {
6376 case CPU_UP_PREPARE: 6371 pr_err("%s: failed memory allocation for cpu%u\n",
6377 case CPU_UP_PREPARE_FROZEN: 6372 __func__, cpu);
6378 if (alloc_scratch_buffer(conf, percpu)) { 6373 return -ENOMEM;
6379 pr_err("%s: failed memory allocation for cpu%ld\n",
6380 __func__, cpu);
6381 return notifier_from_errno(-ENOMEM);
6382 }
6383 break;
6384 case CPU_DEAD:
6385 case CPU_DEAD_FROZEN:
6386 case CPU_UP_CANCELED:
6387 case CPU_UP_CANCELED_FROZEN:
6388 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6389 break;
6390 default:
6391 break;
6392 } 6374 }
6393 return NOTIFY_OK; 6375 return 0;
6394} 6376}
6395#endif
6396 6377
6397static int raid5_alloc_percpu(struct r5conf *conf) 6378static int raid5_alloc_percpu(struct r5conf *conf)
6398{ 6379{
6399 unsigned long cpu;
6400 int err = 0; 6380 int err = 0;
6401 6381
6402 conf->percpu = alloc_percpu(struct raid5_percpu); 6382 conf->percpu = alloc_percpu(struct raid5_percpu);
6403 if (!conf->percpu) 6383 if (!conf->percpu)
6404 return -ENOMEM; 6384 return -ENOMEM;
6405 6385
6406#ifdef CONFIG_HOTPLUG_CPU 6386 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
6407 conf->cpu_notify.notifier_call = raid456_cpu_notify;
6408 conf->cpu_notify.priority = 0;
6409 err = register_cpu_notifier(&conf->cpu_notify);
6410 if (err)
6411 return err;
6412#endif
6413
6414 get_online_cpus();
6415 for_each_present_cpu(cpu) {
6416 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6417 if (err) {
6418 pr_err("%s: failed memory allocation for cpu%ld\n",
6419 __func__, cpu);
6420 break;
6421 }
6422 }
6423 put_online_cpus();
6424
6425 if (!err) { 6387 if (!err) {
6426 conf->scribble_disks = max(conf->raid_disks, 6388 conf->scribble_disks = max(conf->raid_disks,
6427 conf->previous_raid_disks); 6389 conf->previous_raid_disks);
@@ -7953,10 +7915,21 @@ static struct md_personality raid4_personality =
7953 7915
7954static int __init raid5_init(void) 7916static int __init raid5_init(void)
7955{ 7917{
7918 int ret;
7919
7956 raid5_wq = alloc_workqueue("raid5wq", 7920 raid5_wq = alloc_workqueue("raid5wq",
7957 WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0); 7921 WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
7958 if (!raid5_wq) 7922 if (!raid5_wq)
7959 return -ENOMEM; 7923 return -ENOMEM;
7924
7925 ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE,
7926 "md/raid5:prepare",
7927 raid456_cpu_up_prepare,
7928 raid456_cpu_dead);
7929 if (ret) {
7930 destroy_workqueue(raid5_wq);
7931 return ret;
7932 }
7960 register_md_personality(&raid6_personality); 7933 register_md_personality(&raid6_personality);
7961 register_md_personality(&raid5_personality); 7934 register_md_personality(&raid5_personality);
7962 register_md_personality(&raid4_personality); 7935 register_md_personality(&raid4_personality);
@@ -7968,6 +7941,7 @@ static void raid5_exit(void)
7968 unregister_md_personality(&raid6_personality); 7941 unregister_md_personality(&raid6_personality);
7969 unregister_md_personality(&raid5_personality); 7942 unregister_md_personality(&raid5_personality);
7970 unregister_md_personality(&raid4_personality); 7943 unregister_md_personality(&raid4_personality);
7944 cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
7971 destroy_workqueue(raid5_wq); 7945 destroy_workqueue(raid5_wq);
7972} 7946}
7973 7947