summaryrefslogtreecommitdiffstats
path: root/drivers/md
diff options
context:
space:
mode:
authorJulia Cartwright <julia@ni.com>2017-04-28 13:41:02 -0400
committerShaohua Li <shli@fb.com>2017-05-04 16:44:23 -0400
commit3d05f3aed5d721c2c77d20288c29ab26c6193ed5 (patch)
tree9659622f82930732daa486273ff60cdaa6ca748f /drivers/md
parent4ac4d584886a4f47f8ff3bca0f32ff9a2987d3e5 (diff)
md/raid5: make use of spin_lock_irq over local_irq_disable + spin_lock
On mainline, there is no functional difference, just less code, and
symmetric lock/unlock paths.

On PREEMPT_RT builds, this fixes the following warning, seen by
Alexander GQ Gerasiov, due to the sleeping nature of spinlocks.

   BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:993
   in_atomic(): 0, irqs_disabled(): 1, pid: 58, name: kworker/u12:1
   CPU: 5 PID: 58 Comm: kworker/u12:1 Tainted: G        W 4.9.20-rt16-stand6-686 #1
   Hardware name: Supermicro SYS-5027R-WRF/X9SRW-F, BIOS 3.2a 10/28/2015
   Workqueue: writeback wb_workfn (flush-253:0)
   Call Trace:
    dump_stack+0x47/0x68
    ? migrate_enable+0x4a/0xf0
    ___might_sleep+0x101/0x180
    rt_spin_lock+0x17/0x40
    add_stripe_bio+0x4e3/0x6c0 [raid456]
    ? preempt_count_add+0x42/0xb0
    raid5_make_request+0x737/0xdd0 [raid456]

Reported-by: Alexander GQ Gerasiov <gq@redlab-i.ru>
Tested-by: Alexander GQ Gerasiov <gq@redlab-i.ru>
Signed-off-by: Julia Cartwright <julia@ni.com>
Signed-off-by: Shaohua Li <shli@fb.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/raid5.c17
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2e38cfac5b1d..3809a2192132 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -103,8 +103,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
 {
 	int i;
-	local_irq_disable();
-	spin_lock(conf->hash_locks);
+	spin_lock_irq(conf->hash_locks);
 	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
 		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
 	spin_lock(&conf->device_lock);
@@ -114,9 +113,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
 {
 	int i;
 	spin_unlock(&conf->device_lock);
-	for (i = NR_STRIPE_HASH_LOCKS; i; i--)
-		spin_unlock(conf->hash_locks + i - 1);
-	local_irq_enable();
+	for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
+		spin_unlock(conf->hash_locks + i);
+	spin_unlock_irq(conf->hash_locks);
 }
 
 /* Find first data disk in a raid6 stripe */
@@ -714,12 +713,11 @@ static bool is_full_stripe_write(struct stripe_head *sh)
 
 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 {
-	local_irq_disable();
 	if (sh1 > sh2) {
-		spin_lock(&sh2->stripe_lock);
+		spin_lock_irq(&sh2->stripe_lock);
 		spin_lock_nested(&sh1->stripe_lock, 1);
 	} else {
-		spin_lock(&sh1->stripe_lock);
+		spin_lock_irq(&sh1->stripe_lock);
 		spin_lock_nested(&sh2->stripe_lock, 1);
 	}
 }
@@ -727,8 +725,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 {
 	spin_unlock(&sh1->stripe_lock);
-	spin_unlock(&sh2->stripe_lock);
-	local_irq_enable();
+	spin_unlock_irq(&sh2->stripe_lock);
 }
 
 /* Only freshly new full stripe normal write stripe can be added to a batch list */