author     Christof Schmitt <christof.schmitt@de.ibm.com>   2010-09-08 08:39:57 -0400
committer  James Bottomley <James.Bottomley@suse.de>        2010-09-16 22:54:19 -0400
commit     44a24cb3731495336d77f3a955a7004997270dfd
tree       3fdf2df31c2f528e6bd01c99a942ae2de2b9d39a   /drivers/s390/scsi/zfcp_qdio.c
parent     f8210e34887e1feb977a9b6b8caa086855af40c9
[SCSI] zfcp: Change spin_lock_bh to spin_lock_irq to fix lockdep warning
With the change to use the data on the SCSI device, iterating through
all LUNs/scsi_devices takes the SCSI host_lock. This triggers warnings
from the lock dependency checker:
=========================================================
[ INFO: possible irq lock inversion dependency detected ]
2.6.34.1 #97
---------------------------------------------------------
chchp/3224 just changed the state of lock:
(&(shost->host_lock)->rlock){-.-...}, at: [<00000000003a73f4>] __scsi_iterate_devices+0x38/0xbc
but this lock took another, HARDIRQ-unsafe lock in the past:
(&(&qdio->req_q_lock)->rlock){+.-...}
and interrupts could create inverse lock ordering between them.
other info that might help us debug this:
[ 24.972394] 2 locks held by chchp/3224:
#0: (&(sch->lock)->rlock){-.-...}, at: [<0000000000401efa>] do_IRQ+0xb2/0x1e4
#1: (&adapter->port_list_lock){.-....}, at: [<0000000000490302>] zfcp_erp_modify_adapter_status+0x9e/0x16c
[...]
=========================================================
[ INFO: possible irq lock inversion dependency detected ]
2.6.34.1 #98
---------------------------------------------------------
chchp/3235 just changed the state of lock:
(&(shost->host_lock)->rlock){-.-...}, at: [<00000000003a73f4>] __scsi_iterate_devices+0x38/0xbc
but this lock took another, HARDIRQ-unsafe lock in the past:
(&(&qdio->stat_lock)->rlock){+.-...}
and interrupts could create inverse lock ordering between them.
other info that might help us debug this:
2 locks held by chchp/3235:
#0: (&(sch->lock)->rlock){-.-...}, at: [<0000000000401efa>] do_IRQ+0xb2/0x1e4
#1: (&adapter->port_list_lock){.-.-..}, at: [<00000000004902f6>] zfcp_erp_modify_adapter_status+0x9e/0x16c
[...]
To stop this warning, change the request queue lock to disable irqs,
not only softirqs. The changes are required only outside of the
critical "send fcp command" path.
Reviewed-by: Swen Schillig <swen@vnet.ibm.com>
Signed-off-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/s390/scsi/zfcp_qdio.c')
 drivers/s390/scsi/zfcp_qdio.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index b2635759721c..60e6e5714eb9 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -60,13 +60,11 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
 	unsigned long long now, span;
 	int used;
 
-	spin_lock(&qdio->stat_lock);
 	now = get_clock_monotonic();
 	span = (now - qdio->req_q_time) >> 12;
 	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
 	qdio->req_q_util += used * span;
 	qdio->req_q_time = now;
-	spin_unlock(&qdio->stat_lock);
 }
 
 static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
@@ -84,7 +82,9 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
 	/* cleanup all SBALs being program-owned now */
 	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
 
+	spin_lock_irq(&qdio->stat_lock);
 	zfcp_qdio_account(qdio);
+	spin_unlock_irq(&qdio->stat_lock);
 	atomic_add(count, &qdio->req_q_free);
 	wake_up(&qdio->req_q_wq);
 }
@@ -201,11 +201,11 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 
 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
 {
-	spin_lock_bh(&qdio->req_q_lock);
+	spin_lock_irq(&qdio->req_q_lock);
 	if (atomic_read(&qdio->req_q_free) ||
 	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return 1;
-	spin_unlock_bh(&qdio->req_q_lock);
+	spin_unlock_irq(&qdio->req_q_lock);
 	return 0;
 }
 
@@ -223,7 +223,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
 {
 	long ret;
 
-	spin_unlock_bh(&qdio->req_q_lock);
+	spin_unlock_irq(&qdio->req_q_lock);
 	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
 			       zfcp_qdio_sbal_check(qdio), 5 * HZ);
 
@@ -239,7 +239,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
 		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
 	}
 
-	spin_lock_bh(&qdio->req_q_lock);
+	spin_lock_irq(&qdio->req_q_lock);
 	return -EIO;
 }
 
@@ -254,7 +254,9 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 	int retval;
 	u8 sbal_number = q_req->sbal_number;
 
+	spin_lock(&qdio->stat_lock);
 	zfcp_qdio_account(qdio);
+	spin_unlock(&qdio->stat_lock);
 
 	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
 			 q_req->sbal_first, sbal_number);
@@ -328,9 +330,9 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
 		return;
 
 	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
-	spin_lock_bh(&qdio->req_q_lock);
+	spin_lock_irq(&qdio->req_q_lock);
 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
-	spin_unlock_bh(&qdio->req_q_lock);
+	spin_unlock_irq(&qdio->req_q_lock);
 
 	wake_up(&qdio->req_q_wq);
 
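Read together, the hunks also show a small design choice: zfcp_qdio_account() no longer takes stat_lock itself; each caller does, using the variant that fits its context (spin_lock_irq in the request-queue interrupt handler, a plain spin_lock in zfcp_qdio_send, which lines up with the commit message's note that only paths outside "send fcp command" need the irq-disabling form). Reconstructed from the first hunk for illustration, the helper now assumes the caller holds the lock:

/* Reconstruction from the first hunk above, for illustration only:
 * the caller is responsible for holding qdio->stat_lock.
 */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_clock_monotonic();
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}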