diff options
author | Robert Love <robert.w.love@intel.com> | 2011-04-01 19:06:14 -0400 |
---|---|---|
committer | James Bottomley <James.Bottomley@suse.de> | 2011-05-01 11:20:33 -0400 |
commit | 9c8cce8e416b3286720379b5efa1c7fa81b2ec36 (patch) | |
tree | 3ce14481f433d990ba27bcdeaab9a1bbe9c84be0 /drivers | |
parent | 63ce2499947683dcc026373e24a4cb5a9d086e7d (diff) |
[SCSI] libfc: Move host_lock usage into ramp_up/down routines
The host_lock is still used to protect the can_queue
value in the Scsi_Host, but it doesn't need to be held
and released by each caller. This patch moves the lock
usage into the fc_fcp_can_queue_ramp_up and
fc_fcp_can_queue_ramp_down routines.
Signed-off-by: Robert Love <robert.w.love@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/scsi/libfc/fc_fcp.c | 25 |
1 file changed, 15 insertions, 10 deletions
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 5b799a37ad09..3591b872dd0e 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -335,22 +335,23 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) | |||
335 | /** | 335 | /** |
336 | * fc_fcp_can_queue_ramp_up() - increases can_queue | 336 | * fc_fcp_can_queue_ramp_up() - increases can_queue |
337 | * @lport: lport to ramp up can_queue | 337 | * @lport: lport to ramp up can_queue |
338 | * | ||
339 | * Locking notes: Called with Scsi_Host lock held | ||
340 | */ | 338 | */ |
341 | static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) | 339 | static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) |
342 | { | 340 | { |
343 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); | 341 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
342 | unsigned long flags; | ||
344 | int can_queue; | 343 | int can_queue; |
345 | 344 | ||
345 | spin_lock_irqsave(lport->host->host_lock, flags); | ||
346 | |||
346 | if (si->last_can_queue_ramp_up_time && | 347 | if (si->last_can_queue_ramp_up_time && |
347 | (time_before(jiffies, si->last_can_queue_ramp_up_time + | 348 | (time_before(jiffies, si->last_can_queue_ramp_up_time + |
348 | FC_CAN_QUEUE_PERIOD))) | 349 | FC_CAN_QUEUE_PERIOD))) |
349 | return; | 350 | goto unlock; |
350 | 351 | ||
351 | if (time_before(jiffies, si->last_can_queue_ramp_down_time + | 352 | if (time_before(jiffies, si->last_can_queue_ramp_down_time + |
352 | FC_CAN_QUEUE_PERIOD)) | 353 | FC_CAN_QUEUE_PERIOD)) |
353 | return; | 354 | goto unlock; |
354 | 355 | ||
355 | si->last_can_queue_ramp_up_time = jiffies; | 356 | si->last_can_queue_ramp_up_time = jiffies; |
356 | 357 | ||
@@ -362,6 +363,9 @@ static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) | |||
362 | lport->host->can_queue = can_queue; | 363 | lport->host->can_queue = can_queue; |
363 | shost_printk(KERN_ERR, lport->host, "libfc: increased " | 364 | shost_printk(KERN_ERR, lport->host, "libfc: increased " |
364 | "can_queue to %d.\n", can_queue); | 365 | "can_queue to %d.\n", can_queue); |
366 | |||
367 | unlock: | ||
368 | spin_unlock_irqrestore(lport->host->host_lock, flags); | ||
365 | } | 369 | } |
366 | 370 | ||
367 | /** | 371 | /** |
@@ -373,18 +377,19 @@ static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) | |||
373 | * commands complete or timeout, then try again with a reduced | 377 | * commands complete or timeout, then try again with a reduced |
374 | * can_queue. Eventually we will hit the point where we run | 378 | * can_queue. Eventually we will hit the point where we run |
375 | * on all reserved structs. | 379 | * on all reserved structs. |
376 | * | ||
377 | * Locking notes: Called with Scsi_Host lock held | ||
378 | */ | 380 | */ |
379 | static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) | 381 | static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) |
380 | { | 382 | { |
381 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); | 383 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
384 | unsigned long flags; | ||
382 | int can_queue; | 385 | int can_queue; |
383 | 386 | ||
387 | spin_lock_irqsave(lport->host->host_lock, flags); | ||
388 | |||
384 | if (si->last_can_queue_ramp_down_time && | 389 | if (si->last_can_queue_ramp_down_time && |
385 | (time_before(jiffies, si->last_can_queue_ramp_down_time + | 390 | (time_before(jiffies, si->last_can_queue_ramp_down_time + |
386 | FC_CAN_QUEUE_PERIOD))) | 391 | FC_CAN_QUEUE_PERIOD))) |
387 | return; | 392 | goto unlock; |
388 | 393 | ||
389 | si->last_can_queue_ramp_down_time = jiffies; | 394 | si->last_can_queue_ramp_down_time = jiffies; |
390 | 395 | ||
@@ -395,6 +400,9 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) | |||
395 | lport->host->can_queue = can_queue; | 400 | lport->host->can_queue = can_queue; |
396 | shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" | 401 | shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" |
397 | "Reducing can_queue to %d.\n", can_queue); | 402 | "Reducing can_queue to %d.\n", can_queue); |
403 | |||
404 | unlock: | ||
405 | spin_unlock_irqrestore(lport->host->host_lock, flags); | ||
398 | } | 406 | } |
399 | 407 | ||
400 | /* | 408 | /* |
@@ -409,16 +417,13 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport, | |||
409 | size_t len) | 417 | size_t len) |
410 | { | 418 | { |
411 | struct fc_frame *fp; | 419 | struct fc_frame *fp; |
412 | unsigned long flags; | ||
413 | 420 | ||
414 | fp = fc_frame_alloc(lport, len); | 421 | fp = fc_frame_alloc(lport, len); |
415 | if (likely(fp)) | 422 | if (likely(fp)) |
416 | return fp; | 423 | return fp; |
417 | 424 | ||
418 | /* error case */ | 425 | /* error case */ |
419 | spin_lock_irqsave(lport->host->host_lock, flags); | ||
420 | fc_fcp_can_queue_ramp_down(lport); | 426 | fc_fcp_can_queue_ramp_down(lport); |
421 | spin_unlock_irqrestore(lport->host->host_lock, flags); | ||
422 | return NULL; | 427 | return NULL; |
423 | } | 428 | } |
424 | 429 | ||