diff options
author | Chris Leech <christopher.leech@intel.com> | 2009-12-10 12:59:26 -0500 |
---|---|---|
committer | James Bottomley <James.Bottomley@suse.de> | 2009-12-12 17:30:33 -0500 |
commit | c1ecb90a66c5afc7cc5c9349f9c3714eef4a5cfb (patch) | |
tree | 9240283c872e0c110c0fef2e424180fc62bed487 /drivers/scsi | |
parent | 5543c72e2bbb30e5ba5938b18ec26617b8b3fb04 (diff) |
[SCSI] libfc: reduce hold time on SCSI host lock
Introduce a new lock to protect the list of fc_fcp_pkt structs in libfc
instead of using the host lock. This reduces contention on this heavily
used lock, and I see up to a 25% performance gain in CPU-bound small I/O
tests when scaling out across multiple quad-core CPUs.
The big win is in removing the host lock from the completion path
completely, as it does not need to be held around the call to scsi_done.
Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi')
-rw-r--r-- | drivers/scsi/libfc/fc_fcp.c | 65 |
1 file changed, 36 insertions(+), 29 deletions(-)
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index c4b58d042f6f..881d5dfe8c74 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -68,18 +68,20 @@ struct kmem_cache *scsi_pkt_cachep; | |||
68 | 68 | ||
69 | /** | 69 | /** |
70 | * struct fc_fcp_internal - FCP layer internal data | 70 | * struct fc_fcp_internal - FCP layer internal data |
71 | * @scsi_pkt_pool: Memory pool to draw FCP packets from | 71 | * @scsi_pkt_pool: Memory pool to draw FCP packets from |
72 | * @scsi_queue_lock: Protects the scsi_pkt_queue | ||
72 | * @scsi_pkt_queue: Current FCP packets | 73 | * @scsi_pkt_queue: Current FCP packets |
73 | * @last_can_queue_ramp_down_time: ramp down time | 74 | * @last_can_queue_ramp_down_time: ramp down time |
74 | * @last_can_queue_ramp_up_time: ramp up time | 75 | * @last_can_queue_ramp_up_time: ramp up time |
75 | * @max_can_queue: max can_queue size | 76 | * @max_can_queue: max can_queue size |
76 | */ | 77 | */ |
77 | struct fc_fcp_internal { | 78 | struct fc_fcp_internal { |
78 | mempool_t *scsi_pkt_pool; | 79 | mempool_t *scsi_pkt_pool; |
79 | struct list_head scsi_pkt_queue; | 80 | spinlock_t scsi_queue_lock; |
80 | unsigned long last_can_queue_ramp_down_time; | 81 | struct list_head scsi_pkt_queue; |
81 | unsigned long last_can_queue_ramp_up_time; | 82 | unsigned long last_can_queue_ramp_down_time; |
82 | int max_can_queue; | 83 | unsigned long last_can_queue_ramp_up_time; |
84 | int max_can_queue; | ||
83 | }; | 85 | }; |
84 | 86 | ||
85 | #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) | 87 | #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) |
@@ -410,12 +412,14 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport, | |||
410 | unsigned long flags; | 412 | unsigned long flags; |
411 | 413 | ||
412 | fp = fc_frame_alloc(lport, len); | 414 | fp = fc_frame_alloc(lport, len); |
413 | if (!fp) { | 415 | if (likely(fp)) |
414 | spin_lock_irqsave(lport->host->host_lock, flags); | 416 | return fp; |
415 | fc_fcp_can_queue_ramp_down(lport); | 417 | |
416 | spin_unlock_irqrestore(lport->host->host_lock, flags); | 418 | /* error case */ |
417 | } | 419 | spin_lock_irqsave(lport->host->host_lock, flags); |
418 | return fp; | 420 | fc_fcp_can_queue_ramp_down(lport); |
421 | spin_unlock_irqrestore(lport->host->host_lock, flags); | ||
422 | return NULL; | ||
419 | } | 423 | } |
420 | 424 | ||
421 | /** | 425 | /** |
@@ -990,7 +994,7 @@ static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id, | |||
990 | struct scsi_cmnd *sc_cmd; | 994 | struct scsi_cmnd *sc_cmd; |
991 | unsigned long flags; | 995 | unsigned long flags; |
992 | 996 | ||
993 | spin_lock_irqsave(lport->host->host_lock, flags); | 997 | spin_lock_irqsave(&si->scsi_queue_lock, flags); |
994 | restart: | 998 | restart: |
995 | list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { | 999 | list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { |
996 | sc_cmd = fsp->cmd; | 1000 | sc_cmd = fsp->cmd; |
@@ -1001,7 +1005,7 @@ restart: | |||
1001 | continue; | 1005 | continue; |
1002 | 1006 | ||
1003 | fc_fcp_pkt_hold(fsp); | 1007 | fc_fcp_pkt_hold(fsp); |
1004 | spin_unlock_irqrestore(lport->host->host_lock, flags); | 1008 | spin_unlock_irqrestore(&si->scsi_queue_lock, flags); |
1005 | 1009 | ||
1006 | if (!fc_fcp_lock_pkt(fsp)) { | 1010 | if (!fc_fcp_lock_pkt(fsp)) { |
1007 | fc_fcp_cleanup_cmd(fsp, error); | 1011 | fc_fcp_cleanup_cmd(fsp, error); |
@@ -1010,14 +1014,14 @@ restart: | |||
1010 | } | 1014 | } |
1011 | 1015 | ||
1012 | fc_fcp_pkt_release(fsp); | 1016 | fc_fcp_pkt_release(fsp); |
1013 | spin_lock_irqsave(lport->host->host_lock, flags); | 1017 | spin_lock_irqsave(&si->scsi_queue_lock, flags); |
1014 | /* | 1018 | /* |
1015 | * while we dropped the lock multiple pkts could | 1019 | * while we dropped the lock multiple pkts could |
1016 | * have been released, so we have to start over. | 1020 | * have been released, so we have to start over. |
1017 | */ | 1021 | */ |
1018 | goto restart; | 1022 | goto restart; |
1019 | } | 1023 | } |
1020 | spin_unlock_irqrestore(lport->host->host_lock, flags); | 1024 | spin_unlock_irqrestore(&si->scsi_queue_lock, flags); |
1021 | } | 1025 | } |
1022 | 1026 | ||
1023 | /** | 1027 | /** |
@@ -1035,11 +1039,12 @@ static void fc_fcp_abort_io(struct fc_lport *lport) | |||
1035 | * @fsp: The FCP packet to send | 1039 | * @fsp: The FCP packet to send |
1036 | * | 1040 | * |
1037 | * Return: Zero for success and -1 for failure | 1041 | * Return: Zero for success and -1 for failure |
1038 | * Locks: Called with the host lock and irqs disabled. | 1042 | * Locks: Called without locks held |
1039 | */ | 1043 | */ |
1040 | static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) | 1044 | static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) |
1041 | { | 1045 | { |
1042 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); | 1046 | struct fc_fcp_internal *si = fc_get_scsi_internal(lport); |
1047 | unsigned long flags; | ||
1043 | int rc; | 1048 | int rc; |
1044 | 1049 | ||
1045 | fsp->cmd->SCp.ptr = (char *)fsp; | 1050 | fsp->cmd->SCp.ptr = (char *)fsp; |
@@ -1049,13 +1054,16 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) | |||
1049 | int_to_scsilun(fsp->cmd->device->lun, | 1054 | int_to_scsilun(fsp->cmd->device->lun, |
1050 | (struct scsi_lun *)fsp->cdb_cmd.fc_lun); | 1055 | (struct scsi_lun *)fsp->cdb_cmd.fc_lun); |
1051 | memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); | 1056 | memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); |
1052 | list_add_tail(&fsp->list, &si->scsi_pkt_queue); | ||
1053 | 1057 | ||
1054 | spin_unlock_irq(lport->host->host_lock); | 1058 | spin_lock_irqsave(&si->scsi_queue_lock, flags); |
1059 | list_add_tail(&fsp->list, &si->scsi_pkt_queue); | ||
1060 | spin_unlock_irqrestore(&si->scsi_queue_lock, flags); | ||
1055 | rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); | 1061 | rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); |
1056 | spin_lock_irq(lport->host->host_lock); | 1062 | if (unlikely(rc)) { |
1057 | if (rc) | 1063 | spin_lock_irqsave(&si->scsi_queue_lock, flags); |
1058 | list_del(&fsp->list); | 1064 | list_del(&fsp->list); |
1065 | spin_unlock_irqrestore(&si->scsi_queue_lock, flags); | ||
1066 | } | ||
1059 | 1067 | ||
1060 | return rc; | 1068 | return rc; |
1061 | } | 1069 | } |
@@ -1752,6 +1760,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | |||
1752 | struct fcoe_dev_stats *stats; | 1760 | struct fcoe_dev_stats *stats; |
1753 | 1761 | ||
1754 | lport = shost_priv(sc_cmd->device->host); | 1762 | lport = shost_priv(sc_cmd->device->host); |
1763 | spin_unlock_irq(lport->host->host_lock); | ||
1755 | 1764 | ||
1756 | rval = fc_remote_port_chkready(rport); | 1765 | rval = fc_remote_port_chkready(rport); |
1757 | if (rval) { | 1766 | if (rval) { |
@@ -1834,6 +1843,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | |||
1834 | rc = SCSI_MLQUEUE_HOST_BUSY; | 1843 | rc = SCSI_MLQUEUE_HOST_BUSY; |
1835 | } | 1844 | } |
1836 | out: | 1845 | out: |
1846 | spin_lock_irq(lport->host->host_lock); | ||
1837 | return rc; | 1847 | return rc; |
1838 | } | 1848 | } |
1839 | EXPORT_SYMBOL(fc_queuecommand); | 1849 | EXPORT_SYMBOL(fc_queuecommand); |
@@ -1864,11 +1874,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
1864 | 1874 | ||
1865 | lport = fsp->lp; | 1875 | lport = fsp->lp; |
1866 | si = fc_get_scsi_internal(lport); | 1876 | si = fc_get_scsi_internal(lport); |
1867 | spin_lock_irqsave(lport->host->host_lock, flags); | 1877 | if (!fsp->cmd) |
1868 | if (!fsp->cmd) { | ||
1869 | spin_unlock_irqrestore(lport->host->host_lock, flags); | ||
1870 | return; | 1878 | return; |
1871 | } | ||
1872 | 1879 | ||
1873 | /* | 1880 | /* |
1874 | * if can_queue ramp down is done then try can_queue ramp up | 1881 | * if can_queue ramp down is done then try can_queue ramp up |
@@ -1880,10 +1887,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
1880 | sc_cmd = fsp->cmd; | 1887 | sc_cmd = fsp->cmd; |
1881 | fsp->cmd = NULL; | 1888 | fsp->cmd = NULL; |
1882 | 1889 | ||
1883 | if (!sc_cmd->SCp.ptr) { | 1890 | if (!sc_cmd->SCp.ptr) |
1884 | spin_unlock_irqrestore(lport->host->host_lock, flags); | ||
1885 | return; | 1891 | return; |
1886 | } | ||
1887 | 1892 | ||
1888 | CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; | 1893 | CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; |
1889 | switch (fsp->status_code) { | 1894 | switch (fsp->status_code) { |
@@ -1945,10 +1950,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
1945 | break; | 1950 | break; |
1946 | } | 1951 | } |
1947 | 1952 | ||
1953 | spin_lock_irqsave(&si->scsi_queue_lock, flags); | ||
1948 | list_del(&fsp->list); | 1954 | list_del(&fsp->list); |
1955 | spin_unlock_irqrestore(&si->scsi_queue_lock, flags); | ||
1949 | sc_cmd->SCp.ptr = NULL; | 1956 | sc_cmd->SCp.ptr = NULL; |
1950 | sc_cmd->scsi_done(sc_cmd); | 1957 | sc_cmd->scsi_done(sc_cmd); |
1951 | spin_unlock_irqrestore(lport->host->host_lock, flags); | ||
1952 | 1958 | ||
1953 | /* release ref from initial allocation in queue command */ | 1959 | /* release ref from initial allocation in queue command */ |
1954 | fc_fcp_pkt_release(fsp); | 1960 | fc_fcp_pkt_release(fsp); |
@@ -2216,6 +2222,7 @@ int fc_fcp_init(struct fc_lport *lport) | |||
2216 | lport->scsi_priv = si; | 2222 | lport->scsi_priv = si; |
2217 | si->max_can_queue = lport->host->can_queue; | 2223 | si->max_can_queue = lport->host->can_queue; |
2218 | INIT_LIST_HEAD(&si->scsi_pkt_queue); | 2224 | INIT_LIST_HEAD(&si->scsi_pkt_queue); |
2225 | spin_lock_init(&si->scsi_queue_lock); | ||
2219 | 2226 | ||
2220 | si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); | 2227 | si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); |
2221 | if (!si->scsi_pkt_pool) { | 2228 | if (!si->scsi_pkt_pool) { |