path: root/drivers/scsi/qla4xxx/ql4_iocb.c
author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2007-05-26 01:08:20 -0400
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2007-05-29 19:32:12 -0400
commit	5f7186c841a13abff0bf81ee93754b4f46e19141 (patch)
tree	381d9c0564edd1319513e40d3886b53da6057ea8 /drivers/scsi/qla4xxx/ql4_iocb.c
parent	1928d73fac9a38be901dd5c9eb8b18b56ce9e18d (diff)
[SCSI] qla4xxx: convert to use the data buffer accessors
- remove the unnecessary map_single path.
- convert to use the new accessors for the sg lists and the parameters.

Jens Axboe <jens.axboe@oracle.com> did the for_each_sg cleanup.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: David C Somayajulu <david.somayajulu@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/scsi/qla4xxx/ql4_iocb.c')
-rw-r--r--	drivers/scsi/qla4xxx/ql4_iocb.c	| 97
1 file changed, 34 insertions(+), 63 deletions(-)
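For context, the conversion replaces direct use of cmd->request_buffer, cmd->request_bufflen and cmd->use_sg with the SCSI data buffer accessors that appear throughout the hunks below (scsi_bufflen(), scsi_dma_map(), scsi_for_each_sg(), scsi_dma_unmap()). A minimal sketch of that accessor pattern, not taken from the patch itself; fill_dsd() is a hypothetical stand-in for driver-specific data segment setup, not a qla4xxx function:

/*
 * Sketch only: the data buffer accessor pattern this patch converts the
 * driver to. fill_dsd() is a hypothetical stand-in for driver-specific
 * descriptor setup.
 */
#include <linux/dma-mapping.h>
#include <scsi/scsi_cmnd.h>

static void fill_dsd(dma_addr_t addr, unsigned int len);	/* hypothetical */

static int sketch_map_and_walk(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int nseg, i;

	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE)
		return 0;			/* no data phase for this command */

	nseg = scsi_dma_map(cmd);		/* maps scsi_sglist(cmd); sg count or < 0 on error */
	if (nseg < 0)
		return -EIO;

	scsi_for_each_sg(cmd, sg, nseg, i)	/* walk the mapped sg elements */
		fill_dsd(sg_dma_address(sg), sg_dma_len(sg));

	scsi_dma_unmap(cmd);			/* at completion or on an error path */
	return 0;
}

With this API a single scsi_dma_unmap() undoes the mapping regardless of whether the command used a scatterlist or a single buffer, which is why the patch can drop the separate map_single path entirely.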
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 6e3c8c81def3..5006ecb3ef5e 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -145,11 +145,13 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
 	uint16_t avail_dsds;
 	struct data_seg_a64 *cur_dsd;
 	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
 
 	cmd = srb->cmd;
 	ha = srb->ha;
 
-	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 		/* No data being transferred */
 		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
 		return;
@@ -158,40 +160,27 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
 	avail_dsds = COMMAND_SEG;
 	cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
 
-	/* Load data segments */
-	if (cmd->use_sg) {
-		struct scatterlist *cur_seg;
-		struct scatterlist *end_seg;
-
-		cur_seg = (struct scatterlist *)cmd->request_buffer;
-		end_seg = cur_seg + tot_dsds;
-		while (cur_seg < end_seg) {
-			dma_addr_t sle_dma;
-
-			/* Allocate additional continuation packets? */
-			if (avail_dsds == 0) {
-				struct continuation_t1_entry *cont_entry;
-
-				cont_entry = qla4xxx_alloc_cont_entry(ha);
-				cur_dsd =
-					(struct data_seg_a64 *)
-					&cont_entry->dataseg[0];
-				avail_dsds = CONTINUE_SEG;
-			}
-
-			sle_dma = sg_dma_address(cur_seg);
-			cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
-			cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
-			cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
-			avail_dsds--;
-
-			cur_dsd++;
-			cur_seg++;
-		}
-	} else {
-		cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
-		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
-		cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			struct continuation_t1_entry *cont_entry;
+
+			cont_entry = qla4xxx_alloc_cont_entry(ha);
+			cur_dsd =
+				(struct data_seg_a64 *)
+				&cont_entry->dataseg[0];
+			avail_dsds = CONTINUE_SEG;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
+		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
+		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+
+		cur_dsd++;
 	}
 }
 
@@ -208,8 +197,8 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 	struct scsi_cmnd *cmd = srb->cmd;
 	struct ddb_entry *ddb_entry;
 	struct command_t3_entry *cmd_entry;
-	struct scatterlist *sg = NULL;
 
+	int nseg;
 	uint16_t tot_dsds;
 	uint16_t req_cnt;
 
@@ -237,24 +226,11 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 	index = (uint32_t)cmd->request->tag;
 
 	/* Calculate the number of request entries needed. */
-	if (cmd->use_sg) {
-		sg = (struct scatterlist *)cmd->request_buffer;
-		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-				      cmd->sc_data_direction);
-		if (tot_dsds == 0)
-			goto queuing_error;
-	} else if (cmd->request_bufflen) {
-		dma_addr_t req_dma;
-
-		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
-					 cmd->request_bufflen,
-					 cmd->sc_data_direction);
-		if (dma_mapping_error(req_dma))
-			goto queuing_error;
-
-		srb->dma_handle = req_dma;
-		tot_dsds = 1;
-	}
+	nseg = scsi_dma_map(cmd);
+	if (nseg < 0)
+		goto queuing_error;
+	tot_dsds = nseg;
+
 	req_cnt = qla4xxx_calc_request_entries(tot_dsds);
 
 	if (ha->req_q_count < (req_cnt + 2)) {
@@ -283,7 +259,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 
 	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
 	cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
-	cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
+	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
 	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
 	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
 	cmd_entry->hdr.entryCount = req_cnt;
@@ -293,13 +269,13 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 	 * transferred, as the data direction bit is sometimed filled
 	 * in when there is no data to be transferred */
 	cmd_entry->control_flags = CF_NO_DATA;
-	if (cmd->request_bufflen) {
+	if (scsi_bufflen(cmd)) {
 		if (cmd->sc_data_direction == DMA_TO_DEVICE)
 			cmd_entry->control_flags = CF_WRITE;
 		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
 			cmd_entry->control_flags = CF_READ;
 
-		ha->bytes_xfered += cmd->request_bufflen;
+		ha->bytes_xfered += scsi_bufflen(cmd);
 		if (ha->bytes_xfered & ~0xFFFFF){
 			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
 			ha->bytes_xfered &= 0xFFFFF;
@@ -363,14 +339,9 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 	return QLA_SUCCESS;
 
 queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
 
-	if (cmd->use_sg && tot_dsds) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
-			     cmd->sc_data_direction);
-	} else if (tot_dsds)
-		pci_unmap_single(ha->pdev, srb->dma_handle,
-				 cmd->request_bufflen, cmd->sc_data_direction);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return QLA_ERROR;
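The scsi_for_each_sg() loop kept by this patch turns each mapped sg element into a 64-bit data segment descriptor by splitting the DMA address into low and high dwords. A hedged sketch of that descriptor fill: dsd64_sketch is an illustrative layout, not the driver's struct data_seg_a64, and the generic lower_32_bits()/upper_32_bits() helpers stand in for the driver's LSDW()/MSDW() macros, which are assumed to behave the same way.

/*
 * Sketch only: filling one 64-bit data segment descriptor from a mapped
 * scatterlist element, as the loop in qla4xxx_build_scsi_iocbs() does.
 */
#include <linux/kernel.h>
#include <linux/scatterlist.h>

struct dsd64_sketch {			/* illustrative, not struct data_seg_a64 */
	__le32 addr_lo;
	__le32 addr_hi;
	__le32 count;
};

static void fill_dsd64(struct dsd64_sketch *dsd, struct scatterlist *sg)
{
	dma_addr_t sle_dma = sg_dma_address(sg);	/* bus address of this element */

	dsd->addr_lo = cpu_to_le32(lower_32_bits(sle_dma));	/* LSDW() in the driver */
	dsd->addr_hi = cpu_to_le32(upper_32_bits(sle_dma));	/* MSDW() in the driver */
	dsd->count   = cpu_to_le32(sg_dma_len(sg));		/* element length in bytes */
}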