Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 156
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h |   2
2 files changed, 116 insertions(+), 42 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e3e6752c4104..1b911dadf64b 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -87,7 +87,7 @@ static int max_channel = 3;
 static int init_timeout = 5;
 static int max_requests = 50;
 
-#define IBMVSCSI_VERSION "1.5.6"
+#define IBMVSCSI_VERSION "1.5.7"
 
 MODULE_DESCRIPTION("IBM Virtual SCSI");
 MODULE_AUTHOR("Dave Boutcher");
@@ -145,6 +145,8 @@ static int initialize_event_pool(struct event_pool *pool,
 			sizeof(*evt->xfer_iu) * i;
 		evt->xfer_iu = pool->iu_storage + i;
 		evt->hostdata = hostdata;
+		evt->ext_list = NULL;
+		evt->ext_list_token = 0;
 	}
 
 	return 0;
@@ -161,9 +163,16 @@ static void release_event_pool(struct event_pool *pool,
 			       struct ibmvscsi_host_data *hostdata)
 {
 	int i, in_use = 0;
-	for (i = 0; i < pool->size; ++i)
+	for (i = 0; i < pool->size; ++i) {
 		if (atomic_read(&pool->events[i].free) != 1)
 			++in_use;
+		if (pool->events[i].ext_list) {
+			dma_free_coherent(hostdata->dev,
+					  SG_ALL * sizeof(struct memory_descriptor),
+					  pool->events[i].ext_list,
+					  pool->events[i].ext_list_token);
+		}
+	}
 	if (in_use)
 		printk(KERN_WARNING
 		       "ibmvscsi: releasing event pool with %d "
@@ -286,24 +295,41 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
 	} else {
 		if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 			srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
-			srp_cmd->data_out_count = numbuf;
+			srp_cmd->data_out_count =
+				numbuf < MAX_INDIRECT_BUFS ?
+					numbuf : MAX_INDIRECT_BUFS;
 		} else {
 			srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
-			srp_cmd->data_in_count = numbuf;
+			srp_cmd->data_in_count =
+				numbuf < MAX_INDIRECT_BUFS ?
+					numbuf : MAX_INDIRECT_BUFS;
 		}
 	}
 }
 
+static void unmap_sg_list(int num_entries,
+			  struct device *dev,
+			  struct memory_descriptor *md)
+{
+	int i;
+
+	for (i = 0; i < num_entries; ++i) {
+		dma_unmap_single(dev,
+				 md[i].virtual_address,
+				 md[i].length, DMA_BIDIRECTIONAL);
+	}
+}
+
 /**
  * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
  * @cmd: srp_cmd whose additional_data member will be unmapped
  * @dev: device for which the memory is mapped
 *
 */
-static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
+static void unmap_cmd_data(struct srp_cmd *cmd,
+			   struct srp_event_struct *evt_struct,
+			   struct device *dev)
 {
-	int i;
-
 	if ((cmd->data_out_format == SRP_NO_BUFFER) &&
 	    (cmd->data_in_format == SRP_NO_BUFFER))
 		return;
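
The data_out_count/data_in_count clamp at the top of the hunk above is
an open-coded minimum; the same thing could be written with the
kernel's min_t() helper (a possible cleanup, not part of this patch):

	srp_cmd->data_out_count = min_t(int, numbuf, MAX_INDIRECT_BUFS);
	srp_cmd->data_in_count  = min_t(int, numbuf, MAX_INDIRECT_BUFS);
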
@@ -318,15 +344,34 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
 		    (struct indirect_descriptor *)cmd->additional_data;
 		int num_mapped = indirect->head.length /
 			sizeof(indirect->list[0]);
-		for (i = 0; i < num_mapped; ++i) {
-			struct memory_descriptor *data = &indirect->list[i];
-			dma_unmap_single(dev,
-					 data->virtual_address,
-					 data->length, DMA_BIDIRECTIONAL);
+
+		if (num_mapped <= MAX_INDIRECT_BUFS) {
+			unmap_sg_list(num_mapped, dev, &indirect->list[0]);
+			return;
 		}
+
+		unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
 	}
 }
 
+static int map_sg_list(int num_entries,
+		       struct scatterlist *sg,
+		       struct memory_descriptor *md)
+{
+	int i;
+	u64 total_length = 0;
+
+	for (i = 0; i < num_entries; ++i) {
+		struct memory_descriptor *descr = md + i;
+		struct scatterlist *sg_entry = &sg[i];
+		descr->virtual_address = sg_dma_address(sg_entry);
+		descr->length = sg_dma_len(sg_entry);
+		descr->memory_handle = 0;
+		total_length += sg_dma_len(sg_entry);
+	}
+	return total_length;
+}
+
 /**
  * map_sg_data: - Maps dma for a scatterlist and initializes decriptor fields
  * @cmd: Scsi_Cmnd with the scatterlist
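
One wrinkle in the new map_sg_list() helper: it accumulates
total_length as a u64 but is declared to return int, so a sufficiently
large transfer total would be truncated on return. A safer shape
(an illustrative sketch, not what the patch does) would return the
width it actually computes:

	static u64 map_sg_list(int num_entries, struct scatterlist *sg,
			       struct memory_descriptor *md);
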
@@ -337,10 +382,11 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
  * Returns 1 on success.
 */
 static int map_sg_data(struct scsi_cmnd *cmd,
+		       struct srp_event_struct *evt_struct,
 		       struct srp_cmd *srp_cmd, struct device *dev)
 {
 
-	int i, sg_mapped;
+	int sg_mapped;
 	u64 total_length = 0;
 	struct scatterlist *sg = cmd->request_buffer;
 	struct memory_descriptor *data =
@@ -363,27 +409,46 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 		return 1;
 	}
 
-	if (sg_mapped > MAX_INDIRECT_BUFS) {
+	if (sg_mapped > SG_ALL) {
 		printk(KERN_ERR
 		       "ibmvscsi: More than %d mapped sg entries, got %d\n",
-		       MAX_INDIRECT_BUFS, sg_mapped);
+		       SG_ALL, sg_mapped);
 		return 0;
 	}
 
 	indirect->head.virtual_address = 0;
 	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
 	indirect->head.memory_handle = 0;
-	for (i = 0; i < sg_mapped; ++i) {
-		struct memory_descriptor *descr = &indirect->list[i];
-		struct scatterlist *sg_entry = &sg[i];
-		descr->virtual_address = sg_dma_address(sg_entry);
-		descr->length = sg_dma_len(sg_entry);
-		descr->memory_handle = 0;
-		total_length += sg_dma_len(sg_entry);
+
+	if (sg_mapped <= MAX_INDIRECT_BUFS) {
+		total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
+		indirect->total_length = total_length;
+		return 1;
 	}
-	indirect->total_length = total_length;
 
-	return 1;
+	/* get indirect table */
+	if (!evt_struct->ext_list) {
+		evt_struct->ext_list = (struct memory_descriptor *)
+			dma_alloc_coherent(dev,
+					   SG_ALL * sizeof(struct memory_descriptor),
+					   &evt_struct->ext_list_token, 0);
+		if (!evt_struct->ext_list) {
+			printk(KERN_ERR
+			       "ibmvscsi: Can't allocate memory for indirect table\n");
+			return 0;
+
+		}
+	}
+
+	total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
+
+	indirect->total_length = total_length;
+	indirect->head.virtual_address = evt_struct->ext_list_token;
+	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
+	memcpy(indirect->list, evt_struct->ext_list,
+	       MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
+
+	return 1;
 }
 
 /**
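
This hunk is the heart of the change: descriptor lists that fit in the
IU (up to MAX_INDIRECT_BUFS entries) stay inline, while larger lists go
into the per-event ext_list table, whose bus address (ext_list_token)
is placed in indirect->head.virtual_address. The first
MAX_INDIRECT_BUFS entries are also memcpy'd inline, presumably so the
partial descriptor list carried in the IU stays valid and the target
can start on those buffers without first fetching the external table.
A rough size sketch, assuming the era's value of SG_ALL = 255 and a
16-byte struct memory_descriptor (check the headers before relying on
these numbers):

	/* one external table per event struct, allocated once and reused */
	size_t tbl_bytes = SG_ALL * sizeof(struct memory_descriptor);
	/* = 255 * 16 = 4080 bytes, i.e. roughly one 4 KB page per event */
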
@@ -428,6 +493,7 @@ static int map_single_data(struct scsi_cmnd *cmd,
  * Returns 1 on success.
 */
 static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
+				struct srp_event_struct *evt_struct,
 				struct srp_cmd *srp_cmd, struct device *dev)
 {
 	switch (cmd->sc_data_direction) {
@@ -450,7 +516,7 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
 	if (!cmd->request_buffer)
 		return 1;
 	if (cmd->use_sg)
-		return map_sg_data(cmd, srp_cmd, dev);
+		return map_sg_data(cmd, evt_struct, srp_cmd, dev);
 	return map_single_data(cmd, srp_cmd, dev);
 }
 
@@ -486,6 +552,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 		printk(KERN_WARNING
 		       "ibmvscsi: Warning, request_limit exceeded\n");
 		unmap_cmd_data(&evt_struct->iu.srp.cmd,
+			       evt_struct,
 			       hostdata->dev);
 		free_event_struct(&hostdata->pool, evt_struct);
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -513,7 +580,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	return 0;
 
  send_error:
-	unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev);
+	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 
 	if ((cmnd = evt_struct->cmnd) != NULL) {
 		cmnd->result = DID_ERROR << 16;
@@ -551,6 +618,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
 			       rsp->sense_and_response_data,
 			       rsp->sense_data_list_length);
 		unmap_cmd_data(&evt_struct->iu.srp.cmd,
+			       evt_struct,
 			       evt_struct->hostdata->dev);
 
 		if (rsp->doover)
@@ -583,6 +651,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 {
 	struct srp_cmd *srp_cmd;
 	struct srp_event_struct *evt_struct;
+	struct indirect_descriptor *indirect;
 	struct ibmvscsi_host_data *hostdata =
 		(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
 	u16 lun = lun_from_dev(cmnd->device);
@@ -591,14 +660,6 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 	if (!evt_struct)
 		return SCSI_MLQUEUE_HOST_BUSY;
 
-	init_event_struct(evt_struct,
-			  handle_cmd_rsp,
-			  VIOSRP_SRP_FORMAT,
-			  cmnd->timeout_per_command/HZ);
-
-	evt_struct->cmnd = cmnd;
-	evt_struct->cmnd_done = done;
-
 	/* Set up the actual SRP IU */
 	srp_cmd = &evt_struct->iu.srp.cmd;
 	memset(srp_cmd, 0x00, sizeof(*srp_cmd));
@@ -606,17 +667,25 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
 	srp_cmd->lun = ((u64) lun) << 48;
 
-	if (!map_data_for_srp_cmd(cmnd, srp_cmd, hostdata->dev)) {
+	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
 		printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
 		free_event_struct(&hostdata->pool, evt_struct);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
+	init_event_struct(evt_struct,
+			  handle_cmd_rsp,
+			  VIOSRP_SRP_FORMAT,
+			  cmnd->timeout_per_command/HZ);
+
+	evt_struct->cmnd = cmnd;
+	evt_struct->cmnd_done = done;
+
 	/* Fix up dma address of the buffer itself */
-	if ((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
-	    (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) {
-		struct indirect_descriptor *indirect =
-		    (struct indirect_descriptor *)srp_cmd->additional_data;
+	indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
+	if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
+	     (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
+	    (indirect->head.virtual_address == 0)) {
 		indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
 			offsetof(struct srp_cmd, additional_data) +
 			offsetof(struct indirect_descriptor, list);
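
The new (indirect->head.virtual_address == 0) test is what keeps the
two paths apart: map_sg_data() leaves the field 0 for an inline list,
so the address of the list embedded in this IU gets patched in here,
but sets it to ext_list_token for a large list, which must not be
overwritten. A condensed sketch of the invariant being checked:

	if (indirect->head.virtual_address == 0) {
		/* inline list: point the header at the IU's own copy */
		indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
			offsetof(struct srp_cmd, additional_data) +
			offsetof(struct indirect_descriptor, list);
	}
	/* else: already the bus address of the external table */
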
@@ -931,7 +1000,8 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
 	cmd->result = (DID_ABORT << 16);
 	list_del(&found_evt->list);
-	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt->hostdata->dev);
+	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
+		       found_evt->hostdata->dev);
 	free_event_struct(&found_evt->hostdata->pool, found_evt);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 	atomic_inc(&hostdata->request_limit);
@@ -1023,7 +1093,8 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 			if (tmp_evt->cmnd)
 				tmp_evt->cmnd->result = (DID_RESET << 16);
 			list_del(&tmp_evt->list);
-			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt->hostdata->dev);
+			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
+				       tmp_evt->hostdata->dev);
 			free_event_struct(&tmp_evt->hostdata->pool,
 					  tmp_evt);
 			atomic_inc(&hostdata->request_limit);
@@ -1052,6 +1123,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata)
 		if (tmp_evt->cmnd) {
 			tmp_evt->cmnd->result = (DID_ERROR << 16);
 			unmap_cmd_data(&tmp_evt->iu.srp.cmd,
+				       tmp_evt,
 				       tmp_evt->hostdata->dev);
 			if (tmp_evt->cmnd_done)
 				tmp_evt->cmnd_done(tmp_evt->cmnd);
@@ -1356,7 +1428,7 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun = 16,
 	.can_queue = 1,		/* Updated after SRP_LOGIN */
 	.this_id = -1,
-	.sg_tablesize = MAX_INDIRECT_BUFS,
+	.sg_tablesize = SG_ALL,
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = ibmvscsi_attrs,
 };
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 1030b703c30e..8bec0438dc8a 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -68,6 +68,8 @@ struct srp_event_struct {
 	void (*cmnd_done) (struct scsi_cmnd *);
 	struct completion comp;
 	union viosrp_iu *sync_srp;
+	struct memory_descriptor *ext_list;
+	dma_addr_t ext_list_token;
 };
 
 /* a pool of event structs for use */
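
Taken together, the lifetime of the two new fields is: ext_list starts
NULL (initialize_event_pool), is allocated on the first command through
this event that needs more than MAX_INDIRECT_BUFS descriptors
(map_sg_data), is reused for the rest of the event's life, and is
released along with its token in release_event_pool. A comment block
along these lines could document that in the header (suggested, not in
the patch):

	struct memory_descriptor *ext_list;	/* NULL until a command needs
						 * > MAX_INDIRECT_BUFS entries;
						 * then a coherent table of
						 * SG_ALL descriptors, reused */
	dma_addr_t ext_list_token;		/* bus address of ext_list; put in
						 * indirect->head.virtual_address
						 * and passed to dma_free_coherent */
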