Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvscsi.c')
 drivers/scsi/ibmvscsi/ibmvscsi.c | 179 +++++++++++++++++++++++++++----------
 1 file changed, 134 insertions(+), 45 deletions(-)

diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 2cb3c8340ca8..5b14934ba861 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -87,7 +87,7 @@ static int max_channel = 3;
 static int init_timeout = 5;
 static int max_requests = 50;
 
-#define IBMVSCSI_VERSION "1.5.6"
+#define IBMVSCSI_VERSION "1.5.7"
 
 MODULE_DESCRIPTION("IBM Virtual SCSI");
 MODULE_AUTHOR("Dave Boutcher");
@@ -145,6 +145,8 @@ static int initialize_event_pool(struct event_pool *pool,
                        sizeof(*evt->xfer_iu) * i;
                evt->xfer_iu = pool->iu_storage + i;
                evt->hostdata = hostdata;
+               evt->ext_list = NULL;
+               evt->ext_list_token = 0;
        }
 
        return 0;
@@ -161,9 +163,16 @@ static void release_event_pool(struct event_pool *pool,
                                struct ibmvscsi_host_data *hostdata)
 {
        int i, in_use = 0;
-       for (i = 0; i < pool->size; ++i)
+       for (i = 0; i < pool->size; ++i) {
                if (atomic_read(&pool->events[i].free) != 1)
                        ++in_use;
+               if (pool->events[i].ext_list) {
+                       dma_free_coherent(hostdata->dev,
+                                         SG_ALL * sizeof(struct memory_descriptor),
+                                         pool->events[i].ext_list,
+                                         pool->events[i].ext_list_token);
+               }
+       }
        if (in_use)
                printk(KERN_WARNING
                       "ibmvscsi: releasing event pool with %d "
@@ -286,24 +295,41 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
        } else {
                if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                        srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
-                       srp_cmd->data_out_count = numbuf;
+                       srp_cmd->data_out_count =
+                               numbuf < MAX_INDIRECT_BUFS ?
+                                       numbuf: MAX_INDIRECT_BUFS;
                } else {
                        srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
-                       srp_cmd->data_in_count = numbuf;
+                       srp_cmd->data_in_count =
+                               numbuf < MAX_INDIRECT_BUFS ?
+                                       numbuf: MAX_INDIRECT_BUFS;
                }
        }
 }
 
+static void unmap_sg_list(int num_entries,
+                         struct device *dev,
+                         struct memory_descriptor *md)
+{
+       int i;
+
+       for (i = 0; i < num_entries; ++i) {
+               dma_unmap_single(dev,
+                                md[i].virtual_address,
+                                md[i].length, DMA_BIDIRECTIONAL);
+       }
+}
+
 /**
  * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
  * @cmd: srp_cmd whose additional_data member will be unmapped
  * @dev: device for which the memory is mapped
  *
 */
-static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
+static void unmap_cmd_data(struct srp_cmd *cmd,
+                          struct srp_event_struct *evt_struct,
+                          struct device *dev)
 {
-       int i;
-
        if ((cmd->data_out_format == SRP_NO_BUFFER) &&
            (cmd->data_in_format == SRP_NO_BUFFER))
                return;
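The set_srp_direction change at the top of this hunk is what keeps the IU self-consistent: data_out_count/data_in_count describe the inline descriptor list, which holds at most MAX_INDIRECT_BUFS entries even when more are mapped through the external table. A small model of the clamp, assuming MAX_INDIRECT_BUFS is 10 (its usual value in this driver's headers):

#include <stdio.h>

#define MAX_INDIRECT_BUFS 10    /* assumed; defined in ibmvscsi.h */

/* Same expression the patch adds for data_out_count/data_in_count. */
static int clamp_count(int numbuf)
{
        return numbuf < MAX_INDIRECT_BUFS ? numbuf : MAX_INDIRECT_BUFS;
}

int main(void)
{
        printf("%d entries -> count %d\n", 4, clamp_count(4));   /* 4 */
        printf("%d entries -> count %d\n", 64, clamp_count(64)); /* 10 */
        return 0;
}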
@@ -318,15 +344,34 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
                    (struct indirect_descriptor *)cmd->additional_data;
                int num_mapped = indirect->head.length /
                    sizeof(indirect->list[0]);
-               for (i = 0; i < num_mapped; ++i) {
-                       struct memory_descriptor *data = &indirect->list[i];
-                       dma_unmap_single(dev,
-                                        data->virtual_address,
-                                        data->length, DMA_BIDIRECTIONAL);
+
+               if (num_mapped <= MAX_INDIRECT_BUFS) {
+                       unmap_sg_list(num_mapped, dev, &indirect->list[0]);
+                       return;
                }
+
+               unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
        }
 }
 
+static int map_sg_list(int num_entries,
+                      struct scatterlist *sg,
+                      struct memory_descriptor *md)
+{
+       int i;
+       u64 total_length = 0;
+
+       for (i = 0; i < num_entries; ++i) {
+               struct memory_descriptor *descr = md + i;
+               struct scatterlist *sg_entry = &sg[i];
+               descr->virtual_address = sg_dma_address(sg_entry);
+               descr->length = sg_dma_len(sg_entry);
+               descr->memory_handle = 0;
+               total_length += sg_dma_len(sg_entry);
+       }
+       return total_length;
+}
+
 /**
  * map_sg_data: - Maps dma for a scatterlist and initializes decriptor fields
  * @cmd: Scsi_Cmnd with the scatterlist
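One thing worth noting in the new map_sg_list helper: it accumulates total_length as a u64 but is declared to return int, so the sum is narrowed on return and widened again at the call site. That is harmless while a single command's transfer stays under 2 GiB, which covers the practical case here, but the shape is easy to reproduce in isolation:

#include <stdio.h>
#include <stdint.h>

/* Mirrors map_sg_list's signature: u64 accumulator, int return type. */
static int sum_sg_lengths(const uint32_t *lengths, int n)
{
        uint64_t total = 0;
        int i;

        for (i = 0; i < n; ++i)
                total += lengths[i];
        return (int)total;      /* truncates once total exceeds INT_MAX */
}

int main(void)
{
        uint32_t lengths[] = { 4096, 8192, 65536 };

        printf("total = %d bytes\n", sum_sg_lengths(lengths, 3)); /* 77824 */
        return 0;
}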
@@ -337,10 +382,11 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
  * Returns 1 on success.
 */
 static int map_sg_data(struct scsi_cmnd *cmd,
+                      struct srp_event_struct *evt_struct,
                       struct srp_cmd *srp_cmd, struct device *dev)
 {
 
-       int i, sg_mapped;
+       int sg_mapped;
        u64 total_length = 0;
        struct scatterlist *sg = cmd->request_buffer;
        struct memory_descriptor *data =
@@ -363,27 +409,46 @@ static int map_sg_data(struct scsi_cmnd *cmd,
                return 1;
        }
 
-       if (sg_mapped > MAX_INDIRECT_BUFS) {
+       if (sg_mapped > SG_ALL) {
                printk(KERN_ERR
                       "ibmvscsi: More than %d mapped sg entries, got %d\n",
-                      MAX_INDIRECT_BUFS, sg_mapped);
+                      SG_ALL, sg_mapped);
                return 0;
        }
 
        indirect->head.virtual_address = 0;
        indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
        indirect->head.memory_handle = 0;
-       for (i = 0; i < sg_mapped; ++i) {
-               struct memory_descriptor *descr = &indirect->list[i];
-               struct scatterlist *sg_entry = &sg[i];
-               descr->virtual_address = sg_dma_address(sg_entry);
-               descr->length = sg_dma_len(sg_entry);
-               descr->memory_handle = 0;
-               total_length += sg_dma_len(sg_entry);
+
+       if (sg_mapped <= MAX_INDIRECT_BUFS) {
+               total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
+               indirect->total_length = total_length;
+               return 1;
        }
-       indirect->total_length = total_length;
 
-       return 1;
+       /* get indirect table */
+       if (!evt_struct->ext_list) {
+               evt_struct->ext_list = (struct memory_descriptor *)
+                       dma_alloc_coherent(dev,
+                                          SG_ALL * sizeof(struct memory_descriptor),
+                                          &evt_struct->ext_list_token, 0);
+               if (!evt_struct->ext_list) {
+                       printk(KERN_ERR
+                              "ibmvscsi: Can't allocate memory for indirect table\n");
+                       return 0;
+               }
+       }
+
+       total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
+
+       indirect->total_length = total_length;
+       indirect->head.virtual_address = evt_struct->ext_list_token;
+       indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
+       memcpy(indirect->list, evt_struct->ext_list,
+              MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
+
+       return 1;
 }
 
 /**
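This hunk is the heart of the patch: map_sg_data keeps the old fast path (descriptors written straight into the IU's inline list) for up to MAX_INDIRECT_BUFS entries, and otherwise builds the full list in the per-event ext_list table, pointing the indirect head at ext_list_token. A compact userspace model of just the selection logic, with assumed constants (MAX_INDIRECT_BUFS = 10, SG_ALL = 255):

#include <stdio.h>

#define MAX_INDIRECT_BUFS 10    /* assumed; room inside the SRP IU */
#define SG_ALL 255              /* assumed; scsi.h upper bound */

enum table_kind { TBL_REJECT = -1, TBL_INLINE = 0, TBL_EXTERNAL = 1 };

/* Decision mirrored from map_sg_data: reject what cannot be represented,
 * use the inline list when it fits, fall back to the external table. */
static enum table_kind pick_table(int sg_mapped)
{
        if (sg_mapped > SG_ALL)
                return TBL_REJECT;      /* "More than %d mapped sg entries" */
        if (sg_mapped <= MAX_INDIRECT_BUFS)
                return TBL_INLINE;      /* descriptors live inside the IU */
        return TBL_EXTERNAL;            /* ext_list + ext_list_token path */
}

int main(void)
{
        int cases[] = { 8, 128, 300 };
        int i;

        for (i = 0; i < 3; ++i)
                printf("sg_mapped=%3d -> %d\n", cases[i], pick_table(cases[i]));
        return 0;
}

Note that the external path still copies the first MAX_INDIRECT_BUFS descriptors into the IU's inline list; those are exactly the entries the clamped data_out_count/data_in_count from set_srp_direction describe.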
@@ -428,6 +493,7 @@ static int map_single_data(struct scsi_cmnd *cmd,
  * Returns 1 on success.
 */
 static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
+                               struct srp_event_struct *evt_struct,
                                struct srp_cmd *srp_cmd, struct device *dev)
 {
        switch (cmd->sc_data_direction) {
@@ -450,7 +516,7 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
        if (!cmd->request_buffer)
                return 1;
        if (cmd->use_sg)
-               return map_sg_data(cmd, srp_cmd, dev);
+               return map_sg_data(cmd, evt_struct, srp_cmd, dev);
        return map_single_data(cmd, srp_cmd, dev);
 }
 
@@ -486,6 +552,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
                printk(KERN_WARNING
                       "ibmvscsi: Warning, request_limit exceeded\n");
                unmap_cmd_data(&evt_struct->iu.srp.cmd,
+                              evt_struct,
                               hostdata->dev);
                free_event_struct(&hostdata->pool, evt_struct);
                return SCSI_MLQUEUE_HOST_BUSY;
@@ -513,7 +580,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
        return 0;
 
  send_error:
-       unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev);
+       unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 
        if ((cmnd = evt_struct->cmnd) != NULL) {
                cmnd->result = DID_ERROR << 16;
@@ -551,6 +618,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
                       rsp->sense_and_response_data,
                       rsp->sense_data_list_length);
        unmap_cmd_data(&evt_struct->iu.srp.cmd,
+                      evt_struct,
                       evt_struct->hostdata->dev);
 
        if (rsp->doover)
@@ -583,6 +651,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 {
        struct srp_cmd *srp_cmd;
        struct srp_event_struct *evt_struct;
+       struct indirect_descriptor *indirect;
        struct ibmvscsi_host_data *hostdata =
                (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
        u16 lun = lun_from_dev(cmnd->device);
@@ -591,14 +660,6 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
        if (!evt_struct)
                return SCSI_MLQUEUE_HOST_BUSY;
 
-       init_event_struct(evt_struct,
-                         handle_cmd_rsp,
-                         VIOSRP_SRP_FORMAT,
-                         cmnd->timeout);
-
-       evt_struct->cmnd = cmnd;
-       evt_struct->cmnd_done = done;
-
        /* Set up the actual SRP IU */
        srp_cmd = &evt_struct->iu.srp.cmd;
        memset(srp_cmd, 0x00, sizeof(*srp_cmd));
@@ -606,17 +667,25 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
        memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
        srp_cmd->lun = ((u64) lun) << 48;
 
-       if (!map_data_for_srp_cmd(cmnd, srp_cmd, hostdata->dev)) {
+       if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
                printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
+       init_event_struct(evt_struct,
+                         handle_cmd_rsp,
+                         VIOSRP_SRP_FORMAT,
+                         cmnd->timeout_per_command/HZ);
+
+       evt_struct->cmnd = cmnd;
+       evt_struct->cmnd_done = done;
+
        /* Fix up dma address of the buffer itself */
-       if ((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
-           (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) {
-               struct indirect_descriptor *indirect =
-                   (struct indirect_descriptor *)srp_cmd->additional_data;
+       indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
+       if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
+            (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
+           (indirect->head.virtual_address == 0)) {
                indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
                        offsetof(struct srp_cmd, additional_data) +
                        offsetof(struct indirect_descriptor, list);
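The reworked fixup only rewrites head.virtual_address when map_sg_data left it zero, i.e. when the descriptor list lives inline in the IU; the external-table path already stored ext_list_token there and must not be overwritten. (The same hunk also moves init_event_struct after the mapping and switches the timeout to cmnd->timeout_per_command/HZ, the per-command value converted to seconds.) A standalone illustration of the guarded fixup, with simplified structures and a hypothetical IU DMA address:

#include <stdio.h>
#include <stddef.h>

/* Simplified shapes, for illustration only. */
struct memory_descriptor {
        unsigned long long virtual_address;
        unsigned int length;
};

struct indirect_descriptor {
        struct memory_descriptor head;
        struct memory_descriptor list[10];
};

struct srp_cmd_model {
        unsigned char cdb[16];
        struct indirect_descriptor additional_data;
};

int main(void)
{
        struct srp_cmd_model cmd = { { 0 } };
        unsigned long long iu_dma = 0x10000;    /* hypothetical IU DMA address */

        /* Inline case: head.virtual_address is still 0, so point it at the
         * list embedded in the IU. The external table would have a nonzero
         * ext_list_token here and is left alone. */
        if (cmd.additional_data.head.virtual_address == 0)
                cmd.additional_data.head.virtual_address = iu_dma +
                        offsetof(struct srp_cmd_model, additional_data) +
                        offsetof(struct indirect_descriptor, list);

        printf("head.virtual_address = 0x%llx\n",
               cmd.additional_data.head.virtual_address);
        return 0;
}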
@@ -826,11 +895,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
        struct srp_event_struct *tmp_evt, *found_evt;
        union viosrp_iu srp_rsp;
        int rsp_rc;
+       unsigned long flags;
        u16 lun = lun_from_dev(cmd->device);
 
        /* First, find this command in our sent list so we can figure
         * out the correct tag
         */
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
        found_evt = NULL;
        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
                if (tmp_evt->cmnd == cmd) {
@@ -839,11 +910,14 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
                }
        }
 
-       if (!found_evt)
+       if (!found_evt) {
+               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                return FAILED;
+       }
 
        evt = get_event_struct(&hostdata->pool);
        if (evt == NULL) {
+               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
                return FAILED;
        }
@@ -867,7 +941,9 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
        evt->sync_srp = &srp_rsp;
        init_completion(&evt->comp);
-       if (ibmvscsi_send_srp_event(evt, hostdata) != 0) {
+       rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+       if (rsp_rc != 0) {
                printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
                return FAILED;
        }
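The locking changes in this and the surrounding hunks follow one pattern: take host_lock before walking hostdata->sent or pulling an event from the pool, keep it across ibmvscsi_send_srp_event(), and drop it before sleeping on the completion so the response path can run. A userspace analogue of that shape using a pthread mutex (all helper names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-ins for the driver's list walk and event send. */
static int find_sent_command(void) { return 1; }
static int send_abort_event(void) { return 0; }

static int abort_handler(void)
{
        int rc;

        pthread_mutex_lock(&host_lock);
        if (!find_sent_command()) {
                pthread_mutex_unlock(&host_lock);  /* every early return unlocks */
                return -1;      /* FAILED */
        }
        rc = send_abort_event();
        pthread_mutex_unlock(&host_lock);          /* drop before blocking */
        if (rc != 0)
                return -1;      /* FAILED */

        /* wait_for_completion() would sleep here, with the lock released
         * so the interrupt-driven response path can acquire it. */
        return 0;
}

int main(void)
{
        printf("abort_handler -> %d\n", abort_handler());
        return 0;
}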
@@ -901,6 +977,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
         * The event is no longer in our list. Make sure it didn't
         * complete while we were aborting
         */
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
        found_evt = NULL;
        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
                if (tmp_evt->cmnd == cmd) {
@@ -910,6 +987,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
        }
 
        if (found_evt == NULL) {
+               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                printk(KERN_INFO
                       "ibmvscsi: aborted task tag 0x%lx completed\n",
                       tsk_mgmt->managed_task_tag);
@@ -922,8 +1000,10 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
        cmd->result = (DID_ABORT << 16);
        list_del(&found_evt->list);
-       unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt->hostdata->dev);
+       unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
+                      found_evt->hostdata->dev);
        free_event_struct(&found_evt->hostdata->pool, found_evt);
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        atomic_inc(&hostdata->request_limit);
        return SUCCESS;
 }
@@ -943,10 +1023,13 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
        struct srp_event_struct *tmp_evt, *pos;
        union viosrp_iu srp_rsp;
        int rsp_rc;
+       unsigned long flags;
        u16 lun = lun_from_dev(cmd->device);
 
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
        evt = get_event_struct(&hostdata->pool);
        if (evt == NULL) {
+               spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
                return FAILED;
        }
@@ -969,7 +1052,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 
        evt->sync_srp = &srp_rsp;
        init_completion(&evt->comp);
-       if (ibmvscsi_send_srp_event(evt, hostdata) != 0) {
+       rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+       if (rsp_rc != 0) {
                printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
                return FAILED;
        }
@@ -1002,12 +1087,14 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
        /* We need to find all commands for this LUN that have not yet been
         * responded to, and fail them with DID_RESET
         */
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
        list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
                if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
                        if (tmp_evt->cmnd)
                                tmp_evt->cmnd->result = (DID_RESET << 16);
                        list_del(&tmp_evt->list);
-                       unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt->hostdata->dev);
+                       unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
+                                      tmp_evt->hostdata->dev);
                        free_event_struct(&tmp_evt->hostdata->pool,
                                          tmp_evt);
                        atomic_inc(&hostdata->request_limit);
@@ -1017,6 +1104,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
                        tmp_evt->done(tmp_evt);
                }
        }
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        return SUCCESS;
 }
 
@@ -1035,6 +1123,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata)
                if (tmp_evt->cmnd) {
                        tmp_evt->cmnd->result = (DID_ERROR << 16);
                        unmap_cmd_data(&tmp_evt->iu.srp.cmd,
+                                      tmp_evt,
                                       tmp_evt->hostdata->dev);
                        if (tmp_evt->cmnd_done)
                                tmp_evt->cmnd_done(tmp_evt->cmnd);
@@ -1339,7 +1428,7 @@ static struct scsi_host_template driver_template = {
        .cmd_per_lun = 16,
        .can_queue = 1,         /* Updated after SRP_LOGIN */
        .this_id = -1,
-       .sg_tablesize = MAX_INDIRECT_BUFS,
+       .sg_tablesize = SG_ALL,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = ibmvscsi_attrs,
 };
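Finally, the host template advertises the new capacity: sg_tablesize is the upper bound the SCSI midlayer uses when assembling scatterlists for this host, so raising it from MAX_INDIRECT_BUFS to SG_ALL is what actually lets larger I/Os reach map_sg_data. With the same assumed constants used above:

#include <stdio.h>

#define MAX_INDIRECT_BUFS 10    /* assumed */
#define SG_ALL 255              /* assumed */

struct host_template_model {
        int sg_tablesize;       /* max scatterlist entries per command */
};

int main(void)
{
        struct host_template_model old = { MAX_INDIRECT_BUFS };
        struct host_template_model new = { SG_ALL };

        printf("sg_tablesize: %d -> %d entries per command\n",
               old.sg_tablesize, new.sg_tablesize);
        return 0;
}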
