Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvscsi.c')
 -rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 463
 1 file changed, 248 insertions(+), 215 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index b10eefe735c..5870866abc9 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -173,9 +173,8 @@ static void release_event_pool(struct event_pool *pool,
 		}
 	}
 	if (in_use)
-		printk(KERN_WARNING
-		       "ibmvscsi: releasing event pool with %d "
-		       "events still in use?\n", in_use);
+		dev_warn(hostdata->dev, "releasing event pool with %d "
+			 "events still in use?\n", in_use);
 	kfree(pool->events);
 	dma_free_coherent(hostdata->dev,
 			  pool->size * sizeof(*pool->iu_storage),
@@ -210,15 +209,13 @@ static void free_event_struct(struct event_pool *pool,
 			       struct srp_event_struct *evt)
 {
 	if (!valid_event_struct(pool, evt)) {
-		printk(KERN_ERR
-		       "ibmvscsi: Freeing invalid event_struct %p "
-		       "(not in pool %p)\n", evt, pool->events);
+		dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
+			"(not in pool %p)\n", evt, pool->events);
 		return;
 	}
 	if (atomic_inc_return(&evt->free) != 1) {
-		printk(KERN_ERR
-		       "ibmvscsi: Freeing event_struct %p "
-		       "which is not in use!\n", evt);
+		dev_err(evt->hostdata->dev, "Freeing event_struct %p "
+			"which is not in use!\n", evt);
 		return;
 	}
 }
@@ -353,20 +350,19 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
 	}
 }
 
-static int map_sg_list(int num_entries,
-		       struct scatterlist *sg,
+static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
 		       struct srp_direct_buf *md)
 {
 	int i;
+	struct scatterlist *sg;
 	u64 total_length = 0;
 
-	for (i = 0; i < num_entries; ++i) {
+	scsi_for_each_sg(cmd, sg, nseg, i) {
 		struct srp_direct_buf *descr = md + i;
-		struct scatterlist *sg_entry = &sg[i];
-		descr->va = sg_dma_address(sg_entry);
-		descr->len = sg_dma_len(sg_entry);
+		descr->va = sg_dma_address(sg);
+		descr->len = sg_dma_len(sg);
 		descr->key = 0;
-		total_length += sg_dma_len(sg_entry);
+		total_length += sg_dma_len(sg);
 	}
 	return total_length;
 }
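The switch from bare scatterlist indexing to scsi_for_each_sg() matters because the mid-layer may hand drivers a chained scatterlist, where &sg[i] arithmetic is no longer valid; the accessor walks sg_next() chains correctly. A minimal sketch of the pattern (variable names here are illustrative, not from the patch):

	struct scatterlist *sg;
	int i;
	u64 total = 0;

	/* iterate the command's mapped scatterlist; &sg[i] would break
	 * on a chained list, scsi_for_each_sg() does not */
	scsi_for_each_sg(cmd, sg, nseg, i)
		total += sg_dma_len(sg);	/* valid only after DMA mapping */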
@@ -387,40 +383,37 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
 	int sg_mapped;
 	u64 total_length = 0;
-	struct scatterlist *sg = cmd->request_buffer;
 	struct srp_direct_buf *data =
 		(struct srp_direct_buf *) srp_cmd->add_data;
 	struct srp_indirect_buf *indirect =
 		(struct srp_indirect_buf *) data;
 
-	sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
-
-	if (sg_mapped == 0)
+	sg_mapped = scsi_dma_map(cmd);
+	if (!sg_mapped)
+		return 1;
+	else if (sg_mapped < 0)
 		return 0;
+	else if (sg_mapped > SG_ALL) {
+		printk(KERN_ERR
+		       "ibmvscsi: More than %d mapped sg entries, got %d\n",
+		       SG_ALL, sg_mapped);
+		return 0;
+	}
 
 	set_srp_direction(cmd, srp_cmd, sg_mapped);
 
 	/* special case; we can use a single direct descriptor */
 	if (sg_mapped == 1) {
-		data->va = sg_dma_address(&sg[0]);
-		data->len = sg_dma_len(&sg[0]);
-		data->key = 0;
+		map_sg_list(cmd, sg_mapped, data);
 		return 1;
 	}
 
-	if (sg_mapped > SG_ALL) {
-		printk(KERN_ERR
-		       "ibmvscsi: More than %d mapped sg entries, got %d\n",
-		       SG_ALL, sg_mapped);
-		return 0;
-	}
-
 	indirect->table_desc.va = 0;
 	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
 	indirect->table_desc.key = 0;
 
 	if (sg_mapped <= MAX_INDIRECT_BUFS) {
-		total_length = map_sg_list(sg_mapped, sg,
+		total_length = map_sg_list(cmd, sg_mapped,
 					   &indirect->desc_list[0]);
 		indirect->len = total_length;
 		return 1;
@@ -429,61 +422,27 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 	/* get indirect table */
 	if (!evt_struct->ext_list) {
 		evt_struct->ext_list = (struct srp_direct_buf *)
 			dma_alloc_coherent(dev,
 					   SG_ALL * sizeof(struct srp_direct_buf),
 					   &evt_struct->ext_list_token, 0);
 		if (!evt_struct->ext_list) {
-			printk(KERN_ERR
-			       "ibmvscsi: Can't allocate memory for indirect table\n");
+			sdev_printk(KERN_ERR, cmd->device,
+				    "Can't allocate memory for indirect table\n");
 			return 0;
-
 		}
 	}
 
-	total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
+	total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
 
 	indirect->len = total_length;
 	indirect->table_desc.va = evt_struct->ext_list_token;
 	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
 	memcpy(indirect->desc_list, evt_struct->ext_list,
 	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
-
 	return 1;
 }
 
 /**
- * map_single_data: - Maps memory and initializes memory decriptor fields
- * @cmd: struct scsi_cmnd with the memory to be mapped
- * @srp_cmd: srp_cmd that contains the memory descriptor
- * @dev: device for which to map dma memory
- *
- * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
- * Returns 1 on success.
- */
-static int map_single_data(struct scsi_cmnd *cmd,
-			   struct srp_cmd *srp_cmd, struct device *dev)
-{
-	struct srp_direct_buf *data =
-		(struct srp_direct_buf *) srp_cmd->add_data;
-
-	data->va =
-		dma_map_single(dev, cmd->request_buffer,
-			       cmd->request_bufflen,
-			       DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(data->va)) {
-		printk(KERN_ERR
-		       "ibmvscsi: Unable to map request_buffer for command!\n");
-		return 0;
-	}
-	data->len = cmd->request_bufflen;
-	data->key = 0;
-
-	set_srp_direction(cmd, srp_cmd, 1);
-
-	return 1;
-}
-
-/**
  * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
  * @cmd: struct scsi_cmnd with the memory to be mapped
  * @srp_cmd: srp_cmd that contains the memory descriptor
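The rewritten map_sg_data() leans on the scsi_dma_map() return convention: zero means the command carries no data, a negative value means the mapping failed, and a positive value is the number of mapped entries, later undone with scsi_dma_unmap(). A minimal sketch of that convention, with the cases named for clarity:

	int nseg = scsi_dma_map(cmd);

	if (nseg < 0)		/* mapping failed; fail the command */
		return 0;
	if (nseg == 0)		/* no data transfer; nothing to describe */
		return 1;
	/* nseg > 0: build nseg descriptors; scsi_dma_unmap(cmd) undoes it */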
@@ -503,23 +462,83 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
 	case DMA_NONE:
 		return 1;
 	case DMA_BIDIRECTIONAL:
-		printk(KERN_ERR
-		       "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
+		sdev_printk(KERN_ERR, cmd->device,
+			    "Can't map DMA_BIDIRECTIONAL to read/write\n");
 		return 0;
 	default:
-		printk(KERN_ERR
-		       "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
+		sdev_printk(KERN_ERR, cmd->device,
+			    "Unknown data direction 0x%02x; can't map!\n",
 			    cmd->sc_data_direction);
 		return 0;
 	}
 
-	if (!cmd->request_buffer)
-		return 1;
-	if (cmd->use_sg)
-		return map_sg_data(cmd, evt_struct, srp_cmd, dev);
-	return map_single_data(cmd, srp_cmd, dev);
+	return map_sg_data(cmd, evt_struct, srp_cmd, dev);
 }
 
+/**
+ * purge_requests: Our virtual adapter just shut down. purge any sent requests
+ * @hostdata:	the adapter
+ */
+static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+{
+	struct srp_event_struct *tmp_evt, *pos;
+	unsigned long flags;
+
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
+	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
+		list_del(&tmp_evt->list);
+		del_timer(&tmp_evt->timer);
+		if (tmp_evt->cmnd) {
+			tmp_evt->cmnd->result = (error_code << 16);
+			unmap_cmd_data(&tmp_evt->iu.srp.cmd,
+				       tmp_evt,
+				       tmp_evt->hostdata->dev);
+			if (tmp_evt->cmnd_done)
+				tmp_evt->cmnd_done(tmp_evt->cmnd);
+		} else if (tmp_evt->done)
+			tmp_evt->done(tmp_evt);
+		free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
+	}
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+}
+
+/**
+ * ibmvscsi_reset_host - Reset the connection to the server
+ * @hostdata:	struct ibmvscsi_host_data to reset
+ */
+static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
+{
+	scsi_block_requests(hostdata->host);
+	atomic_set(&hostdata->request_limit, 0);
+
+	purge_requests(hostdata, DID_ERROR);
+	if ((ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata)) ||
+	    (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0)) ||
+	    (vio_enable_interrupts(to_vio_dev(hostdata->dev)))) {
+		atomic_set(&hostdata->request_limit, -1);
+		dev_err(hostdata->dev, "error after reset\n");
+	}
+
+	scsi_unblock_requests(hostdata->host);
+}
+
+/**
+ * ibmvscsi_timeout - Internal command timeout handler
+ * @evt_struct:	struct srp_event_struct that timed out
+ *
+ * Called when an internally generated command times out
+ */
+static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
+{
+	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+	dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
+		evt_struct->iu.srp.cmd.opcode);
+
+	ibmvscsi_reset_host(hostdata);
+}
+
+
 /* ------------------------------------------------------------
  * Routines for sending and receiving SRPs
  */
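For readers tracing purge_requests(): the shift in (error_code << 16) places a DID_* code in the host-byte field of the SCSI result word, which packs four status fields into one int. A sketch of the classic 2.6-era layout:

	/* result = status | (msg << 8) | (host << 16) | (driver << 24) */
	tmp_evt->cmnd->result = (DID_ERROR << 16);	/* host byte only */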
@@ -527,12 +546,14 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
  * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
  * @evt_struct:	evt_struct to be sent
  * @hostdata:	ibmvscsi_host_data of host
+ * @timeout:	timeout in seconds - 0 means do not time command
  *
  * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
  * Note that this routine assumes that host_lock is held for synchronization
  */
 static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
-				   struct ibmvscsi_host_data *hostdata)
+				   struct ibmvscsi_host_data *hostdata,
+				   unsigned long timeout)
 {
 	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
 	int request_status;
@@ -588,12 +609,20 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	 */
 	list_add_tail(&evt_struct->list, &hostdata->sent);
 
+	init_timer(&evt_struct->timer);
+	if (timeout) {
+		evt_struct->timer.data = (unsigned long) evt_struct;
+		evt_struct->timer.expires = jiffies + (timeout * HZ);
+		evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout;
+		add_timer(&evt_struct->timer);
+	}
+
 	if ((rc =
 	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
 		list_del(&evt_struct->list);
+		del_timer(&evt_struct->timer);
 
-		printk(KERN_ERR "ibmvscsi: send error %d\n",
-		       rc);
+		dev_err(hostdata->dev, "send error %d\n", rc);
 		atomic_inc(&hostdata->request_limit);
 		goto send_error;
 	}
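The timer wiring above follows the 2.6-era struct timer_list idiom; the function-pointer cast lets ibmvscsi_timeout(), which takes a struct pointer, serve as the (unsigned long)-argument callback. A condensed sketch of the arm/cancel pattern (t, ctx and my_timeout are placeholders):

	init_timer(&t);				/* must precede add/del */
	t.data     = (unsigned long) ctx;	/* handed to the callback */
	t.expires  = jiffies + timeout * HZ;	/* absolute time, in jiffies */
	t.function = my_timeout;		/* void (*)(unsigned long) */
	add_timer(&t);
	/* ... */
	del_timer(&t);				/* cancel on every completion path */

Note that the patch cancels the timer on all three paths: send failure (above), normal completion in ibmvscsi_handle_crq(), and purge_requests().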
@@ -634,9 +663,8 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
 
 	if (unlikely(rsp->opcode != SRP_RSP)) {
 		if (printk_ratelimit())
-			printk(KERN_WARNING
-			       "ibmvscsi: bad SRP RSP type %d\n",
-			       rsp->opcode);
+			dev_warn(evt_struct->hostdata->dev,
+				 "bad SRP RSP type %d\n", rsp->opcode);
 	}
 
 	if (cmnd) {
@@ -650,9 +678,9 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
 			       evt_struct->hostdata->dev);
 
 		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
-			cmnd->resid = rsp->data_out_res_cnt;
+			scsi_set_resid(cmnd, rsp->data_out_res_cnt);
 		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
-			cmnd->resid = rsp->data_in_res_cnt;
+			scsi_set_resid(cmnd, rsp->data_in_res_cnt);
 	}
 
 	if (evt_struct->cmnd_done)
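scsi_set_resid() is the accessor form of the old direct cmnd->resid assignment; paired with scsi_get_resid(), it keeps drivers out of scsi_cmnd internals. A minimal sketch:

	unsigned int left;

	scsi_set_resid(cmnd, resid_bytes);	/* record the residual count */
	left = scsi_get_resid(cmnd);		/* read it back via the accessor */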
@@ -697,7 +725,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 	srp_cmd->lun = ((u64) lun) << 48;
 
 	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
-		printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
+		sdev_printk(KERN_ERR, cmnd->device, "couldn't convert cmd to srp_cmd\n");
 		free_event_struct(&hostdata->pool, evt_struct);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
@@ -722,7 +750,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 			offsetof(struct srp_indirect_buf, desc_list);
 	}
 
-	return ibmvscsi_send_srp_event(evt_struct, hostdata);
+	return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
 }
 
 /* ------------------------------------------------------------
@@ -744,16 +772,16 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
 			 DMA_BIDIRECTIONAL);
 
 	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
-		printk("ibmvscsi: error %d getting adapter info\n",
-		       evt_struct->xfer_iu->mad.adapter_info.common.status);
+		dev_err(hostdata->dev, "error %d getting adapter info\n",
+			evt_struct->xfer_iu->mad.adapter_info.common.status);
 	} else {
-		printk("ibmvscsi: host srp version: %s, "
-		       "host partition %s (%d), OS %d, max io %u\n",
+		dev_info(hostdata->dev, "host srp version: %s, "
+			 "host partition %s (%d), OS %d, max io %u\n",
 			hostdata->madapter_info.srp_version,
 			hostdata->madapter_info.partition_name,
 			hostdata->madapter_info.partition_number,
 			hostdata->madapter_info.os_type,
 			hostdata->madapter_info.port_max_txu[0]);
 
 		if (hostdata->madapter_info.port_max_txu[0])
 			hostdata->host->max_sectors =
@@ -761,11 +789,10 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
 
 	if (hostdata->madapter_info.os_type == 3 &&
 	    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
-		printk("ibmvscsi: host (Ver. %s) doesn't support large"
-		       "transfers\n",
-		       hostdata->madapter_info.srp_version);
-		printk("ibmvscsi: limiting scatterlists to %d\n",
-		       MAX_INDIRECT_BUFS);
+		dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
+			hostdata->madapter_info.srp_version);
+		dev_err(hostdata->dev, "limiting scatterlists to %d\n",
+			MAX_INDIRECT_BUFS);
 		hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
 	}
 }
@@ -784,19 +811,20 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 {
 	struct viosrp_adapter_info *req;
 	struct srp_event_struct *evt_struct;
+	unsigned long flags;
 	dma_addr_t addr;
 
 	evt_struct = get_event_struct(&hostdata->pool);
 	if (!evt_struct) {
-		printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
-		       "for ADAPTER_INFO_REQ!\n");
+		dev_err(hostdata->dev,
+			"couldn't allocate an event for ADAPTER_INFO_REQ!\n");
 		return;
 	}
 
 	init_event_struct(evt_struct,
 			  adapter_info_rsp,
 			  VIOSRP_MAD_FORMAT,
-			  init_timeout * HZ);
+			  init_timeout);
 
 	req = &evt_struct->iu.mad.adapter_info;
 	memset(req, 0x00, sizeof(*req));
@@ -809,20 +837,20 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 				  DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(req->buffer)) {
-		printk(KERN_ERR
-		       "ibmvscsi: Unable to map request_buffer "
-		       "for adapter_info!\n");
+		dev_err(hostdata->dev, "Unable to map request_buffer for adapter_info!\n");
 		free_event_struct(&hostdata->pool, evt_struct);
 		return;
 	}
 
-	if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
-		printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
+	if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
+		dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
 		dma_unmap_single(hostdata->dev,
 				 addr,
 				 sizeof(hostdata->madapter_info),
 				 DMA_BIDIRECTIONAL);
 	}
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 };
 
 /**
@@ -839,24 +867,23 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 	case SRP_LOGIN_RSP:	/* it worked! */
 		break;
 	case SRP_LOGIN_REJ:	/* refused! */
-		printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
-		       evt_struct->xfer_iu->srp.login_rej.reason);
+		dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
+			 evt_struct->xfer_iu->srp.login_rej.reason);
 		/* Login failed.  */
 		atomic_set(&hostdata->request_limit, -1);
 		return;
 	default:
-		printk(KERN_ERR
-		       "ibmvscsi: Invalid login response typecode 0x%02x!\n",
-		       evt_struct->xfer_iu->srp.login_rsp.opcode);
+		dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
+			evt_struct->xfer_iu->srp.login_rsp.opcode);
 		/* Login failed.  */
 		atomic_set(&hostdata->request_limit, -1);
 		return;
 	}
 
-	printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
+	dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
 
 	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
-		printk(KERN_ERR "ibmvscsi: Invalid request_limit.\n");
+		dev_err(hostdata->dev, "Invalid request_limit.\n");
 
 	/* Now we know what the real request-limit is.
 	 * This value is set rather than added to request_limit because
@@ -885,15 +912,14 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 	struct srp_login_req *login;
 	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
 	if (!evt_struct) {
-		printk(KERN_ERR
-		       "ibmvscsi: couldn't allocate an event for login req!\n");
+		dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
 		return FAILED;
 	}
 
 	init_event_struct(evt_struct,
 			  login_rsp,
 			  VIOSRP_SRP_FORMAT,
-			  init_timeout * HZ);
+			  init_timeout);
 
 	login = &evt_struct->iu.srp.login_req;
 	memset(login, 0x00, sizeof(struct srp_login_req));
@@ -907,9 +933,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 	 */
 	atomic_set(&hostdata->request_limit, 1);
 
-	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
+	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-	printk("ibmvscsic: sent SRP login\n");
+	dev_info(hostdata->dev, "sent SRP login\n");
 	return rc;
 };
 
@@ -958,20 +984,20 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
 	if (!found_evt) {
 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-		return FAILED;
+		return SUCCESS;
 	}
 
 	evt = get_event_struct(&hostdata->pool);
 	if (evt == NULL) {
 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-		printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
+		sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n");
 		return FAILED;
 	}
 
 	init_event_struct(evt,
 			  sync_completion,
 			  VIOSRP_SRP_FORMAT,
-			  init_timeout * HZ);
+			  init_timeout);
 
 	tsk_mgmt = &evt->iu.srp.tsk_mgmt;
 
@@ -982,15 +1008,16 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 	tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
 	tsk_mgmt->task_tag = (u64) found_evt;
 
-	printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
-	       tsk_mgmt->lun, tsk_mgmt->task_tag);
+	sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%lx, tag 0x%lx\n",
+		    tsk_mgmt->lun, tsk_mgmt->task_tag);
 
 	evt->sync_srp = &srp_rsp;
 	init_completion(&evt->comp);
-	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 	if (rsp_rc != 0) {
-		printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
+		sdev_printk(KERN_ERR, cmd->device,
+			    "failed to send abort() event. rc=%d\n", rsp_rc);
 		return FAILED;
 	}
 
@@ -999,9 +1026,8 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 	/* make sure we got a good response */
 	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
 		if (printk_ratelimit())
-			printk(KERN_WARNING
-			       "ibmvscsi: abort bad SRP RSP type %d\n",
-			       srp_rsp.srp.rsp.opcode);
+			sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
+				    srp_rsp.srp.rsp.opcode);
 		return FAILED;
 	}
 
@@ -1012,10 +1038,9 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
 	if (rsp_rc) {
 		if (printk_ratelimit())
-			printk(KERN_WARNING
-			       "ibmvscsi: abort code %d for task tag 0x%lx\n",
-			       rsp_rc,
-			       tsk_mgmt->task_tag);
+			sdev_printk(KERN_WARNING, cmd->device,
+				    "abort code %d for task tag 0x%lx\n",
+				    rsp_rc, tsk_mgmt->task_tag);
 		return FAILED;
 	}
 
@@ -1034,15 +1059,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
 	if (found_evt == NULL) {
 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-		printk(KERN_INFO
-		       "ibmvscsi: aborted task tag 0x%lx completed\n",
-		       tsk_mgmt->task_tag);
+		sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%lx completed\n",
+			    tsk_mgmt->task_tag);
 		return SUCCESS;
 	}
 
-	printk(KERN_INFO
-	       "ibmvscsi: successfully aborted task tag 0x%lx\n",
-	       tsk_mgmt->task_tag);
+	sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%lx\n",
+		    tsk_mgmt->task_tag);
 
 	cmd->result = (DID_ABORT << 16);
 	list_del(&found_evt->list);
@@ -1076,14 +1099,14 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	evt = get_event_struct(&hostdata->pool);
 	if (evt == NULL) {
 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-		printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
+		sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n");
 		return FAILED;
 	}
 
 	init_event_struct(evt,
 			  sync_completion,
 			  VIOSRP_SRP_FORMAT,
-			  init_timeout * HZ);
+			  init_timeout);
 
 	tsk_mgmt = &evt->iu.srp.tsk_mgmt;
 
@@ -1093,15 +1116,16 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	tsk_mgmt->lun = ((u64) lun) << 48;
 	tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
 
-	printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
-	       tsk_mgmt->lun);
+	sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
+		    tsk_mgmt->lun);
 
 	evt->sync_srp = &srp_rsp;
 	init_completion(&evt->comp);
-	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 	if (rsp_rc != 0) {
-		printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
+		sdev_printk(KERN_ERR, cmd->device,
+			    "failed to send reset event. rc=%d\n", rsp_rc);
 		return FAILED;
 	}
 
@@ -1110,9 +1134,8 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	/* make sure we got a good response */
 	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
 		if (printk_ratelimit())
-			printk(KERN_WARNING
-			       "ibmvscsi: reset bad SRP RSP type %d\n",
-			       srp_rsp.srp.rsp.opcode);
+			sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
+				    srp_rsp.srp.rsp.opcode);
 		return FAILED;
 	}
 
@@ -1123,9 +1146,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 
 	if (rsp_rc) {
 		if (printk_ratelimit())
-			printk(KERN_WARNING
-			       "ibmvscsi: reset code %d for task tag 0x%lx\n",
+			sdev_printk(KERN_WARNING, cmd->device,
+				    "reset code %d for task tag 0x%lx\n",
 				    rsp_rc, tsk_mgmt->task_tag);
 		return FAILED;
 	}
 
@@ -1154,32 +1177,30 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 }
 
 /**
- * purge_requests: Our virtual adapter just shut down. purge any sent requests
- * @hostdata:	the adapter
+ * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
+ * @cmd: struct scsi_cmnd having problems
  */
-static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-	struct srp_event_struct *tmp_evt, *pos;
-	unsigned long flags;
+	unsigned long wait_switch = 0;
+	struct ibmvscsi_host_data *hostdata =
+		(struct ibmvscsi_host_data *)cmd->device->host->hostdata;
 
-	spin_lock_irqsave(hostdata->host->host_lock, flags);
-	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
-		list_del(&tmp_evt->list);
-		if (tmp_evt->cmnd) {
-			tmp_evt->cmnd->result = (error_code << 16);
-			unmap_cmd_data(&tmp_evt->iu.srp.cmd,
-				       tmp_evt,
-				       tmp_evt->hostdata->dev);
-			if (tmp_evt->cmnd_done)
-				tmp_evt->cmnd_done(tmp_evt->cmnd);
-		} else {
-			if (tmp_evt->done) {
-				tmp_evt->done(tmp_evt);
-			}
-		}
-		free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
+	dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
+
+	ibmvscsi_reset_host(hostdata);
+
+	for (wait_switch = jiffies + (init_timeout * HZ);
+	     time_before(jiffies, wait_switch) &&
+	     atomic_read(&hostdata->request_limit) < 2;) {
+
+		msleep(10);
 	}
-	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
+	if (atomic_read(&hostdata->request_limit) <= 0)
+		return FAILED;
+
+	return SUCCESS;
 }
 
 /**
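The wait loop in the new host-reset handler is the standard bounded-poll idiom: time_before() compares jiffies values in a wrap-around-safe way, which a plain less-than comparison would not. An equivalent sketch:

	unsigned long deadline = jiffies + init_timeout * HZ;

	/* sleep rather than spin while the re-login triggered by the
	 * reset brings request_limit back above zero */
	while (time_before(jiffies, deadline) &&
	       atomic_read(&hostdata->request_limit) < 2)
		msleep(10);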
@@ -1191,6 +1212,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
 void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 			 struct ibmvscsi_host_data *hostdata)
 {
+	long rc;
 	unsigned long flags;
 	struct srp_event_struct *evt_struct =
 		(struct srp_event_struct *)crq->IU_data_ptr;
@@ -1198,27 +1220,25 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	case 0xC0:		/* initialization */
 		switch (crq->format) {
 		case 0x01:	/* Initialization message */
-			printk(KERN_INFO "ibmvscsi: partner initialized\n");
+			dev_info(hostdata->dev, "partner initialized\n");
 			/* Send back a response */
-			if (ibmvscsi_send_crq(hostdata,
-					      0xC002000000000000LL, 0) == 0) {
+			if ((rc = ibmvscsi_send_crq(hostdata,
+						    0xC002000000000000LL, 0)) == 0) {
 				/* Now login */
 				send_srp_login(hostdata);
 			} else {
-				printk(KERN_ERR
-				       "ibmvscsi: Unable to send init rsp\n");
+				dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
 			}
 
 			break;
 		case 0x02:	/* Initialization response */
-			printk(KERN_INFO
-			       "ibmvscsi: partner initialization complete\n");
+			dev_info(hostdata->dev, "partner initialization complete\n");
 
 			/* Now login */
 			send_srp_login(hostdata);
 			break;
 		default:
-			printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
+			dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
 		}
 		return;
 	case 0xFF:	/* Hypervisor telling us the connection is closed */
@@ -1226,8 +1246,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 		atomic_set(&hostdata->request_limit, 0);
 		if (crq->format == 0x06) {
 			/* We need to re-setup the interpartition connection */
-			printk(KERN_INFO
-			       "ibmvscsi: Re-enabling adapter!\n");
+			dev_info(hostdata->dev, "Re-enabling adapter!\n");
 			purge_requests(hostdata, DID_REQUEUE);
 			if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
 							 hostdata)) ||
@@ -1235,14 +1254,11 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 			     0xC001000000000000LL, 0))) {
 				atomic_set(&hostdata->request_limit,
 					   -1);
-				printk(KERN_ERR
-				       "ibmvscsi: error after"
-				       " enable\n");
+				dev_err(hostdata->dev, "error after enable\n");
 			}
 		} else {
-			printk(KERN_INFO
-			       "ibmvscsi: Virtual adapter failed rc %d!\n",
-			       crq->format);
+			dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
+				crq->format);
 
 			purge_requests(hostdata, DID_ERROR);
 			if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
@@ -1251,8 +1267,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 			     0xC001000000000000LL, 0))) {
 				atomic_set(&hostdata->request_limit,
 					   -1);
-				printk(KERN_ERR
-				       "ibmvscsi: error after reset\n");
+				dev_err(hostdata->dev, "error after reset\n");
 			}
 		}
 		scsi_unblock_requests(hostdata->host);
@@ -1260,9 +1275,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	case 0x80:	/* real payload */
 		break;
 	default:
-		printk(KERN_ERR
-		       "ibmvscsi: got an invalid message type 0x%02x\n",
-		       crq->valid);
+		dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
+			crq->valid);
 		return;
 	}
 
@@ -1271,16 +1285,14 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	 * actually sent
 	 */
 	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
-		printk(KERN_ERR
-		       "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
+		dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
 			(void *)crq->IU_data_ptr);
 		return;
 	}
 
 	if (atomic_read(&evt_struct->free)) {
-		printk(KERN_ERR
-		       "ibmvscsi: received duplicate correlation_token 0x%p!\n",
-		       (void *)crq->IU_data_ptr);
+		dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
+			(void *)crq->IU_data_ptr);
 		return;
 	}
 
@@ -1288,11 +1300,12 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
 		   &hostdata->request_limit);
 
+	del_timer(&evt_struct->timer);
+
 	if (evt_struct->done)
 		evt_struct->done(evt_struct);
 	else
-		printk(KERN_ERR
-		       "ibmvscsi: returned done() is NULL; not running it!\n");
+		dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");
 
 	/*
 	 * Lock the host_lock before messing with these structures, since we
@@ -1313,20 +1326,20 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 {
 	struct viosrp_host_config *host_config;
 	struct srp_event_struct *evt_struct;
+	unsigned long flags;
 	dma_addr_t addr;
 	int rc;
 
 	evt_struct = get_event_struct(&hostdata->pool);
 	if (!evt_struct) {
-		printk(KERN_ERR
-		       "ibmvscsi: could't allocate event for HOST_CONFIG!\n");
+		dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
 		return -1;
 	}
 
 	init_event_struct(evt_struct,
 			  sync_completion,
 			  VIOSRP_MAD_FORMAT,
-			  init_timeout * HZ);
+			  init_timeout);
 
 	host_config = &evt_struct->iu.mad.host_config;
 
@@ -1339,14 +1352,15 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 			 DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(host_config->buffer)) {
-		printk(KERN_ERR
-		       "ibmvscsi: dma_mapping error " "getting host config\n");
+		dev_err(hostdata->dev, "dma_mapping error getting host config\n");
 		free_event_struct(&hostdata->pool, evt_struct);
 		return -1;
 	}
 
 	init_completion(&evt_struct->comp);
-	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
+	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 	if (rc == 0)
 		wait_for_completion(&evt_struct->comp);
 	dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
@@ -1375,6 +1389,23 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
 	return 0;
 }
 
+/**
+ * ibmvscsi_change_queue_depth - Change the device's queue depth
+ * @sdev:	scsi device struct
+ * @qdepth:	depth to set
+ *
+ * Return value:
+ * 	actual depth set
+ **/
+static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+	if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
+		qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
+
+	scsi_adjust_queue_depth(sdev, 0, qdepth);
+	return sdev->queue_depth;
+}
+
 /* ------------------------------------------------------------
  * sysfs attributes
  */
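One design note on the new ibmvscsi_change_queue_depth(): the middle argument of scsi_adjust_queue_depth() is the tag-message type, and this driver passes 0 for untagged queuing; a driver using tagged queuing would pass a tag message constant instead. Illustrative call only, not from this patch:

	/* a tagged-queuing driver would resize the queue like this */
	scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);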
@@ -1520,7 +1551,9 @@ static struct scsi_host_template driver_template = {
 	.queuecommand = ibmvscsi_queuecommand,
 	.eh_abort_handler = ibmvscsi_eh_abort_handler,
 	.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
+	.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
 	.slave_configure = ibmvscsi_slave_configure,
+	.change_queue_depth = ibmvscsi_change_queue_depth,
 	.cmd_per_lun = 16,
 	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
 	.this_id = -1,
@@ -1545,7 +1578,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	driver_template.can_queue = max_requests;
 	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
 	if (!host) {
-		printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
+		dev_err(&vdev->dev, "couldn't allocate host data\n");
 		goto scsi_host_alloc_failed;
 	}
 
@@ -1559,11 +1592,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
 	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
 	if (rc != 0 && rc != H_RESOURCE) {
-		printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
+		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
 		goto init_crq_failed;
 	}
 	if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
-		printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n");
+		dev_err(&vdev->dev, "couldn't initialize event pool\n");
 		goto init_pool_failed;
 	}
 