path: root/drivers/scsi/ibmvscsi/ibmvscsi.c
author		Brian King <brking@linux.vnet.ibm.com>	2007-06-13 18:12:19 -0400
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2007-06-17 16:52:25 -0400
commit		6c0a60ec52042ece8bf4904c91ac497188e8d70b (patch)
tree		27bcae804d75986c54636ba7497775f9f9094a08 /drivers/scsi/ibmvscsi/ibmvscsi.c
parent		2a7309372fe56ae46c499b772d811ad31c501dd9 (diff)
[SCSI] ibmvscsi: Enhanced error logging
Converts ibmvscsi to use dev_printk and friends to simplify debugging. ibmvscsi adapter initialization now looks like this:

    ibmvscsi 30000005: SRP_VERSION: 16.a
    ibmvscsi 30000005: partner initialization complete
    ibmvscsi 30000005: sent SRP login
    ibmvscsi 30000005: SRP_LOGIN succeeded

Additionally, this patch adds the logging of a couple of return codes in a couple of log messages.

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
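For illustration only (not code from this patch): the dev_printk() family takes a struct device *, so the driver core prefixes each message with the driver and device name (the "ibmvscsi 30000005:" seen above) instead of every format string hard-coding an "ibmvscsi:" tag. A minimal before/after sketch of the idiom, with hypothetical function names:

	#include <linux/device.h>	/* dev_err() */
	#include <linux/kernel.h>	/* printk(), KERN_ERR */

	/* Old style: the driver-name prefix is written into every message. */
	static void report_send_error_old(int rc)
	{
		printk(KERN_ERR "ibmvscsi: send error %d\n", rc);
	}

	/* New style: pass the struct device; the core adds the
	 * "driver-name device-instance:" prefix automatically.
	 */
	static void report_send_error_new(struct device *dev, int rc)
	{
		dev_err(dev, "send error %d\n", rc);
	}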
Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvscsi.c')
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvscsi.c	208
1 file changed, 91 insertions, 117 deletions
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d91837caa42b..c63a26e2fbc7 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -173,9 +173,8 @@ static void release_event_pool(struct event_pool *pool,
 		}
 	}
 	if (in_use)
-		printk(KERN_WARNING
-		       "ibmvscsi: releasing event pool with %d "
-		       "events still in use?\n", in_use);
+		dev_warn(hostdata->dev, "releasing event pool with %d "
+			 "events still in use?\n", in_use);
 	kfree(pool->events);
 	dma_free_coherent(hostdata->dev,
 			  pool->size * sizeof(*pool->iu_storage),
@@ -210,15 +209,13 @@ static void free_event_struct(struct event_pool *pool,
 			      struct srp_event_struct *evt)
 {
 	if (!valid_event_struct(pool, evt)) {
-		printk(KERN_ERR
-		       "ibmvscsi: Freeing invalid event_struct %p "
-		       "(not in pool %p)\n", evt, pool->events);
+		dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
+			"(not in pool %p)\n", evt, pool->events);
 		return;
 	}
 	if (atomic_inc_return(&evt->free) != 1) {
-		printk(KERN_ERR
-		       "ibmvscsi: Freeing event_struct %p "
-		       "which is not in use!\n", evt);
+		dev_err(evt->hostdata->dev, "Freeing event_struct %p "
+			"which is not in use!\n", evt);
 		return;
 	}
 }
@@ -426,10 +423,9 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 						   SG_ALL * sizeof(struct srp_direct_buf),
 						   &evt_struct->ext_list_token, 0);
 		if (!evt_struct->ext_list) {
-			printk(KERN_ERR
-			       "ibmvscsi: Can't allocate memory for indirect table\n");
+			sdev_printk(KERN_ERR, cmd->device,
+				    "Can't allocate memory for indirect table\n");
 			return 0;
-
 		}
 	}
 
@@ -464,8 +460,8 @@ static int map_single_data(struct scsi_cmnd *cmd,
 			   cmd->request_bufflen,
 			   DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(data->va)) {
-		printk(KERN_ERR
-		       "ibmvscsi: Unable to map request_buffer for command!\n");
+		sdev_printk(KERN_ERR, cmd->device,
+			    "Unable to map request_buffer for command!\n");
 		return 0;
 	}
 	data->len = cmd->request_bufflen;
@@ -496,13 +492,13 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
 	case DMA_NONE:
 		return 1;
 	case DMA_BIDIRECTIONAL:
-		printk(KERN_ERR
-		       "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
+		sdev_printk(KERN_ERR, cmd->device,
+			    "Can't map DMA_BIDIRECTIONAL to read/write\n");
 		return 0;
 	default:
-		printk(KERN_ERR
-		       "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
-		       cmd->sc_data_direction);
+		sdev_printk(KERN_ERR, cmd->device,
+			    "Unknown data direction 0x%02x; can't map!\n",
+			    cmd->sc_data_direction);
 		return 0;
 	}
 
@@ -585,8 +581,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
 		list_del(&evt_struct->list);
 
-		printk(KERN_ERR "ibmvscsi: send error %d\n",
-		       rc);
+		dev_err(hostdata->dev, "send error %d\n", rc);
 		atomic_inc(&hostdata->request_limit);
 		goto send_error;
 	}
@@ -627,9 +622,8 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
 
 	if (unlikely(rsp->opcode != SRP_RSP)) {
 		if (printk_ratelimit())
-			printk(KERN_WARNING
-			       "ibmvscsi: bad SRP RSP type %d\n",
-			       rsp->opcode);
+			dev_warn(evt_struct->hostdata->dev,
+				 "bad SRP RSP type %d\n", rsp->opcode);
 	}
 
 	if (cmnd) {
@@ -690,7 +684,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 	srp_cmd->lun = ((u64) lun) << 48;
 
 	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
-		printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
+		sdev_printk(KERN_ERR, cmnd->device, "couldn't convert cmd to srp_cmd\n");
 		free_event_struct(&hostdata->pool, evt_struct);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
@@ -737,16 +731,16 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
 			 DMA_BIDIRECTIONAL);
 
 	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
-		printk("ibmvscsi: error %d getting adapter info\n",
-		       evt_struct->xfer_iu->mad.adapter_info.common.status);
+		dev_err(hostdata->dev, "error %d getting adapter info\n",
+			evt_struct->xfer_iu->mad.adapter_info.common.status);
 	} else {
-		printk("ibmvscsi: host srp version: %s, "
-		       "host partition %s (%d), OS %d, max io %u\n",
-		       hostdata->madapter_info.srp_version,
-		       hostdata->madapter_info.partition_name,
-		       hostdata->madapter_info.partition_number,
-		       hostdata->madapter_info.os_type,
-		       hostdata->madapter_info.port_max_txu[0]);
+		dev_info(hostdata->dev, "host srp version: %s, "
+			 "host partition %s (%d), OS %d, max io %u\n",
+			 hostdata->madapter_info.srp_version,
+			 hostdata->madapter_info.partition_name,
+			 hostdata->madapter_info.partition_number,
+			 hostdata->madapter_info.os_type,
+			 hostdata->madapter_info.port_max_txu[0]);
 
 		if (hostdata->madapter_info.port_max_txu[0])
 			hostdata->host->max_sectors =
@@ -754,11 +748,10 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
 
 	if (hostdata->madapter_info.os_type == 3 &&
 	    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
-		printk("ibmvscsi: host (Ver. %s) doesn't support large"
-		       "transfers\n",
-		       hostdata->madapter_info.srp_version);
-		printk("ibmvscsi: limiting scatterlists to %d\n",
-		       MAX_INDIRECT_BUFS);
+		dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
+			hostdata->madapter_info.srp_version);
+		dev_err(hostdata->dev, "limiting scatterlists to %d\n",
+			MAX_INDIRECT_BUFS);
 		hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
 	}
 }
@@ -781,8 +774,8 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 
 	evt_struct = get_event_struct(&hostdata->pool);
 	if (!evt_struct) {
-		printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
-		       "for ADAPTER_INFO_REQ!\n");
+		dev_err(hostdata->dev,
+			"couldn't allocate an event for ADAPTER_INFO_REQ!\n");
 		return;
 	}
 
@@ -802,15 +795,13 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 				  DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(req->buffer)) {
-		printk(KERN_ERR
-		       "ibmvscsi: Unable to map request_buffer "
-		       "for adapter_info!\n");
+		dev_err(hostdata->dev, "Unable to map request_buffer for adapter_info!\n");
 		free_event_struct(&hostdata->pool, evt_struct);
 		return;
 	}
 
 	if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
-		printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
+		dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
 		dma_unmap_single(hostdata->dev,
 				 addr,
 				 sizeof(hostdata->madapter_info),
@@ -832,24 +823,23 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 	case SRP_LOGIN_RSP:	/* it worked! */
 		break;
 	case SRP_LOGIN_REJ:	/* refused! */
-		printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
-		       evt_struct->xfer_iu->srp.login_rej.reason);
+		dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
+			 evt_struct->xfer_iu->srp.login_rej.reason);
 		/* Login failed.  */
 		atomic_set(&hostdata->request_limit, -1);
 		return;
 	default:
-		printk(KERN_ERR
-		       "ibmvscsi: Invalid login response typecode 0x%02x!\n",
-		       evt_struct->xfer_iu->srp.login_rsp.opcode);
+		dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
+			evt_struct->xfer_iu->srp.login_rsp.opcode);
 		/* Login failed.  */
 		atomic_set(&hostdata->request_limit, -1);
 		return;
 	}
 
-	printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
+	dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
 
 	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
-		printk(KERN_ERR "ibmvscsi: Invalid request_limit.\n");
+		dev_err(hostdata->dev, "Invalid request_limit.\n");
 
 	/* Now we know what the real request-limit is.
 	 * This value is set rather than added to request_limit because
@@ -878,8 +868,7 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 	struct srp_login_req *login;
 	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
 	if (!evt_struct) {
-		printk(KERN_ERR
-		       "ibmvscsi: couldn't allocate an event for login req!\n");
+		dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
 		return FAILED;
 	}
 
@@ -902,7 +891,7 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 
 	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-	printk("ibmvscsic: sent SRP login\n");
+	dev_info(hostdata->dev, "sent SRP login\n");
 	return rc;
 };
 
@@ -957,7 +946,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 	evt = get_event_struct(&hostdata->pool);
 	if (evt == NULL) {
 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-		printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
+		sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n");
 		return FAILED;
 	}
 
@@ -975,15 +964,16 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 	tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
 	tsk_mgmt->task_tag = (u64) found_evt;
 
-	printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
-	       tsk_mgmt->lun, tsk_mgmt->task_tag);
+	sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%lx, tag 0x%lx\n",
+		    tsk_mgmt->lun, tsk_mgmt->task_tag);
 
 	evt->sync_srp = &srp_rsp;
 	init_completion(&evt->comp);
 	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 	if (rsp_rc != 0) {
-		printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
+		sdev_printk(KERN_ERR, cmd->device,
+			    "failed to send abort() event. rc=%d\n", rsp_rc);
 		return FAILED;
 	}
 
@@ -992,9 +982,8 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 	/* make sure we got a good response */
 	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
 		if (printk_ratelimit())
-			printk(KERN_WARNING
-			       "ibmvscsi: abort bad SRP RSP type %d\n",
-			       srp_rsp.srp.rsp.opcode);
+			sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
+				    srp_rsp.srp.rsp.opcode);
 		return FAILED;
 	}
 
@@ -1005,10 +994,9 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
 	if (rsp_rc) {
 		if (printk_ratelimit())
-			printk(KERN_WARNING
-			       "ibmvscsi: abort code %d for task tag 0x%lx\n",
-			       rsp_rc,
-			       tsk_mgmt->task_tag);
+			sdev_printk(KERN_WARNING, cmd->device,
+				    "abort code %d for task tag 0x%lx\n",
+				    rsp_rc, tsk_mgmt->task_tag);
 		return FAILED;
 	}
 
@@ -1027,15 +1015,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
 	if (found_evt == NULL) {
 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-		printk(KERN_INFO
-		       "ibmvscsi: aborted task tag 0x%lx completed\n",
-		       tsk_mgmt->task_tag);
+		sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%lx completed\n",
+			    tsk_mgmt->task_tag);
 		return SUCCESS;
 	}
 
-	printk(KERN_INFO
-	       "ibmvscsi: successfully aborted task tag 0x%lx\n",
-	       tsk_mgmt->task_tag);
+	sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%lx\n",
+		    tsk_mgmt->task_tag);
 
 	cmd->result = (DID_ABORT << 16);
 	list_del(&found_evt->list);
@@ -1069,7 +1055,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	evt = get_event_struct(&hostdata->pool);
 	if (evt == NULL) {
 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-		printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
+		sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n");
 		return FAILED;
 	}
 
@@ -1086,15 +1072,16 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	tsk_mgmt->lun = ((u64) lun) << 48;
 	tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
 
-	printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
-	       tsk_mgmt->lun);
+	sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
+		    tsk_mgmt->lun);
 
 	evt->sync_srp = &srp_rsp;
 	init_completion(&evt->comp);
 	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 	if (rsp_rc != 0) {
-		printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
+		sdev_printk(KERN_ERR, cmd->device,
+			    "failed to send reset event. rc=%d\n", rsp_rc);
 		return FAILED;
 	}
 
@@ -1103,9 +1090,8 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	/* make sure we got a good response */
 	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
 		if (printk_ratelimit())
-			printk(KERN_WARNING
-			       "ibmvscsi: reset bad SRP RSP type %d\n",
-			       srp_rsp.srp.rsp.opcode);
+			sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
+				    srp_rsp.srp.rsp.opcode);
 		return FAILED;
 	}
 
@@ -1116,9 +1102,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 
 	if (rsp_rc) {
 		if (printk_ratelimit())
-			printk(KERN_WARNING
-			       "ibmvscsi: reset code %d for task tag 0x%lx\n",
-			       rsp_rc, tsk_mgmt->task_tag);
+			sdev_printk(KERN_WARNING, cmd->device,
+				    "reset code %d for task tag 0x%lx\n",
+				    rsp_rc, tsk_mgmt->task_tag);
 		return FAILED;
 	}
 
@@ -1184,6 +1170,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
 void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 			 struct ibmvscsi_host_data *hostdata)
 {
+	long rc;
 	unsigned long flags;
 	struct srp_event_struct *evt_struct =
 	    (struct srp_event_struct *)crq->IU_data_ptr;
@@ -1191,27 +1178,25 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	case 0xC0:		/* initialization */
 		switch (crq->format) {
 		case 0x01:	/* Initialization message */
-			printk(KERN_INFO "ibmvscsi: partner initialized\n");
+			dev_info(hostdata->dev, "partner initialized\n");
 			/* Send back a response */
-			if (ibmvscsi_send_crq(hostdata,
-					      0xC002000000000000LL, 0) == 0) {
+			if ((rc = ibmvscsi_send_crq(hostdata,
+						    0xC002000000000000LL, 0)) == 0) {
 				/* Now login */
 				send_srp_login(hostdata);
 			} else {
-				printk(KERN_ERR
-				       "ibmvscsi: Unable to send init rsp\n");
+				dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
 			}
 
 			break;
 		case 0x02:	/* Initialization response */
-			printk(KERN_INFO
-			       "ibmvscsi: partner initialization complete\n");
+			dev_info(hostdata->dev, "partner initialization complete\n");
 
 			/* Now login */
 			send_srp_login(hostdata);
 			break;
 		default:
-			printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
+			dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
 		}
 		return;
 	case 0xFF:	/* Hypervisor telling us the connection is closed */
@@ -1219,8 +1204,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 		atomic_set(&hostdata->request_limit, 0);
 		if (crq->format == 0x06) {
 			/* We need to re-setup the interpartition connection */
-			printk(KERN_INFO
-			       "ibmvscsi: Re-enabling adapter!\n");
+			dev_info(hostdata->dev, "Re-enabling adapter!\n");
 			purge_requests(hostdata, DID_REQUEUE);
 			if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
 							 hostdata)) ||
@@ -1228,14 +1212,11 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 					       0xC001000000000000LL, 0))) {
 				atomic_set(&hostdata->request_limit,
 					   -1);
-				printk(KERN_ERR
-				       "ibmvscsi: error after"
-				       " enable\n");
+				dev_err(hostdata->dev, "error after enable\n");
 			}
 		} else {
-			printk(KERN_INFO
-			       "ibmvscsi: Virtual adapter failed rc %d!\n",
-			       crq->format);
+			dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
+				crq->format);
 
 			purge_requests(hostdata, DID_ERROR);
 			if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
@@ -1244,8 +1225,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 					       0xC001000000000000LL, 0))) {
 				atomic_set(&hostdata->request_limit,
 					   -1);
-				printk(KERN_ERR
-				       "ibmvscsi: error after reset\n");
+				dev_err(hostdata->dev, "error after reset\n");
 			}
 		}
 		scsi_unblock_requests(hostdata->host);
@@ -1253,9 +1233,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	case 0x80:		/* real payload */
 		break;
 	default:
-		printk(KERN_ERR
-		       "ibmvscsi: got an invalid message type 0x%02x\n",
-		       crq->valid);
+		dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
+			crq->valid);
 		return;
 	}
 
@@ -1264,16 +1243,14 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	 * actually sent
 	 */
 	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
-		printk(KERN_ERR
-		       "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
-		       (void *)crq->IU_data_ptr);
+		dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
+			(void *)crq->IU_data_ptr);
 		return;
 	}
 
 	if (atomic_read(&evt_struct->free)) {
-		printk(KERN_ERR
-		       "ibmvscsi: received duplicate correlation_token 0x%p!\n",
-		       (void *)crq->IU_data_ptr);
+		dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
+			(void *)crq->IU_data_ptr);
 		return;
 	}
 
@@ -1284,8 +1261,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	if (evt_struct->done)
 		evt_struct->done(evt_struct);
 	else
-		printk(KERN_ERR
-		       "ibmvscsi: returned done() is NULL; not running it!\n");
+		dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");
 
 	/*
 	 * Lock the host_lock before messing with these structures, since we
@@ -1311,8 +1287,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 
 	evt_struct = get_event_struct(&hostdata->pool);
 	if (!evt_struct) {
-		printk(KERN_ERR
-		       "ibmvscsi: could't allocate event for HOST_CONFIG!\n");
+		dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
 		return -1;
 	}
 
@@ -1332,8 +1307,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 					    DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(host_config->buffer)) {
-		printk(KERN_ERR
-		       "ibmvscsi: dma_mapping error " "getting host config\n");
+		dev_err(hostdata->dev, "dma_mapping error getting host config\n");
 		free_event_struct(&hostdata->pool, evt_struct);
 		return -1;
 	}
@@ -1556,7 +1530,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	driver_template.can_queue = max_requests;
 	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
 	if (!host) {
-		printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
+		dev_err(&vdev->dev, "couldn't allocate host data\n");
 		goto scsi_host_alloc_failed;
 	}
 
@@ -1570,11 +1544,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
 	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
 	if (rc != 0 && rc != H_RESOURCE) {
-		printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
+		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
 		goto init_crq_failed;
 	}
 	if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
-		printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n");
+		dev_err(&vdev->dev, "couldn't initialize event pool\n");
 		goto init_pool_failed;
 	}
 