path: root/drivers/scsi/hptiop.c
Diffstat (limited to 'drivers/scsi/hptiop.c')
-rw-r--r--	drivers/scsi/hptiop.c	568
1 file changed, 9 insertions, 559 deletions
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 6b41c2ef6e21..28bfb8f9f81d 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -44,10 +44,6 @@ static char driver_name[] = "hptiop";
 static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
 static const char driver_ver[] = "v1.0 (060426)";
 
-static DEFINE_SPINLOCK(hptiop_hba_list_lock);
-static LIST_HEAD(hptiop_hba_list);
-static int hptiop_cdev_major = -1;
-
 static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
 static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
@@ -576,7 +572,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
 	if (atomic_xchg(&hba->resetting, 1) == 0) {
 		atomic_inc(&hba->reset_count);
 		writel(IOPMU_INBOUND_MSG0_RESET,
-			&hba->iop->outbound_msgaddr0);
+			&hba->iop->inbound_msgaddr0);
 		hptiop_pci_posting_flush(hba->iop);
 	}
 
@@ -619,532 +615,11 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
 	return queue_depth;
 }
 
-struct hptiop_getinfo {
-	char __user *buffer;
-	loff_t buflength;
-	loff_t bufoffset;
-	loff_t buffillen;
-	loff_t filpos;
-};
-
-static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo,
-				char *data, int datalen)
-{
-	if (pinfo->filpos < pinfo->bufoffset) {
-		if (pinfo->filpos + datalen <= pinfo->bufoffset) {
-			pinfo->filpos += datalen;
-			return;
-		} else {
-			data += (pinfo->bufoffset - pinfo->filpos);
-			datalen -= (pinfo->bufoffset - pinfo->filpos);
-			pinfo->filpos = pinfo->bufoffset;
-		}
-	}
-
-	pinfo->filpos += datalen;
-	if (pinfo->buffillen == pinfo->buflength)
-		return;
-
-	if (pinfo->buflength - pinfo->buffillen < datalen)
-		datalen = pinfo->buflength - pinfo->buffillen;
-
-	if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen))
-		return;
-
-	pinfo->buffillen += datalen;
-}
-
-static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...)
-{
-	va_list args;
-	char buf[128];
-	int len;
-
-	va_start(args, fmt);
-	len = vsnprintf(buf, sizeof(buf), fmt, args);
-	va_end(args);
-	hptiop_copy_mem_info(pinfo, buf, len);
-	return len;
-}
-
-static void hptiop_ioctl_done(struct hpt_ioctl_k *arg)
-{
-	arg->done = NULL;
-	wake_up(&arg->hba->ioctl_wq);
-}
-
-static void hptiop_do_ioctl(struct hpt_ioctl_k *arg)
-{
-	struct hptiop_hba *hba = arg->hba;
-	u32 val;
-	struct hpt_iop_request_ioctl_command __iomem *req;
-	int ioctl_retry = 0;
-
-	dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no);
-
-	/*
-	 * check (in + out) buff size from application.
-	 * outbuf must be dword aligned.
-	 */
-	if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size >
-			hba->max_request_size
-				- sizeof(struct hpt_iop_request_header)
-				- 4 * sizeof(u32)) {
-		dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n",
-			hba->host->host_no,
-			arg->inbuf_size, arg->outbuf_size);
-		arg->result = HPT_IOCTL_RESULT_FAILED;
-		return;
-	}
-
-retry:
-	spin_lock_irq(hba->host->host_lock);
-
-	val = readl(&hba->iop->inbound_queue);
-	if (val == IOPMU_QUEUE_EMPTY) {
-		spin_unlock_irq(hba->host->host_lock);
-		dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no);
-		arg->result = -1;
-		return;
-	}
-
-	req = (struct hpt_iop_request_ioctl_command __iomem *)
-			((unsigned long)hba->iop + val);
-
-	writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code),
-			&req->ioctl_code);
-	writel(arg->inbuf_size, &req->inbuf_size);
-	writel(arg->outbuf_size, &req->outbuf_size);
-
-	/*
-	 * use the buffer on the IOP local memory first, then copy it
-	 * back to host.
-	 * the caller's request buffer shoudl be little-endian.
-	 */
-	if (arg->inbuf_size)
-		memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size);
-
-	/* correct the controller ID for IOP */
-	if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO ||
-		arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 ||
-		arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO)
-		&& arg->inbuf_size >= sizeof(u32))
-		writel(0, req->buf);
-
-	writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type);
-	writel(0, &req->header.flags);
-	writel(offsetof(struct hpt_iop_request_ioctl_command, buf)
-			+ arg->inbuf_size, &req->header.size);
-	writel((u32)(unsigned long)arg, &req->header.context);
-	writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0,
-			&req->header.context_hi32);
-	writel(IOP_RESULT_PENDING, &req->header.result);
-
-	arg->result = HPT_IOCTL_RESULT_FAILED;
-	arg->done = hptiop_ioctl_done;
-
-	writel(val, &hba->iop->inbound_queue);
-	hptiop_pci_posting_flush(hba->iop);
-
-	spin_unlock_irq(hba->host->host_lock);
-
-	wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ);
-
-	if (arg->done != NULL) {
-		hptiop_reset_hba(hba);
-		if (ioctl_retry++ < 3)
-			goto retry;
-	}
-
-	dprintk("hpt_iop_ioctl %x result %d\n",
-			arg->ioctl_code, arg->result);
-}
-
-static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf,
-			u32 insize, void *outbuf, u32 outsize)
-{
-	struct hpt_ioctl_k arg;
-	arg.hba = hba;
-	arg.ioctl_code = code;
-	arg.inbuf = inbuf;
-	arg.outbuf = outbuf;
-	arg.inbuf_size = insize;
-	arg.outbuf_size = outsize;
-	arg.bytes_returned = NULL;
-	hptiop_do_ioctl(&arg);
-	return arg.result;
-}
-
-static inline int hpt_id_valid(__le32 id)
-{
-	return id != 0 && id != cpu_to_le32(0xffffffff);
-}
-
-static int hptiop_get_controller_info(struct hptiop_hba *hba,
-					struct hpt_controller_info *pinfo)
-{
-	int id = 0;
-
-	return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO,
-		&id, sizeof(int), pinfo, sizeof(*pinfo));
-}
-
-
-static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus,
-					struct hpt_channel_info *pinfo)
-{
-	u32 ids[2];
-
-	ids[0] = 0;
-	ids[1] = bus;
-	return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO,
-			ids, sizeof(ids), pinfo, sizeof(*pinfo));
-
-}
-
-static int hptiop_get_logical_devices(struct hptiop_hba *hba,
-					__le32 *pids, int maxcount)
-{
-	int i;
-	u32 count = maxcount - 1;
-
-	if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES,
-			&count, sizeof(u32),
-			pids, sizeof(u32) * maxcount))
-		return -1;
-
-	maxcount = le32_to_cpu(pids[0]);
-	for (i = 0; i < maxcount; i++)
-		pids[i] = pids[i+1];
-
-	return maxcount;
-}
-
-static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id,
-				struct hpt_logical_device_info_v3 *pinfo)
-{
-	return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3,
-			&id, sizeof(u32),
-			pinfo, sizeof(*pinfo));
-}
-
-static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo)
-{
-	static char s[64];
-	u32 flags = le32_to_cpu(devinfo->u.array.flags);
-	u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress);
-	u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress);
-
-	if (flags & ARRAY_FLAG_DISABLED)
-		return "Disabled";
-	else if (flags & ARRAY_FLAG_TRANSFORMING)
-		sprintf(s, "Expanding/Migrating %d.%d%%%s%s",
-			trans_prog / 100,
-			trans_prog % 100,
-			(flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))?
-				", Critical" : "",
-			((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
-			 !(flags & ARRAY_FLAG_REBUILDING) &&
-			 !(flags & ARRAY_FLAG_INITIALIZING))?
-				", Unintialized" : "");
-	else if ((flags & ARRAY_FLAG_BROKEN) &&
-				devinfo->u.array.array_type != AT_RAID6)
-		return "Critical";
-	else if (flags & ARRAY_FLAG_REBUILDING)
-		sprintf(s,
-			(flags & ARRAY_FLAG_NEEDINITIALIZING)?
-				"%sBackground initializing %d.%d%%" :
-				"%sRebuilding %d.%d%%",
-			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-			reb_prog / 100,
-			reb_prog % 100);
-	else if (flags & ARRAY_FLAG_VERIFYING)
-		sprintf(s, "%sVerifying %d.%d%%",
-			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-			reb_prog / 100,
-			reb_prog % 100);
-	else if (flags & ARRAY_FLAG_INITIALIZING)
-		sprintf(s, "%sForground initializing %d.%d%%",
-			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-			reb_prog / 100,
-			reb_prog % 100);
-	else if (flags & ARRAY_FLAG_NEEDTRANSFORM)
-		sprintf(s,"%s%s%s", "Need Expanding/Migrating",
-			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-			((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
-			 !(flags & ARRAY_FLAG_REBUILDING) &&
-			 !(flags & ARRAY_FLAG_INITIALIZING))?
-				", Unintialized" : "");
-	else if (flags & ARRAY_FLAG_NEEDINITIALIZING &&
-		!(flags & ARRAY_FLAG_REBUILDING) &&
-		!(flags & ARRAY_FLAG_INITIALIZING))
-		sprintf(s,"%sUninitialized",
-			(flags & ARRAY_FLAG_BROKEN)? "Critical, " : "");
-	else if ((flags & ARRAY_FLAG_NEEDBUILDING) ||
-			(flags & ARRAY_FLAG_BROKEN))
-		return "Critical";
-	else
-		return "Normal";
-	return s;
-}
-
-static void hptiop_dump_devinfo(struct hptiop_hba *hba,
-			struct hptiop_getinfo *pinfo, __le32 id, int indent)
-{
-	struct hpt_logical_device_info_v3 devinfo;
-	int i;
-	u64 capacity;
-
-	for (i = 0; i < indent; i++)
-		hptiop_copy_info(pinfo, "\t");
-
-	if (hptiop_get_device_info_v3(hba, id, &devinfo)) {
-		hptiop_copy_info(pinfo, "unknown\n");
-		return;
-	}
-
-	switch (devinfo.type) {
-
-	case LDT_DEVICE: {
-		struct hd_driveid *driveid;
-		u32 flags = le32_to_cpu(devinfo.u.device.flags);
-
-		driveid = (struct hd_driveid *)devinfo.u.device.ident;
-		/* model[] is 40 chars long, but we just want 20 chars here */
-		driveid->model[20] = 0;
-
-		if (indent)
-			if (flags & DEVICE_FLAG_DISABLED)
-				hptiop_copy_info(pinfo,"Missing\n");
-			else
-				hptiop_copy_info(pinfo, "CH%d %s\n",
-					devinfo.u.device.path_id + 1,
-					driveid->model);
-		else {
-			capacity = le64_to_cpu(devinfo.capacity) * 512;
-			do_div(capacity, 1000000);
-			hptiop_copy_info(pinfo,
-				"CH%d %s, %lluMB, %s %s%s%s%s\n",
-				devinfo.u.device.path_id + 1,
-				driveid->model,
-				capacity,
-				(flags & DEVICE_FLAG_DISABLED)?
-					"Disabled" : "Normal",
-				devinfo.u.device.read_ahead_enabled?
-					"[RA]" : "",
-				devinfo.u.device.write_cache_enabled?
-					"[WC]" : "",
-				devinfo.u.device.TCQ_enabled?
-					"[TCQ]" : "",
-				devinfo.u.device.NCQ_enabled?
-					"[NCQ]" : ""
-			);
-		}
-		break;
-	}
-
-	case LDT_ARRAY:
-		if (devinfo.target_id != INVALID_TARGET_ID)
-			hptiop_copy_info(pinfo, "[DISK %d_%d] ",
-					devinfo.vbus_id, devinfo.target_id);
-
-		capacity = le64_to_cpu(devinfo.capacity) * 512;
-		do_div(capacity, 1000000);
-		hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n",
-				devinfo.u.array.name,
-				devinfo.u.array.array_type==AT_RAID0? "RAID0" :
-					devinfo.u.array.array_type==AT_RAID1? "RAID1" :
-					devinfo.u.array.array_type==AT_RAID5? "RAID5" :
-					devinfo.u.array.array_type==AT_RAID6? "RAID6" :
-					devinfo.u.array.array_type==AT_JBOD? "JBOD" :
-							"unknown",
-				capacity,
-				get_array_status(&devinfo));
-		for (i = 0; i < devinfo.u.array.ndisk; i++) {
-			if (hpt_id_valid(devinfo.u.array.members[i])) {
-				if (cpu_to_le16(1<<i) &
-					devinfo.u.array.critical_members)
-					hptiop_copy_info(pinfo, "\t*");
-				hptiop_dump_devinfo(hba, pinfo,
-						devinfo.u.array.members[i], indent+1);
-			}
-			else
-				hptiop_copy_info(pinfo, "\tMissing\n");
-		}
-		if (id == devinfo.u.array.transform_source) {
-			hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n");
-			hptiop_dump_devinfo(hba, pinfo,
-				devinfo.u.array.transform_target, indent+1);
-		}
-		break;
-	}
-}
-
 static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
 }
 
-static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf,
-				size_t count, loff_t *ppos)
-{
-	struct hptiop_hba *hba = filp->private_data;
-	struct hptiop_getinfo info;
-	int i, j, ndev;
-	struct hpt_controller_info con_info;
-	struct hpt_channel_info chan_info;
-	__le32 ids[32];
-
-	info.buffer = buf;
-	info.buflength = count;
-	info.bufoffset = ppos ? *ppos : 0;
-	info.filpos = 0;
-	info.buffillen = 0;
-
-	if (hptiop_get_controller_info(hba, &con_info))
-		return -EIO;
-
-	for (i = 0; i < con_info.num_buses; i++) {
-		if (hptiop_get_channel_info(hba, i, &chan_info) == 0) {
-			if (hpt_id_valid(chan_info.devices[0]))
-				hptiop_dump_devinfo(hba, &info,
-						chan_info.devices[0], 0);
-			if (hpt_id_valid(chan_info.devices[1]))
-				hptiop_dump_devinfo(hba, &info,
-						chan_info.devices[1], 0);
-		}
-	}
-
-	ndev = hptiop_get_logical_devices(hba, ids,
-					sizeof(ids) / sizeof(ids[0]));
-
-	/*
-	 * if hptiop_get_logical_devices fails, ndev==-1 and it just
-	 * output nothing here
-	 */
-	for (j = 0; j < ndev; j++)
-		hptiop_dump_devinfo(hba, &info, ids[j], 0);
-
-	if (ppos)
-		*ppos += info.buffillen;
-
-	return info.buffillen;
-}
-
-static int hptiop_cdev_ioctl(struct inode *inode, struct file *file,
-					unsigned int cmd, unsigned long arg)
-{
-	struct hptiop_hba *hba = file->private_data;
-	struct hpt_ioctl_u ioctl_u;
-	struct hpt_ioctl_k ioctl_k;
-	u32 bytes_returned;
-	int err = -EINVAL;
-
-	if (copy_from_user(&ioctl_u,
-		(void __user *)arg, sizeof(struct hpt_ioctl_u)))
-		return -EINVAL;
-
-	if (ioctl_u.magic != HPT_IOCTL_MAGIC)
-		return -EINVAL;
-
-	ioctl_k.ioctl_code = ioctl_u.ioctl_code;
-	ioctl_k.inbuf = NULL;
-	ioctl_k.inbuf_size = ioctl_u.inbuf_size;
-	ioctl_k.outbuf = NULL;
-	ioctl_k.outbuf_size = ioctl_u.outbuf_size;
-	ioctl_k.hba = hba;
-	ioctl_k.bytes_returned = &bytes_returned;
-
-	/* verify user buffer */
-	if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ,
-		ioctl_u.inbuf, ioctl_k.inbuf_size)) ||
-		(ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE,
-			ioctl_u.outbuf, ioctl_k.outbuf_size)) ||
-		(ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE,
-			ioctl_u.bytes_returned, sizeof(u32))) ||
-		ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) {
-
-		dprintk("scsi%d: got bad user address\n", hba->host->host_no);
-		return -EINVAL;
-	}
-
-	/* map buffer to kernel. */
-	if (ioctl_k.inbuf_size) {
-		ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL);
-		if (!ioctl_k.inbuf) {
-			dprintk("scsi%d: fail to alloc inbuf\n",
-				hba->host->host_no);
-			err = -ENOMEM;
-			goto err_exit;
-		}
-
-		if (copy_from_user(ioctl_k.inbuf,
-				ioctl_u.inbuf, ioctl_k.inbuf_size)) {
-			goto err_exit;
-		}
-	}
-
-	if (ioctl_k.outbuf_size) {
-		ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL);
-		if (!ioctl_k.outbuf) {
-			dprintk("scsi%d: fail to alloc outbuf\n",
-				hba->host->host_no);
-			err = -ENOMEM;
-			goto err_exit;
-		}
-	}
-
-	hptiop_do_ioctl(&ioctl_k);
-
-	if (ioctl_k.result == HPT_IOCTL_RESULT_OK) {
-		if (ioctl_k.outbuf_size &&
-			copy_to_user(ioctl_u.outbuf,
-				ioctl_k.outbuf, ioctl_k.outbuf_size))
-			goto err_exit;
-
-		if (ioctl_u.bytes_returned &&
-			copy_to_user(ioctl_u.bytes_returned,
-				&bytes_returned, sizeof(u32)))
-			goto err_exit;
-
-		err = 0;
-	}
-
-err_exit:
-	kfree(ioctl_k.inbuf);
-	kfree(ioctl_k.outbuf);
-
-	return err;
-}
-
-static int hptiop_cdev_open(struct inode *inode, struct file *file)
-{
-	struct hptiop_hba *hba;
-	unsigned i = 0, minor = iminor(inode);
-	int ret = -ENODEV;
-
-	spin_lock(&hptiop_hba_list_lock);
-	list_for_each_entry(hba, &hptiop_hba_list, link) {
-		if (i == minor) {
-			file->private_data = hba;
-			ret = 0;
-			goto out;
-		}
-		i++;
-	}
-
-out:
-	spin_unlock(&hptiop_hba_list_lock);
-	return ret;
-}
-
-static struct file_operations hptiop_cdev_fops = {
-	.owner = THIS_MODULE,
-	.read = hptiop_cdev_read,
-	.ioctl = hptiop_cdev_ioctl,
-	.open = hptiop_cdev_open,
-};
-
 static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
 {
 	struct Scsi_Host *host = class_to_shost(class_dev);
@@ -1295,19 +770,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 		goto unmap_pci_bar;
 	}
 
-	if (scsi_add_host(host, &pcidev->dev)) {
-		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
-			hba->host->host_no);
-		goto unmap_pci_bar;
-	}
-
 	pci_set_drvdata(pcidev, host);
 
 	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
 					driver_name, hba)) {
 		printk(KERN_ERR "scsi%d: request irq %d failed\n",
 			hba->host->host_no, pcidev->irq);
-		goto remove_scsi_host;
+		goto unmap_pci_bar;
 	}
 
 	/* Allocate request mem */
@@ -1354,9 +823,12 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 	if (hptiop_initialize_iop(hba))
 		goto free_request_mem;
 
-	spin_lock(&hptiop_hba_list_lock);
-	list_add_tail(&hba->link, &hptiop_hba_list);
-	spin_unlock(&hptiop_hba_list_lock);
+	if (scsi_add_host(host, &pcidev->dev)) {
+		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
+			hba->host->host_no);
+		goto free_request_mem;
+	}
+
 
 	scsi_scan_host(host);
 
@@ -1371,9 +843,6 @@ free_request_mem:
 free_request_irq:
 	free_irq(hba->pcidev->irq, hba);
 
-remove_scsi_host:
-	scsi_remove_host(host);
-
 unmap_pci_bar:
 	iounmap(hba->iop);
 
@@ -1421,10 +890,6 @@ static void hptiop_remove(struct pci_dev *pcidev)
 
 	scsi_remove_host(host);
 
-	spin_lock(&hptiop_hba_list_lock);
-	list_del_init(&hba->link);
-	spin_unlock(&hptiop_hba_list_lock);
-
 	hptiop_shutdown(pcidev);
 
 	free_irq(hba->pcidev->irq, hba);
@@ -1461,27 +926,12 @@ static struct pci_driver hptiop_pci_driver = {
 
 static int __init hptiop_module_init(void)
 {
-	int error;
-
 	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
-
-	error = pci_register_driver(&hptiop_pci_driver);
-	if (error < 0)
-		return error;
-
-	hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops);
-	if (hptiop_cdev_major < 0) {
-		printk(KERN_WARNING "unable to register hptiop device.\n");
-		return hptiop_cdev_major;
-	}
-
-	return 0;
+	return pci_register_driver(&hptiop_pci_driver);
 }
 
 static void __exit hptiop_module_exit(void)
 {
-	dprintk("hptiop_module_exit\n");
-	unregister_chrdev(hptiop_cdev_major, "hptiop");
 	pci_unregister_driver(&hptiop_pci_driver);
 }
 