Diffstat (limited to 'drivers/scsi')
-rw-r--r--   drivers/scsi/hptiop.c | 566
1 files changed, 8 insertions, 558 deletions
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 74d4d22e5c05..bcb3444f1dcf 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -45,10 +45,6 @@ static char driver_name[] = "hptiop";
 static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
 static const char driver_ver[] = "v1.0 (060426)";
 
-static DEFINE_SPINLOCK(hptiop_hba_list_lock);
-static LIST_HEAD(hptiop_hba_list);
-static int hptiop_cdev_major = -1;
-
 static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
 static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
@@ -620,532 +616,11 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
         return queue_depth;
 }
 
-struct hptiop_getinfo {
-        char __user *buffer;
-        loff_t buflength;
-        loff_t bufoffset;
-        loff_t buffillen;
-        loff_t filpos;
-};
-
-static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo,
-                                char *data, int datalen)
-{
-        if (pinfo->filpos < pinfo->bufoffset) {
-                if (pinfo->filpos + datalen <= pinfo->bufoffset) {
-                        pinfo->filpos += datalen;
-                        return;
-                } else {
-                        data += (pinfo->bufoffset - pinfo->filpos);
-                        datalen -= (pinfo->bufoffset - pinfo->filpos);
-                        pinfo->filpos = pinfo->bufoffset;
-                }
-        }
-
-        pinfo->filpos += datalen;
-        if (pinfo->buffillen == pinfo->buflength)
-                return;
-
-        if (pinfo->buflength - pinfo->buffillen < datalen)
-                datalen = pinfo->buflength - pinfo->buffillen;
-
-        if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen))
-                return;
-
-        pinfo->buffillen += datalen;
-}
-
-static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...)
-{
-        va_list args;
-        char buf[128];
-        int len;
-
-        va_start(args, fmt);
-        len = vsnprintf(buf, sizeof(buf), fmt, args);
-        va_end(args);
-        hptiop_copy_mem_info(pinfo, buf, len);
-        return len;
-}
-
-static void hptiop_ioctl_done(struct hpt_ioctl_k *arg)
-{
-        arg->done = NULL;
-        wake_up(&arg->hba->ioctl_wq);
-}
-
-static void hptiop_do_ioctl(struct hpt_ioctl_k *arg)
-{
-        struct hptiop_hba *hba = arg->hba;
-        u32 val;
-        struct hpt_iop_request_ioctl_command __iomem *req;
-        int ioctl_retry = 0;
-
-        dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no);
-
-        /*
-         * check (in + out) buff size from application.
-         * outbuf must be dword aligned.
-         */
-        if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size >
-                        hba->max_request_size
-                                - sizeof(struct hpt_iop_request_header)
-                                - 4 * sizeof(u32)) {
-                dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n",
-                                hba->host->host_no,
-                                arg->inbuf_size, arg->outbuf_size);
-                arg->result = HPT_IOCTL_RESULT_FAILED;
-                return;
-        }
-
-retry:
-        spin_lock_irq(hba->host->host_lock);
-
-        val = readl(&hba->iop->inbound_queue);
-        if (val == IOPMU_QUEUE_EMPTY) {
-                spin_unlock_irq(hba->host->host_lock);
-                dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no);
-                arg->result = -1;
-                return;
-        }
-
-        req = (struct hpt_iop_request_ioctl_command __iomem *)
-                        ((unsigned long)hba->iop + val);
-
-        writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code),
-                        &req->ioctl_code);
-        writel(arg->inbuf_size, &req->inbuf_size);
-        writel(arg->outbuf_size, &req->outbuf_size);
-
-        /*
-         * use the buffer on the IOP local memory first, then copy it
-         * back to host.
-         * the caller's request buffer shoudl be little-endian.
-         */
-        if (arg->inbuf_size)
-                memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size);
-
-        /* correct the controller ID for IOP */
-        if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO ||
-                arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 ||
-                arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO)
-                && arg->inbuf_size >= sizeof(u32))
-                writel(0, req->buf);
-
-        writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type);
-        writel(0, &req->header.flags);
-        writel(offsetof(struct hpt_iop_request_ioctl_command, buf)
-                        + arg->inbuf_size, &req->header.size);
-        writel((u32)(unsigned long)arg, &req->header.context);
-        writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0,
-                        &req->header.context_hi32);
-        writel(IOP_RESULT_PENDING, &req->header.result);
-
-        arg->result = HPT_IOCTL_RESULT_FAILED;
-        arg->done = hptiop_ioctl_done;
-
-        writel(val, &hba->iop->inbound_queue);
-        hptiop_pci_posting_flush(hba->iop);
-
-        spin_unlock_irq(hba->host->host_lock);
-
-        wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ);
-
-        if (arg->done != NULL) {
-                hptiop_reset_hba(hba);
-                if (ioctl_retry++ < 3)
-                        goto retry;
-        }
-
-        dprintk("hpt_iop_ioctl %x result %d\n",
-                        arg->ioctl_code, arg->result);
-}
-
-static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf,
-                        u32 insize, void *outbuf, u32 outsize)
-{
-        struct hpt_ioctl_k arg;
-        arg.hba = hba;
-        arg.ioctl_code = code;
-        arg.inbuf = inbuf;
-        arg.outbuf = outbuf;
-        arg.inbuf_size = insize;
-        arg.outbuf_size = outsize;
-        arg.bytes_returned = NULL;
-        hptiop_do_ioctl(&arg);
-        return arg.result;
-}
-
-static inline int hpt_id_valid(__le32 id)
-{
-        return id != 0 && id != cpu_to_le32(0xffffffff);
-}
-
-static int hptiop_get_controller_info(struct hptiop_hba *hba,
-                                        struct hpt_controller_info *pinfo)
-{
-        int id = 0;
-
-        return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO,
-                &id, sizeof(int), pinfo, sizeof(*pinfo));
-}
-
-
-static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus,
-                                        struct hpt_channel_info *pinfo)
-{
-        u32 ids[2];
-
-        ids[0] = 0;
-        ids[1] = bus;
-        return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO,
-                        ids, sizeof(ids), pinfo, sizeof(*pinfo));
-
-}
-
-static int hptiop_get_logical_devices(struct hptiop_hba *hba,
-                                        __le32 *pids, int maxcount)
-{
-        int i;
-        u32 count = maxcount - 1;
-
-        if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES,
-                        &count, sizeof(u32),
-                        pids, sizeof(u32) * maxcount))
-                return -1;
-
-        maxcount = le32_to_cpu(pids[0]);
-        for (i = 0; i < maxcount; i++)
-                pids[i] = pids[i+1];
-
-        return maxcount;
-}
-
-static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id,
-                                struct hpt_logical_device_info_v3 *pinfo)
-{
-        return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3,
-                                &id, sizeof(u32),
-                                pinfo, sizeof(*pinfo));
-}
-
-static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo)
-{
-        static char s[64];
-        u32 flags = le32_to_cpu(devinfo->u.array.flags);
-        u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress);
-        u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress);
-
-        if (flags & ARRAY_FLAG_DISABLED)
-                return "Disabled";
-        else if (flags & ARRAY_FLAG_TRANSFORMING)
-                sprintf(s, "Expanding/Migrating %d.%d%%%s%s",
-                        trans_prog / 100,
-                        trans_prog % 100,
-                        (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))?
-                                        ", Critical" : "",
-                        ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
-                         !(flags & ARRAY_FLAG_REBUILDING) &&
-                         !(flags & ARRAY_FLAG_INITIALIZING))?
-                                        ", Unintialized" : "");
-        else if ((flags & ARRAY_FLAG_BROKEN) &&
-                                devinfo->u.array.array_type != AT_RAID6)
-                return "Critical";
-        else if (flags & ARRAY_FLAG_REBUILDING)
-                sprintf(s,
-                        (flags & ARRAY_FLAG_NEEDINITIALIZING)?
-                                "%sBackground initializing %d.%d%%" :
-                                "%sRebuilding %d.%d%%",
-                        (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-                        reb_prog / 100,
-                        reb_prog % 100);
-        else if (flags & ARRAY_FLAG_VERIFYING)
-                sprintf(s, "%sVerifying %d.%d%%",
-                        (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-                        reb_prog / 100,
-                        reb_prog % 100);
-        else if (flags & ARRAY_FLAG_INITIALIZING)
-                sprintf(s, "%sForground initializing %d.%d%%",
-                        (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-                        reb_prog / 100,
-                        reb_prog % 100);
-        else if (flags & ARRAY_FLAG_NEEDTRANSFORM)
-                sprintf(s,"%s%s%s", "Need Expanding/Migrating",
-                        (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
-                        ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
-                         !(flags & ARRAY_FLAG_REBUILDING) &&
-                         !(flags & ARRAY_FLAG_INITIALIZING))?
-                                        ", Unintialized" : "");
-        else if (flags & ARRAY_FLAG_NEEDINITIALIZING &&
-                !(flags & ARRAY_FLAG_REBUILDING) &&
-                !(flags & ARRAY_FLAG_INITIALIZING))
-                sprintf(s,"%sUninitialized",
-                        (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "");
-        else if ((flags & ARRAY_FLAG_NEEDBUILDING) ||
-                        (flags & ARRAY_FLAG_BROKEN))
-                return "Critical";
-        else
-                return "Normal";
-        return s;
-}
-
-static void hptiop_dump_devinfo(struct hptiop_hba *hba,
-                        struct hptiop_getinfo *pinfo, __le32 id, int indent)
-{
-        struct hpt_logical_device_info_v3 devinfo;
-        int i;
-        u64 capacity;
-
-        for (i = 0; i < indent; i++)
-                hptiop_copy_info(pinfo, "\t");
-
-        if (hptiop_get_device_info_v3(hba, id, &devinfo)) {
-                hptiop_copy_info(pinfo, "unknown\n");
-                return;
-        }
-
-        switch (devinfo.type) {
-
-        case LDT_DEVICE: {
-                struct hd_driveid *driveid;
-                u32 flags = le32_to_cpu(devinfo.u.device.flags);
-
-                driveid = (struct hd_driveid *)devinfo.u.device.ident;
-                /* model[] is 40 chars long, but we just want 20 chars here */
-                driveid->model[20] = 0;
-
-                if (indent)
-                        if (flags & DEVICE_FLAG_DISABLED)
-                                hptiop_copy_info(pinfo,"Missing\n");
-                        else
-                                hptiop_copy_info(pinfo, "CH%d %s\n",
-                                        devinfo.u.device.path_id + 1,
-                                        driveid->model);
-                else {
-                        capacity = le64_to_cpu(devinfo.capacity) * 512;
-                        do_div(capacity, 1000000);
-                        hptiop_copy_info(pinfo,
-                                "CH%d %s, %lluMB, %s %s%s%s%s\n",
-                                devinfo.u.device.path_id + 1,
-                                driveid->model,
-                                capacity,
-                                (flags & DEVICE_FLAG_DISABLED)?
-                                        "Disabled" : "Normal",
-                                devinfo.u.device.read_ahead_enabled?
-                                                "[RA]" : "",
-                                devinfo.u.device.write_cache_enabled?
-                                                "[WC]" : "",
-                                devinfo.u.device.TCQ_enabled?
-                                                "[TCQ]" : "",
-                                devinfo.u.device.NCQ_enabled?
-                                                "[NCQ]" : ""
-                        );
-                }
-                break;
-        }
-
-        case LDT_ARRAY:
-                if (devinfo.target_id != INVALID_TARGET_ID)
-                        hptiop_copy_info(pinfo, "[DISK %d_%d] ",
-                                        devinfo.vbus_id, devinfo.target_id);
-
-                capacity = le64_to_cpu(devinfo.capacity) * 512;
-                do_div(capacity, 1000000);
-                hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n",
-                        devinfo.u.array.name,
-                        devinfo.u.array.array_type==AT_RAID0? "RAID0" :
-                                devinfo.u.array.array_type==AT_RAID1? "RAID1" :
-                                devinfo.u.array.array_type==AT_RAID5? "RAID5" :
-                                devinfo.u.array.array_type==AT_RAID6? "RAID6" :
-                                devinfo.u.array.array_type==AT_JBOD? "JBOD" :
-                                        "unknown",
-                        capacity,
-                        get_array_status(&devinfo));
-                for (i = 0; i < devinfo.u.array.ndisk; i++) {
-                        if (hpt_id_valid(devinfo.u.array.members[i])) {
-                                if (cpu_to_le16(1<<i) &
-                                        devinfo.u.array.critical_members)
-                                        hptiop_copy_info(pinfo, "\t*");
-                                hptiop_dump_devinfo(hba, pinfo,
-                                        devinfo.u.array.members[i], indent+1);
-                        }
-                        else
-                                hptiop_copy_info(pinfo, "\tMissing\n");
-                }
-                if (id == devinfo.u.array.transform_source) {
-                        hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n");
-                        hptiop_dump_devinfo(hba, pinfo,
-                                devinfo.u.array.transform_target, indent+1);
-                }
-                break;
-        }
-}
-
 static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
 {
         return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
 }
 
-static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf,
-                                size_t count, loff_t *ppos)
-{
-        struct hptiop_hba *hba = filp->private_data;
-        struct hptiop_getinfo info;
-        int i, j, ndev;
-        struct hpt_controller_info con_info;
-        struct hpt_channel_info chan_info;
-        __le32 ids[32];
-
-        info.buffer = buf;
-        info.buflength = count;
-        info.bufoffset = ppos ? *ppos : 0;
-        info.filpos = 0;
-        info.buffillen = 0;
-
-        if (hptiop_get_controller_info(hba, &con_info))
-                return -EIO;
-
-        for (i = 0; i < con_info.num_buses; i++) {
-                if (hptiop_get_channel_info(hba, i, &chan_info) == 0) {
-                        if (hpt_id_valid(chan_info.devices[0]))
-                                hptiop_dump_devinfo(hba, &info,
-                                                chan_info.devices[0], 0);
-                        if (hpt_id_valid(chan_info.devices[1]))
-                                hptiop_dump_devinfo(hba, &info,
-                                                chan_info.devices[1], 0);
-                }
-        }
-
-        ndev = hptiop_get_logical_devices(hba, ids,
-                                        sizeof(ids) / sizeof(ids[0]));
-
-        /*
-         * if hptiop_get_logical_devices fails, ndev==-1 and it just
-         * output nothing here
-         */
-        for (j = 0; j < ndev; j++)
-                hptiop_dump_devinfo(hba, &info, ids[j], 0);
-
-        if (ppos)
-                *ppos += info.buffillen;
-
-        return info.buffillen;
-}
-
-static int hptiop_cdev_ioctl(struct inode *inode, struct file *file,
-                                unsigned int cmd, unsigned long arg)
-{
-        struct hptiop_hba *hba = file->private_data;
-        struct hpt_ioctl_u ioctl_u;
-        struct hpt_ioctl_k ioctl_k;
-        u32 bytes_returned;
-        int err = -EINVAL;
-
-        if (copy_from_user(&ioctl_u,
-                (void __user *)arg, sizeof(struct hpt_ioctl_u)))
-                return -EINVAL;
-
-        if (ioctl_u.magic != HPT_IOCTL_MAGIC)
-                return -EINVAL;
-
-        ioctl_k.ioctl_code = ioctl_u.ioctl_code;
-        ioctl_k.inbuf = NULL;
-        ioctl_k.inbuf_size = ioctl_u.inbuf_size;
-        ioctl_k.outbuf = NULL;
-        ioctl_k.outbuf_size = ioctl_u.outbuf_size;
-        ioctl_k.hba = hba;
-        ioctl_k.bytes_returned = &bytes_returned;
-
-        /* verify user buffer */
-        if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ,
-                        ioctl_u.inbuf, ioctl_k.inbuf_size)) ||
-                (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE,
-                        ioctl_u.outbuf, ioctl_k.outbuf_size)) ||
-                (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE,
-                        ioctl_u.bytes_returned, sizeof(u32))) ||
-                ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) {
-
-                dprintk("scsi%d: got bad user address\n", hba->host->host_no);
-                return -EINVAL;
-        }
-
-        /* map buffer to kernel. */
-        if (ioctl_k.inbuf_size) {
-                ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL);
-                if (!ioctl_k.inbuf) {
-                        dprintk("scsi%d: fail to alloc inbuf\n",
-                                        hba->host->host_no);
-                        err = -ENOMEM;
-                        goto err_exit;
-                }
-
-                if (copy_from_user(ioctl_k.inbuf,
-                                ioctl_u.inbuf, ioctl_k.inbuf_size)) {
-                        goto err_exit;
-                }
-        }
-
-        if (ioctl_k.outbuf_size) {
-                ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL);
-                if (!ioctl_k.outbuf) {
-                        dprintk("scsi%d: fail to alloc outbuf\n",
-                                        hba->host->host_no);
-                        err = -ENOMEM;
-                        goto err_exit;
-                }
-        }
-
-        hptiop_do_ioctl(&ioctl_k);
-
-        if (ioctl_k.result == HPT_IOCTL_RESULT_OK) {
-                if (ioctl_k.outbuf_size &&
-                        copy_to_user(ioctl_u.outbuf,
-                                ioctl_k.outbuf, ioctl_k.outbuf_size))
-                        goto err_exit;
-
-                if (ioctl_u.bytes_returned &&
-                        copy_to_user(ioctl_u.bytes_returned,
-                                &bytes_returned, sizeof(u32)))
-                        goto err_exit;
-
-                err = 0;
-        }
-
-err_exit:
-        kfree(ioctl_k.inbuf);
-        kfree(ioctl_k.outbuf);
-
-        return err;
-}
-
-static int hptiop_cdev_open(struct inode *inode, struct file *file)
-{
-        struct hptiop_hba *hba;
-        unsigned i = 0, minor = iminor(inode);
-        int ret = -ENODEV;
-
-        spin_lock(&hptiop_hba_list_lock);
-        list_for_each_entry(hba, &hptiop_hba_list, link) {
-                if (i == minor) {
-                        file->private_data = hba;
-                        ret = 0;
-                        goto out;
-                }
-                i++;
-        }
-
-out:
-        spin_unlock(&hptiop_hba_list_lock);
-        return ret;
-}
-
-static struct file_operations hptiop_cdev_fops = {
-        .owner = THIS_MODULE,
-        .read = hptiop_cdev_read,
-        .ioctl = hptiop_cdev_ioctl,
-        .open = hptiop_cdev_open,
-};
-
 static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
 {
         struct Scsi_Host *host = class_to_shost(class_dev);
@@ -1296,19 +771,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
                 goto unmap_pci_bar;
         }
 
-        if (scsi_add_host(host, &pcidev->dev)) {
-                printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
-                                        hba->host->host_no);
-                goto unmap_pci_bar;
-        }
-
         pci_set_drvdata(pcidev, host);
 
         if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
                                         driver_name, hba)) {
                 printk(KERN_ERR "scsi%d: request irq %d failed\n",
                                         hba->host->host_no, pcidev->irq);
-                goto remove_scsi_host;
+                goto unmap_pci_bar;
         }
 
         /* Allocate request mem */
@@ -1355,9 +824,12 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
         if (hptiop_initialize_iop(hba))
                 goto free_request_mem;
 
-        spin_lock(&hptiop_hba_list_lock);
-        list_add_tail(&hba->link, &hptiop_hba_list);
-        spin_unlock(&hptiop_hba_list_lock);
+        if (scsi_add_host(host, &pcidev->dev)) {
+                printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
+                                        hba->host->host_no);
+                goto free_request_mem;
+        }
+
 
         scsi_scan_host(host);
 
@@ -1372,9 +844,6 @@ free_request_mem:
 free_request_irq:
         free_irq(hba->pcidev->irq, hba);
 
-remove_scsi_host:
-        scsi_remove_host(host);
-
 unmap_pci_bar:
         iounmap(hba->iop);
 
@@ -1422,10 +891,6 @@ static void hptiop_remove(struct pci_dev *pcidev)
 
         scsi_remove_host(host);
 
-        spin_lock(&hptiop_hba_list_lock);
-        list_del_init(&hba->link);
-        spin_unlock(&hptiop_hba_list_lock);
-
         hptiop_shutdown(pcidev);
 
         free_irq(hba->pcidev->irq, hba);
@@ -1462,27 +927,12 @@ static struct pci_driver hptiop_pci_driver = {
 
 static int __init hptiop_module_init(void)
 {
-        int error;
-
         printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
-
-        error = pci_register_driver(&hptiop_pci_driver);
-        if (error < 0)
-                return error;
-
-        hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops);
-        if (hptiop_cdev_major < 0) {
-                printk(KERN_WARNING "unable to register hptiop device.\n");
-                return hptiop_cdev_major;
-        }
-
-        return 0;
+        return pci_register_driver(&hptiop_pci_driver);
 }
 
 static void __exit hptiop_module_exit(void)
 {
-        dprintk("hptiop_module_exit\n");
-        unregister_chrdev(hptiop_cdev_major, "hptiop");
         pci_unregister_driver(&hptiop_pci_driver);
 }
 