path: root/drivers/scsi/aacraid
author	Jeff Garzik <jeff@garzik.org>	2007-02-17 15:09:59 -0500
committer	Jeff Garzik <jeff@garzik.org>	2007-02-17 15:09:59 -0500
commit	48c871c1f6a7c7044dd76774fb469e65c7e2e4e8 (patch)
tree	da3aa535c98cc0957851354ceb0fbff7482d7a9d /drivers/scsi/aacraid
parent	1a1689344add3333d28d1b5495d8043a3877d01c (diff)
parent	4409d28140d9a6e6e3f4f1fdaf7234c4b965d954 (diff)
Merge branch 'gfar' of master.kernel.org:/pub/scm/linux/kernel/git/galak/powerpc into upstream
Diffstat (limited to 'drivers/scsi/aacraid')
-rw-r--r--  drivers/scsi/aacraid/Makefile   |   2
-rw-r--r--  drivers/scsi/aacraid/aachba.c   | 602
-rw-r--r--  drivers/scsi/aacraid/aacraid.h  |  46
-rw-r--r--  drivers/scsi/aacraid/comminit.c |  14
-rw-r--r--  drivers/scsi/aacraid/commsup.c  |  40
-rw-r--r--  drivers/scsi/aacraid/linit.c    |  19
-rw-r--r--  drivers/scsi/aacraid/nark.c     |  87
-rw-r--r--  drivers/scsi/aacraid/rkt.c      |  64
-rw-r--r--  drivers/scsi/aacraid/rx.c       | 263
-rw-r--r--  drivers/scsi/aacraid/sa.c       |  33
10 files changed, 708 insertions(+), 462 deletions(-)
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
index 28d133a3094f..f1cca4ee5410 100644
--- a/drivers/scsi/aacraid/Makefile
+++ b/drivers/scsi/aacraid/Makefile
@@ -3,6 +3,6 @@
 obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
 
 aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \
-	dpcsup.o rx.o sa.o rkt.o
+	dpcsup.o rx.o sa.o rkt.o nark.o
 
 EXTRA_CFLAGS := -Idrivers/scsi
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 426cd6f49f5d..ddb33b06e0ef 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -170,9 +170,9 @@ int acbsize = -1;
 module_param(acbsize, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
 
-int expose_physicals = 0;
+int expose_physicals = -1;
 module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. 0=off, 1=on");
+MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on");
 /**
  * aac_get_config_status - check the adapter configuration
  * @common: adapter to query
@@ -706,6 +706,309 @@ static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
 	}
 }
 
709static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
710{
711 if (lba & 0xffffffff00000000LL) {
712 int cid = scmd_id(cmd);
713 dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
714 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
715 SAM_STAT_CHECK_CONDITION;
716 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
717 HARDWARE_ERROR,
718 SENCODE_INTERNAL_TARGET_FAILURE,
719 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
720 0, 0);
721 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
722 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(cmd->sense_buffer))
723 ? sizeof(cmd->sense_buffer)
724 : sizeof(dev->fsa_dev[cid].sense_data));
725 cmd->scsi_done(cmd);
726 return 1;
727 }
728 return 0;
729}
730
731static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
732{
733 return 0;
734}
735
736static void io_callback(void *context, struct fib * fibptr);
737
738static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
739{
740 u16 fibsize;
741 struct aac_raw_io *readcmd;
742 aac_fib_init(fib);
743 readcmd = (struct aac_raw_io *) fib_data(fib);
744 readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
745 readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
746 readcmd->count = cpu_to_le32(count<<9);
747 readcmd->cid = cpu_to_le16(scmd_id(cmd));
748 readcmd->flags = cpu_to_le16(1);
749 readcmd->bpTotal = 0;
750 readcmd->bpComplete = 0;
751
752 aac_build_sgraw(cmd, &readcmd->sg);
753 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
754 BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
755 /*
756 * Now send the Fib to the adapter
757 */
758 return aac_fib_send(ContainerRawIo,
759 fib,
760 fibsize,
761 FsaNormal,
762 0, 1,
763 (fib_callback) io_callback,
764 (void *) cmd);
765}
766
767static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
768{
769 u16 fibsize;
770 struct aac_read64 *readcmd;
771 aac_fib_init(fib);
772 readcmd = (struct aac_read64 *) fib_data(fib);
773 readcmd->command = cpu_to_le32(VM_CtHostRead64);
774 readcmd->cid = cpu_to_le16(scmd_id(cmd));
775 readcmd->sector_count = cpu_to_le16(count);
776 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
777 readcmd->pad = 0;
778 readcmd->flags = 0;
779
780 aac_build_sg64(cmd, &readcmd->sg);
781 fibsize = sizeof(struct aac_read64) +
782 ((le32_to_cpu(readcmd->sg.count) - 1) *
783 sizeof (struct sgentry64));
784 BUG_ON (fibsize > (fib->dev->max_fib_size -
785 sizeof(struct aac_fibhdr)));
786 /*
787 * Now send the Fib to the adapter
788 */
789 return aac_fib_send(ContainerCommand64,
790 fib,
791 fibsize,
792 FsaNormal,
793 0, 1,
794 (fib_callback) io_callback,
795 (void *) cmd);
796}
797
798static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
799{
800 u16 fibsize;
801 struct aac_read *readcmd;
802 aac_fib_init(fib);
803 readcmd = (struct aac_read *) fib_data(fib);
804 readcmd->command = cpu_to_le32(VM_CtBlockRead);
805 readcmd->cid = cpu_to_le16(scmd_id(cmd));
806 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
807 readcmd->count = cpu_to_le32(count * 512);
808
809 aac_build_sg(cmd, &readcmd->sg);
810 fibsize = sizeof(struct aac_read) +
811 ((le32_to_cpu(readcmd->sg.count) - 1) *
812 sizeof (struct sgentry));
813 BUG_ON (fibsize > (fib->dev->max_fib_size -
814 sizeof(struct aac_fibhdr)));
815 /*
816 * Now send the Fib to the adapter
817 */
818 return aac_fib_send(ContainerCommand,
819 fib,
820 fibsize,
821 FsaNormal,
822 0, 1,
823 (fib_callback) io_callback,
824 (void *) cmd);
825}
826
827static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
828{
829 u16 fibsize;
830 struct aac_raw_io *writecmd;
831 aac_fib_init(fib);
832 writecmd = (struct aac_raw_io *) fib_data(fib);
833 writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
834 writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
835 writecmd->count = cpu_to_le32(count<<9);
836 writecmd->cid = cpu_to_le16(scmd_id(cmd));
837 writecmd->flags = 0;
838 writecmd->bpTotal = 0;
839 writecmd->bpComplete = 0;
840
841 aac_build_sgraw(cmd, &writecmd->sg);
842 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
843 BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
844 /*
845 * Now send the Fib to the adapter
846 */
847 return aac_fib_send(ContainerRawIo,
848 fib,
849 fibsize,
850 FsaNormal,
851 0, 1,
852 (fib_callback) io_callback,
853 (void *) cmd);
854}
855
856static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
857{
858 u16 fibsize;
859 struct aac_write64 *writecmd;
860 aac_fib_init(fib);
861 writecmd = (struct aac_write64 *) fib_data(fib);
862 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
863 writecmd->cid = cpu_to_le16(scmd_id(cmd));
864 writecmd->sector_count = cpu_to_le16(count);
865 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
866 writecmd->pad = 0;
867 writecmd->flags = 0;
868
869 aac_build_sg64(cmd, &writecmd->sg);
870 fibsize = sizeof(struct aac_write64) +
871 ((le32_to_cpu(writecmd->sg.count) - 1) *
872 sizeof (struct sgentry64));
873 BUG_ON (fibsize > (fib->dev->max_fib_size -
874 sizeof(struct aac_fibhdr)));
875 /*
876 * Now send the Fib to the adapter
877 */
878 return aac_fib_send(ContainerCommand64,
879 fib,
880 fibsize,
881 FsaNormal,
882 0, 1,
883 (fib_callback) io_callback,
884 (void *) cmd);
885}
886
887static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
888{
889 u16 fibsize;
890 struct aac_write *writecmd;
891 aac_fib_init(fib);
892 writecmd = (struct aac_write *) fib_data(fib);
893 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
894 writecmd->cid = cpu_to_le16(scmd_id(cmd));
895 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
896 writecmd->count = cpu_to_le32(count * 512);
897 writecmd->sg.count = cpu_to_le32(1);
898 /* ->stable is not used - it did mean which type of write */
899
900 aac_build_sg(cmd, &writecmd->sg);
901 fibsize = sizeof(struct aac_write) +
902 ((le32_to_cpu(writecmd->sg.count) - 1) *
903 sizeof (struct sgentry));
904 BUG_ON (fibsize > (fib->dev->max_fib_size -
905 sizeof(struct aac_fibhdr)));
906 /*
907 * Now send the Fib to the adapter
908 */
909 return aac_fib_send(ContainerCommand,
910 fib,
911 fibsize,
912 FsaNormal,
913 0, 1,
914 (fib_callback) io_callback,
915 (void *) cmd);
916}
917
918static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
919{
920 struct aac_srb * srbcmd;
921 u32 flag;
922 u32 timeout;
923
924 aac_fib_init(fib);
925 switch(cmd->sc_data_direction){
926 case DMA_TO_DEVICE:
927 flag = SRB_DataOut;
928 break;
929 case DMA_BIDIRECTIONAL:
930 flag = SRB_DataIn | SRB_DataOut;
931 break;
932 case DMA_FROM_DEVICE:
933 flag = SRB_DataIn;
934 break;
935 case DMA_NONE:
936 default: /* shuts up some versions of gcc */
937 flag = SRB_NoDataXfer;
938 break;
939 }
940
941 srbcmd = (struct aac_srb*) fib_data(fib);
942 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
943 srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
944 srbcmd->id = cpu_to_le32(scmd_id(cmd));
945 srbcmd->lun = cpu_to_le32(cmd->device->lun);
946 srbcmd->flags = cpu_to_le32(flag);
947 timeout = cmd->timeout_per_command/HZ;
948 if (timeout == 0)
949 timeout = 1;
950 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
951 srbcmd->retry_limit = 0; /* Obsolete parameter */
952 srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
953 return srbcmd;
954}
955
956static void aac_srb_callback(void *context, struct fib * fibptr);
957
958static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
959{
960 u16 fibsize;
961 struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
962
963 aac_build_sg64(cmd, (struct sgmap64*) &srbcmd->sg);
964 srbcmd->count = cpu_to_le32(cmd->request_bufflen);
965
966 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
967 memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
968 /*
969 * Build Scatter/Gather list
970 */
971 fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
972 ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
973 sizeof (struct sgentry64));
974 BUG_ON (fibsize > (fib->dev->max_fib_size -
975 sizeof(struct aac_fibhdr)));
976
977 /*
978 * Now send the Fib to the adapter
979 */
980 return aac_fib_send(ScsiPortCommand64, fib,
981 fibsize, FsaNormal, 0, 1,
982 (fib_callback) aac_srb_callback,
983 (void *) cmd);
984}
985
986static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
987{
988 u16 fibsize;
989 struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
990
991 aac_build_sg(cmd, (struct sgmap*)&srbcmd->sg);
992 srbcmd->count = cpu_to_le32(cmd->request_bufflen);
993
994 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
995 memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
996 /*
997 * Build Scatter/Gather list
998 */
999 fibsize = sizeof (struct aac_srb) +
1000 (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
1001 sizeof (struct sgentry));
1002 BUG_ON (fibsize > (fib->dev->max_fib_size -
1003 sizeof(struct aac_fibhdr)));
1004
1005 /*
1006 * Now send the Fib to the adapter
1007 */
1008 return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
1009 (fib_callback) aac_srb_callback, (void *) cmd);
1010}
1011
 int aac_get_adapter_info(struct aac_dev* dev)
 {
 	struct fib* fibptr;
@@ -874,14 +1177,27 @@ int aac_get_adapter_info(struct aac_dev* dev)
 		}
 	}
 	/*
-	 * 57 scatter gather elements
+	 * Deal with configuring for the individualized limits of each packet
+	 * interface.
 	 */
-	if (!(dev->raw_io_interface)) {
+	dev->a_ops.adapter_scsi = (dev->dac_support)
+				? aac_scsi_64
+				: aac_scsi_32;
+	if (dev->raw_io_interface) {
+		dev->a_ops.adapter_bounds = (dev->raw_io_64)
+					? aac_bounds_64
+					: aac_bounds_32;
+		dev->a_ops.adapter_read = aac_read_raw_io;
+		dev->a_ops.adapter_write = aac_write_raw_io;
+	} else {
+		dev->a_ops.adapter_bounds = aac_bounds_32;
 		dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
 			sizeof(struct aac_fibhdr) -
 			sizeof(struct aac_write) + sizeof(struct sgentry)) /
 				sizeof(struct sgentry);
 		if (dev->dac_support) {
+			dev->a_ops.adapter_read = aac_read_block64;
+			dev->a_ops.adapter_write = aac_write_block64;
 			/*
 			 * 38 scatter gather elements
 			 */
@@ -891,6 +1207,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
 				sizeof(struct aac_write64) +
 				sizeof(struct sgentry64)) /
 					sizeof(struct sgentry64);
+		} else {
+			dev->a_ops.adapter_read = aac_read_block;
+			dev->a_ops.adapter_write = aac_write_block;
 		}
 		dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
 		if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
@@ -1004,8 +1323,6 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
 	u64 lba;
 	u32 count;
 	int status;
-
-	u16 fibsize;
 	struct aac_dev *dev;
 	struct fib * cmd_fibcontext;
 
@@ -1059,23 +1376,8 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
 	}
 	dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
 	  smp_processor_id(), (unsigned long long)lba, jiffies));
-	if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) &&
-		(lba & 0xffffffff00000000LL)) {
-		dprintk((KERN_DEBUG "aac_read: Illegal lba\n"));
-		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
-			SAM_STAT_CHECK_CONDITION;
-		set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
-		    HARDWARE_ERROR,
-		    SENCODE_INTERNAL_TARGET_FAILURE,
-		    ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
-		    0, 0);
-		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
-		  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
-		    ? sizeof(scsicmd->sense_buffer)
-		    : sizeof(dev->fsa_dev[cid].sense_data));
-		scsicmd->scsi_done(scsicmd);
+	if (aac_adapter_bounds(dev,scsicmd,lba))
 		return 0;
-	}
 	/*
 	 *	Alocate and initialize a Fib
 	 */
@@ -1083,85 +1385,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
 		return -1;
 	}
 
-	aac_fib_init(cmd_fibcontext);
+	status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
1087
1088 if (dev->raw_io_interface) {
1089 struct aac_raw_io *readcmd;
1090 readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
1091 readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1092 readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1093 readcmd->count = cpu_to_le32(count<<9);
1094 readcmd->cid = cpu_to_le16(cid);
1095 readcmd->flags = cpu_to_le16(1);
1096 readcmd->bpTotal = 0;
1097 readcmd->bpComplete = 0;
1098
1099 aac_build_sgraw(scsicmd, &readcmd->sg);
1100 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
1101 BUG_ON(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)));
1102 /*
1103 * Now send the Fib to the adapter
1104 */
1105 status = aac_fib_send(ContainerRawIo,
1106 cmd_fibcontext,
1107 fibsize,
1108 FsaNormal,
1109 0, 1,
1110 (fib_callback) io_callback,
1111 (void *) scsicmd);
1112 } else if (dev->dac_support == 1) {
1113 struct aac_read64 *readcmd;
1114 readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
1115 readcmd->command = cpu_to_le32(VM_CtHostRead64);
1116 readcmd->cid = cpu_to_le16(cid);
1117 readcmd->sector_count = cpu_to_le16(count);
1118 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1119 readcmd->pad = 0;
1120 readcmd->flags = 0;
1121
1122 aac_build_sg64(scsicmd, &readcmd->sg);
1123 fibsize = sizeof(struct aac_read64) +
1124 ((le32_to_cpu(readcmd->sg.count) - 1) *
1125 sizeof (struct sgentry64));
1126 BUG_ON (fibsize > (dev->max_fib_size -
1127 sizeof(struct aac_fibhdr)));
1128 /*
1129 * Now send the Fib to the adapter
1130 */
1131 status = aac_fib_send(ContainerCommand64,
1132 cmd_fibcontext,
1133 fibsize,
1134 FsaNormal,
1135 0, 1,
1136 (fib_callback) io_callback,
1137 (void *) scsicmd);
1138 } else {
1139 struct aac_read *readcmd;
1140 readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
1141 readcmd->command = cpu_to_le32(VM_CtBlockRead);
1142 readcmd->cid = cpu_to_le32(cid);
1143 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1144 readcmd->count = cpu_to_le32(count * 512);
1145
1146 aac_build_sg(scsicmd, &readcmd->sg);
1147 fibsize = sizeof(struct aac_read) +
1148 ((le32_to_cpu(readcmd->sg.count) - 1) *
1149 sizeof (struct sgentry));
1150 BUG_ON (fibsize > (dev->max_fib_size -
1151 sizeof(struct aac_fibhdr)));
1152 /*
1153 * Now send the Fib to the adapter
1154 */
1155 status = aac_fib_send(ContainerCommand,
1156 cmd_fibcontext,
1157 fibsize,
1158 FsaNormal,
1159 0, 1,
1160 (fib_callback) io_callback,
1161 (void *) scsicmd);
1162 }
1163
1164
 
 	/*
 	 *	Check that the command queued to the controller
@@ -1187,7 +1411,6 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
 	u64 lba;
 	u32 count;
 	int status;
-	u16 fibsize;
 	struct aac_dev *dev;
 	struct fib * cmd_fibcontext;
 
@@ -1227,22 +1450,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
 	}
 	dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
 	  smp_processor_id(), (unsigned long long)lba, jiffies));
-	if ((!(dev->raw_io_interface) || !(dev->raw_io_64))
-		&& (lba & 0xffffffff00000000LL)) {
-		dprintk((KERN_DEBUG "aac_write: Illegal lba\n"));
-		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
-		set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
-		    HARDWARE_ERROR,
-		    SENCODE_INTERNAL_TARGET_FAILURE,
-		    ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
-		    0, 0);
-		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
-		  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
-		    ? sizeof(scsicmd->sense_buffer)
-		    : sizeof(dev->fsa_dev[cid].sense_data));
-		scsicmd->scsi_done(scsicmd);
+	if (aac_adapter_bounds(dev,scsicmd,lba))
 		return 0;
-	}
 	/*
 	 *	Allocate and initialize a Fib then setup a BlockWrite command
 	 */
@@ -1251,85 +1460,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
 		scsicmd->scsi_done(scsicmd);
 		return 0;
 	}
-	aac_fib_init(cmd_fibcontext);
 
-	if (dev->raw_io_interface) {
+	status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count);
1257 struct aac_raw_io *writecmd;
1258 writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
1259 writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1260 writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1261 writecmd->count = cpu_to_le32(count<<9);
1262 writecmd->cid = cpu_to_le16(cid);
1263 writecmd->flags = 0;
1264 writecmd->bpTotal = 0;
1265 writecmd->bpComplete = 0;
1266
1267 aac_build_sgraw(scsicmd, &writecmd->sg);
1268 fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
1269 BUG_ON(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)));
1270 /*
1271 * Now send the Fib to the adapter
1272 */
1273 status = aac_fib_send(ContainerRawIo,
1274 cmd_fibcontext,
1275 fibsize,
1276 FsaNormal,
1277 0, 1,
1278 (fib_callback) io_callback,
1279 (void *) scsicmd);
1280 } else if (dev->dac_support == 1) {
1281 struct aac_write64 *writecmd;
1282 writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
1283 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
1284 writecmd->cid = cpu_to_le16(cid);
1285 writecmd->sector_count = cpu_to_le16(count);
1286 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1287 writecmd->pad = 0;
1288 writecmd->flags = 0;
1289
1290 aac_build_sg64(scsicmd, &writecmd->sg);
1291 fibsize = sizeof(struct aac_write64) +
1292 ((le32_to_cpu(writecmd->sg.count) - 1) *
1293 sizeof (struct sgentry64));
1294 BUG_ON (fibsize > (dev->max_fib_size -
1295 sizeof(struct aac_fibhdr)));
1296 /*
1297 * Now send the Fib to the adapter
1298 */
1299 status = aac_fib_send(ContainerCommand64,
1300 cmd_fibcontext,
1301 fibsize,
1302 FsaNormal,
1303 0, 1,
1304 (fib_callback) io_callback,
1305 (void *) scsicmd);
1306 } else {
1307 struct aac_write *writecmd;
1308 writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
1309 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
1310 writecmd->cid = cpu_to_le32(cid);
1311 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1312 writecmd->count = cpu_to_le32(count * 512);
1313 writecmd->sg.count = cpu_to_le32(1);
1314 /* ->stable is not used - it did mean which type of write */
1315
1316 aac_build_sg(scsicmd, &writecmd->sg);
1317 fibsize = sizeof(struct aac_write) +
1318 ((le32_to_cpu(writecmd->sg.count) - 1) *
1319 sizeof (struct sgentry));
1320 BUG_ON (fibsize > (dev->max_fib_size -
1321 sizeof(struct aac_fibhdr)));
1322 /*
1323 * Now send the Fib to the adapter
1324 */
1325 status = aac_fib_send(ContainerCommand,
1326 cmd_fibcontext,
1327 fibsize,
1328 FsaNormal,
1329 0, 1,
1330 (fib_callback) io_callback,
1331 (void *) scsicmd);
1332 }
 
 	/*
 	 *	Check that the command queued to the controller
@@ -2099,10 +2231,6 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
 	struct fib* cmd_fibcontext;
 	struct aac_dev* dev;
 	int status;
-	struct aac_srb *srbcmd;
-	u16 fibsize;
-	u32 flag;
-	u32 timeout;
 
 	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 	if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
@@ -2112,88 +2240,14 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
 		return 0;
 	}
 
2115 switch(scsicmd->sc_data_direction){
2116 case DMA_TO_DEVICE:
2117 flag = SRB_DataOut;
2118 break;
2119 case DMA_BIDIRECTIONAL:
2120 flag = SRB_DataIn | SRB_DataOut;
2121 break;
2122 case DMA_FROM_DEVICE:
2123 flag = SRB_DataIn;
2124 break;
2125 case DMA_NONE:
2126 default: /* shuts up some versions of gcc */
2127 flag = SRB_NoDataXfer;
2128 break;
2129 }
2130
2131
 	/*
 	 *	Allocate and initialize a Fib then setup a BlockWrite command
 	 */
 	if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
 		return -1;
 	}
-	aac_fib_init(cmd_fibcontext);
+	status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
2139
2140 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
2141 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
2142 srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(scsicmd)));
2143 srbcmd->id = cpu_to_le32(scmd_id(scsicmd));
2144 srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
2145 srbcmd->flags = cpu_to_le32(flag);
2146 timeout = scsicmd->timeout_per_command/HZ;
2147 if(timeout == 0){
2148 timeout = 1;
2149 }
2150 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
2151 srbcmd->retry_limit = 0; /* Obsolete parameter */
2152 srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
2153
2154 if( dev->dac_support == 1 ) {
2155 aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
2156 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
2157
2158 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2159 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
2160 /*
2161 * Build Scatter/Gather list
2162 */
2163 fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
2164 ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
2165 sizeof (struct sgentry64));
2166 BUG_ON (fibsize > (dev->max_fib_size -
2167 sizeof(struct aac_fibhdr)));
2168 2250
2169 /*
2170 * Now send the Fib to the adapter
2171 */
2172 status = aac_fib_send(ScsiPortCommand64, cmd_fibcontext,
2173 fibsize, FsaNormal, 0, 1,
2174 (fib_callback) aac_srb_callback,
2175 (void *) scsicmd);
2176 } else {
2177 aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
2178 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
2179
2180 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2181 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
2182 /*
2183 * Build Scatter/Gather list
2184 */
2185 fibsize = sizeof (struct aac_srb) +
2186 (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
2187 sizeof (struct sgentry));
2188 BUG_ON (fibsize > (dev->max_fib_size -
2189 sizeof(struct aac_fibhdr)));
2190
2191 /*
2192 * Now send the Fib to the adapter
2193 */
2194 status = aac_fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
2195 (fib_callback) aac_srb_callback, (void *) scsicmd);
2196 }
 	/*
 	 *	Check that the command queued to the controller
 	 */
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 4f8b4c53d435..39ecd0d22eb0 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -5,6 +5,7 @@
 #define _nblank(x) #x
 #define nblank(x) _nblank(x)[0]
 
+#include <linux/interrupt.h>
 
 /*------------------------------------------------------------------------------
  *              D E F I N E S
@@ -485,16 +486,28 @@ enum aac_log_level {
 
 struct aac_dev;
 struct fib;
+struct scsi_cmnd;
 
 struct adapter_ops
 {
+	/* Low level operations */
 	void (*adapter_interrupt)(struct aac_dev *dev);
 	void (*adapter_notify)(struct aac_dev *dev, u32 event);
 	void (*adapter_disable_int)(struct aac_dev *dev);
+	void (*adapter_enable_int)(struct aac_dev *dev);
 	int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
 	int (*adapter_check_health)(struct aac_dev *dev);
-	int (*adapter_send)(struct fib * fib);
+	/* Transport operations */
 	int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
+	irqreturn_t (*adapter_intr)(int irq, void *dev_id);
+	/* Packet operations */
+	int (*adapter_deliver)(struct fib * fib);
+	int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba);
+	int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
+	int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
+	int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
+	/* Administrative operations */
+	int (*adapter_comm)(struct aac_dev * dev, int comm);
 };
 
 /*
@@ -1018,7 +1031,9 @@ struct aac_dev
 	u8 nondasd_support;
 	u8 dac_support;
 	u8 raid_scsi_mode;
-	u8 new_comm_interface;
+	u8 comm_interface;
+#	define AAC_COMM_PRODUCER 0
+#	define AAC_COMM_MESSAGE 1
 	/* macro side-effects BEWARE */
 #	define raw_io_interface \
 	  init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
@@ -1036,18 +1051,36 @@ struct aac_dev
 #define aac_adapter_disable_int(dev) \
 	(dev)->a_ops.adapter_disable_int(dev)
 
+#define aac_adapter_enable_int(dev) \
+	(dev)->a_ops.adapter_enable_int(dev)
+
 #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
 	(dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
 
 #define aac_adapter_check_health(dev) \
 	(dev)->a_ops.adapter_check_health(dev)
 
-#define aac_adapter_send(fib) \
-	((fib)->dev)->a_ops.adapter_send(fib)
-
 #define aac_adapter_ioremap(dev, size) \
 	(dev)->a_ops.adapter_ioremap(dev, size)
 
+#define aac_adapter_deliver(fib) \
+	((fib)->dev)->a_ops.adapter_deliver(fib)
+
+#define aac_adapter_bounds(dev,cmd,lba) \
+	dev->a_ops.adapter_bounds(dev,cmd,lba)
+
+#define aac_adapter_read(fib,cmd,lba,count) \
+	((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count)
+
+#define aac_adapter_write(fib,cmd,lba,count) \
+	((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count)
+
+#define aac_adapter_scsi(fib,cmd) \
+	((fib)->dev)->a_ops.adapter_scsi(fib,cmd)
+
+#define aac_adapter_comm(dev,comm) \
+	(dev)->a_ops.adapter_comm(dev, comm)
+
 #define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
 
 /*
@@ -1767,7 +1800,6 @@ static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
 	return (u32)capacity;
 }
 
-struct scsi_cmnd;
 /* SCp.phase values */
 #define AAC_OWNER_MIDLEVEL 0x101
 #define AAC_OWNER_LOWLEVEL 0x102
@@ -1794,7 +1826,9 @@ int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
 int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
 int aac_rx_init(struct aac_dev *dev);
 int aac_rkt_init(struct aac_dev *dev);
+int aac_nark_init(struct aac_dev *dev);
 int aac_sa_init(struct aac_dev *dev);
+int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
 unsigned int aac_response_normal(struct aac_queue * q);
 unsigned int aac_command_normal(struct aac_queue * q);
 unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 6d305b2f854e..df67ba686023 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -95,7 +95,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
 
 	init->InitFlags = 0;
-	if (dev->new_comm_interface) {
+	if (dev->comm_interface == AAC_COMM_MESSAGE) {
 		init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
 		dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
 	}
@@ -297,21 +297,23 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 		- sizeof(struct aac_fibhdr)
 		- sizeof(struct aac_write) + sizeof(struct sgentry))
 			/ sizeof(struct sgentry);
-	dev->new_comm_interface = 0;
+	dev->comm_interface = AAC_COMM_PRODUCER;
 	dev->raw_io_64 = 0;
 	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
 		0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
 		(status[0] == 0x00000001)) {
 		if (status[1] & AAC_OPT_NEW_COMM_64)
 			dev->raw_io_64 = 1;
-		if (status[1] & AAC_OPT_NEW_COMM)
-			dev->new_comm_interface = dev->a_ops.adapter_send != 0;
-		if (dev->new_comm_interface && (status[2] > dev->base_size)) {
+		if (dev->a_ops.adapter_comm &&
+		    (status[1] & AAC_OPT_NEW_COMM))
+			dev->comm_interface = AAC_COMM_MESSAGE;
+		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
+		    (status[2] > dev->base_size)) {
 			aac_adapter_ioremap(dev, 0);
 			dev->base_size = status[2];
 			if (aac_adapter_ioremap(dev, status[2])) {
 				/* remap failed, go back ... */
-				dev->new_comm_interface = 0;
+				dev->comm_interface = AAC_COMM_PRODUCER;
 				if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
 					printk(KERN_WARNING
 					  "aacraid: unable to map adapter.\n");
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 4893a6d06a33..1b97f60652ba 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -317,7 +317,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
  *	success.
  */
 
-static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
+int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
 {
 	struct aac_entry * entry = NULL;
 	int map = 0;
@@ -387,7 +387,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 {
 	struct aac_dev * dev = fibptr->dev;
 	struct hw_fib * hw_fib = fibptr->hw_fib;
-	struct aac_queue * q;
 	unsigned long flags = 0;
 	unsigned long qflags;
 
@@ -469,38 +468,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 
 	if (!dev->queues)
 		return -EBUSY;
-	q = &dev->queues->queue[AdapNormCmdQueue];
 
 	if(wait)
 		spin_lock_irqsave(&fibptr->event_lock, flags);
-	spin_lock_irqsave(q->lock, qflags);
+	aac_adapter_deliver(fibptr);
477 if (dev->new_comm_interface) {
478 unsigned long count = 10000000L; /* 50 seconds */
479 q->numpending++;
480 spin_unlock_irqrestore(q->lock, qflags);
481 while (aac_adapter_send(fibptr) != 0) {
482 if (--count == 0) {
483 if (wait)
484 spin_unlock_irqrestore(&fibptr->event_lock, flags);
485 spin_lock_irqsave(q->lock, qflags);
486 q->numpending--;
487 spin_unlock_irqrestore(q->lock, qflags);
488 return -ETIMEDOUT;
489 }
490 udelay(5);
491 }
492 } else {
493 u32 index;
494 unsigned long nointr = 0;
495 aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
496
497 q->numpending++;
498 *(q->headers.producer) = cpu_to_le32(index + 1);
499 spin_unlock_irqrestore(q->lock, qflags);
500 dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index));
501 if (!(nointr & aac_config.irq_mod))
502 aac_adapter_notify(dev, AdapNormCmdQueue);
503 }
 
 	/*
 	 *	If the caller wanted us to wait for response wait now.
@@ -520,6 +491,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 		while (down_trylock(&fibptr->event_wait)) {
 			int blink;
 			if (--count == 0) {
+				struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
 				spin_lock_irqsave(q->lock, qflags);
 				q->numpending--;
 				spin_unlock_irqrestore(q->lock, qflags);
@@ -659,7 +631,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 	unsigned long qflags;
 
 	if (hw_fib->header.XferState == 0) {
-		if (dev->new_comm_interface)
+		if (dev->comm_interface == AAC_COMM_MESSAGE)
 			kfree (hw_fib);
 		return 0;
 	}
@@ -667,7 +639,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 	 *	If we plan to do anything check the structure type first.
 	 */
 	if ( hw_fib->header.StructType != FIB_MAGIC ) {
-		if (dev->new_comm_interface)
+		if (dev->comm_interface == AAC_COMM_MESSAGE)
 			kfree (hw_fib);
 		return -EINVAL;
 	}
@@ -679,7 +651,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 	 *	send the completed cdb to the adapter.
 	 */
 	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
-		if (dev->new_comm_interface) {
+		if (dev->comm_interface == AAC_COMM_MESSAGE) {
 			kfree (hw_fib);
 		} else {
 			u32 index;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index d2cf875af59b..0f948c2fb609 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -157,6 +157,7 @@ static struct pci_device_id aac_pci_tbl[] = {
 	{ 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
 	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
 	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
+	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -230,7 +231,8 @@ static struct aac_driver_ident aac_drivers[] = {
 	{ aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */
 	{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */
 	{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec Catch All */
-	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec Rocket Catch All */
+	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
+	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */
 };
 
 /**
@@ -396,11 +398,15 @@ static int aac_slave_configure(struct scsi_device *sdev)
 		sdev->skip_ms_page_3f = 1;
 	}
 	if ((sdev->type == TYPE_DISK) &&
-	    !expose_physicals &&
 	    (sdev_channel(sdev) != CONTAINER_CHANNEL)) {
-		struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
-		if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
-			sdev->no_uld_attach = 1;
+		if (expose_physicals == 0)
+			return -ENXIO;
+		if (expose_physicals < 0) {
+			struct aac_dev *aac =
+				(struct aac_dev *)sdev->host->hostdata;
+			if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
+				sdev->no_uld_attach = 1;
+		}
 	}
 	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
 	    (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
@@ -768,7 +774,7 @@ static struct class_device_attribute *aac_attrs[] = {
 };
 
 
-static struct file_operations aac_cfg_fops = {
+static const struct file_operations aac_cfg_fops = {
 	.owner = THIS_MODULE,
 	.ioctl = aac_cfg_ioctl,
 #ifdef CONFIG_COMPAT
@@ -804,7 +810,6 @@ static struct scsi_host_template aac_driver_template = {
 	.emulated = 1,
 };
 
-
 static int __devinit aac_probe_one(struct pci_dev *pdev,
 		const struct pci_device_id *id)
 {
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
new file mode 100644
index 000000000000..c76b611b6afb
--- /dev/null
+++ b/drivers/scsi/aacraid/nark.c
@@ -0,0 +1,87 @@
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 * nark.c
26 *
27 * Abstract: Hardware Device Interface for NEMER/ARK
28 *
29 */
30
31#include <linux/pci.h>
32#include <linux/blkdev.h>
33
34#include <scsi/scsi_host.h>
35
36#include "aacraid.h"
37
38/**
39 * aac_nark_ioremap
40 * @size: mapping resize request
41 *
42 */
43static int aac_nark_ioremap(struct aac_dev * dev, u32 size)
44{
45 if (!size) {
46 iounmap(dev->regs.rx);
47 dev->regs.rx = NULL;
48 iounmap(dev->base);
49 dev->base = NULL;
50 return 0;
51 }
52 dev->scsi_host_ptr->base = pci_resource_start(dev->pdev, 2);
53 dev->regs.rx = ioremap((u64)pci_resource_start(dev->pdev, 0) |
54 ((u64)pci_resource_start(dev->pdev, 1) << 32),
55 sizeof(struct rx_registers) - sizeof(struct rx_inbound));
56 dev->base = NULL;
57 if (dev->regs.rx == NULL)
58 return -1;
59 dev->base = ioremap(dev->scsi_host_ptr->base, size);
60 if (dev->base == NULL) {
61 iounmap(dev->regs.rx);
62 dev->regs.rx = NULL;
63 return -1;
64 }
65 dev->IndexRegs = &((struct rx_registers __iomem *)dev->base)->IndexRegs;
66 return 0;
67}
68
69/**
70 * aac_nark_init - initialize an NEMER/ARK Split Bar card
71 * @dev: device to configure
72 *
73 */
74
75int aac_nark_init(struct aac_dev * dev)
76{
77 extern int _aac_rx_init(struct aac_dev *dev);
78 extern int aac_rx_select_comm(struct aac_dev *dev, int comm);
79
80 /*
81 * Fill in the function dispatch table.
82 */
83 dev->a_ops.adapter_ioremap = aac_nark_ioremap;
84 dev->a_ops.adapter_comm = aac_rx_select_comm;
85
86 return _aac_rx_init(dev);
87}
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 643f23b5ded8..d953c3fe998a 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -34,6 +34,40 @@
 
 #include "aacraid.h"
 
37#define AAC_NUM_IO_FIB_RKT (246 - AAC_NUM_MGT_FIB)
38
39/**
40 * aac_rkt_select_comm - Select communications method
41 * @dev: Adapter
42 * @comm: communications method
43 */
44
45static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
46{
47 int retval;
48 extern int aac_rx_select_comm(struct aac_dev *dev, int comm);
49 retval = aac_rx_select_comm(dev, comm);
50 if (comm == AAC_COMM_MESSAGE) {
51 /*
52 * FIB Setup has already been done, but we can minimize the
53 * damage by at least ensuring the OS never issues more
54 * commands than we can handle. The Rocket adapters currently
55 * can only handle 246 commands and 8 AIFs at the same time,
56 * and in fact do notify us accordingly if we negotiate the
57 * FIB size. The problem that causes us to add this check is
58 * to ensure that we do not overdo it with the adapter when a
59 * hard coded FIB override is being utilized. This special
60 * case warrants this half baked, but convenient, check here.
61 */
62 if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) {
63 dev->init->MaxIoCommands =
64 cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB);
65 dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT;
66 }
67 }
68 return retval;
69}
70
37/** 71/**
38 * aac_rkt_ioremap 72 * aac_rkt_ioremap
39 * @size: mapping resize request 73 * @size: mapping resize request
@@ -63,39 +97,13 @@ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
63 97
64int aac_rkt_init(struct aac_dev *dev) 98int aac_rkt_init(struct aac_dev *dev)
65{ 99{
66 int retval;
67 extern int _aac_rx_init(struct aac_dev *dev); 100 extern int _aac_rx_init(struct aac_dev *dev);
68 extern void aac_rx_start_adapter(struct aac_dev *dev);
69 101
70 /* 102 /*
71 * Fill in the function dispatch table. 103 * Fill in the function dispatch table.
72 */ 104 */
73 dev->a_ops.adapter_ioremap = aac_rkt_ioremap; 105 dev->a_ops.adapter_ioremap = aac_rkt_ioremap;
106 dev->a_ops.adapter_comm = aac_rkt_select_comm;
74 107
75 retval = _aac_rx_init(dev); 108 return _aac_rx_init(dev);
76 if (retval)
77 return retval;
78 if (dev->new_comm_interface) {
79 /*
80 * FIB Setup has already been done, but we can minimize the
81 * damage by at least ensuring the OS never issues more
82 * commands than we can handle. The Rocket adapters currently
83 * can only handle 246 commands and 8 AIFs at the same time,
84 * and in fact do notify us accordingly if we negotiate the
85 * FIB size. The problem that causes us to add this check is
86 * to ensure that we do not overdo it with the adapter when a
87 * hard coded FIB override is being utilized. This special
88 * case warrants this half baked, but convenient, check here.
89 */
90 if (dev->scsi_host_ptr->can_queue > (246 - AAC_NUM_MGT_FIB)) {
91 dev->init->MaxIoCommands = cpu_to_le32(246);
92 dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB;
93 }
94 }
95 /*
96 * Tell the adapter that all is configured, and it can start
97 * accepting requests
98 */
99 aac_rx_start_adapter(dev);
100 return 0;
101} 109}
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index dcc8b0ea7a9d..c632d9354a26 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -46,60 +46,60 @@
 
 #include "aacraid.h"
 
-static irqreturn_t aac_rx_intr(int irq, void *dev_id)
+static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
 {
 	struct aac_dev *dev = dev_id;
+	unsigned long bellbits;
+	u8 intstat = rx_readb(dev, MUnit.OISR);
 
-	dprintk((KERN_DEBUG "aac_rx_intr(%d,%p)\n", irq, dev_id));
-	if (dev->new_comm_interface) {
-		u32 Index = rx_readl(dev, MUnit.OutboundQueue);
-		if (Index == 0xFFFFFFFFL)
-			Index = rx_readl(dev, MUnit.OutboundQueue);
-		if (Index != 0xFFFFFFFFL) {
-			do {
-				if (aac_intr_normal(dev, Index)) {
-					rx_writel(dev, MUnit.OutboundQueue, Index);
-					rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
-				}
-				Index = rx_readl(dev, MUnit.OutboundQueue);
-			} while (Index != 0xFFFFFFFFL);
-			return IRQ_HANDLED;
+	/*
+	 * Read mask and invert because drawbridge is reversed.
+	 * This allows us to only service interrupts that have
+	 * been enabled.
+	 * Check to see if this is our interrupt. If it isn't just return
+	 */
+	if (intstat & ~(dev->OIMR)) {
+		bellbits = rx_readl(dev, OutboundDoorbellReg);
+		if (bellbits & DoorBellPrintfReady) {
+			aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
+			rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
+			rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
 		}
-	} else {
-		unsigned long bellbits;
-		u8 intstat;
-		intstat = rx_readb(dev, MUnit.OISR);
-		/*
-		 * Read mask and invert because drawbridge is reversed.
-		 * This allows us to only service interrupts that have
-		 * been enabled.
-		 * Check to see if this is our interrupt. If it isn't just return
-		 */
-		if (intstat & ~(dev->OIMR))
-		{
-			bellbits = rx_readl(dev, OutboundDoorbellReg);
-			if (bellbits & DoorBellPrintfReady) {
-				aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
-				rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
-				rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
-			}
-			else if (bellbits & DoorBellAdapterNormCmdReady) {
-				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
-				aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
-			}
-			else if (bellbits & DoorBellAdapterNormRespReady) {
-				rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
-				aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
-			}
-			else if (bellbits & DoorBellAdapterNormCmdNotFull) {
-				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
-			}
-			else if (bellbits & DoorBellAdapterNormRespNotFull) {
-				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
-				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
-			}
-			return IRQ_HANDLED;
+		else if (bellbits & DoorBellAdapterNormCmdReady) {
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
+			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+		}
+		else if (bellbits & DoorBellAdapterNormRespReady) {
+			rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
+			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+		}
+		else if (bellbits & DoorBellAdapterNormCmdNotFull) {
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
 		}
+		else if (bellbits & DoorBellAdapterNormRespNotFull) {
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
+		}
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
+{
+	struct aac_dev *dev = dev_id;
+	u32 Index = rx_readl(dev, MUnit.OutboundQueue);
+	if (Index == 0xFFFFFFFFL)
+		Index = rx_readl(dev, MUnit.OutboundQueue);
+	if (Index != 0xFFFFFFFFL) {
+		do {
+			if (aac_intr_normal(dev, Index)) {
+				rx_writel(dev, MUnit.OutboundQueue, Index);
+				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
+			}
+			Index = rx_readl(dev, MUnit.OutboundQueue);
+		} while (Index != 0xFFFFFFFFL);
+		return IRQ_HANDLED;
 	}
 	return IRQ_NONE;
 }
@@ -115,6 +115,26 @@ static void aac_rx_disable_interrupt(struct aac_dev *dev)
 }
 
 /**
+ * aac_rx_enable_interrupt_producer - Enable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
+{
+	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+}
+
+/**
+ * aac_rx_enable_interrupt_message - Enable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
+{
+	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
+}
+
+/**
  * rx_sync_cmd - send a command and wait
  * @dev: Adapter
  * @command: Command to execute
@@ -189,10 +209,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
 		/*
 		 *	Restore interrupt mask even though we timed out
 		 */
-		if (dev->new_comm_interface)
-			rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
-		else
-			rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+		aac_adapter_enable_int(dev);
 		return -ETIMEDOUT;
 	}
198 /* 215 /*
@@ -215,10 +232,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
215 /* 232 /*
216 * Restore interrupt mask 233 * Restore interrupt mask
217 */ 234 */
218 if (dev->new_comm_interface) 235 aac_adapter_enable_int(dev);
219 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
220 else
221 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
222 return 0; 236 return 0;
223 237
224} 238}
@@ -360,35 +374,72 @@ static int aac_rx_check_health(struct aac_dev *dev)
 }
 
 /**
- * aac_rx_send
+ * aac_rx_deliver_producer
  * @fib: fib to issue
  *
  * Will send a fib, returning 0 if successful.
  */
-static int aac_rx_send(struct fib * fib)
+static int aac_rx_deliver_producer(struct fib * fib)
 {
-        u64 addr = fib->hw_fib_pa;
         struct aac_dev *dev = fib->dev;
-        volatile void __iomem *device = dev->regs.rx;
+        struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+        unsigned long qflags;
         u32 Index;
+        unsigned long nointr = 0;
 
-        dprintk((KERN_DEBUG "%p->aac_rx_send(%p->%llx)\n", dev, fib, addr));
-        Index = rx_readl(dev, MUnit.InboundQueue);
-        if (Index == 0xFFFFFFFFL)
+        spin_lock_irqsave(q->lock, qflags);
+        aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib, 1, fib, &nointr);
+
+        q->numpending++;
+        *(q->headers.producer) = cpu_to_le32(Index + 1);
+        spin_unlock_irqrestore(q->lock, qflags);
+        if (!(nointr & aac_config.irq_mod))
+                aac_adapter_notify(dev, AdapNormCmdQueue);
+
+        return 0;
+}
+
+/**
+ * aac_rx_deliver_message
+ * @fib: fib to issue
+ *
+ * Will send a fib, returning 0 if successful.
+ */
+static int aac_rx_deliver_message(struct fib * fib)
+{
+        struct aac_dev *dev = fib->dev;
+        struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+        unsigned long qflags;
+        u32 Index;
+        u64 addr;
+        volatile void __iomem *device;
+
+        unsigned long count = 10000000L; /* 50 seconds */
+        spin_lock_irqsave(q->lock, qflags);
+        q->numpending++;
+        spin_unlock_irqrestore(q->lock, qflags);
+        for(;;) {
                 Index = rx_readl(dev, MUnit.InboundQueue);
-        dprintk((KERN_DEBUG "Index = 0x%x\n", Index));
-        if (Index == 0xFFFFFFFFL)
-                return Index;
+                if (Index == 0xFFFFFFFFL)
+                        Index = rx_readl(dev, MUnit.InboundQueue);
+                if (Index != 0xFFFFFFFFL)
+                        break;
+                if (--count == 0) {
+                        spin_lock_irqsave(q->lock, qflags);
+                        q->numpending--;
+                        spin_unlock_irqrestore(q->lock, qflags);
+                        return -ETIMEDOUT;
+                }
+                udelay(5);
+        }
         device = dev->base + Index;
-        dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff),
-                (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
+        addr = fib->hw_fib_pa;
         writel((u32)(addr & 0xffffffff), device);
         device += sizeof(u32);
         writel((u32)(addr >> 32), device);
         device += sizeof(u32);
         writel(le16_to_cpu(fib->hw_fib->header.Size), device);
         rx_writel(dev, MUnit.InboundQueue, Index);
-        dprintk((KERN_DEBUG "aac_rx_send - return 0\n"));
         return 0;
 }
 
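
aac_rx_deliver_message() above claims a slot by polling MUnit.InboundQueue for a non-0xFFFFFFFF index, giving up after roughly 50 seconds of udelay(5) retries. The sketch below models only that bounded-retry shape in plain userspace C; the register read is simulated and the helper names are invented for illustration.

#include <stdio.h>
#include <stdint.h>

#define EMPTY_SENTINEL 0xFFFFFFFFu

/* Pretend the adapter frees a slot (index 0x40) on the 4th read. */
static uint32_t mock_read_inbound_queue(void)
{
        static int calls;

        return (++calls < 4) ? EMPTY_SENTINEL : 0x40u;
}

static int claim_slot(uint32_t *index_out)
{
        unsigned long budget = 10;        /* the driver allows ~10,000,000 tries (~50 s) */

        for (;;) {
                uint32_t idx = mock_read_inbound_queue();
                if (idx == EMPTY_SENTINEL)
                        idx = mock_read_inbound_queue();        /* one immediate re-read */
                if (idx != EMPTY_SENTINEL) {
                        *index_out = idx;
                        return 0;
                }
                if (--budget == 0)
                        return -1;        /* the driver maps this to -ETIMEDOUT */
                /* the driver udelay(5)s here; a mock just loops */
        }
}

int main(void)
{
        uint32_t idx;

        if (claim_slot(&idx) == 0)
                printf("got inbound queue slot 0x%x\n", (unsigned)idx);
        else
                printf("timed out waiting for a slot\n");
        return 0;
}
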
@@ -430,6 +481,31 @@ static int aac_rx_restart_adapter(struct aac_dev *dev)
 }
 
 /**
+ * aac_rx_select_comm - Select communications method
+ * @dev: Adapter
+ * @comm: communications method
+ */
+
+int aac_rx_select_comm(struct aac_dev *dev, int comm)
+{
+        switch (comm) {
+        case AAC_COMM_PRODUCER:
+                dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
+                dev->a_ops.adapter_intr = aac_rx_intr_producer;
+                dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
+                break;
+        case AAC_COMM_MESSAGE:
+                dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
+                dev->a_ops.adapter_intr = aac_rx_intr_message;
+                dev->a_ops.adapter_deliver = aac_rx_deliver_message;
+                break;
+        default:
+                return 1;
+        }
+        return 0;
+}
+
+/**
  * aac_rx_init - initialize an i960 based AAC card
  * @dev: device to configure
  *
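
aac_rx_select_comm() installs a matched trio of callbacks (enable_int, intr, deliver) per communication mode, so producer-queue and message behaviour are never mixed; _aac_rx_init() selects AAC_COMM_PRODUCER first and then re-selects whatever aac_init_adapter() negotiates. Below is a minimal userspace sketch of that select-a-whole-strategy pattern, with invented names standing in for the driver's structures.

#include <stdio.h>

enum comm_mode { COMM_PRODUCER, COMM_MESSAGE };

struct dev;
struct dev_ops {
        const char *(*deliver)(struct dev *d);
        const char *(*intr)(struct dev *d);
};
struct dev { struct dev_ops ops; };

static const char *deliver_producer(struct dev *d) { (void)d; return "queue FIB + ring doorbell"; }
static const char *intr_producer(struct dev *d)    { (void)d; return "decode doorbell bits"; }
static const char *deliver_message(struct dev *d)  { (void)d; return "write FIB address to inbound queue"; }
static const char *intr_message(struct dev *d)     { (void)d; return "drain outbound queue indexes"; }

/* Mirror of the select-comm idea: install a matched set of callbacks,
 * so no caller ever mixes producer and message behaviour. */
static int select_comm(struct dev *d, enum comm_mode mode)
{
        switch (mode) {
        case COMM_PRODUCER:
                d->ops.deliver = deliver_producer;
                d->ops.intr = intr_producer;
                return 0;
        case COMM_MESSAGE:
                d->ops.deliver = deliver_message;
                d->ops.intr = intr_message;
                return 0;
        default:
                return 1;        /* unknown mode, like the driver's "return 1" */
        }
}

int main(void)
{
        struct dev d;

        /* init starts in producer mode, then re-selects after negotiation */
        select_comm(&d, COMM_PRODUCER);
        printf("deliver: %s / irq: %s\n", d.ops.deliver(&d), d.ops.intr(&d));
        select_comm(&d, COMM_MESSAGE);
        printf("deliver: %s / irq: %s\n", d.ops.deliver(&d), d.ops.intr(&d));
        return 0;
}
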
@@ -489,40 +565,42 @@ int _aac_rx_init(struct aac_dev *dev)
                 }
                 msleep(1);
         }
-        if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev)<0)
-        {
-                printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
-                goto error_iounmap;
-        }
         /*
-         * Fill in the function dispatch table.
+         * Fill in the common function dispatch table.
          */
         dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
         dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
         dev->a_ops.adapter_notify = aac_rx_notify_adapter;
         dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
         dev->a_ops.adapter_check_health = aac_rx_check_health;
-        dev->a_ops.adapter_send = aac_rx_send;
 
         /*
          * First clear out all interrupts. Then enable the one's that we
          * can handle.
          */
-        rx_writeb(dev, MUnit.OIMR, 0xff);
+        aac_adapter_comm(dev, AAC_COMM_PRODUCER);
+        aac_adapter_disable_int(dev);
         rx_writel(dev, MUnit.ODR, 0xffffffff);
-        rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+        aac_adapter_enable_int(dev);
 
         if (aac_init_adapter(dev) == NULL)
-                goto error_irq;
-        if (dev->new_comm_interface)
-                rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
+                goto error_iounmap;
+        aac_adapter_comm(dev, dev->comm_interface);
+        if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr,
+                        IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+                printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+                        name, instance);
+                goto error_iounmap;
+        }
+        aac_adapter_enable_int(dev);
+        /*
+         * Tell the adapter that all is configured, and it can
+         * start accepting requests
+         */
+        aac_rx_start_adapter(dev);
 
         return 0;
 
-error_irq:
-        rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
-        free_irq(dev->scsi_host_ptr->irq, (void *)dev);
-
 error_iounmap:
 
         return -1;
@@ -530,20 +608,11 @@ error_iounmap:
 
 int aac_rx_init(struct aac_dev *dev)
 {
-        int retval;
-
         /*
          * Fill in the function dispatch table.
          */
         dev->a_ops.adapter_ioremap = aac_rx_ioremap;
+        dev->a_ops.adapter_comm = aac_rx_select_comm;
 
-        retval = _aac_rx_init(dev);
-        if (!retval) {
-                /*
-                 * Tell the adapter that all is configured, and it can
-                 * start accepting requests
-                 */
-                aac_rx_start_adapter(dev);
-        }
-        return retval;
+        return _aac_rx_init(dev);
 }
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 511b0a938fb1..8535db068c2f 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -92,6 +92,17 @@ static void aac_sa_disable_interrupt (struct aac_dev *dev)
 }
 
 /**
+ * aac_sa_enable_interrupt - enable interrupt
+ * @dev: Which adapter to enable.
+ */
+
+static void aac_sa_enable_interrupt (struct aac_dev *dev)
+{
+        sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
+                                DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
+}
+
+/**
  * aac_sa_notify_adapter - handle adapter notification
  * @dev: Adapter that notification is for
  * @event: Event to notify
@@ -347,32 +358,36 @@ int aac_sa_init(struct aac_dev *dev)
                 msleep(1);
         }
 
-        if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev ) < 0) {
-                printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance);
-                goto error_iounmap;
-        }
-
         /*
          * Fill in the function dispatch table.
          */
 
         dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
         dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
+        dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
         dev->a_ops.adapter_notify = aac_sa_notify_adapter;
         dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
         dev->a_ops.adapter_check_health = aac_sa_check_health;
+        dev->a_ops.adapter_intr = aac_sa_intr;
         dev->a_ops.adapter_ioremap = aac_sa_ioremap;
 
         /*
          * First clear out all interrupts. Then enable the one's that
          * we can handle.
          */
-        sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
-        sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
-                                DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
+        aac_adapter_disable_int(dev);
+        aac_adapter_enable_int(dev);
 
         if(aac_init_adapter(dev) == NULL)
                 goto error_irq;
+        if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr,
+                        IRQF_SHARED|IRQF_DISABLED,
+                        "aacraid", (void *)dev ) < 0) {
+                printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
+                        name, instance);
+                goto error_iounmap;
+        }
+        aac_adapter_enable_int(dev);
 
         /*
          * Tell the adapter that all is configured, and it can start
@@ -382,7 +397,7 @@ int aac_sa_init(struct aac_dev *dev)
         return 0;
 
 error_irq:
-        sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
+        aac_sa_disable_interrupt(dev);
         free_irq(dev->scsi_host_ptr->irq, (void *)dev);
 
 error_iounmap:
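
Both init paths now follow the same ordering: fill in the ops table, let aac_init_adapter() negotiate, register the IRQ through dev->a_ops.adapter_intr, and only then enable interrupts, with the error_irq/error_iounmap labels unwinding in reverse. The toy program below illustrates that goto-unwind convention in isolation; it simplifies the real error paths and uses made-up resource names.

#include <stdio.h>

static int negotiate(int fail)     { if (fail) return -1; puts("negotiated comm interface"); return 0; }
static int grab_irq(int fail)      { if (fail) return -1; puts("IRQ requested"); return 0; }
static int start_adapter(int fail) { if (fail) return -1; puts("adapter started"); return 0; }
static void release_irq(void)      { puts("IRQ released"); }
static void unmap_regs(void)       { puts("registers unmapped"); }

/* Bring-up in the order the reworked init paths use -- negotiate first,
 * request the IRQ second, start the adapter last -- with goto labels
 * unwinding only what was already acquired, in reverse order. */
static int adapter_init(int fail_step)
{
        if (negotiate(fail_step == 1) < 0)
                goto error_iounmap;
        if (grab_irq(fail_step == 2) < 0)
                goto error_iounmap;
        if (start_adapter(fail_step == 3) < 0)
                goto error_irq;
        return 0;

error_irq:
        release_irq();
error_iounmap:
        unmap_regs();
        return -1;
}

int main(void)
{
        puts("-- failure while starting the adapter --");
        adapter_init(3);
        puts("-- clean bring-up --");
        return adapter_init(0) ? 1 : 0;
}
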