 drivers/scsi/aacraid/README     |    2 
 drivers/scsi/aacraid/TODO       |    1 
 drivers/scsi/aacraid/aachba.c   |   23 
 drivers/scsi/aacraid/aacraid.h  |   25 
 drivers/scsi/aacraid/comminit.c |   25 
 drivers/scsi/aacraid/commsup.c  |   88 
 drivers/scsi/aacraid/dpcsup.c   |  115 
 drivers/scsi/aacraid/linit.c    |   27 
 drivers/scsi/aacraid/rkt.c      |  172 
 drivers/scsi/aacraid/rx.c       |  157 
 drivers/scsi/aacraid/sa.c       |   38 
 11 files changed, 454 insertions(+), 219 deletions(-)
diff --git a/drivers/scsi/aacraid/README b/drivers/scsi/aacraid/README
index 4fa524687bc5..4193865d419c 100644
--- a/drivers/scsi/aacraid/README
+++ b/drivers/scsi/aacraid/README
@@ -57,7 +57,7 @@ Deanna Bonds (non-DASD support, PAE fibs and 64 bit,
 (fixed 64bit and 64G memory model, changed confusing naming convention
 where fibs that go to the hardware are consistently called hw_fibs and
 not just fibs like the name of the driver tracking structure)
-Mark Salyzyn <Mark_Salyzyn@adaptec.com> Fixed panic issues and added some new product ids for upcoming hbas.
+Mark Salyzyn <Mark_Salyzyn@adaptec.com> Fixed panic issues and added some new product ids for upcoming hbas. Performance tuning, card failover and bug mitigations.
 
 Original Driver
 -------------------------
diff --git a/drivers/scsi/aacraid/TODO b/drivers/scsi/aacraid/TODO
index 2f148b4617dc..78dc863eff4f 100644
--- a/drivers/scsi/aacraid/TODO
+++ b/drivers/scsi/aacraid/TODO
@@ -1,4 +1,3 @@
 o	Testing
 o	More testing
-o	Drop irq_mask, basically unused
 o	I/O size increase
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a913196459d5..acc3d9209879 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -359,15 +359,6 @@ int aac_get_containers(struct aac_dev *dev)
 	return status;
 }
 
-static void aac_io_done(struct scsi_cmnd * scsicmd)
-{
-	unsigned long cpu_flags;
-	struct Scsi_Host *host = scsicmd->device->host;
-	spin_lock_irqsave(host->host_lock, cpu_flags);
-	scsicmd->scsi_done(scsicmd);
-	spin_unlock_irqrestore(host->host_lock, cpu_flags);
-}
-
 static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigned int offset, unsigned int len)
 {
 	void *buf;
@@ -424,7 +415,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
 
 	fib_complete(fibptr);
 	fib_free(fibptr);
-	aac_io_done(scsicmd);
+	scsicmd->scsi_done(scsicmd);
 }
 
 /**
@@ -988,7 +979,7 @@ static void io_callback(void *context, struct fib * fibptr)
 	fib_complete(fibptr);
 	fib_free(fibptr);
 
-	aac_io_done(scsicmd);
+	scsicmd->scsi_done(scsicmd);
 }
 
 static int aac_read(struct scsi_cmnd * scsicmd, int cid)
@@ -1167,7 +1158,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
 	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
 	 */
 	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
-	aac_io_done(scsicmd);
+	scsicmd->scsi_done(scsicmd);
 	fib_complete(cmd_fibcontext);
 	fib_free(cmd_fibcontext);
 	return 0;
@@ -1239,7 +1230,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
 	 */
 	if (!(cmd_fibcontext = fib_alloc(dev))) {
 		scsicmd->result = DID_ERROR << 16;
-		aac_io_done(scsicmd);
+		scsicmd->scsi_done(scsicmd);
 		return 0;
 	}
 	fib_init(cmd_fibcontext);
@@ -1336,7 +1327,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
 	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
 	 */
 	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
-	aac_io_done(scsicmd);
+	scsicmd->scsi_done(scsicmd);
 
 	fib_complete(cmd_fibcontext);
 	fib_free(cmd_fibcontext);
@@ -1380,7 +1371,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
 
 	fib_complete(fibptr);
 	fib_free(fibptr);
-	aac_io_done(cmd);
+	cmd->scsi_done(cmd);
 }
 
 static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
@@ -2097,7 +2088,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 
 	fib_complete(fibptr);
 	fib_free(fibptr);
-	aac_io_done(scsicmd);
+	scsicmd->scsi_done(scsicmd);
 }
 
 /**
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 2ebe402bc31a..30fd8d6e3f31 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -481,6 +481,7 @@ enum aac_log_level {
 #define FSAFS_NTC_FIB_CONTEXT		0x030c
 
 struct aac_dev;
+struct fib;
 
 struct adapter_ops
 {
@@ -489,6 +490,7 @@ struct adapter_ops
 	void (*adapter_disable_int)(struct aac_dev *dev);
 	int  (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
 	int  (*adapter_check_health)(struct aac_dev *dev);
+	int  (*adapter_send)(struct fib * fib);
 };
 
 /*
@@ -659,6 +661,10 @@ struct rx_mu_registers {
 				    Status Register */
 	__le32	OIMR;		/* 1334h | 34h | Outbound Interrupt
 				    Mask Register */
+	__le32	reserved2;	/* 1338h | 38h | Reserved */
+	__le32	reserved3;	/* 133Ch | 3Ch | Reserved */
+	__le32	InboundQueue;	/* 1340h | 40h | Inbound Queue Port relative to firmware */
+	__le32	OutboundQueue;	/* 1344h | 44h | Outbound Queue Port relative to firmware */
 			/* * Must access through ATU Inbound
 			     Translation Window */
 };
@@ -693,8 +699,8 @@ struct rx_inbound {
 #define OutboundDoorbellReg	MUnit.ODR
 
 struct rx_registers {
-	struct rx_mu_registers		MUnit;		/* 1300h - 1334h */
-	__le32				reserved1[6];	/* 1338h - 134ch */
+	struct rx_mu_registers		MUnit;		/* 1300h - 1344h */
+	__le32				reserved1[2];	/* 1348h - 134ch */
 	struct rx_inbound		IndexRegs;
 };
 
@@ -711,8 +717,8 @@ struct rx_registers {
 #define rkt_inbound rx_inbound
 
 struct rkt_registers {
-	struct rkt_mu_registers		MUnit;		 /* 1300h - 1334h */
-	__le32				reserved1[1010]; /* 1338h - 22fch */
+	struct rkt_mu_registers		MUnit;		 /* 1300h - 1344h */
+	__le32				reserved1[1006]; /* 1348h - 22fch */
 	struct rkt_inbound		IndexRegs;	 /* 2300h - */
 };
 
@@ -721,8 +727,6 @@ struct rkt_registers {
 #define rkt_writeb(AEP, CSR, value)	writeb(value, &((AEP)->regs.rkt->CSR))
 #define rkt_writel(AEP, CSR, value)	writel(value, &((AEP)->regs.rkt->CSR))
 
-struct fib;
-
 typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
 
 struct aac_fib_context {
@@ -937,7 +941,6 @@ struct aac_dev
 	const char		*name;
 	int			id;
 
-	u16			irq_mask;
 	/*
 	 *	negotiated FIB settings
 	 */
@@ -972,6 +975,7 @@ struct aac_dev
 	struct adapter_ops	a_ops;
 	unsigned long		fsrev;		/* Main driver's revision number */
 
+	unsigned		base_size;	/* Size of mapped in region */
 	struct aac_init		*init;		/* Holds initialization info to communicate with adapter */
 	dma_addr_t		init_pa;	/* Holds physical address of the init struct */
 
@@ -992,6 +996,9 @@ struct aac_dev
 	/*
 	 *	The following is the device specific extension.
 	 */
+#if (!defined(AAC_MIN_FOOTPRINT_SIZE))
+#	define AAC_MIN_FOOTPRINT_SIZE 8192
+#endif
 	union
 	{
 		struct sa_registers __iomem *sa;
@@ -1012,6 +1019,7 @@ struct aac_dev
 	u8			nondasd_support;
 	u8			dac_support;
 	u8			raid_scsi_mode;
+	u8			new_comm_interface;
 	/* macro side-effects BEWARE */
 #	define			raw_io_interface \
 	  init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
@@ -1034,6 +1042,8 @@ struct aac_dev
 #define aac_adapter_check_health(dev) \
 	(dev)->a_ops.adapter_check_health(dev)
 
+#define aac_adapter_send(fib) \
+	((fib)->dev)->a_ops.adapter_send(fib)
 
 #define FIB_CONTEXT_FLAG_TIMED_OUT		(0x00000001)
 
@@ -1779,6 +1789,7 @@ int aac_rkt_init(struct aac_dev *dev);
 int aac_sa_init(struct aac_dev *dev);
 unsigned int aac_response_normal(struct aac_queue * q);
 unsigned int aac_command_normal(struct aac_queue * q);
+unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
 int aac_command_thread(struct aac_dev * dev);
 int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
 int fib_adapter_complete(struct fib * fibptr, unsigned short size);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 59a341b2aedc..82821d331c07 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -116,6 +116,10 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	}
 
 	init->InitFlags = 0;
+	if (dev->new_comm_interface) {
+		init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
+		dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
+	}
 	init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
 	init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
 	init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
@@ -315,12 +319,33 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 		- sizeof(struct aac_fibhdr)
 		- sizeof(struct aac_write) + sizeof(struct sgentry))
 			/ sizeof(struct sgentry);
+	dev->new_comm_interface = 0;
 	dev->raw_io_64 = 0;
 	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
 		0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
 		(status[0] == 0x00000001)) {
 		if (status[1] & AAC_OPT_NEW_COMM_64)
 			dev->raw_io_64 = 1;
+		if (status[1] & AAC_OPT_NEW_COMM)
+			dev->new_comm_interface = dev->a_ops.adapter_send != 0;
+		if (dev->new_comm_interface && (status[2] > dev->base_size)) {
+			iounmap(dev->regs.sa);
+			dev->base_size = status[2];
+			dprintk((KERN_DEBUG "ioremap(%lx,%d)\n",
+			  host->base, status[2]));
+			dev->regs.sa = ioremap(host->base, status[2]);
+			if (dev->regs.sa == NULL) {
+				/* remap failed, go back ... */
+				dev->new_comm_interface = 0;
+				dev->regs.sa = ioremap(host->base,
+						AAC_MIN_FOOTPRINT_SIZE);
+				if (dev->regs.sa == NULL) {
+					printk(KERN_WARNING
+					  "aacraid: unable to map adapter.\n");
+					return NULL;
+				}
+			}
+		}
 	}
 	if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
 		0, 0, 0, 0, 0, 0,
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e4d543a474ae..ee9067255930 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -212,7 +212,7 @@ void fib_init(struct fib *fibptr)
 	hw_fib->header.StructType = FIB_MAGIC;
 	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
 	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
-	hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
+	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
 	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
 	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
 }
@@ -380,9 +380,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
 
 int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
 {
-	u32 index;
 	struct aac_dev * dev = fibptr->dev;
-	unsigned long nointr = 0;
 	struct hw_fib * hw_fib = fibptr->hw_fib;
 	struct aac_queue * q;
 	unsigned long flags = 0;
@@ -417,7 +415,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
 	 *	Map the fib into 32bits by using the fib number
 	 */
 
-	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
+	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
 	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
 	/*
 	 *	Set FIB state to indicate where it came from and if we want a
@@ -456,10 +454,10 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
 
 	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
 
-	dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
 	dprintk((KERN_DEBUG "Fib contents:.\n"));
-	dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
-	dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
+	dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
+	dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
+	dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
 	dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
 	dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
 	dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
@@ -469,14 +467,37 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
 	if(wait)
 		spin_lock_irqsave(&fibptr->event_lock, flags);
 	spin_lock_irqsave(q->lock, qflags);
-	aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
-
-	list_add_tail(&fibptr->queue, &q->pendingq);
-	q->numpending++;
-	*(q->headers.producer) = cpu_to_le32(index + 1);
-	spin_unlock_irqrestore(q->lock, qflags);
-	if (!(nointr & aac_config.irq_mod))
-		aac_adapter_notify(dev, AdapNormCmdQueue);
+	if (dev->new_comm_interface) {
+		unsigned long count = 10000000L; /* 50 seconds */
+		list_add_tail(&fibptr->queue, &q->pendingq);
+		q->numpending++;
+		spin_unlock_irqrestore(q->lock, qflags);
+		while (aac_adapter_send(fibptr) != 0) {
+			if (--count == 0) {
+				if (wait)
+					spin_unlock_irqrestore(&fibptr->event_lock, flags);
+				spin_lock_irqsave(q->lock, qflags);
+				q->numpending--;
+				list_del(&fibptr->queue);
+				spin_unlock_irqrestore(q->lock, qflags);
+				return -ETIMEDOUT;
+			}
+			udelay(5);
+		}
+	} else {
+		u32 index;
+		unsigned long nointr = 0;
+		aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
+
+		list_add_tail(&fibptr->queue, &q->pendingq);
+		q->numpending++;
+		*(q->headers.producer) = cpu_to_le32(index + 1);
+		spin_unlock_irqrestore(q->lock, qflags);
+		dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
+		if (!(nointr & aac_config.irq_mod))
+			aac_adapter_notify(dev, AdapNormCmdQueue);
+	}
+
 	/*
 	 *	If the caller wanted us to wait for response wait now.
 	 */
@@ -492,7 +513,6 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
 		 * hardware failure has occurred.
 		 */
 		unsigned long count = 36000000L; /* 3 minutes */
-		unsigned long qflags;
 		while (down_trylock(&fibptr->event_wait)) {
 			if (--count == 0) {
 				spin_lock_irqsave(q->lock, qflags);
@@ -621,12 +641,16 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
 	unsigned long qflags;
 
 	if (hw_fib->header.XferState == 0) {
+		if (dev->new_comm_interface)
+			kfree (hw_fib);
 		return 0;
 	}
 	/*
 	 *	If we plan to do anything check the structure type first.
 	 */
 	if ( hw_fib->header.StructType != FIB_MAGIC ) {
+		if (dev->new_comm_interface)
+			kfree (hw_fib);
 		return -EINVAL;
 	}
 	/*
@@ -637,21 +661,25 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
 	 *	send the completed cdb to the adapter.
 	 */
 	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
-		u32 index;
-		hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
-		if (size) {
-			size += sizeof(struct aac_fibhdr);
-			if (size > le16_to_cpu(hw_fib->header.SenderSize))
-				return -EMSGSIZE;
-			hw_fib->header.Size = cpu_to_le16(size);
+		if (dev->new_comm_interface) {
+			kfree (hw_fib);
+		} else {
+			u32 index;
+			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
+			if (size) {
+				size += sizeof(struct aac_fibhdr);
+				if (size > le16_to_cpu(hw_fib->header.SenderSize))
+					return -EMSGSIZE;
+				hw_fib->header.Size = cpu_to_le16(size);
+			}
+			q = &dev->queues->queue[AdapNormRespQueue];
+			spin_lock_irqsave(q->lock, qflags);
+			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
+			*(q->headers.producer) = cpu_to_le32(index + 1);
+			spin_unlock_irqrestore(q->lock, qflags);
+			if (!(nointr & (int)aac_config.irq_mod))
+				aac_adapter_notify(dev, AdapNormRespQueue);
 		}
-		q = &dev->queues->queue[AdapNormRespQueue];
-		spin_lock_irqsave(q->lock, qflags);
-		aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
-		*(q->headers.producer) = cpu_to_le32(index + 1);
-		spin_unlock_irqrestore(q->lock, qflags);
-		if (!(nointr & (int)aac_config.irq_mod))
-			aac_adapter_notify(dev, AdapNormRespQueue);
 	}
 	else
 	{
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index be2e98de9fab..439948ef8251 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -73,7 +73,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
 		int fast;
 		u32 index = le32_to_cpu(entry->addr);
 		fast = index & 0x01;
-		fib = &dev->fibs[index >> 1];
+		fib = &dev->fibs[index >> 2];
 		hwfib = fib->hw_fib;
 
 		aac_consumer_free(dev, q, HostNormRespQueue);
@@ -213,3 +213,116 @@ unsigned int aac_command_normal(struct aac_queue *q)
 	spin_unlock_irqrestore(q->lock, flags);
 	return 0;
 }
+
+
+/**
+ *	aac_intr_normal	-	Handle command replies
+ *	@dev: Device
+ *	@index: completion reference
+ *
+ *	This DPC routine will be run when the adapter interrupts us to let us
+ *	know there is a response on our normal priority queue. We will pull off
+ *	all QE there are and wake up all the waiters before exiting.
+ */
+
+unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
+{
+	u32 index = le32_to_cpu(Index);
+
+	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, Index));
+	if ((index & 0x00000002L)) {
+		struct hw_fib * hw_fib;
+		struct fib * fib;
+		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
+		unsigned long flags;
+
+		if (index == 0xFFFFFFFEL) /* Special Case */
+			return 0; /* Do nothing */
+		/*
+		 *	Allocate a FIB. For non queued stuff we can just use
+		 *	the stack so we are happy. We need a fib object in order to
+		 *	manage the linked lists.
+		 */
+		if ((!dev->aif_thread)
+		 || (!(fib = kmalloc(sizeof(struct fib),GFP_ATOMIC))))
+			return 1;
+		if (!(hw_fib = kmalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
+			kfree (fib);
+			return 1;
+		}
+		memset(hw_fib, 0, sizeof(struct hw_fib));
+		memcpy(hw_fib, (struct hw_fib *)(((unsigned long)(dev->regs.sa)) + (index & ~0x00000002L)), sizeof(struct hw_fib));
+		memset(fib, 0, sizeof(struct fib));
+		INIT_LIST_HEAD(&fib->fiblink);
+		fib->type = FSAFS_NTC_FIB_CONTEXT;
+		fib->size = sizeof(struct fib);
+		fib->hw_fib = hw_fib;
+		fib->data = hw_fib->data;
+		fib->dev = dev;
+
+		spin_lock_irqsave(q->lock, flags);
+		list_add_tail(&fib->fiblink, &q->cmdq);
+		wake_up_interruptible(&q->cmdready);
+		spin_unlock_irqrestore(q->lock, flags);
+		return 1;
+	} else {
+		int fast = index & 0x01;
+		struct fib * fib = &dev->fibs[index >> 2];
+		struct hw_fib * hwfib = fib->hw_fib;
+
+		/*
+		 *	Remove this fib from the Outstanding I/O queue.
+		 *	But only if it has not already been timed out.
+		 *
+		 *	If the fib has been timed out already, then just
+		 *	continue. The caller has already been notified that
+		 *	the fib timed out.
+		 */
+		if ((fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
+			printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
+			printk(KERN_DEBUG"aacraid: hwfib=%p index=%i fib=%p\n",hwfib, hwfib->header.SenderData,fib);
+			return 0;
+		}
+
+		list_del(&fib->queue);
+		dev->queues->queue[AdapNormCmdQueue].numpending--;
+
+		if (fast) {
+			/*
+			 *	Doctor the fib
+			 */
+			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
+			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+		}
+
+		FIB_COUNTER_INCREMENT(aac_config.FibRecved);
+
+		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
+		{
+			u32 *pstatus = (u32 *)hwfib->data;
+			if (*pstatus & cpu_to_le32(0xffff0000))
+				*pstatus = cpu_to_le32(ST_OK);
+		}
+		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
+		{
+			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
+				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
+			else
+				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
+			/*
+			 *	NOTE: we cannot touch the fib after this
+			 *	call, because it may have been deallocated.
+			 */
+			fib->callback(fib->callback_data, fib);
+		} else {
+			unsigned long flagv;
+			dprintk((KERN_INFO "event_wait up\n"));
+			spin_lock_irqsave(&fib->event_lock, flagv);
+			fib->done = 1;
+			up(&fib->event_wait);
+			spin_unlock_irqrestore(&fib->event_lock, flagv);
+			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+		}
+		return 0;
+	}
+}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index c235d0c0e7a7..ab383d1f59e2 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -788,8 +788,29 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 		goto out_free_host;
 	spin_lock_init(&aac->fib_lock);
 
-	if ((*aac_drivers[index].init)(aac))
+	/*
+	 *	Map in the registers from the adapter.
+	 */
+	aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
+	if ((aac->regs.sa = ioremap(
+	  (unsigned long)aac->scsi_host_ptr->base, AAC_MIN_FOOTPRINT_SIZE))
+	  == NULL) {
+		printk(KERN_WARNING "%s: unable to map adapter.\n",
+		  AAC_DRIVERNAME);
 		goto out_free_fibs;
+	}
+	if ((*aac_drivers[index].init)(aac))
+		goto out_unmap;
+
+	/*
+	 * Start any kernel threads needed
+	 */
+	aac->thread_pid = kernel_thread((int (*)(void *))aac_command_thread,
+	  aac, 0);
+	if (aac->thread_pid < 0) {
+		printk(KERN_ERR "aacraid: Unable to create command thread.\n");
+		goto out_deinit;
+	}
 
 	/*
 	 * If we had set a smaller DMA mask earlier, set it to 4gig
@@ -866,10 +887,11 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 
 	aac_send_shutdown(aac);
 	aac_adapter_disable_int(aac);
+	free_irq(pdev->irq, aac);
+ out_unmap:
 	fib_map_free(aac);
 	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
 	kfree(aac->queues);
-	free_irq(pdev->irq, aac);
 	iounmap(aac->regs.sa);
  out_free_fibs:
 	kfree(aac->fibs);
@@ -910,6 +932,7 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
 	iounmap(aac->regs.sa);
 
 	kfree(aac->fibs);
+	kfree(aac->fsa_dev);
 
 	list_del(&aac->entry);
 	scsi_host_put(shost);
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 557287a0b80b..fc4c73c2a6a9 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -49,40 +49,57 @@
 static irqreturn_t aac_rkt_intr(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct aac_dev *dev = dev_id;
-	unsigned long bellbits;
-	u8 intstat, mask;
-	intstat = rkt_readb(dev, MUnit.OISR);
-	/*
-	 *	Read mask and invert because drawbridge is reversed.
-	 *	This allows us to only service interrupts that have
-	 *	been enabled.
-	 */
-	mask = ~(dev->OIMR);
-	/* Check to see if this is our interrupt.  If it isn't just return */
-	if (intstat & mask)
-	{
-		bellbits = rkt_readl(dev, OutboundDoorbellReg);
-		if (bellbits & DoorBellPrintfReady) {
-			aac_printf(dev, rkt_readl(dev, IndexRegs.Mailbox[5]));
-			rkt_writel(dev, MUnit.ODR,DoorBellPrintfReady);
-			rkt_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
-		}
-		else if (bellbits & DoorBellAdapterNormCmdReady) {
-			rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
-			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
-		}
-		else if (bellbits & DoorBellAdapterNormRespReady) {
-			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
-			rkt_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
-		}
-		else if (bellbits & DoorBellAdapterNormCmdNotFull) {
-			rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
-		}
-		else if (bellbits & DoorBellAdapterNormRespNotFull) {
-			rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
-			rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
-		}
-		return IRQ_HANDLED;
-	}
+
+	if (dev->new_comm_interface) {
+		u32 Index = rkt_readl(dev, MUnit.OutboundQueue);
+		if (Index == 0xFFFFFFFFL)
+			Index = rkt_readl(dev, MUnit.OutboundQueue);
+		if (Index != 0xFFFFFFFFL) {
+			do {
+				if (aac_intr_normal(dev, Index)) {
+					rkt_writel(dev, MUnit.OutboundQueue, Index);
+					rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
+				}
+				Index = rkt_readl(dev, MUnit.OutboundQueue);
+			} while (Index != 0xFFFFFFFFL);
+			return IRQ_HANDLED;
+		}
+	} else {
+		unsigned long bellbits;
+		u8 intstat;
+		intstat = rkt_readb(dev, MUnit.OISR);
+		/*
+		 *	Read mask and invert because drawbridge is reversed.
+		 *	This allows us to only service interrupts that have
+		 *	been enabled.
+		 *	Check to see if this is our interrupt.  If it isn't just return
+		 */
+		if (intstat & ~(dev->OIMR))
+		{
+			bellbits = rkt_readl(dev, OutboundDoorbellReg);
+			if (bellbits & DoorBellPrintfReady) {
+				aac_printf(dev, rkt_readl (dev, IndexRegs.Mailbox[5]));
+				rkt_writel(dev, MUnit.ODR,DoorBellPrintfReady);
+				rkt_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
+			}
+			else if (bellbits & DoorBellAdapterNormCmdReady) {
+				rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
+				aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+//				rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
+			}
+			else if (bellbits & DoorBellAdapterNormRespReady) {
+				rkt_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
+				aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+			}
+			else if (bellbits & DoorBellAdapterNormCmdNotFull) {
+				rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+			}
+			else if (bellbits & DoorBellAdapterNormRespNotFull) {
+				rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+				rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
+			}
+			return IRQ_HANDLED;
+		}
+	}
 	return IRQ_NONE;
 }
@@ -173,7 +190,10 @@ static int rkt_sync_cmd(struct aac_dev *dev, u32 command,
 		/*
 		 *	Restore interrupt mask even though we timed out
 		 */
-		rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+		if (dev->new_comm_interface)
+			rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
+		else
+			rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
 		return -ETIMEDOUT;
 	}
 	/*
@@ -196,7 +216,10 @@ static int rkt_sync_cmd(struct aac_dev *dev, u32 command,
 	/*
 	 *	Restore interrupt mask
 	 */
-	rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+	if (dev->new_comm_interface)
+		rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
+	else
+		rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
 	return 0;
 
 }
@@ -268,15 +291,6 @@ static void aac_rkt_start_adapter(struct aac_dev *dev)
 
 	init = dev->init;
 	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
-	/*
-	 *	First clear out all interrupts.  Then enable the one's that we
-	 *	can handle.
-	 */
-	rkt_writeb(dev, MUnit.OIMR, 0xff);
-	rkt_writel(dev, MUnit.ODR, 0xffffffff);
-//	rkt_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
-	rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
-
 	// We can only use a 32 bit address here
 	rkt_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
 	  0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
@@ -350,6 +364,39 @@ static int aac_rkt_check_health(struct aac_dev *dev)
 }
 
 /**
+ *	aac_rkt_send
+ *	@fib: fib to issue
+ *
+ *	Will send a fib, returning 0 if successful.
+ */
+static int aac_rkt_send(struct fib * fib)
+{
+	u64 addr = fib->hw_fib_pa;
+	struct aac_dev *dev = fib->dev;
+	volatile void __iomem *device = dev->regs.rkt;
+	u32 Index;
+
+	dprintk((KERN_DEBUG "%p->aac_rkt_send(%p->%llx)\n", dev, fib, addr));
+	Index = rkt_readl(dev, MUnit.InboundQueue);
+	if (Index == 0xFFFFFFFFL)
+		Index = rkt_readl(dev, MUnit.InboundQueue);
+	dprintk((KERN_DEBUG "Index = 0x%x\n", Index));
+	if (Index == 0xFFFFFFFFL)
+		return Index;
+	device += Index;
+	dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff),
+	  (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
+	writel((u32)(addr & 0xffffffff), device);
+	device += sizeof(u32);
+	writel((u32)(addr >> 32), device);
+	device += sizeof(u32);
+	writel(le16_to_cpu(fib->hw_fib->header.Size), device);
+	rkt_writel(dev, MUnit.InboundQueue, Index);
+	dprintk((KERN_DEBUG "aac_rkt_send - return 0\n"));
+	return 0;
+}
+
+/**
 *	aac_rkt_init - initialize an i960 based AAC card
 *	@dev: device to configure
 *
@@ -369,13 +416,8 @@ int aac_rkt_init(struct aac_dev *dev)
 	name     = dev->name;
 
 	/*
-	 *	Map in the registers from the adapter.
+	 *	Check to see if the board panic'd while booting.
 	 */
-	if((dev->regs.rkt = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
-	{
-		printk(KERN_WARNING "aacraid: unable to map i960.\n" );
-		goto error_iounmap;
-	}
 	/*
 	 *	Check to see if the board failed any self tests.
 	 */
@@ -426,6 +468,7 @@ int aac_rkt_init(struct aac_dev *dev)
 	dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
 	dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
 	dev->a_ops.adapter_check_health = aac_rkt_check_health;
+	dev->a_ops.adapter_send = aac_rkt_send;
 
 	/*
 	 *	First clear out all interrupts.  Then enable the one's that we
@@ -437,15 +480,24 @@ int aac_rkt_init(struct aac_dev *dev)
 
 	if (aac_init_adapter(dev) == NULL)
 		goto error_irq;
-	/*
-	 *	Start any kernel threads needed
-	 */
-	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
-	if(dev->thread_pid < 0)
-	{
-		printk(KERN_ERR "aacraid: Unable to create rkt thread.\n");
-		goto error_kfree;
-	}
+	if (dev->new_comm_interface) {
+		/*
+		 * FIB Setup has already been done, but we can minimize the
+		 * damage by at least ensuring the OS never issues more
+		 * commands than we can handle. The Rocket adapters currently
+		 * can only handle 246 commands and 8 AIFs at the same time,
+		 * and in fact do notify us accordingly if we negotiate the
+		 * FIB size. The problem that causes us to add this check is
+		 * to ensure that we do not overdo it with the adapter when a
+		 * hard coded FIB override is being utilized. This special
+		 * case warrants this half baked, but convenient, check here.
+		 */
+		if (dev->scsi_host_ptr->can_queue > (246 - AAC_NUM_MGT_FIB)) {
+			dev->init->MaxIoCommands = cpu_to_le32(246);
+			dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB;
+		}
+		rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
+	}
 	/*
 	 *	Tell the adapter that all is configured, and it can start
 	 *	accepting requests
@@ -453,15 +505,11 @@ int aac_rkt_init(struct aac_dev *dev)
 	aac_rkt_start_adapter(dev);
 	return 0;
 
-error_kfree:
-	kfree(dev->queues);
-
 error_irq:
 	rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
 	free_irq(dev->scsi_host_ptr->irq, (void *)dev);
 
 error_iounmap:
-	iounmap(dev->regs.rkt);
 
 	return -1;
 }
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index a8459faf87ca..da99046e5393 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -49,40 +49,57 @@
 static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct aac_dev *dev = dev_id;
-	unsigned long bellbits;
-	u8 intstat, mask;
-	intstat = rx_readb(dev, MUnit.OISR);
-	/*
-	 *	Read mask and invert because drawbridge is reversed.
-	 *	This allows us to only service interrupts that have
-	 *	been enabled.
-	 */
-	mask = ~(dev->OIMR);
-	/* Check to see if this is our interrupt.  If it isn't just return */
-	if (intstat & mask)
-	{
-		bellbits = rx_readl(dev, OutboundDoorbellReg);
-		if (bellbits & DoorBellPrintfReady) {
-			aac_printf(dev, rx_readl(dev, IndexRegs.Mailbox[5]));
-			rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
-			rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
-		}
-		else if (bellbits & DoorBellAdapterNormCmdReady) {
-			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
-			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
-		}
-		else if (bellbits & DoorBellAdapterNormRespReady) {
-			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
-			rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
-		}
-		else if (bellbits & DoorBellAdapterNormCmdNotFull) {
-			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
-		}
-		else if (bellbits & DoorBellAdapterNormRespNotFull) {
-			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
-			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
-		}
-		return IRQ_HANDLED;
-	}
+
+	dprintk((KERN_DEBUG "aac_rx_intr(%d,%p,%p)\n", irq, dev_id, regs));
+	if (dev->new_comm_interface) {
+		u32 Index = rx_readl(dev, MUnit.OutboundQueue);
+		if (Index == 0xFFFFFFFFL)
+			Index = rx_readl(dev, MUnit.OutboundQueue);
+		if (Index != 0xFFFFFFFFL) {
+			do {
+				if (aac_intr_normal(dev, Index)) {
+					rx_writel(dev, MUnit.OutboundQueue, Index);
+					rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
+				}
+				Index = rx_readl(dev, MUnit.OutboundQueue);
+			} while (Index != 0xFFFFFFFFL);
+			return IRQ_HANDLED;
+		}
+	} else {
+		unsigned long bellbits;
+		u8 intstat;
+		intstat = rx_readb(dev, MUnit.OISR);
+		/*
+		 *	Read mask and invert because drawbridge is reversed.
+		 *	This allows us to only service interrupts that have
+		 *	been enabled.
+		 *	Check to see if this is our interrupt.  If it isn't just return
+		 */
+		if (intstat & ~(dev->OIMR))
+		{
+			bellbits = rx_readl(dev, OutboundDoorbellReg);
+			if (bellbits & DoorBellPrintfReady) {
+				aac_printf(dev, rx_readl (dev, IndexRegs.Mailbox[5]));
+				rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
+				rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
+			}
+			else if (bellbits & DoorBellAdapterNormCmdReady) {
+				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
+				aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+			}
+			else if (bellbits & DoorBellAdapterNormRespReady) {
+				rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
+				aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+			}
+			else if (bellbits & DoorBellAdapterNormCmdNotFull) {
+				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+			}
+			else if (bellbits & DoorBellAdapterNormRespNotFull) {
+				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
+			}
+			return IRQ_HANDLED;
+		}
+	}
 	return IRQ_NONE;
 }
@@ -173,7 +190,10 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
 		/*
 		 *	Restore interrupt mask even though we timed out
 		 */
-		rx_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
+		if (dev->new_comm_interface)
+			rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
+		else
+			rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
 		return -ETIMEDOUT;
 	}
 	/*
@@ -196,7 +216,10 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
 	/*
 	 *	Restore interrupt mask
 	 */
-	rx_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
+	if (dev->new_comm_interface)
+		rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
+	else
+		rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
 	return 0;
 
 }
@@ -267,15 +290,6 @@ static void aac_rx_start_adapter(struct aac_dev *dev)
 
 	init = dev->init;
 	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
-	/*
-	 *	First clear out all interrupts.  Then enable the one's that we
-	 *	can handle.
-	 */
-	rx_writeb(dev, MUnit.OIMR, 0xff);
-	rx_writel(dev, MUnit.ODR, 0xffffffff);
-//	rx_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
-	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
-
 	// We can only use a 32 bit address here
 	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
 	  0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
@@ -349,6 +363,39 @@ static int aac_rx_check_health(struct aac_dev *dev)
 }
 
 /**
+ *	aac_rx_send
+ *	@fib: fib to issue
+ *
+ *	Will send a fib, returning 0 if successful.
+ */
+static int aac_rx_send(struct fib * fib)
+{
+	u64 addr = fib->hw_fib_pa;
+	struct aac_dev *dev = fib->dev;
+	volatile void __iomem *device = dev->regs.rx;
+	u32 Index;
+
+	dprintk((KERN_DEBUG "%p->aac_rx_send(%p->%llx)\n", dev, fib, addr));
+	Index = rx_readl(dev, MUnit.InboundQueue);
+	if (Index == 0xFFFFFFFFL)
+		Index = rx_readl(dev, MUnit.InboundQueue);
+	dprintk((KERN_DEBUG "Index = 0x%x\n", Index));
+	if (Index == 0xFFFFFFFFL)
+		return Index;
+	device += Index;
+	dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff),
+	  (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
+	writel((u32)(addr & 0xffffffff), device);
+	device += sizeof(u32);
+	writel((u32)(addr >> 32), device);
+	device += sizeof(u32);
+	writel(le16_to_cpu(fib->hw_fib->header.Size), device);
+	rx_writel(dev, MUnit.InboundQueue, Index);
+	dprintk((KERN_DEBUG "aac_rx_send - return 0\n"));
+	return 0;
+}
+
+/**
 *	aac_rx_init - initialize an i960 based AAC card
 *	@dev: device to configure
 *
@@ -368,13 +415,8 @@ int aac_rx_init(struct aac_dev *dev)
 	name     = dev->name;
 
 	/*
-	 *	Map in the registers from the adapter.
+	 *	Check to see if the board panic'd while booting.
 	 */
-	if((dev->regs.rx = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
-	{
-		printk(KERN_WARNING "aacraid: unable to map i960.\n" );
-		return -1;
-	}
 	/*
 	 *	Check to see if the board failed any self tests.
 	 */
@@ -426,6 +468,7 @@ int aac_rx_init(struct aac_dev *dev)
 	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
 	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
 	dev->a_ops.adapter_check_health = aac_rx_check_health;
+	dev->a_ops.adapter_send = aac_rx_send;
 
 	/*
 	 *	First clear out all interrupts.  Then enable the one's that we
@@ -437,15 +480,9 @@ int aac_rx_init(struct aac_dev *dev)
 
 	if (aac_init_adapter(dev) == NULL)
 		goto error_irq;
-	/*
-	 *	Start any kernel threads needed
-	 */
-	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
-	if(dev->thread_pid < 0)
-	{
-		printk(KERN_ERR "aacraid: Unable to create rx thread.\n");
-		goto error_kfree;
-	}
+	if (dev->new_comm_interface)
+		rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
+
 	/*
 	 *	Tell the adapter that all is configured, and it can start
 	 *	accepting requests
@@ -453,15 +490,11 @@ int aac_rx_init(struct aac_dev *dev)
 	aac_rx_start_adapter(dev);
 	return 0;
 
-error_kfree:
-	kfree(dev->queues);
-
 error_irq:
 	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
 	free_irq(dev->scsi_host_ptr->irq, (void *)dev);
 
 error_iounmap:
-	iounmap(dev->regs.rx);
 
 	return -1;
 }
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 3900abc5850d..8b9596209164 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -237,29 +237,16 @@ static void aac_sa_interrupt_adapter (struct aac_dev *dev)
 
 static void aac_sa_start_adapter(struct aac_dev *dev)
 {
-	u32 ret;
 	struct aac_init *init;
 	/*
 	 * Fill in the remaining pieces of the init.
 	 */
 	init = dev->init;
 	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
-
-	/*
-	 * Tell the adapter we are back and up and running so it will scan its command
-	 * queues and enable our interrupts
-	 */
-	dev->irq_mask = (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4);
-	/*
-	 *	First clear out all interrupts.  Then enable the one's that
-	 *	we can handle.
-	 */
-	sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
-	sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
 	/* We can only use a 32 bit address here */
 	sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
 			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
-			&ret, NULL, NULL, NULL, NULL);
+			NULL, NULL, NULL, NULL, NULL);
 }
 
 /**
@@ -314,15 +301,6 @@ int aac_sa_init(struct aac_dev *dev)
 	name = dev->name;
 
 	/*
-	 *	Map in the registers from the adapter.
-	 */
-
-	if((dev->regs.sa = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
-	{
-		printk(KERN_WARNING "aacraid: unable to map ARM.\n" );
-		goto error_iounmap;
-	}
-	/*
 	 *	Check to see if the board failed any self tests.
 	 */
 	if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) {
@@ -378,31 +356,17 @@ int aac_sa_init(struct aac_dev *dev)
 		goto error_irq;
 
 	/*
-	 *	Start any kernel threads needed
-	 */
-	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
-	if (dev->thread_pid < 0) {
-		printk(KERN_ERR "aacraid: Unable to create command thread.\n");
-		goto error_kfree;
-	}
-
-	/*
 	 *	Tell the adapter that all is configure, and it can start
 	 *	accepting requests
 	 */
 	aac_sa_start_adapter(dev);
 	return 0;
 
-
-error_kfree:
-	kfree(dev->queues);
-
 error_irq:
 	sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
 	free_irq(dev->scsi_host_ptr->irq, (void *)dev);
 
 error_iounmap:
-	iounmap(dev->regs.sa);
 
 	return -1;
 }