author     Mark Haverkamp <markh@linux-foundation.org>      2007-01-23 17:59:20 -0500
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2007-01-27 10:27:06 -0500
commit     28713324a0f3c055186ecec27239673c36ba1de5
tree       aa17c4441b0fda12f1d3a9a58a374a917666b781
parent     9cd065ab80d6c14c6693a93c8f47ef4cb80e770f
[SCSI] aacraid: rework communication support code
Received from Mark Salyzyn.

Replace all if/else communication transports with a platform function call.
This is in recognition of the need to migrate to up-and-coming transports.
Currently the Linux driver does not support two available communication
transports provided by our products; these will be added in future patches
and will expand the platform function set.

Signed-off-by: Mark Haverkamp <markh@linux-foundation.org>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
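For readers coming to this patch cold, the following is a minimal, self-contained
sketch of the dispatch pattern the driver is moving to: a table of function
pointers is filled in once per transport at init time, and callers invoke the
table entries instead of testing a flag in if/else blocks. All names here
(xmit_ops, my_dev, select_transport, and the *_deliver/*_enable helpers) are
invented for illustration; the real equivalents are struct adapter_ops,
aac_rx_select_comm() and friends in the diff below.

/* Sketch only -- illustrative names, not the aacraid driver's own code. */
#include <stdio.h>

struct my_dev;

struct xmit_ops {
	int  (*deliver)(struct my_dev *dev);     /* queue one command */
	void (*enable_int)(struct my_dev *dev);  /* unmask transport IRQs */
};

struct my_dev {
	struct xmit_ops ops;
};

static int  producer_deliver(struct my_dev *dev) { (void)dev; puts("producer: deliver"); return 0; }
static void producer_enable(struct my_dev *dev)  { (void)dev; puts("producer: enable"); }
static int  message_deliver(struct my_dev *dev)  { (void)dev; puts("message: deliver"); return 0; }
static void message_enable(struct my_dev *dev)   { (void)dev; puts("message: enable"); }

/* Pick a transport once; everything after this calls through dev->ops
 * unconditionally, with no per-call if/else on the transport type. */
static int select_transport(struct my_dev *dev, int use_message)
{
	if (use_message) {
		dev->ops.deliver    = message_deliver;
		dev->ops.enable_int = message_enable;
	} else {
		dev->ops.deliver    = producer_deliver;
		dev->ops.enable_int = producer_enable;
	}
	return 0;
}

int main(void)
{
	struct my_dev dev;

	select_transport(&dev, 1);      /* choose the "message" transport */
	dev.ops.enable_int(&dev);
	return dev.ops.deliver(&dev);
}

Adding a new transport then means adding one more set of helpers and one more
case to the selection function, without touching the hot-path callers.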
-rw-r--r--   drivers/scsi/aacraid/aacraid.h   |  27
-rw-r--r--   drivers/scsi/aacraid/comminit.c  |  14
-rw-r--r--   drivers/scsi/aacraid/commsup.c   |  40
-rw-r--r--   drivers/scsi/aacraid/rkt.c       |  64
-rw-r--r--   drivers/scsi/aacraid/rx.c        | 263
-rw-r--r--   drivers/scsi/aacraid/sa.c        |  33
6 files changed, 262 insertions, 179 deletions
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 4f8b4c53d435..813e68993aa6 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -5,6 +5,7 @@
5 | #define _nblank(x) #x | 5 | #define _nblank(x) #x |
6 | #define nblank(x) _nblank(x)[0] | 6 | #define nblank(x) _nblank(x)[0] |
7 | 7 | ||
8 | #include <linux/interrupt.h> | ||
8 | 9 | ||
9 | /*------------------------------------------------------------------------------ | 10 | /*------------------------------------------------------------------------------ |
10 | * D E F I N E S | 11 | * D E F I N E S |
@@ -488,13 +489,20 @@ struct fib;
488 | 489 | ||
489 | struct adapter_ops | 490 | struct adapter_ops |
490 | { | 491 | { |
492 | /* Low level operations */ | ||
491 | void (*adapter_interrupt)(struct aac_dev *dev); | 493 | void (*adapter_interrupt)(struct aac_dev *dev); |
492 | void (*adapter_notify)(struct aac_dev *dev, u32 event); | 494 | void (*adapter_notify)(struct aac_dev *dev, u32 event); |
493 | void (*adapter_disable_int)(struct aac_dev *dev); | 495 | void (*adapter_disable_int)(struct aac_dev *dev); |
496 | void (*adapter_enable_int)(struct aac_dev *dev); | ||
494 | int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); | 497 | int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); |
495 | int (*adapter_check_health)(struct aac_dev *dev); | 498 | int (*adapter_check_health)(struct aac_dev *dev); |
496 | int (*adapter_send)(struct fib * fib); | 499 | /* Transport operations */ |
497 | int (*adapter_ioremap)(struct aac_dev * dev, u32 size); | 500 | int (*adapter_ioremap)(struct aac_dev * dev, u32 size); |
501 | irqreturn_t (*adapter_intr)(int irq, void *dev_id); | ||
502 | /* Packet operations */ | ||
503 | int (*adapter_deliver)(struct fib * fib); | ||
504 | /* Administrative operations */ | ||
505 | int (*adapter_comm)(struct aac_dev * dev, int comm); | ||
498 | }; | 506 | }; |
499 | 507 | ||
500 | /* | 508 | /* |
@@ -1018,7 +1026,9 @@ struct aac_dev
1018 | u8 nondasd_support; | 1026 | u8 nondasd_support; |
1019 | u8 dac_support; | 1027 | u8 dac_support; |
1020 | u8 raid_scsi_mode; | 1028 | u8 raid_scsi_mode; |
1021 | u8 new_comm_interface; | 1029 | u8 comm_interface; |
1030 | # define AAC_COMM_PRODUCER 0 | ||
1031 | # define AAC_COMM_MESSAGE 1 | ||
1022 | /* macro side-effects BEWARE */ | 1032 | /* macro side-effects BEWARE */ |
1023 | # define raw_io_interface \ | 1033 | # define raw_io_interface \ |
1024 | init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) | 1034 | init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) |
@@ -1036,18 +1046,24 @@ struct aac_dev
1036 | #define aac_adapter_disable_int(dev) \ | 1046 | #define aac_adapter_disable_int(dev) \ |
1037 | (dev)->a_ops.adapter_disable_int(dev) | 1047 | (dev)->a_ops.adapter_disable_int(dev) |
1038 | 1048 | ||
1049 | #define aac_adapter_enable_int(dev) \ | ||
1050 | (dev)->a_ops.adapter_enable_int(dev) | ||
1051 | |||
1039 | #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ | 1052 | #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ |
1040 | (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) | 1053 | (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) |
1041 | 1054 | ||
1042 | #define aac_adapter_check_health(dev) \ | 1055 | #define aac_adapter_check_health(dev) \ |
1043 | (dev)->a_ops.adapter_check_health(dev) | 1056 | (dev)->a_ops.adapter_check_health(dev) |
1044 | 1057 | ||
1045 | #define aac_adapter_send(fib) \ | ||
1046 | ((fib)->dev)->a_ops.adapter_send(fib) | ||
1047 | |||
1048 | #define aac_adapter_ioremap(dev, size) \ | 1058 | #define aac_adapter_ioremap(dev, size) \ |
1049 | (dev)->a_ops.adapter_ioremap(dev, size) | 1059 | (dev)->a_ops.adapter_ioremap(dev, size) |
1050 | 1060 | ||
1061 | #define aac_adapter_deliver(fib) \ | ||
1062 | ((fib)->dev)->a_ops.adapter_deliver(fib) | ||
1063 | |||
1064 | #define aac_adapter_comm(dev,comm) \ | ||
1065 | (dev)->a_ops.adapter_comm(dev, comm) | ||
1066 | |||
1051 | #define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001) | 1067 | #define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001) |
1052 | 1068 | ||
1053 | /* | 1069 | /* |
@@ -1795,6 +1811,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
1795 | int aac_rx_init(struct aac_dev *dev); | 1811 | int aac_rx_init(struct aac_dev *dev); |
1796 | int aac_rkt_init(struct aac_dev *dev); | 1812 | int aac_rkt_init(struct aac_dev *dev); |
1797 | int aac_sa_init(struct aac_dev *dev); | 1813 | int aac_sa_init(struct aac_dev *dev); |
1814 | int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify); | ||
1798 | unsigned int aac_response_normal(struct aac_queue * q); | 1815 | unsigned int aac_response_normal(struct aac_queue * q); |
1799 | unsigned int aac_command_normal(struct aac_queue * q); | 1816 | unsigned int aac_command_normal(struct aac_queue * q); |
1800 | unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); | 1817 | unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); |
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 6d305b2f854e..df67ba686023 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -95,7 +95,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
95 | init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES); | 95 | init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES); |
96 | 96 | ||
97 | init->InitFlags = 0; | 97 | init->InitFlags = 0; |
98 | if (dev->new_comm_interface) { | 98 | if (dev->comm_interface == AAC_COMM_MESSAGE) { |
99 | init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); | 99 | init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); |
100 | dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); | 100 | dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); |
101 | } | 101 | } |
@@ -297,21 +297,23 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
297 | - sizeof(struct aac_fibhdr) | 297 | - sizeof(struct aac_fibhdr) |
298 | - sizeof(struct aac_write) + sizeof(struct sgentry)) | 298 | - sizeof(struct aac_write) + sizeof(struct sgentry)) |
299 | / sizeof(struct sgentry); | 299 | / sizeof(struct sgentry); |
300 | dev->new_comm_interface = 0; | 300 | dev->comm_interface = AAC_COMM_PRODUCER; |
301 | dev->raw_io_64 = 0; | 301 | dev->raw_io_64 = 0; |
302 | if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, | 302 | if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, |
303 | 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && | 303 | 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && |
304 | (status[0] == 0x00000001)) { | 304 | (status[0] == 0x00000001)) { |
305 | if (status[1] & AAC_OPT_NEW_COMM_64) | 305 | if (status[1] & AAC_OPT_NEW_COMM_64) |
306 | dev->raw_io_64 = 1; | 306 | dev->raw_io_64 = 1; |
307 | if (status[1] & AAC_OPT_NEW_COMM) | 307 | if (dev->a_ops.adapter_comm && |
308 | dev->new_comm_interface = dev->a_ops.adapter_send != 0; | 308 | (status[1] & AAC_OPT_NEW_COMM)) |
309 | if (dev->new_comm_interface && (status[2] > dev->base_size)) { | 309 | dev->comm_interface = AAC_COMM_MESSAGE; |
310 | if ((dev->comm_interface == AAC_COMM_MESSAGE) && | ||
311 | (status[2] > dev->base_size)) { | ||
310 | aac_adapter_ioremap(dev, 0); | 312 | aac_adapter_ioremap(dev, 0); |
311 | dev->base_size = status[2]; | 313 | dev->base_size = status[2]; |
312 | if (aac_adapter_ioremap(dev, status[2])) { | 314 | if (aac_adapter_ioremap(dev, status[2])) { |
313 | /* remap failed, go back ... */ | 315 | /* remap failed, go back ... */ |
314 | dev->new_comm_interface = 0; | 316 | dev->comm_interface = AAC_COMM_PRODUCER; |
315 | if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) { | 317 | if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) { |
316 | printk(KERN_WARNING | 318 | printk(KERN_WARNING |
317 | "aacraid: unable to map adapter.\n"); | 319 | "aacraid: unable to map adapter.\n"); |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 4893a6d06a33..1b97f60652ba 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -317,7 +317,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
317 | * success. | 317 | * success. |
318 | */ | 318 | */ |
319 | 319 | ||
320 | static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify) | 320 | int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify) |
321 | { | 321 | { |
322 | struct aac_entry * entry = NULL; | 322 | struct aac_entry * entry = NULL; |
323 | int map = 0; | 323 | int map = 0; |
@@ -387,7 +387,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
387 | { | 387 | { |
388 | struct aac_dev * dev = fibptr->dev; | 388 | struct aac_dev * dev = fibptr->dev; |
389 | struct hw_fib * hw_fib = fibptr->hw_fib; | 389 | struct hw_fib * hw_fib = fibptr->hw_fib; |
390 | struct aac_queue * q; | ||
391 | unsigned long flags = 0; | 390 | unsigned long flags = 0; |
392 | unsigned long qflags; | 391 | unsigned long qflags; |
393 | 392 | ||
@@ -469,38 +468,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
469 | 468 | ||
470 | if (!dev->queues) | 469 | if (!dev->queues) |
471 | return -EBUSY; | 470 | return -EBUSY; |
472 | q = &dev->queues->queue[AdapNormCmdQueue]; | ||
473 | 471 | ||
474 | if(wait) | 472 | if(wait) |
475 | spin_lock_irqsave(&fibptr->event_lock, flags); | 473 | spin_lock_irqsave(&fibptr->event_lock, flags); |
476 | spin_lock_irqsave(q->lock, qflags); | 474 | aac_adapter_deliver(fibptr); |
477 | if (dev->new_comm_interface) { | ||
478 | unsigned long count = 10000000L; /* 50 seconds */ | ||
479 | q->numpending++; | ||
480 | spin_unlock_irqrestore(q->lock, qflags); | ||
481 | while (aac_adapter_send(fibptr) != 0) { | ||
482 | if (--count == 0) { | ||
483 | if (wait) | ||
484 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | ||
485 | spin_lock_irqsave(q->lock, qflags); | ||
486 | q->numpending--; | ||
487 | spin_unlock_irqrestore(q->lock, qflags); | ||
488 | return -ETIMEDOUT; | ||
489 | } | ||
490 | udelay(5); | ||
491 | } | ||
492 | } else { | ||
493 | u32 index; | ||
494 | unsigned long nointr = 0; | ||
495 | aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr); | ||
496 | |||
497 | q->numpending++; | ||
498 | *(q->headers.producer) = cpu_to_le32(index + 1); | ||
499 | spin_unlock_irqrestore(q->lock, qflags); | ||
500 | dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index)); | ||
501 | if (!(nointr & aac_config.irq_mod)) | ||
502 | aac_adapter_notify(dev, AdapNormCmdQueue); | ||
503 | } | ||
504 | 475 | ||
505 | /* | 476 | /* |
506 | * If the caller wanted us to wait for response wait now. | 477 | * If the caller wanted us to wait for response wait now. |
@@ -520,6 +491,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
520 | while (down_trylock(&fibptr->event_wait)) { | 491 | while (down_trylock(&fibptr->event_wait)) { |
521 | int blink; | 492 | int blink; |
522 | if (--count == 0) { | 493 | if (--count == 0) { |
494 | struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; | ||
523 | spin_lock_irqsave(q->lock, qflags); | 495 | spin_lock_irqsave(q->lock, qflags); |
524 | q->numpending--; | 496 | q->numpending--; |
525 | spin_unlock_irqrestore(q->lock, qflags); | 497 | spin_unlock_irqrestore(q->lock, qflags); |
@@ -659,7 +631,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
659 | unsigned long qflags; | 631 | unsigned long qflags; |
660 | 632 | ||
661 | if (hw_fib->header.XferState == 0) { | 633 | if (hw_fib->header.XferState == 0) { |
662 | if (dev->new_comm_interface) | 634 | if (dev->comm_interface == AAC_COMM_MESSAGE) |
663 | kfree (hw_fib); | 635 | kfree (hw_fib); |
664 | return 0; | 636 | return 0; |
665 | } | 637 | } |
@@ -667,7 +639,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
667 | * If we plan to do anything check the structure type first. | 639 | * If we plan to do anything check the structure type first. |
668 | */ | 640 | */ |
669 | if ( hw_fib->header.StructType != FIB_MAGIC ) { | 641 | if ( hw_fib->header.StructType != FIB_MAGIC ) { |
670 | if (dev->new_comm_interface) | 642 | if (dev->comm_interface == AAC_COMM_MESSAGE) |
671 | kfree (hw_fib); | 643 | kfree (hw_fib); |
672 | return -EINVAL; | 644 | return -EINVAL; |
673 | } | 645 | } |
@@ -679,7 +651,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
679 | * send the completed cdb to the adapter. | 651 | * send the completed cdb to the adapter. |
680 | */ | 652 | */ |
681 | if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { | 653 | if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { |
682 | if (dev->new_comm_interface) { | 654 | if (dev->comm_interface == AAC_COMM_MESSAGE) { |
683 | kfree (hw_fib); | 655 | kfree (hw_fib); |
684 | } else { | 656 | } else { |
685 | u32 index; | 657 | u32 index; |
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 643f23b5ded8..d953c3fe998a 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -34,6 +34,40 @@
34 | 34 | ||
35 | #include "aacraid.h" | 35 | #include "aacraid.h" |
36 | 36 | ||
37 | #define AAC_NUM_IO_FIB_RKT (246 - AAC_NUM_MGT_FIB) | ||
38 | |||
39 | /** | ||
40 | * aac_rkt_select_comm - Select communications method | ||
41 | * @dev: Adapter | ||
42 | * @comm: communications method | ||
43 | */ | ||
44 | |||
45 | static int aac_rkt_select_comm(struct aac_dev *dev, int comm) | ||
46 | { | ||
47 | int retval; | ||
48 | extern int aac_rx_select_comm(struct aac_dev *dev, int comm); | ||
49 | retval = aac_rx_select_comm(dev, comm); | ||
50 | if (comm == AAC_COMM_MESSAGE) { | ||
51 | /* | ||
52 | * FIB Setup has already been done, but we can minimize the | ||
53 | * damage by at least ensuring the OS never issues more | ||
54 | * commands than we can handle. The Rocket adapters currently | ||
55 | * can only handle 246 commands and 8 AIFs at the same time, | ||
56 | * and in fact do notify us accordingly if we negotiate the | ||
57 | * FIB size. The problem that causes us to add this check is | ||
58 | * to ensure that we do not overdo it with the adapter when a | ||
59 | * hard coded FIB override is being utilized. This special | ||
60 | * case warrants this half baked, but convenient, check here. | ||
61 | */ | ||
62 | if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) { | ||
63 | dev->init->MaxIoCommands = | ||
64 | cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB); | ||
65 | dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT; | ||
66 | } | ||
67 | } | ||
68 | return retval; | ||
69 | } | ||
70 | |||
37 | /** | 71 | /** |
38 | * aac_rkt_ioremap | 72 | * aac_rkt_ioremap |
39 | * @size: mapping resize request | 73 | * @size: mapping resize request |
@@ -63,39 +97,13 @@ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
63 | 97 | ||
64 | int aac_rkt_init(struct aac_dev *dev) | 98 | int aac_rkt_init(struct aac_dev *dev) |
65 | { | 99 | { |
66 | int retval; | ||
67 | extern int _aac_rx_init(struct aac_dev *dev); | 100 | extern int _aac_rx_init(struct aac_dev *dev); |
68 | extern void aac_rx_start_adapter(struct aac_dev *dev); | ||
69 | 101 | ||
70 | /* | 102 | /* |
71 | * Fill in the function dispatch table. | 103 | * Fill in the function dispatch table. |
72 | */ | 104 | */ |
73 | dev->a_ops.adapter_ioremap = aac_rkt_ioremap; | 105 | dev->a_ops.adapter_ioremap = aac_rkt_ioremap; |
106 | dev->a_ops.adapter_comm = aac_rkt_select_comm; | ||
74 | 107 | ||
75 | retval = _aac_rx_init(dev); | 108 | return _aac_rx_init(dev); |
76 | if (retval) | ||
77 | return retval; | ||
78 | if (dev->new_comm_interface) { | ||
79 | /* | ||
80 | * FIB Setup has already been done, but we can minimize the | ||
81 | * damage by at least ensuring the OS never issues more | ||
82 | * commands than we can handle. The Rocket adapters currently | ||
83 | * can only handle 246 commands and 8 AIFs at the same time, | ||
84 | * and in fact do notify us accordingly if we negotiate the | ||
85 | * FIB size. The problem that causes us to add this check is | ||
86 | * to ensure that we do not overdo it with the adapter when a | ||
87 | * hard coded FIB override is being utilized. This special | ||
88 | * case warrants this half baked, but convenient, check here. | ||
89 | */ | ||
90 | if (dev->scsi_host_ptr->can_queue > (246 - AAC_NUM_MGT_FIB)) { | ||
91 | dev->init->MaxIoCommands = cpu_to_le32(246); | ||
92 | dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB; | ||
93 | } | ||
94 | } | ||
95 | /* | ||
96 | * Tell the adapter that all is configured, and it can start | ||
97 | * accepting requests | ||
98 | */ | ||
99 | aac_rx_start_adapter(dev); | ||
100 | return 0; | ||
101 | } | 109 | } |
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index dcc8b0ea7a9d..c632d9354a26 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -46,60 +46,60 @@
46 | 46 | ||
47 | #include "aacraid.h" | 47 | #include "aacraid.h" |
48 | 48 | ||
49 | static irqreturn_t aac_rx_intr(int irq, void *dev_id) | 49 | static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id) |
50 | { | 50 | { |
51 | struct aac_dev *dev = dev_id; | 51 | struct aac_dev *dev = dev_id; |
52 | unsigned long bellbits; | ||
53 | u8 intstat = rx_readb(dev, MUnit.OISR); | ||
52 | 54 | ||
53 | dprintk((KERN_DEBUG "aac_rx_intr(%d,%p)\n", irq, dev_id)); | 55 | /* |
54 | if (dev->new_comm_interface) { | 56 | * Read mask and invert because drawbridge is reversed. |
55 | u32 Index = rx_readl(dev, MUnit.OutboundQueue); | 57 | * This allows us to only service interrupts that have |
56 | if (Index == 0xFFFFFFFFL) | 58 | * been enabled. |
57 | Index = rx_readl(dev, MUnit.OutboundQueue); | 59 | * Check to see if this is our interrupt. If it isn't just return |
58 | if (Index != 0xFFFFFFFFL) { | 60 | */ |
59 | do { | 61 | if (intstat & ~(dev->OIMR)) { |
60 | if (aac_intr_normal(dev, Index)) { | 62 | bellbits = rx_readl(dev, OutboundDoorbellReg); |
61 | rx_writel(dev, MUnit.OutboundQueue, Index); | 63 | if (bellbits & DoorBellPrintfReady) { |
62 | rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); | 64 | aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5])); |
63 | } | 65 | rx_writel(dev, MUnit.ODR,DoorBellPrintfReady); |
64 | Index = rx_readl(dev, MUnit.OutboundQueue); | 66 | rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone); |
65 | } while (Index != 0xFFFFFFFFL); | ||
66 | return IRQ_HANDLED; | ||
67 | } | 67 | } |
68 | } else { | 68 | else if (bellbits & DoorBellAdapterNormCmdReady) { |
69 | unsigned long bellbits; | 69 | rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady); |
70 | u8 intstat; | 70 | aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); |
71 | intstat = rx_readb(dev, MUnit.OISR); | 71 | } |
72 | /* | 72 | else if (bellbits & DoorBellAdapterNormRespReady) { |
73 | * Read mask and invert because drawbridge is reversed. | 73 | rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady); |
74 | * This allows us to only service interrupts that have | 74 | aac_response_normal(&dev->queues->queue[HostNormRespQueue]); |
75 | * been enabled. | 75 | } |
76 | * Check to see if this is our interrupt. If it isn't just return | 76 | else if (bellbits & DoorBellAdapterNormCmdNotFull) { |
77 | */ | 77 | rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); |
78 | if (intstat & ~(dev->OIMR)) | ||
79 | { | ||
80 | bellbits = rx_readl(dev, OutboundDoorbellReg); | ||
81 | if (bellbits & DoorBellPrintfReady) { | ||
82 | aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5])); | ||
83 | rx_writel(dev, MUnit.ODR,DoorBellPrintfReady); | ||
84 | rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone); | ||
85 | } | ||
86 | else if (bellbits & DoorBellAdapterNormCmdReady) { | ||
87 | rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady); | ||
88 | aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); | ||
89 | } | ||
90 | else if (bellbits & DoorBellAdapterNormRespReady) { | ||
91 | rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady); | ||
92 | aac_response_normal(&dev->queues->queue[HostNormRespQueue]); | ||
93 | } | ||
94 | else if (bellbits & DoorBellAdapterNormCmdNotFull) { | ||
95 | rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); | ||
96 | } | ||
97 | else if (bellbits & DoorBellAdapterNormRespNotFull) { | ||
98 | rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); | ||
99 | rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull); | ||
100 | } | ||
101 | return IRQ_HANDLED; | ||
102 | } | 78 | } |
79 | else if (bellbits & DoorBellAdapterNormRespNotFull) { | ||
80 | rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); | ||
81 | rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull); | ||
82 | } | ||
83 | return IRQ_HANDLED; | ||
84 | } | ||
85 | return IRQ_NONE; | ||
86 | } | ||
87 | |||
88 | static irqreturn_t aac_rx_intr_message(int irq, void *dev_id) | ||
89 | { | ||
90 | struct aac_dev *dev = dev_id; | ||
91 | u32 Index = rx_readl(dev, MUnit.OutboundQueue); | ||
92 | if (Index == 0xFFFFFFFFL) | ||
93 | Index = rx_readl(dev, MUnit.OutboundQueue); | ||
94 | if (Index != 0xFFFFFFFFL) { | ||
95 | do { | ||
96 | if (aac_intr_normal(dev, Index)) { | ||
97 | rx_writel(dev, MUnit.OutboundQueue, Index); | ||
98 | rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); | ||
99 | } | ||
100 | Index = rx_readl(dev, MUnit.OutboundQueue); | ||
101 | } while (Index != 0xFFFFFFFFL); | ||
102 | return IRQ_HANDLED; | ||
103 | } | 103 | } |
104 | return IRQ_NONE; | 104 | return IRQ_NONE; |
105 | } | 105 | } |
@@ -115,6 +115,26 @@ static void aac_rx_disable_interrupt(struct aac_dev *dev)
115 | } | 115 | } |
116 | 116 | ||
117 | /** | 117 | /** |
118 | * aac_rx_enable_interrupt_producer - Enable interrupts | ||
119 | * @dev: Adapter | ||
120 | */ | ||
121 | |||
122 | static void aac_rx_enable_interrupt_producer(struct aac_dev *dev) | ||
123 | { | ||
124 | rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * aac_rx_enable_interrupt_message - Enable interrupts | ||
129 | * @dev: Adapter | ||
130 | */ | ||
131 | |||
132 | static void aac_rx_enable_interrupt_message(struct aac_dev *dev) | ||
133 | { | ||
134 | rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); | ||
135 | } | ||
136 | |||
137 | /** | ||
118 | * rx_sync_cmd - send a command and wait | 138 | * rx_sync_cmd - send a command and wait |
119 | * @dev: Adapter | 139 | * @dev: Adapter |
120 | * @command: Command to execute | 140 | * @command: Command to execute |
@@ -189,10 +209,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
189 | /* | 209 | /* |
190 | * Restore interrupt mask even though we timed out | 210 | * Restore interrupt mask even though we timed out |
191 | */ | 211 | */ |
192 | if (dev->new_comm_interface) | 212 | aac_adapter_enable_int(dev); |
193 | rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); | ||
194 | else | ||
195 | rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); | ||
196 | return -ETIMEDOUT; | 213 | return -ETIMEDOUT; |
197 | } | 214 | } |
198 | /* | 215 | /* |
@@ -215,10 +232,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
215 | /* | 232 | /* |
216 | * Restore interrupt mask | 233 | * Restore interrupt mask |
217 | */ | 234 | */ |
218 | if (dev->new_comm_interface) | 235 | aac_adapter_enable_int(dev); |
219 | rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); | ||
220 | else | ||
221 | rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); | ||
222 | return 0; | 236 | return 0; |
223 | 237 | ||
224 | } | 238 | } |
@@ -360,35 +374,72 @@ static int aac_rx_check_health(struct aac_dev *dev)
360 | } | 374 | } |
361 | 375 | ||
362 | /** | 376 | /** |
363 | * aac_rx_send | 377 | * aac_rx_deliver_producer |
364 | * @fib: fib to issue | 378 | * @fib: fib to issue |
365 | * | 379 | * |
366 | * Will send a fib, returning 0 if successful. | 380 | * Will send a fib, returning 0 if successful. |
367 | */ | 381 | */ |
368 | static int aac_rx_send(struct fib * fib) | 382 | static int aac_rx_deliver_producer(struct fib * fib) |
369 | { | 383 | { |
370 | u64 addr = fib->hw_fib_pa; | ||
371 | struct aac_dev *dev = fib->dev; | 384 | struct aac_dev *dev = fib->dev; |
372 | volatile void __iomem *device = dev->regs.rx; | 385 | struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; |
386 | unsigned long qflags; | ||
373 | u32 Index; | 387 | u32 Index; |
388 | unsigned long nointr = 0; | ||
374 | 389 | ||
375 | dprintk((KERN_DEBUG "%p->aac_rx_send(%p->%llx)\n", dev, fib, addr)); | 390 | spin_lock_irqsave(q->lock, qflags); |
376 | Index = rx_readl(dev, MUnit.InboundQueue); | 391 | aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib, 1, fib, &nointr); |
377 | if (Index == 0xFFFFFFFFL) | 392 | |
393 | q->numpending++; | ||
394 | *(q->headers.producer) = cpu_to_le32(Index + 1); | ||
395 | spin_unlock_irqrestore(q->lock, qflags); | ||
396 | if (!(nointr & aac_config.irq_mod)) | ||
397 | aac_adapter_notify(dev, AdapNormCmdQueue); | ||
398 | |||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | /** | ||
403 | * aac_rx_deliver_message | ||
404 | * @fib: fib to issue | ||
405 | * | ||
406 | * Will send a fib, returning 0 if successful. | ||
407 | */ | ||
408 | static int aac_rx_deliver_message(struct fib * fib) | ||
409 | { | ||
410 | struct aac_dev *dev = fib->dev; | ||
411 | struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; | ||
412 | unsigned long qflags; | ||
413 | u32 Index; | ||
414 | u64 addr; | ||
415 | volatile void __iomem *device; | ||
416 | |||
417 | unsigned long count = 10000000L; /* 50 seconds */ | ||
418 | spin_lock_irqsave(q->lock, qflags); | ||
419 | q->numpending++; | ||
420 | spin_unlock_irqrestore(q->lock, qflags); | ||
421 | for(;;) { | ||
378 | Index = rx_readl(dev, MUnit.InboundQueue); | 422 | Index = rx_readl(dev, MUnit.InboundQueue); |
379 | dprintk((KERN_DEBUG "Index = 0x%x\n", Index)); | 423 | if (Index == 0xFFFFFFFFL) |
380 | if (Index == 0xFFFFFFFFL) | 424 | Index = rx_readl(dev, MUnit.InboundQueue); |
381 | return Index; | 425 | if (Index != 0xFFFFFFFFL) |
426 | break; | ||
427 | if (--count == 0) { | ||
428 | spin_lock_irqsave(q->lock, qflags); | ||
429 | q->numpending--; | ||
430 | spin_unlock_irqrestore(q->lock, qflags); | ||
431 | return -ETIMEDOUT; | ||
432 | } | ||
433 | udelay(5); | ||
434 | } | ||
382 | device = dev->base + Index; | 435 | device = dev->base + Index; |
383 | dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff), | 436 | addr = fib->hw_fib_pa; |
384 | (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size))); | ||
385 | writel((u32)(addr & 0xffffffff), device); | 437 | writel((u32)(addr & 0xffffffff), device); |
386 | device += sizeof(u32); | 438 | device += sizeof(u32); |
387 | writel((u32)(addr >> 32), device); | 439 | writel((u32)(addr >> 32), device); |
388 | device += sizeof(u32); | 440 | device += sizeof(u32); |
389 | writel(le16_to_cpu(fib->hw_fib->header.Size), device); | 441 | writel(le16_to_cpu(fib->hw_fib->header.Size), device); |
390 | rx_writel(dev, MUnit.InboundQueue, Index); | 442 | rx_writel(dev, MUnit.InboundQueue, Index); |
391 | dprintk((KERN_DEBUG "aac_rx_send - return 0\n")); | ||
392 | return 0; | 443 | return 0; |
393 | } | 444 | } |
394 | 445 | ||
@@ -430,6 +481,31 @@ static int aac_rx_restart_adapter(struct aac_dev *dev)
430 | } | 481 | } |
431 | 482 | ||
432 | /** | 483 | /** |
484 | * aac_rx_select_comm - Select communications method | ||
485 | * @dev: Adapter | ||
486 | * @comm: communications method | ||
487 | */ | ||
488 | |||
489 | int aac_rx_select_comm(struct aac_dev *dev, int comm) | ||
490 | { | ||
491 | switch (comm) { | ||
492 | case AAC_COMM_PRODUCER: | ||
493 | dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer; | ||
494 | dev->a_ops.adapter_intr = aac_rx_intr_producer; | ||
495 | dev->a_ops.adapter_deliver = aac_rx_deliver_producer; | ||
496 | break; | ||
497 | case AAC_COMM_MESSAGE: | ||
498 | dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message; | ||
499 | dev->a_ops.adapter_intr = aac_rx_intr_message; | ||
500 | dev->a_ops.adapter_deliver = aac_rx_deliver_message; | ||
501 | break; | ||
502 | default: | ||
503 | return 1; | ||
504 | } | ||
505 | return 0; | ||
506 | } | ||
507 | |||
508 | /** | ||
433 | * aac_rx_init - initialize an i960 based AAC card | 509 | * aac_rx_init - initialize an i960 based AAC card |
434 | * @dev: device to configure | 510 | * @dev: device to configure |
435 | * | 511 | * |
@@ -489,40 +565,42 @@ int _aac_rx_init(struct aac_dev *dev)
489 | } | 565 | } |
490 | msleep(1); | 566 | msleep(1); |
491 | } | 567 | } |
492 | if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev)<0) | ||
493 | { | ||
494 | printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance); | ||
495 | goto error_iounmap; | ||
496 | } | ||
497 | /* | 568 | /* |
498 | * Fill in the function dispatch table. | 569 | * Fill in the common function dispatch table. |
499 | */ | 570 | */ |
500 | dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter; | 571 | dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter; |
501 | dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt; | 572 | dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt; |
502 | dev->a_ops.adapter_notify = aac_rx_notify_adapter; | 573 | dev->a_ops.adapter_notify = aac_rx_notify_adapter; |
503 | dev->a_ops.adapter_sync_cmd = rx_sync_cmd; | 574 | dev->a_ops.adapter_sync_cmd = rx_sync_cmd; |
504 | dev->a_ops.adapter_check_health = aac_rx_check_health; | 575 | dev->a_ops.adapter_check_health = aac_rx_check_health; |
505 | dev->a_ops.adapter_send = aac_rx_send; | ||
506 | 576 | ||
507 | /* | 577 | /* |
508 | * First clear out all interrupts. Then enable the one's that we | 578 | * First clear out all interrupts. Then enable the one's that we |
509 | * can handle. | 579 | * can handle. |
510 | */ | 580 | */ |
511 | rx_writeb(dev, MUnit.OIMR, 0xff); | 581 | aac_adapter_comm(dev, AAC_COMM_PRODUCER); |
582 | aac_adapter_disable_int(dev); | ||
512 | rx_writel(dev, MUnit.ODR, 0xffffffff); | 583 | rx_writel(dev, MUnit.ODR, 0xffffffff); |
513 | rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); | 584 | aac_adapter_enable_int(dev); |
514 | 585 | ||
515 | if (aac_init_adapter(dev) == NULL) | 586 | if (aac_init_adapter(dev) == NULL) |
516 | goto error_irq; | 587 | goto error_iounmap; |
517 | if (dev->new_comm_interface) | 588 | aac_adapter_comm(dev, dev->comm_interface); |
518 | rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); | 589 | if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, |
590 | IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { | ||
591 | printk(KERN_ERR "%s%d: Interrupt unavailable.\n", | ||
592 | name, instance); | ||
593 | goto error_iounmap; | ||
594 | } | ||
595 | aac_adapter_enable_int(dev); | ||
596 | /* | ||
597 | * Tell the adapter that all is configured, and it can | ||
598 | * start accepting requests | ||
599 | */ | ||
600 | aac_rx_start_adapter(dev); | ||
519 | 601 | ||
520 | return 0; | 602 | return 0; |
521 | 603 | ||
522 | error_irq: | ||
523 | rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff); | ||
524 | free_irq(dev->scsi_host_ptr->irq, (void *)dev); | ||
525 | |||
526 | error_iounmap: | 604 | error_iounmap: |
527 | 605 | ||
528 | return -1; | 606 | return -1; |
@@ -530,20 +608,11 @@ error_iounmap:
530 | 608 | ||
531 | int aac_rx_init(struct aac_dev *dev) | 609 | int aac_rx_init(struct aac_dev *dev) |
532 | { | 610 | { |
533 | int retval; | ||
534 | |||
535 | /* | 611 | /* |
536 | * Fill in the function dispatch table. | 612 | * Fill in the function dispatch table. |
537 | */ | 613 | */ |
538 | dev->a_ops.adapter_ioremap = aac_rx_ioremap; | 614 | dev->a_ops.adapter_ioremap = aac_rx_ioremap; |
615 | dev->a_ops.adapter_comm = aac_rx_select_comm; | ||
539 | 616 | ||
540 | retval = _aac_rx_init(dev); | 617 | return _aac_rx_init(dev); |
541 | if (!retval) { | ||
542 | /* | ||
543 | * Tell the adapter that all is configured, and it can | ||
544 | * start accepting requests | ||
545 | */ | ||
546 | aac_rx_start_adapter(dev); | ||
547 | } | ||
548 | return retval; | ||
549 | } | 618 | } |
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 511b0a938fb1..8535db068c2f 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -92,6 +92,17 @@ static void aac_sa_disable_interrupt (struct aac_dev *dev)
92 | } | 92 | } |
93 | 93 | ||
94 | /** | 94 | /** |
95 | * aac_sa_enable_interrupt - enable interrupt | ||
96 | * @dev: Which adapter to enable. | ||
97 | */ | ||
98 | |||
99 | static void aac_sa_enable_interrupt (struct aac_dev *dev) | ||
100 | { | ||
101 | sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | | ||
102 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4)); | ||
103 | } | ||
104 | |||
105 | /** | ||
95 | * aac_sa_notify_adapter - handle adapter notification | 106 | * aac_sa_notify_adapter - handle adapter notification |
96 | * @dev: Adapter that notification is for | 107 | * @dev: Adapter that notification is for |
97 | * @event: Event to notidy | 108 | * @event: Event to notidy |
@@ -347,32 +358,36 @@ int aac_sa_init(struct aac_dev *dev)
347 | msleep(1); | 358 | msleep(1); |
348 | } | 359 | } |
349 | 360 | ||
350 | if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev ) < 0) { | ||
351 | printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance); | ||
352 | goto error_iounmap; | ||
353 | } | ||
354 | |||
355 | /* | 361 | /* |
356 | * Fill in the function dispatch table. | 362 | * Fill in the function dispatch table. |
357 | */ | 363 | */ |
358 | 364 | ||
359 | dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter; | 365 | dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter; |
360 | dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt; | 366 | dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt; |
367 | dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt; | ||
361 | dev->a_ops.adapter_notify = aac_sa_notify_adapter; | 368 | dev->a_ops.adapter_notify = aac_sa_notify_adapter; |
362 | dev->a_ops.adapter_sync_cmd = sa_sync_cmd; | 369 | dev->a_ops.adapter_sync_cmd = sa_sync_cmd; |
363 | dev->a_ops.adapter_check_health = aac_sa_check_health; | 370 | dev->a_ops.adapter_check_health = aac_sa_check_health; |
371 | dev->a_ops.adapter_intr = aac_sa_intr; | ||
364 | dev->a_ops.adapter_ioremap = aac_sa_ioremap; | 372 | dev->a_ops.adapter_ioremap = aac_sa_ioremap; |
365 | 373 | ||
366 | /* | 374 | /* |
367 | * First clear out all interrupts. Then enable the one's that | 375 | * First clear out all interrupts. Then enable the one's that |
368 | * we can handle. | 376 | * we can handle. |
369 | */ | 377 | */ |
370 | sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff); | 378 | aac_adapter_disable_int(dev); |
371 | sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | | 379 | aac_adapter_enable_int(dev); |
372 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4)); | ||
373 | 380 | ||
374 | if(aac_init_adapter(dev) == NULL) | 381 | if(aac_init_adapter(dev) == NULL) |
375 | goto error_irq; | 382 | goto error_irq; |
383 | if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, | ||
384 | IRQF_SHARED|IRQF_DISABLED, | ||
385 | "aacraid", (void *)dev ) < 0) { | ||
386 | printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", | ||
387 | name, instance); | ||
388 | goto error_iounmap; | ||
389 | } | ||
390 | aac_adapter_enable_int(dev); | ||
376 | 391 | ||
377 | /* | 392 | /* |
378 | * Tell the adapter that all is configure, and it can start | 393 | * Tell the adapter that all is configure, and it can start |
@@ -382,7 +397,7 @@ int aac_sa_init(struct aac_dev *dev)
382 | return 0; | 397 | return 0; |
383 | 398 | ||
384 | error_irq: | 399 | error_irq: |
385 | sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff); | 400 | aac_sa_disable_interrupt(dev); |
386 | free_irq(dev->scsi_host_ptr->irq, (void *)dev); | 401 | free_irq(dev->scsi_host_ptr->irq, (void *)dev); |
387 | 402 | ||
388 | error_iounmap: | 403 | error_iounmap: |