aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/aacraid/rx.c
diff options
context:
space:
mode:
authorMark Haverkamp <markh@linux-foundation.org>2007-01-23 17:59:20 -0500
committerJames Bottomley <jejb@mulgrave.il.steeleye.com>2007-01-27 10:27:06 -0500
commit28713324a0f3c055186ecec27239673c36ba1de5 (patch)
treeaa17c4441b0fda12f1d3a9a58a374a917666b781 /drivers/scsi/aacraid/rx.c
parent9cd065ab80d6c14c6693a93c8f47ef4cb80e770f (diff)
[SCSI] aacraid: rework communication support code
Received from Mark Salyzyn. Replace all if/else communication transports with a platform function call. This is in recognition of the need to migrate to up-and-coming transports. Currently the Linux driver does not support two available communication transports provided by our products; these will be added in future patches, and will expand the platform function set. Signed-off-by: Mark Haverkamp <markh@linux-foundation.org> Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/scsi/aacraid/rx.c')
-rw-r--r--drivers/scsi/aacraid/rx.c263
1 file changed, 166 insertions, 97 deletions
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index dcc8b0ea7a9d..c632d9354a26 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -46,60 +46,60 @@
46 46
47#include "aacraid.h" 47#include "aacraid.h"
48 48
49static irqreturn_t aac_rx_intr(int irq, void *dev_id) 49static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
50{ 50{
51 struct aac_dev *dev = dev_id; 51 struct aac_dev *dev = dev_id;
52 unsigned long bellbits;
53 u8 intstat = rx_readb(dev, MUnit.OISR);
52 54
53 dprintk((KERN_DEBUG "aac_rx_intr(%d,%p)\n", irq, dev_id)); 55 /*
54 if (dev->new_comm_interface) { 56 * Read mask and invert because drawbridge is reversed.
55 u32 Index = rx_readl(dev, MUnit.OutboundQueue); 57 * This allows us to only service interrupts that have
56 if (Index == 0xFFFFFFFFL) 58 * been enabled.
57 Index = rx_readl(dev, MUnit.OutboundQueue); 59 * Check to see if this is our interrupt. If it isn't just return
58 if (Index != 0xFFFFFFFFL) { 60 */
59 do { 61 if (intstat & ~(dev->OIMR)) {
60 if (aac_intr_normal(dev, Index)) { 62 bellbits = rx_readl(dev, OutboundDoorbellReg);
61 rx_writel(dev, MUnit.OutboundQueue, Index); 63 if (bellbits & DoorBellPrintfReady) {
62 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); 64 aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
63 } 65 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
64 Index = rx_readl(dev, MUnit.OutboundQueue); 66 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
65 } while (Index != 0xFFFFFFFFL);
66 return IRQ_HANDLED;
67 } 67 }
68 } else { 68 else if (bellbits & DoorBellAdapterNormCmdReady) {
69 unsigned long bellbits; 69 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
70 u8 intstat; 70 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
71 intstat = rx_readb(dev, MUnit.OISR); 71 }
72 /* 72 else if (bellbits & DoorBellAdapterNormRespReady) {
73 * Read mask and invert because drawbridge is reversed. 73 rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
74 * This allows us to only service interrupts that have 74 aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
75 * been enabled. 75 }
76 * Check to see if this is our interrupt. If it isn't just return 76 else if (bellbits & DoorBellAdapterNormCmdNotFull) {
77 */ 77 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
78 if (intstat & ~(dev->OIMR))
79 {
80 bellbits = rx_readl(dev, OutboundDoorbellReg);
81 if (bellbits & DoorBellPrintfReady) {
82 aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
83 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
84 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
85 }
86 else if (bellbits & DoorBellAdapterNormCmdReady) {
87 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
88 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
89 }
90 else if (bellbits & DoorBellAdapterNormRespReady) {
91 rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
92 aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
93 }
94 else if (bellbits & DoorBellAdapterNormCmdNotFull) {
95 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
96 }
97 else if (bellbits & DoorBellAdapterNormRespNotFull) {
98 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
99 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
100 }
101 return IRQ_HANDLED;
102 } 78 }
79 else if (bellbits & DoorBellAdapterNormRespNotFull) {
80 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
81 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
82 }
83 return IRQ_HANDLED;
84 }
85 return IRQ_NONE;
86}
87
88static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
89{
90 struct aac_dev *dev = dev_id;
91 u32 Index = rx_readl(dev, MUnit.OutboundQueue);
92 if (Index == 0xFFFFFFFFL)
93 Index = rx_readl(dev, MUnit.OutboundQueue);
94 if (Index != 0xFFFFFFFFL) {
95 do {
96 if (aac_intr_normal(dev, Index)) {
97 rx_writel(dev, MUnit.OutboundQueue, Index);
98 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
99 }
100 Index = rx_readl(dev, MUnit.OutboundQueue);
101 } while (Index != 0xFFFFFFFFL);
102 return IRQ_HANDLED;
103 } 103 }
104 return IRQ_NONE; 104 return IRQ_NONE;
105} 105}
@@ -115,6 +115,26 @@ static void aac_rx_disable_interrupt(struct aac_dev *dev)
115} 115}
116 116
117/** 117/**
118 * aac_rx_enable_interrupt_producer - Enable interrupts
119 * @dev: Adapter
120 */
121
122static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
123{
124 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
125}
126
127/**
128 * aac_rx_enable_interrupt_message - Enable interrupts
129 * @dev: Adapter
130 */
131
132static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
133{
134 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
135}
136
137/**
118 * rx_sync_cmd - send a command and wait 138 * rx_sync_cmd - send a command and wait
119 * @dev: Adapter 139 * @dev: Adapter
120 * @command: Command to execute 140 * @command: Command to execute
@@ -189,10 +209,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
189 /* 209 /*
190 * Restore interrupt mask even though we timed out 210 * Restore interrupt mask even though we timed out
191 */ 211 */
192 if (dev->new_comm_interface) 212 aac_adapter_enable_int(dev);
193 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
194 else
195 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
196 return -ETIMEDOUT; 213 return -ETIMEDOUT;
197 } 214 }
198 /* 215 /*
@@ -215,10 +232,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
215 /* 232 /*
216 * Restore interrupt mask 233 * Restore interrupt mask
217 */ 234 */
218 if (dev->new_comm_interface) 235 aac_adapter_enable_int(dev);
219 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
220 else
221 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
222 return 0; 236 return 0;
223 237
224} 238}
@@ -360,35 +374,72 @@ static int aac_rx_check_health(struct aac_dev *dev)
360} 374}
361 375
362/** 376/**
363 * aac_rx_send 377 * aac_rx_deliver_producer
364 * @fib: fib to issue 378 * @fib: fib to issue
365 * 379 *
366 * Will send a fib, returning 0 if successful. 380 * Will send a fib, returning 0 if successful.
367 */ 381 */
368static int aac_rx_send(struct fib * fib) 382static int aac_rx_deliver_producer(struct fib * fib)
369{ 383{
370 u64 addr = fib->hw_fib_pa;
371 struct aac_dev *dev = fib->dev; 384 struct aac_dev *dev = fib->dev;
372 volatile void __iomem *device = dev->regs.rx; 385 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
386 unsigned long qflags;
373 u32 Index; 387 u32 Index;
388 unsigned long nointr = 0;
374 389
375 dprintk((KERN_DEBUG "%p->aac_rx_send(%p->%llx)\n", dev, fib, addr)); 390 spin_lock_irqsave(q->lock, qflags);
376 Index = rx_readl(dev, MUnit.InboundQueue); 391 aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib, 1, fib, &nointr);
377 if (Index == 0xFFFFFFFFL) 392
393 q->numpending++;
394 *(q->headers.producer) = cpu_to_le32(Index + 1);
395 spin_unlock_irqrestore(q->lock, qflags);
396 if (!(nointr & aac_config.irq_mod))
397 aac_adapter_notify(dev, AdapNormCmdQueue);
398
399 return 0;
400}
401
402/**
403 * aac_rx_deliver_message
404 * @fib: fib to issue
405 *
406 * Will send a fib, returning 0 if successful.
407 */
408static int aac_rx_deliver_message(struct fib * fib)
409{
410 struct aac_dev *dev = fib->dev;
411 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
412 unsigned long qflags;
413 u32 Index;
414 u64 addr;
415 volatile void __iomem *device;
416
417 unsigned long count = 10000000L; /* 50 seconds */
418 spin_lock_irqsave(q->lock, qflags);
419 q->numpending++;
420 spin_unlock_irqrestore(q->lock, qflags);
421 for(;;) {
378 Index = rx_readl(dev, MUnit.InboundQueue); 422 Index = rx_readl(dev, MUnit.InboundQueue);
379 dprintk((KERN_DEBUG "Index = 0x%x\n", Index)); 423 if (Index == 0xFFFFFFFFL)
380 if (Index == 0xFFFFFFFFL) 424 Index = rx_readl(dev, MUnit.InboundQueue);
381 return Index; 425 if (Index != 0xFFFFFFFFL)
426 break;
427 if (--count == 0) {
428 spin_lock_irqsave(q->lock, qflags);
429 q->numpending--;
430 spin_unlock_irqrestore(q->lock, qflags);
431 return -ETIMEDOUT;
432 }
433 udelay(5);
434 }
382 device = dev->base + Index; 435 device = dev->base + Index;
383 dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff), 436 addr = fib->hw_fib_pa;
384 (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
385 writel((u32)(addr & 0xffffffff), device); 437 writel((u32)(addr & 0xffffffff), device);
386 device += sizeof(u32); 438 device += sizeof(u32);
387 writel((u32)(addr >> 32), device); 439 writel((u32)(addr >> 32), device);
388 device += sizeof(u32); 440 device += sizeof(u32);
389 writel(le16_to_cpu(fib->hw_fib->header.Size), device); 441 writel(le16_to_cpu(fib->hw_fib->header.Size), device);
390 rx_writel(dev, MUnit.InboundQueue, Index); 442 rx_writel(dev, MUnit.InboundQueue, Index);
391 dprintk((KERN_DEBUG "aac_rx_send - return 0\n"));
392 return 0; 443 return 0;
393} 444}
394 445
@@ -430,6 +481,31 @@ static int aac_rx_restart_adapter(struct aac_dev *dev)
430} 481}
431 482
432/** 483/**
484 * aac_rx_select_comm - Select communications method
485 * @dev: Adapter
486 * @comm: communications method
487 */
488
489int aac_rx_select_comm(struct aac_dev *dev, int comm)
490{
491 switch (comm) {
492 case AAC_COMM_PRODUCER:
493 dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
494 dev->a_ops.adapter_intr = aac_rx_intr_producer;
495 dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
496 break;
497 case AAC_COMM_MESSAGE:
498 dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
499 dev->a_ops.adapter_intr = aac_rx_intr_message;
500 dev->a_ops.adapter_deliver = aac_rx_deliver_message;
501 break;
502 default:
503 return 1;
504 }
505 return 0;
506}
507
508/**
433 * aac_rx_init - initialize an i960 based AAC card 509 * aac_rx_init - initialize an i960 based AAC card
434 * @dev: device to configure 510 * @dev: device to configure
435 * 511 *
@@ -489,40 +565,42 @@ int _aac_rx_init(struct aac_dev *dev)
489 } 565 }
490 msleep(1); 566 msleep(1);
491 } 567 }
492 if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev)<0)
493 {
494 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
495 goto error_iounmap;
496 }
497 /* 568 /*
498 * Fill in the function dispatch table. 569 * Fill in the common function dispatch table.
499 */ 570 */
500 dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter; 571 dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
501 dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt; 572 dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
502 dev->a_ops.adapter_notify = aac_rx_notify_adapter; 573 dev->a_ops.adapter_notify = aac_rx_notify_adapter;
503 dev->a_ops.adapter_sync_cmd = rx_sync_cmd; 574 dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
504 dev->a_ops.adapter_check_health = aac_rx_check_health; 575 dev->a_ops.adapter_check_health = aac_rx_check_health;
505 dev->a_ops.adapter_send = aac_rx_send;
506 576
507 /* 577 /*
508 * First clear out all interrupts. Then enable the one's that we 578 * First clear out all interrupts. Then enable the one's that we
509 * can handle. 579 * can handle.
510 */ 580 */
511 rx_writeb(dev, MUnit.OIMR, 0xff); 581 aac_adapter_comm(dev, AAC_COMM_PRODUCER);
582 aac_adapter_disable_int(dev);
512 rx_writel(dev, MUnit.ODR, 0xffffffff); 583 rx_writel(dev, MUnit.ODR, 0xffffffff);
513 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); 584 aac_adapter_enable_int(dev);
514 585
515 if (aac_init_adapter(dev) == NULL) 586 if (aac_init_adapter(dev) == NULL)
516 goto error_irq; 587 goto error_iounmap;
517 if (dev->new_comm_interface) 588 aac_adapter_comm(dev, dev->comm_interface);
518 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); 589 if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr,
590 IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
591 printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
592 name, instance);
593 goto error_iounmap;
594 }
595 aac_adapter_enable_int(dev);
596 /*
597 * Tell the adapter that all is configured, and it can
598 * start accepting requests
599 */
600 aac_rx_start_adapter(dev);
519 601
520 return 0; 602 return 0;
521 603
522error_irq:
523 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
524 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
525
526error_iounmap: 604error_iounmap:
527 605
528 return -1; 606 return -1;
@@ -530,20 +608,11 @@ error_iounmap:
530 608
531int aac_rx_init(struct aac_dev *dev) 609int aac_rx_init(struct aac_dev *dev)
532{ 610{
533 int retval;
534
535 /* 611 /*
536 * Fill in the function dispatch table. 612 * Fill in the function dispatch table.
537 */ 613 */
538 dev->a_ops.adapter_ioremap = aac_rx_ioremap; 614 dev->a_ops.adapter_ioremap = aac_rx_ioremap;
615 dev->a_ops.adapter_comm = aac_rx_select_comm;
539 616
540 retval = _aac_rx_init(dev); 617 return _aac_rx_init(dev);
541 if (!retval) {
542 /*
543 * Tell the adapter that all is configured, and it can
544 * start accepting requests
545 */
546 aac_rx_start_adapter(dev);
547 }
548 return retval;
549} 618}