Diffstat (limited to 'drivers/scsi/aacraid/commsup.c')
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 581
1 file changed, 433 insertions(+), 148 deletions(-)
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a1d303f03480..e4d543a474ae 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -39,7 +39,9 @@
 #include <linux/completion.h>
 #include <linux/blkdev.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
 #include <asm/semaphore.h>
+#include <asm/delay.h>
 
 #include "aacraid.h"
 
@@ -269,40 +271,22 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
 	/* Interrupt Moderation, only interrupt for first two entries */
 	if (idx != le32_to_cpu(*(q->headers.consumer))) {
 		if (--idx == 0) {
-			if (qid == AdapHighCmdQueue)
-				idx = ADAP_HIGH_CMD_ENTRIES;
-			else if (qid == AdapNormCmdQueue)
+			if (qid == AdapNormCmdQueue)
 				idx = ADAP_NORM_CMD_ENTRIES;
-			else if (qid == AdapHighRespQueue)
-				idx = ADAP_HIGH_RESP_ENTRIES;
-			else if (qid == AdapNormRespQueue)
+			else
 				idx = ADAP_NORM_RESP_ENTRIES;
 		}
 		if (idx != le32_to_cpu(*(q->headers.consumer)))
 			*nonotify = 1;
 	}
 
-	if (qid == AdapHighCmdQueue) {
-		if (*index >= ADAP_HIGH_CMD_ENTRIES)
-			*index = 0;
-	} else if (qid == AdapNormCmdQueue) {
+	if (qid == AdapNormCmdQueue) {
 		if (*index >= ADAP_NORM_CMD_ENTRIES)
 			*index = 0; /* Wrap to front of the Producer Queue. */
-	}
-	else if (qid == AdapHighRespQueue)
-	{
-		if (*index >= ADAP_HIGH_RESP_ENTRIES)
-			*index = 0;
-	}
-	else if (qid == AdapNormRespQueue)
-	{
+	} else {
 		if (*index >= ADAP_NORM_RESP_ENTRIES)
 			*index = 0; /* Wrap to front of the Producer Queue. */
 	}
-	else {
-		printk("aacraid: invalid qid\n");
-		BUG();
-	}
 
 	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
 		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
@@ -334,12 +318,8 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
 {
 	struct aac_entry * entry = NULL;
 	int map = 0;
-	struct aac_queue * q = &dev->queues->queue[qid];
-
-	spin_lock_irqsave(q->lock, q->SavedIrql);
 
-	if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
-	{
+	if (qid == AdapNormCmdQueue) {
 		/* if no entries wait for some if caller wants to */
 		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
 		{
@@ -350,9 +330,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
 		 */
 		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
 		map = 1;
-	}
-	else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
-	{
+	} else {
 		while(!aac_get_entry(dev, qid, &entry, index, nonotify))
 		{
 			/* if no entries wait for some if caller wants to */
@@ -375,42 +353,6 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
 	return 0;
 }
 
-
-/**
- *	aac_insert_entry	-	insert a queue entry
- *	@dev: Adapter
- *	@index: Index of entry to insert
- *	@qid: Queue number
- *	@nonotify: Suppress adapter notification
- *
- *	Gets the next free QE off the requested priorty adapter command
- *	queue and associates the Fib with the QE. The QE represented by
- *	index is ready to insert on the queue when this routine returns
- *	success.
- */
-
-static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
-{
-	struct aac_queue * q = &dev->queues->queue[qid];
-
-	if(q == NULL)
-		BUG();
-	*(q->headers.producer) = cpu_to_le32(index + 1);
-	spin_unlock_irqrestore(q->lock, q->SavedIrql);
-
-	if (qid == AdapHighCmdQueue ||
-	    qid == AdapNormCmdQueue ||
-	    qid == AdapHighRespQueue ||
-	    qid == AdapNormRespQueue)
-	{
-		if (!nonotify)
-			aac_adapter_notify(dev, qid);
-	}
-	else
-		printk("Suprise insert!\n");
-	return 0;
-}
-
 /*
  *	Define the highest level of host to adapter communication routines.
  *	These routines will support host to adapter FS commuication. These
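Note: with aac_insert_entry() removed, its two jobs — publishing the producer index and ringing the doorbell — move inline into fib_send() and fib_adapter_complete(), which now also own the queue lock around aac_queue_get() instead of having it taken inside the helper. Condensed, the replacement shape at both call sites (a sketch with error handling elided, drawn from the hunks below) is:

	/* Sketch: queue lock now taken by the caller, notify after unlock. */
	spin_lock_irqsave(q->lock, qflags);
	aac_queue_get(dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
	*(q->headers.producer) = cpu_to_le32(index + 1);	/* publish entry */
	spin_unlock_irqrestore(q->lock, qflags);
	if (!(nointr & aac_config.irq_mod))
		aac_adapter_notify(dev, AdapNormCmdQueue);	/* doorbell */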
@@ -439,12 +381,13 @@ static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned l
 int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
 {
 	u32 index;
-	u32 qid;
 	struct aac_dev * dev = fibptr->dev;
 	unsigned long nointr = 0;
 	struct hw_fib * hw_fib = fibptr->hw_fib;
 	struct aac_queue * q;
 	unsigned long flags = 0;
+	unsigned long qflags;
+
 	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
 		return -EBUSY;
 	/*
@@ -497,26 +440,8 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
 	 *	Get a queue entry connect the FIB to it and send an notify
 	 *	the adapter a command is ready.
 	 */
-	if (priority == FsaHigh) {
-		hw_fib->header.XferState |= cpu_to_le32(HighPriority);
-		qid = AdapHighCmdQueue;
-	} else {
-		hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
-		qid = AdapNormCmdQueue;
-	}
-	q = &dev->queues->queue[qid];
+	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
 
-	if(wait)
-		spin_lock_irqsave(&fibptr->event_lock, flags);
-	if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
-		return -EWOULDBLOCK;
-	dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
-	dprintk((KERN_DEBUG "Fib contents:.\n"));
-	dprintk((KERN_DEBUG "  Command = %d.\n", hw_fib->header.Command));
-	dprintk((KERN_DEBUG "  XferState = %x.\n", hw_fib->header.XferState));
-	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
-	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
-	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
 	/*
 	 *	Fill in the Callback and CallbackContext if we are not
 	 *	going to wait.
@@ -525,22 +450,67 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
 		fibptr->callback = callback;
 		fibptr->callback_data = callback_data;
 	}
-	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
-	list_add_tail(&fibptr->queue, &q->pendingq);
-	q->numpending++;
 
 	fibptr->done = 0;
 	fibptr->flags = 0;
 
-	if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
-		return -EWOULDBLOCK;
+	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
+
+	dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
+	dprintk((KERN_DEBUG "Fib contents:.\n"));
+	dprintk((KERN_DEBUG "  Command = %d.\n", hw_fib->header.Command));
+	dprintk((KERN_DEBUG "  XferState = %x.\n", hw_fib->header.XferState));
+	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
+	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
+	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
+
+	q = &dev->queues->queue[AdapNormCmdQueue];
+
+	if(wait)
+		spin_lock_irqsave(&fibptr->event_lock, flags);
+	spin_lock_irqsave(q->lock, qflags);
+	aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
+
+	list_add_tail(&fibptr->queue, &q->pendingq);
+	q->numpending++;
+	*(q->headers.producer) = cpu_to_le32(index + 1);
+	spin_unlock_irqrestore(q->lock, qflags);
+	if (!(nointr & aac_config.irq_mod))
+		aac_adapter_notify(dev, AdapNormCmdQueue);
 	/*
 	 *	If the caller wanted us to wait for response wait now.
 	 */
 
 	if (wait) {
 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
-		down(&fibptr->event_wait);
+		/* Only set for first known interruptable command */
+		if (wait < 0) {
+			/*
+			 *	*VERY* Dangerous to time out a command, the
+			 *	assumption is made that we have no hope of
+			 *	functioning because an interrupt routing or other
+			 *	hardware failure has occurred.
+			 */
+			unsigned long count = 36000000L; /* 3 minutes */
+			unsigned long qflags;
+			while (down_trylock(&fibptr->event_wait)) {
+				if (--count == 0) {
+					spin_lock_irqsave(q->lock, qflags);
+					q->numpending--;
+					list_del(&fibptr->queue);
+					spin_unlock_irqrestore(q->lock, qflags);
+					if (wait == -1) {
+						printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n"
+						  "Usually a result of a PCI interrupt routing problem;\n"
+						  "update mother board BIOS or consider utilizing one of\n"
+						  "the SAFE mode kernel options (acpi, apic etc)\n");
+					}
+					return -ETIMEDOUT;
+				}
+				udelay(5);
+			}
+		} else
+			down(&fibptr->event_wait);
 		if(fibptr->done == 0)
 			BUG();
 
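Note: the wait < 0 branch added above bounds the wait by polling the completion semaphore instead of sleeping in down(): 36,000,000 probes at udelay(5) apiece is roughly the advertised 3 minutes. A self-contained sketch of the same pattern; sketch_down_timeout is a hypothetical helper, not a driver function:

	#include <linux/errno.h>
	#include <asm/delay.h>
	#include <asm/semaphore.h>

	/* Poll a semaphore with a hard deadline; returns 0 on success,
	 * -ETIMEDOUT when the deadline expires. Busy-waits, so this is
	 * only for last-resort paths where interrupts may be broken. */
	static int sketch_down_timeout(struct semaphore *sem, unsigned long usecs)
	{
		unsigned long count = usecs / 5;

		while (down_trylock(sem)) {
			if (--count == 0)
				return -ETIMEDOUT;
			udelay(5);	/* 5 us per probe, as in fib_send() */
		}
		return 0;
	}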
@@ -622,15 +592,9 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
 	case HostNormCmdQueue:
 		notify = HostNormCmdNotFull;
 		break;
-	case HostHighCmdQueue:
-		notify = HostHighCmdNotFull;
-		break;
 	case HostNormRespQueue:
 		notify = HostNormRespNotFull;
 		break;
-	case HostHighRespQueue:
-		notify = HostHighRespNotFull;
-		break;
 	default:
 		BUG();
 		return;
@@ -652,9 +616,13 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
 {
 	struct hw_fib * hw_fib = fibptr->hw_fib;
 	struct aac_dev * dev = fibptr->dev;
+	struct aac_queue * q;
 	unsigned long nointr = 0;
-	if (hw_fib->header.XferState == 0)
+	unsigned long qflags;
+
+	if (hw_fib->header.XferState == 0) {
 		return 0;
+	}
 	/*
 	 *	If we plan to do anything check the structure type first.
 	 */
@@ -669,37 +637,21 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
 	 *	send the completed cdb to the adapter.
 	 */
 	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
+		u32 index;
 		hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
-		if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
-			u32 index;
-			if (size)
-			{
-				size += sizeof(struct aac_fibhdr);
-				if (size > le16_to_cpu(hw_fib->header.SenderSize))
-					return -EMSGSIZE;
-				hw_fib->header.Size = cpu_to_le16(size);
-			}
-			if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
-				return -EWOULDBLOCK;
-			}
-			if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
-			}
-		} else if (hw_fib->header.XferState &
-				cpu_to_le32(NormalPriority)) {
-			u32 index;
-
-			if (size) {
-				size += sizeof(struct aac_fibhdr);
-				if (size > le16_to_cpu(hw_fib->header.SenderSize))
-					return -EMSGSIZE;
-				hw_fib->header.Size = cpu_to_le16(size);
-			}
-			if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
-				return -EWOULDBLOCK;
-			if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
-			{
-			}
+		if (size) {
+			size += sizeof(struct aac_fibhdr);
+			if (size > le16_to_cpu(hw_fib->header.SenderSize))
+				return -EMSGSIZE;
+			hw_fib->header.Size = cpu_to_le16(size);
 		}
+		q = &dev->queues->queue[AdapNormRespQueue];
+		spin_lock_irqsave(q->lock, qflags);
+		aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
+		*(q->headers.producer) = cpu_to_le32(index + 1);
+		spin_unlock_irqrestore(q->lock, qflags);
+		if (!(nointr & (int)aac_config.irq_mod))
+			aac_adapter_notify(dev, AdapNormRespQueue);
 	}
 	else
 	{
@@ -791,6 +743,268 @@ void aac_printf(struct aac_dev *dev, u32 val)
 	memset(cp, 0, 256);
 }
 
+
+/**
+ *	aac_handle_aif		-	Handle a message from the firmware
+ *	@dev: Which adapter this fib is from
+ *	@fibptr: Pointer to fibptr from adapter
+ *
+ *	This routine handles a driver notify fib from the adapter and
+ *	dispatches it to the appropriate routine for handling.
+ */
+
+static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
+{
+	struct hw_fib * hw_fib = fibptr->hw_fib;
+	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
+	int busy;
+	u32 container;
+	struct scsi_device *device;
+	enum {
+		NOTHING,
+		DELETE,
+		ADD,
+		CHANGE
+	} device_config_needed;
+
+	/* Sniff for container changes */
+
+	if (!dev)
+		return;
+	container = (u32)-1;
+
+	/*
+	 *	We have set this up to try and minimize the number of
+	 *	re-configures that take place. As a result of this when
+	 *	certain AIF's come in we will set a flag waiting for another
+	 *	type of AIF before setting the re-config flag.
+	 */
+	switch (le32_to_cpu(aifcmd->command)) {
+	case AifCmdDriverNotify:
+		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
+		/*
+		 *	Morph or Expand complete
+		 */
+		case AifDenMorphComplete:
+		case AifDenVolumeExtendComplete:
+			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+			if (container >= dev->maximum_num_containers)
+				break;
+
+			/*
+			 *	Find the Scsi_Device associated with the SCSI
+			 *	address. Make sure we have the right array, and if
+			 *	so set the flag to initiate a new re-config once we
+			 *	see an AifEnConfigChange AIF come through.
+			 */
+
+			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
+				device = scsi_device_lookup(dev->scsi_host_ptr,
+					CONTAINER_TO_CHANNEL(container),
+					CONTAINER_TO_ID(container),
+					CONTAINER_TO_LUN(container));
+				if (device) {
+					dev->fsa_dev[container].config_needed = CHANGE;
+					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
+					scsi_device_put(device);
+				}
+			}
+		}
+
+		/*
+		 *	If we are waiting on something and this happens to be
+		 *	that thing then set the re-configure flag.
+		 */
+		if (container != (u32)-1) {
+			if (container >= dev->maximum_num_containers)
+				break;
+			if (dev->fsa_dev[container].config_waiting_on ==
+			    le32_to_cpu(*(u32 *)aifcmd->data))
+				dev->fsa_dev[container].config_waiting_on = 0;
+		} else for (container = 0;
+		    container < dev->maximum_num_containers; ++container) {
+			if (dev->fsa_dev[container].config_waiting_on ==
+			    le32_to_cpu(*(u32 *)aifcmd->data))
+				dev->fsa_dev[container].config_waiting_on = 0;
+		}
+		break;
+
+	case AifCmdEventNotify:
+		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
+		/*
+		 *	Add an Array.
+		 */
+		case AifEnAddContainer:
+			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+			if (container >= dev->maximum_num_containers)
+				break;
+			dev->fsa_dev[container].config_needed = ADD;
+			dev->fsa_dev[container].config_waiting_on =
+				AifEnConfigChange;
+			break;
+
+		/*
+		 *	Delete an Array.
+		 */
+		case AifEnDeleteContainer:
+			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+			if (container >= dev->maximum_num_containers)
+				break;
+			dev->fsa_dev[container].config_needed = DELETE;
+			dev->fsa_dev[container].config_waiting_on =
+				AifEnConfigChange;
+			break;
+
+		/*
+		 *	Container change detected. If we currently are not
+		 *	waiting on something else, setup to wait on a Config Change.
+		 */
+		case AifEnContainerChange:
+			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+			if (container >= dev->maximum_num_containers)
+				break;
+			if (dev->fsa_dev[container].config_waiting_on)
+				break;
+			dev->fsa_dev[container].config_needed = CHANGE;
+			dev->fsa_dev[container].config_waiting_on =
+				AifEnConfigChange;
+			break;
+
+		case AifEnConfigChange:
+			break;
+
+		}
+
+		/*
+		 *	If we are waiting on something and this happens to be
+		 *	that thing then set the re-configure flag.
+		 */
+		if (container != (u32)-1) {
+			if (container >= dev->maximum_num_containers)
+				break;
+			if (dev->fsa_dev[container].config_waiting_on ==
+			    le32_to_cpu(*(u32 *)aifcmd->data))
+				dev->fsa_dev[container].config_waiting_on = 0;
+		} else for (container = 0;
+		    container < dev->maximum_num_containers; ++container) {
+			if (dev->fsa_dev[container].config_waiting_on ==
+			    le32_to_cpu(*(u32 *)aifcmd->data))
+				dev->fsa_dev[container].config_waiting_on = 0;
+		}
+		break;
+
+	case AifCmdJobProgress:
+		/*
+		 *	These are job progress AIF's. When a Clear is being
+		 *	done on a container it is initially created then hidden from
+		 *	the OS. When the clear completes we don't get a config
+		 *	change so we monitor the job status complete on a clear then
+		 *	wait for a container change.
+		 */
+
+		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
+		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
+		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
+			for (container = 0;
+			    container < dev->maximum_num_containers;
+			    ++container) {
+				/*
+				 * Stomp on all config sequencing for all
+				 * containers?
+				 */
+				dev->fsa_dev[container].config_waiting_on =
+					AifEnContainerChange;
+				dev->fsa_dev[container].config_needed = ADD;
+			}
+		}
+		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
+		 && (((u32 *)aifcmd->data)[6] == 0)
+		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
+			for (container = 0;
+			    container < dev->maximum_num_containers;
+			    ++container) {
+				/*
+				 * Stomp on all config sequencing for all
+				 * containers?
+				 */
+				dev->fsa_dev[container].config_waiting_on =
+					AifEnContainerChange;
+				dev->fsa_dev[container].config_needed = DELETE;
+			}
+		}
+		break;
+	}
+
+	device_config_needed = NOTHING;
+	for (container = 0; container < dev->maximum_num_containers;
+	    ++container) {
+		if ((dev->fsa_dev[container].config_waiting_on == 0)
+		 && (dev->fsa_dev[container].config_needed != NOTHING)) {
+			device_config_needed =
+				dev->fsa_dev[container].config_needed;
+			dev->fsa_dev[container].config_needed = NOTHING;
+			break;
+		}
+	}
+	if (device_config_needed == NOTHING)
+		return;
+
+	/*
+	 *	If we decided that a re-configuration needs to be done,
+	 *	schedule it here on the way out the door, please close the door
+	 *	behind you.
+	 */
+
+	busy = 0;
+
+
+	/*
+	 *	Find the Scsi_Device associated with the SCSI address,
+	 *	and mark it as changed, invalidating the cache. This deals
+	 *	with changes to existing device IDs.
+	 */
+
+	if (!dev || !dev->scsi_host_ptr)
+		return;
+	/*
+	 * force reload of disk info via probe_container
+	 */
+	if ((device_config_needed == CHANGE)
+	 && (dev->fsa_dev[container].valid == 1))
+		dev->fsa_dev[container].valid = 2;
+	if ((device_config_needed == CHANGE) ||
+	    (device_config_needed == ADD))
+		probe_container(dev, container);
+	device = scsi_device_lookup(dev->scsi_host_ptr,
+		CONTAINER_TO_CHANNEL(container),
+		CONTAINER_TO_ID(container),
+		CONTAINER_TO_LUN(container));
+	if (device) {
+		switch (device_config_needed) {
+		case DELETE:
+			scsi_remove_device(device);
+			break;
+		case CHANGE:
+			if (!dev->fsa_dev[container].valid) {
+				scsi_remove_device(device);
+				break;
+			}
+			scsi_rescan_device(&device->sdev_gendev);
+
+		default:
+			break;
+		}
+		scsi_device_put(device);
+	}
+	if (device_config_needed == ADD) {
+		scsi_add_device(dev->scsi_host_ptr,
+		  CONTAINER_TO_CHANNEL(container),
+		  CONTAINER_TO_ID(container),
+		  CONTAINER_TO_LUN(container));
+	}
+
+}
+
 /**
  *	aac_command_thread	-	command processing thread
  *	@dev: Adapter to monitor
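Note: aac_handle_aif(), added above, is deliberately a two-phase latch. Most AIF's only record an intent in fsa_dev[container].config_needed and arm config_waiting_on with the event that should commit it (usually AifEnConfigChange); the rescan/add/remove tail runs only once the committing event clears the latch, which minimizes redundant re-configures. Reduced to its core, the per-container state machine looks like this; the sketch_* names are illustrative, not driver symbols:

	/* Sketch of the two-phase reconfiguration latch used above. */
	enum sketch_cfg { NOTHING, DELETE, ADD, CHANGE };

	struct sketch_fsa_dev {
		enum sketch_cfg config_needed;	/* action, once committed */
		u32 config_waiting_on;		/* AIF event that commits it */
	};

	/* Phase 1: an early AIF (add/delete/morph) arms the latch. */
	static void sketch_arm(struct sketch_fsa_dev *d, enum sketch_cfg what)
	{
		d->config_needed = what;
		d->config_waiting_on = AifEnConfigChange;
	}

	/* Phase 2: the committing AIF releases it; returns the action. */
	static enum sketch_cfg sketch_commit(struct sketch_fsa_dev *d, u32 event)
	{
		enum sketch_cfg what;

		if (d->config_waiting_on != event)
			return NOTHING;		/* still waiting on something else */
		d->config_waiting_on = 0;
		what = d->config_needed;
		d->config_needed = NOTHING;
		return what;
	}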
@@ -805,7 +1019,6 @@ int aac_command_thread(struct aac_dev * dev)
 {
 	struct hw_fib *hw_fib, *hw_newfib;
 	struct fib *fib, *newfib;
-	struct aac_queue_block *queues = dev->queues;
 	struct aac_fib_context *fibctx;
 	unsigned long flags;
 	DECLARE_WAITQUEUE(wait, current);
@@ -825,21 +1038,22 @@ int aac_command_thread(struct aac_dev * dev)
 	 *	Let the DPC know it has a place to send the AIF's to.
 	 */
 	dev->aif_thread = 1;
-	add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
 	set_current_state(TASK_INTERRUPTIBLE);
+	dprintk ((KERN_INFO "aac_command_thread start\n"));
 	while(1)
 	{
-		spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
-		while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
+		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
+		while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
 			struct list_head *entry;
 			struct aac_aifcmd * aifcmd;
 
 			set_current_state(TASK_RUNNING);
 
-			entry = queues->queue[HostNormCmdQueue].cmdq.next;
+			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
 			list_del(entry);
 
-			spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
 			fib = list_entry(entry, struct fib, fiblink);
 			/*
 			 *	We will process the FIB here or pass it to a
@@ -860,6 +1074,7 @@ int aac_command_thread(struct aac_dev * dev)
 			aifcmd = (struct aac_aifcmd *) hw_fib->data;
 			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
 				/* Handle Driver Notify Events */
+				aac_handle_aif(dev, fib);
 				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
 				fib_adapter_complete(fib, (u16)sizeof(u32));
 			} else {
@@ -869,9 +1084,62 @@ int aac_command_thread(struct aac_dev * dev)
 
 				u32 time_now, time_last;
 				unsigned long flagv;
-
+				unsigned num;
+				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
+				struct fib ** fib_pool, ** fib_p;
+
+				/* Sniff events */
+				if ((aifcmd->command ==
+				     cpu_to_le32(AifCmdEventNotify)) ||
+				    (aifcmd->command ==
+				     cpu_to_le32(AifCmdJobProgress))) {
+					aac_handle_aif(dev, fib);
+				}
+
 				time_now = jiffies/HZ;
 
+				/*
+				 * Warning: no sleep allowed while
+				 * holding spinlock. We take the estimate
+				 * and pre-allocate a set of fibs outside the
+				 * lock.
+				 */
+				num = le32_to_cpu(dev->init->AdapterFibsSize)
+				    / sizeof(struct hw_fib); /* some extra */
+				spin_lock_irqsave(&dev->fib_lock, flagv);
+				entry = dev->fib_list.next;
+				while (entry != &dev->fib_list) {
+					entry = entry->next;
+					++num;
+				}
+				spin_unlock_irqrestore(&dev->fib_lock, flagv);
+				hw_fib_pool = NULL;
+				fib_pool = NULL;
+				if (num
+				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
+				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
+					hw_fib_p = hw_fib_pool;
+					fib_p = fib_pool;
+					while (hw_fib_p < &hw_fib_pool[num]) {
+						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
+							--hw_fib_p;
+							break;
+						}
+						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
+							kfree(*(--hw_fib_p));
+							break;
+						}
+					}
+					if ((num = hw_fib_p - hw_fib_pool) == 0) {
+						kfree(fib_pool);
+						fib_pool = NULL;
+						kfree(hw_fib_pool);
+						hw_fib_pool = NULL;
+					}
+				} else if (hw_fib_pool) {
+					kfree(hw_fib_pool);
+					hw_fib_pool = NULL;
+				}
 				spin_lock_irqsave(&dev->fib_lock, flagv);
 				entry = dev->fib_list.next;
 				/*
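Note: the allocation dance above exists because the dev->fib_lock section that follows may not sleep. The thread first estimates the pool size (AdapterFibsSize worth of FIBs plus one per registered context), kmalloc's everything with GFP_KERNEL outside the lock, shrinks the pool to whatever subset actually allocated, and frees leftovers once the lock is dropped. A reduced sketch of that preallocate-outside-the-lock pattern; sketch_dispatch and the 64-byte payload are placeholders:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	/* Sketch: allocate sleepably up front, hand out under a spinlock. */
	static void sketch_dispatch(spinlock_t *lock, unsigned int num)
	{
		void **pool;
		unsigned int i, used = 0;
		unsigned long flags;

		if (!(pool = kmalloc(sizeof(void *) * num, GFP_KERNEL)))
			return;
		for (i = 0; i < num; i++) {
			if (!(pool[i] = kmalloc(64, GFP_KERNEL))) {
				num = i;	/* shrink to what we really got */
				break;
			}
		}

		spin_lock_irqsave(lock, flags);
		/* ... consume pool[used++] per waiter; no sleeping in here ... */
		spin_unlock_irqrestore(lock, flags);

		while (used < num)
			kfree(pool[used++]);	/* free whatever was left over */
		kfree(pool);
	}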
@@ -880,6 +1148,8 @@ int aac_command_thread(struct aac_dev * dev)
 				 * fib, and then set the event to wake up the
 				 * thread that is waiting for it.
 				 */
+				hw_fib_p = hw_fib_pool;
+				fib_p = fib_pool;
 				while (entry != &dev->fib_list) {
 					/*
 					 * Extract the fibctx
@@ -912,9 +1182,11 @@ int aac_command_thread(struct aac_dev * dev)
 					 * Warning: no sleep allowed while
 					 * holding spinlock
 					 */
-					hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
-					newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
-					if (newfib && hw_newfib) {
+					if (hw_fib_p < &hw_fib_pool[num]) {
+						hw_newfib = *hw_fib_p;
+						*(hw_fib_p++) = NULL;
+						newfib = *fib_p;
+						*(fib_p++) = NULL;
 						/*
 						 * Make the copy of the FIB
 						 */
@@ -929,15 +1201,11 @@ int aac_command_thread(struct aac_dev * dev)
 						fibctx->count++;
 						/*
 						 * Set the event to wake up the
-						 * thread that will waiting.
+						 * thread that is waiting.
 						 */
 						up(&fibctx->wait_sem);
 					} else {
 						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
-						if(newfib)
-							kfree(newfib);
-						if(hw_newfib)
-							kfree(hw_newfib);
 					}
 					entry = entry->next;
 				}
@@ -947,21 +1215,38 @@ int aac_command_thread(struct aac_dev * dev)
 			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
 			fib_adapter_complete(fib, sizeof(u32));
 			spin_unlock_irqrestore(&dev->fib_lock, flagv);
+			/* Free up the remaining resources */
+			hw_fib_p = hw_fib_pool;
+			fib_p = fib_pool;
+			while (hw_fib_p < &hw_fib_pool[num]) {
+				if (*hw_fib_p)
+					kfree(*hw_fib_p);
+				if (*fib_p)
+					kfree(*fib_p);
+				++fib_p;
+				++hw_fib_p;
+			}
+			if (hw_fib_pool)
+				kfree(hw_fib_pool);
+			if (fib_pool)
+				kfree(fib_pool);
 			}
-			spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
 			kfree(fib);
+			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
 		}
 		/*
 		 *	There are no more AIF's
 		 */
-		spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
 		schedule();
 
 		if(signal_pending(current))
 			break;
 		set_current_state(TASK_INTERRUPTIBLE);
 	}
-	remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+	if (dev->queues)
+		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
 	dev->aif_thread = 0;
 	complete_and_exit(&dev->aif_completion, 0);
+	return 0;
 }