author     Mark Haverkamp <markh@osdl.org>            2005-09-20 15:57:11 -0400
committer  James Bottomley <jejb@mulgrave.(none)>     2005-09-26 18:49:07 -0400
commit     1640a2c385a860ef25be4a8d18a528c4b6f02bd6 (patch)
tree       4735f28570e42b25bda1e7fb2a4ad081e3e95e31 /drivers
parent     63a70eeaafe0e17e7f45cba495cb457d06070419 (diff)
[SCSI] aacraid: remove aac_insert_entry
Received from Mark Salyzyn of Adaptec.
High priority queues have *never* been used in the entire history of
the aac-based adapters. As a result, aac_insert_entry can be removed,
and the SavedIrql and padding members can be dropped from struct
aac_queue. With SavedIrql replaced by an automatic variable, qflags,
the locking can be refined somewhat. Sparse did not catch the missing
byte swapping in the 'dprintk' debugging macros, so that is fixed up
now that this code has been moved outside the refined locking.
Signed-off-by: Mark Haverkamp <markh@osdl.org>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/aacraid/aacraid.h  |   3
-rw-r--r--  drivers/scsi/aacraid/commsup.c  | 177
2 files changed, 50 insertions, 130 deletions
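
The change described in the commit message comes down to one locking pattern: the saved interrupt state now lives in an automatic variable (qflags) rather than in the shared struct aac_queue, the queue lock covers only the queue-entry allocation and producer-index update, and the adapter notification happens after the lock is dropped. The following is a minimal sketch condensed from the new fib_send path in the diff below; sketch_deliver_fib is a hypothetical name used only for illustration, and argument validation, the wait/callback handling and the debug printing are omitted.

/* Sketch only: condensed from the new fib_send path in the diff below. */
static void sketch_deliver_fib(struct aac_dev *dev, struct hw_fib *hw_fib,
                               struct fib *fibptr)
{
        struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
        unsigned long qflags;           /* automatic, replaces q->SavedIrql */
        unsigned long nointr = 0;
        u32 index;

        spin_lock_irqsave(q->lock, qflags);
        aac_queue_get(dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
        list_add_tail(&fibptr->queue, &q->pendingq);
        q->numpending++;
        *(q->headers.producer) = cpu_to_le32(index + 1);
        spin_unlock_irqrestore(q->lock, qflags);

        /* Notify the adapter only after the queue lock has been released. */
        if (!(nointr & aac_config.irq_mod))
                aac_adapter_notify(dev, AdapNormCmdQueue);
}

Previously the saved flags sat in q->SavedIrql: aac_queue_get took the lock and aac_insert_entry released it, so the lock stayed held across the dprintk calls and the pending-list bookkeeping in between.
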
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 0880f4807fc9..4a99d2f000f4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -306,7 +306,6 @@ enum aac_queue_types {
  */
 
 #define FsaNormal	1
-#define FsaHigh		2
 
 /*
  * Define the FIB. The FIB is the where all the requested data and
@@ -550,8 +549,6 @@ struct aac_queue {
                        /* This is only valid for adapter to host command queues. */
        spinlock_t *lock;               /* Spinlock for this queue must take this lock before accessing the lock */
        spinlock_t lockdata;            /* Actual lock (used only on one side of the lock) */
-       unsigned long SavedIrql;        /* Previous IRQL when the spin lock is taken */
-       u32 padding;                    /* Padding - FIXME - can remove I believe */
        struct list_head cmdq;          /* A queue of FIBs which need to be prcessed by the FS thread. This is */
                                        /* only valid for command queues which receive entries from the adapter. */
        struct list_head pendingq;      /* A queue of outstanding fib's to the adapter. */
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 3b983f3ed960..e4d543a474ae 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -271,40 +271,22 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
        /* Interrupt Moderation, only interrupt for first two entries */
        if (idx != le32_to_cpu(*(q->headers.consumer))) {
                if (--idx == 0) {
-                       if (qid == AdapHighCmdQueue)
-                               idx = ADAP_HIGH_CMD_ENTRIES;
-                       else if (qid == AdapNormCmdQueue)
+                       if (qid == AdapNormCmdQueue)
                                idx = ADAP_NORM_CMD_ENTRIES;
-                       else if (qid == AdapHighRespQueue)
-                               idx = ADAP_HIGH_RESP_ENTRIES;
-                       else if (qid == AdapNormRespQueue)
+                       else
                                idx = ADAP_NORM_RESP_ENTRIES;
                }
                if (idx != le32_to_cpu(*(q->headers.consumer)))
                        *nonotify = 1;
        }
 
-       if (qid == AdapHighCmdQueue) {
-               if (*index >= ADAP_HIGH_CMD_ENTRIES)
-                       *index = 0;
-       } else if (qid == AdapNormCmdQueue) {
+       if (qid == AdapNormCmdQueue) {
                if (*index >= ADAP_NORM_CMD_ENTRIES)
                        *index = 0; /* Wrap to front of the Producer Queue. */
-       }
-       else if (qid == AdapHighRespQueue)
-       {
-               if (*index >= ADAP_HIGH_RESP_ENTRIES)
-                       *index = 0;
-       }
-       else if (qid == AdapNormRespQueue)
-       {
+       } else {
                if (*index >= ADAP_NORM_RESP_ENTRIES)
                        *index = 0; /* Wrap to front of the Producer Queue. */
        }
-       else {
-               printk("aacraid: invalid qid\n");
-               BUG();
-       }
 
        if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
                printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
@@ -336,12 +318,8 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
 {
        struct aac_entry * entry = NULL;
        int map = 0;
-       struct aac_queue * q = &dev->queues->queue[qid];
-
-       spin_lock_irqsave(q->lock, q->SavedIrql);
 
-       if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
-       {
+       if (qid == AdapNormCmdQueue) {
                /* if no entries wait for some if caller wants to */
                while (!aac_get_entry(dev, qid, &entry, index, nonotify))
                {
@@ -352,9 +330,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
                 */
                entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
                map = 1;
-       }
-       else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
-       {
+       } else {
                while(!aac_get_entry(dev, qid, &entry, index, nonotify))
                {
                        /* if no entries wait for some if caller wants to */
@@ -377,42 +353,6 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
        return 0;
 }
 
-
-/**
- * aac_insert_entry - insert a queue entry
- * @dev: Adapter
- * @index: Index of entry to insert
- * @qid: Queue number
- * @nonotify: Suppress adapter notification
- *
- * Gets the next free QE off the requested priorty adapter command
- * queue and associates the Fib with the QE. The QE represented by
- * index is ready to insert on the queue when this routine returns
- * success.
- */
-
-static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
-{
-       struct aac_queue * q = &dev->queues->queue[qid];
-
-       if(q == NULL)
-               BUG();
-       *(q->headers.producer) = cpu_to_le32(index + 1);
-       spin_unlock_irqrestore(q->lock, q->SavedIrql);
-
-       if (qid == AdapHighCmdQueue ||
-           qid == AdapNormCmdQueue ||
-           qid == AdapHighRespQueue ||
-           qid == AdapNormRespQueue)
-       {
-               if (!nonotify)
-                       aac_adapter_notify(dev, qid);
-       }
-       else
-               printk("Suprise insert!\n");
-       return 0;
-}
-
 /*
  * Define the highest level of host to adapter communication routines.
  * These routines will support host to adapter FS commuication. These
@@ -441,12 +381,13 @@ static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned l
 int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
 {
        u32 index;
-       u32 qid;
        struct aac_dev * dev = fibptr->dev;
        unsigned long nointr = 0;
        struct hw_fib * hw_fib = fibptr->hw_fib;
        struct aac_queue * q;
        unsigned long flags = 0;
+       unsigned long qflags;
+
        if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
                return -EBUSY;
        /*
@@ -499,26 +440,8 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
         * Get a queue entry connect the FIB to it and send an notify
         * the adapter a command is ready.
         */
-       if (priority == FsaHigh) {
-               hw_fib->header.XferState |= cpu_to_le32(HighPriority);
-               qid = AdapHighCmdQueue;
-       } else {
-               hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
-               qid = AdapNormCmdQueue;
-       }
-       q = &dev->queues->queue[qid];
+       hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
 
-       if(wait)
-               spin_lock_irqsave(&fibptr->event_lock, flags);
-       if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
-               return -EWOULDBLOCK;
-       dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
-       dprintk((KERN_DEBUG "Fib contents:.\n"));
-       dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
-       dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
-       dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
-       dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
-       dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
        /*
         * Fill in the Callback and CallbackContext if we are not
         * going to wait.
@@ -527,15 +450,33 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
                fibptr->callback = callback;
                fibptr->callback_data = callback_data;
        }
-       FIB_COUNTER_INCREMENT(aac_config.FibsSent);
-       list_add_tail(&fibptr->queue, &q->pendingq);
-       q->numpending++;
 
        fibptr->done = 0;
        fibptr->flags = 0;
 
-       if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
-               return -EWOULDBLOCK;
+       FIB_COUNTER_INCREMENT(aac_config.FibsSent);
+
+       dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
+       dprintk((KERN_DEBUG "Fib contents:.\n"));
+       dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
+       dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
+       dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
+       dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
+       dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
+
+       q = &dev->queues->queue[AdapNormCmdQueue];
+
+       if(wait)
+               spin_lock_irqsave(&fibptr->event_lock, flags);
+       spin_lock_irqsave(q->lock, qflags);
+       aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
+
+       list_add_tail(&fibptr->queue, &q->pendingq);
+       q->numpending++;
+       *(q->headers.producer) = cpu_to_le32(index + 1);
+       spin_unlock_irqrestore(q->lock, qflags);
+       if (!(nointr & aac_config.irq_mod))
+               aac_adapter_notify(dev, AdapNormCmdQueue);
        /*
         * If the caller wanted us to wait for response wait now.
         */
@@ -651,15 +592,9 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
        case HostNormCmdQueue:
                notify = HostNormCmdNotFull;
                break;
-       case HostHighCmdQueue:
-               notify = HostHighCmdNotFull;
-               break;
        case HostNormRespQueue:
                notify = HostNormRespNotFull;
                break;
-       case HostHighRespQueue:
-               notify = HostHighRespNotFull;
-               break;
        default:
                BUG();
                return;
@@ -681,9 +616,13 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
 {
        struct hw_fib * hw_fib = fibptr->hw_fib;
        struct aac_dev * dev = fibptr->dev;
+       struct aac_queue * q;
        unsigned long nointr = 0;
-       if (hw_fib->header.XferState == 0)
+       unsigned long qflags;
+
+       if (hw_fib->header.XferState == 0) {
                return 0;
+       }
        /*
         * If we plan to do anything check the structure type first.
         */
@@ -698,37 +637,21 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
         * send the completed cdb to the adapter.
         */
        if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
+               u32 index;
                hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
-               if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
-                       u32 index;
-                       if (size)
-                       {
-                               size += sizeof(struct aac_fibhdr);
-                               if (size > le16_to_cpu(hw_fib->header.SenderSize))
-                                       return -EMSGSIZE;
-                               hw_fib->header.Size = cpu_to_le16(size);
-                       }
-                       if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
-                               return -EWOULDBLOCK;
-                       }
-                       if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
-                       }
-               } else if (hw_fib->header.XferState &
-                               cpu_to_le32(NormalPriority)) {
-                       u32 index;
-
-                       if (size) {
-                               size += sizeof(struct aac_fibhdr);
-                               if (size > le16_to_cpu(hw_fib->header.SenderSize))
-                                       return -EMSGSIZE;
-                               hw_fib->header.Size = cpu_to_le16(size);
-                       }
-                       if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
-                               return -EWOULDBLOCK;
-                       if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
-                       {
-                       }
+               if (size) {
+                       size += sizeof(struct aac_fibhdr);
+                       if (size > le16_to_cpu(hw_fib->header.SenderSize))
+                               return -EMSGSIZE;
+                       hw_fib->header.Size = cpu_to_le16(size);
                }
+               q = &dev->queues->queue[AdapNormRespQueue];
+               spin_lock_irqsave(q->lock, qflags);
+               aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
+               *(q->headers.producer) = cpu_to_le32(index + 1);
+               spin_unlock_irqrestore(q->lock, qflags);
+               if (!(nointr & (int)aac_config.irq_mod))
+                       aac_adapter_notify(dev, AdapNormRespQueue);
        }
        else
        {