author	Jan Glauber <jang@linux.vnet.ibm.com>	2009-06-22 06:08:12 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-06-22 06:08:20 -0400
commit	36e3e72120e27939233e4bd88a8d74b3a2377428 (patch)
tree	b9630dd367096651a23f5425f70f2c28ec886970 /drivers/s390/cio/qdio_main.c
parent	9a2c160a8cbd5b3253672b3bac462c64d0d2eef7 (diff)
[S390] qdio: extract all primed SBALs at once
For devices without QIOASSIST, primed SBALs were extracted in a loop. Remove the loop since get_buf_states can already return more than one primed SBAL.

Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390/cio/qdio_main.c')
-rw-r--r--	drivers/s390/cio/qdio_main.c	34
1 file changed, 6 insertions(+), 28 deletions(-)
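To illustrate the pattern the patch moves to, here is a minimal, standalone sketch (not the kernel code; the ring size, the enum, and the get_buf_run() helper are illustrative stand-ins for the real SLSB/get_buf_states machinery): one state query reports how many consecutive buffers share the same state, so the caller consumes them in a single pass instead of forcing count = 1 and looping once per buffer as the old non-QEBSM path did.

/*
 * Minimal, standalone sketch of the pattern this patch adopts; all names
 * and sizes are illustrative, not the kernel's.
 */
#include <stdio.h>

#define Q_BUFFERS	128
#define Q_BUFFERS_MASK	(Q_BUFFERS - 1)

enum buf_state { BUF_EMPTY, BUF_PRIMED, BUF_ERROR };

static enum buf_state slsb[Q_BUFFERS];

/*
 * Return how many consecutive buffers starting at 'start' share the same
 * state (capped at 'count'); report that state via *state.
 */
static int get_buf_run(int start, enum buf_state *state, int count)
{
	int i;

	*state = slsb[start];
	for (i = 1; i < count; i++)
		if (slsb[(start + i) & Q_BUFFERS_MASK] != *state)
			break;
	return i;
}

int main(void)
{
	enum buf_state state;
	int first_to_check = 0, nr_buf_used = 6, count, i;

	/* Five primed buffers followed by one in error state. */
	for (i = 0; i < 5; i++)
		slsb[i] = BUF_PRIMED;
	slsb[5] = BUF_ERROR;

	while (nr_buf_used) {
		count = get_buf_run(first_to_check, &state, nr_buf_used);
		switch (state) {
		case BUF_PRIMED:
			/* All primed buffers handled in one pass, no loop. */
			printf("processed %d primed buffers at %d\n",
			       count, first_to_check);
			break;
		case BUF_ERROR:
			printf("error on %d buffer(s) at %d\n",
			       count, first_to_check);
			break;
		default:
			return 0;
		}
		first_to_check = (first_to_check + count) & Q_BUFFERS_MASK;
		nr_buf_used -= count;
	}
	return 0;
}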
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 127e78eef65..779b7741d49 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -476,19 +476,13 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 	stop = add_buf(q->first_to_check, count);
 
-	/*
-	 * No siga sync here, as a PCI or we after a thin interrupt
-	 * will sync the queues.
-	 */
-
-	/* need to set count to 1 for non-qebsm */
-	if (!is_qebsm(q))
-		count = 1;
-
-check_next:
 	if (q->first_to_check == stop)
 		goto out;
 
+	/*
+	 * No siga sync here, as a PCI or we after a thin interrupt
+	 * already sync'ed the queues.
+	 */
 	count = get_buf_states(q, q->first_to_check, &state, count, 1);
 	if (!count)
 		goto out;
@@ -496,14 +490,9 @@ check_next:
 	switch (state) {
 	case SLSB_P_INPUT_PRIMED:
 		inbound_primed(q, count);
-		/*
-		 * No siga-sync needed for non-qebsm here, as the inbound queue
-		 * will be synced on the next siga-r, resp.
-		 * qdio_inbound_q_done will do the siga-sync.
-		 */
 		q->first_to_check = add_buf(q->first_to_check, count);
 		atomic_sub(count, &q->nr_buf_used);
-		goto check_next;
+		break;
 	case SLSB_P_INPUT_ERROR:
 		announce_buffer_error(q, count);
 		/* process the buffer, the upper layer will take care of it */
@@ -641,11 +630,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 	stop = add_buf(q->first_to_check, count);
 
-	/* need to set count to 1 for non-qebsm */
-	if (!is_qebsm(q))
-		count = 1;
-
-check_next:
 	if (q->first_to_check == stop)
 		return q->first_to_check;
 
@@ -660,13 +644,7 @@ check_next:
 
 		atomic_sub(count, &q->nr_buf_used);
 		q->first_to_check = add_buf(q->first_to_check, count);
-		/*
-		 * We fetch all buffer states at once. get_buf_states may
-		 * return count < stop. For QEBSM we do not loop.
-		 */
-		if (is_qebsm(q))
-			break;
-		goto check_next;
+		break;
 	case SLSB_P_OUTPUT_ERROR:
 		announce_buffer_error(q, count);
 		/* process the buffer, the upper layer will take care of it */