 drivers/s390/cio/qdio_main.c | 48 +++++++++++++-----------------------------------
 1 file changed, 13 insertions(+), 35 deletions(-)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 377d881385cf..127e78eef651 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -499,7 +499,7 @@ check_next:
 	/*
 	 * No siga-sync needed for non-qebsm here, as the inbound queue
 	 * will be synced on the next siga-r, resp.
-	 * tiqdio_is_inbound_q_done will do the siga-sync.
+	 * qdio_inbound_q_done will do the siga-sync.
 	 */
 	q->first_to_check = add_buf(q->first_to_check, count);
 	atomic_sub(count, &q->nr_buf_used);
@@ -530,35 +530,32 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
 
 	if ((bufnr != q->last_move) || q->qdio_error) {
 		q->last_move = bufnr;
-		if (!need_siga_sync(q) && !pci_out_supported(q))
+		if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
 			q->u.in.timestamp = get_usecs();
-
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
 		return 1;
 	} else
 		return 0;
 }
 
-static int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q)
 {
 	unsigned char state = 0;
 
 	if (!atomic_read(&q->nr_buf_used))
 		return 1;
 
-	/*
-	 * We need that one for synchronization with the adapter, as it
-	 * does a kind of PCI avoidance.
-	 */
 	qdio_siga_sync_q(q);
-
 	get_buf_state(q, q->first_to_check, &state, 0);
+
 	if (state == SLSB_P_INPUT_PRIMED)
-		/* we got something to do */
+		/* more work coming */
		return 0;
 
-	/* on VM, we don't poll, so the q is always done here */
-	if (need_siga_sync(q) || pci_out_supported(q))
+	if (is_thinint_irq(q->irq_ptr))
+		return 1;
+
+	/* don't poll under z/VM */
+	if (MACHINE_IS_VM)
 		return 1;
 
 	/*
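
The new-side column of this hunk turns the inbound-done test into a single ladder of early returns. As a reading aid, here is a minimal userspace sketch of that ordering; the struct, enum, and main() driver are invented stand-ins rather than kernel types, and the final timestamp-based timeout, whose body lies outside this hunk, is reduced to a placeholder:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the SLSB buffer state the kernel reads via
 * get_buf_state(); only the one state the ladder tests is modeled. */
enum buf_state { BUF_EMPTY, BUF_INPUT_PRIMED };

/* Invented model of the few qdio_q fields the new ladder consults. */
struct queue_model {
	int nr_buf_used;		/* atomic_t in the kernel */
	enum buf_state next_state;	/* first_to_check state after the sync */
	bool thinint;			/* is_thinint_irq(q->irq_ptr) */
	bool on_vm;			/* MACHINE_IS_VM */
};

/* Mirrors the branch order of the merged qdio_inbound_q_done(). */
static bool inbound_q_done(const struct queue_model *q)
{
	if (q->nr_buf_used == 0)
		return true;		/* nothing outstanding at all */
	/* the kernel issues qdio_siga_sync_q() here, then reads the state */
	if (q->next_state == BUF_INPUT_PRIMED)
		return false;		/* more work coming: poll again */
	if (q->thinint)
		return true;		/* adapter interrupt re-drives the queue */
	if (q->on_vm)
		return true;		/* don't poll under z/VM */
	return true;			/* placeholder for the timeout check
					 * that lies outside this hunk */
}

int main(void)
{
	struct queue_model q = { .nr_buf_used = 2,
				 .next_state = BUF_INPUT_PRIMED };
	printf("primed queue done? %d\n", inbound_q_done(&q));	/* 0 */
	q.next_state = BUF_EMPTY;
	q.thinint = true;
	printf("thinint queue done? %d\n", inbound_q_done(&q));	/* 1 */
	return 0;
}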
@@ -569,27 +566,8 @@ static int qdio_inbound_q_done(struct qdio_q *q)
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
 			      q->first_to_check);
 		return 1;
-	} else {
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
-			      q->first_to_check);
-		return 0;
-	}
-}
-
-static inline int tiqdio_inbound_q_done(struct qdio_q *q)
-{
-	unsigned char state = 0;
-
-	if (!atomic_read(&q->nr_buf_used))
-		return 1;
-
-	qdio_siga_sync_q(q);
-	get_buf_state(q, q->first_to_check, &state, 0);
-
-	if (state == SLSB_P_INPUT_PRIMED)
-		/* more work coming */
-		return 0;
-	return 1;
+	} else
+		return 0;
 }
 
 static void qdio_kick_handler(struct qdio_q *q)
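
This hunk deletes tiqdio_inbound_q_done() outright: with the is_thinint_irq() early-out added above, the merged helper runs the same three steps for a thinint queue (buffer count, siga-sync plus state probe, PRIMED test) and then returns 1 unconditionally, exactly like the removed variant. A throwaway equivalence check under that reading, again with invented stand-ins rather than kernel types:

#include <assert.h>
#include <stdbool.h>

enum tq_state { TQ_EMPTY, TQ_INPUT_PRIMED };

struct tq_model {
	int nr_buf_used;
	enum tq_state next_state;
};

/* Logic of the deleted tiqdio_inbound_q_done(), per the removed lines. */
static bool tiqdio_done(const struct tq_model *q)
{
	if (q->nr_buf_used == 0)
		return true;
	/* siga-sync and state probe happen here in the kernel */
	if (q->next_state == TQ_INPUT_PRIMED)
		return false;	/* more work coming */
	return true;
}

/* Thinint path through the merged qdio_inbound_q_done(): the
 * is_thinint_irq() early-out makes the non-thinint tail unreachable. */
static bool merged_done_thinint(const struct tq_model *q)
{
	if (q->nr_buf_used == 0)
		return true;
	if (q->next_state == TQ_INPUT_PRIMED)
		return false;
	return true;	/* the is_thinint_irq(q->irq_ptr) branch */
}

int main(void)
{
	/* exhaust the small state space a thinint queue can be in */
	for (int used = 0; used <= 1; used++)
		for (int primed = 0; primed <= 1; primed++) {
			struct tq_model q = {
				.nr_buf_used = used,
				.next_state = primed ? TQ_INPUT_PRIMED
						     : TQ_EMPTY,
			};
			assert(tiqdio_done(&q) == merged_done_thinint(&q));
		}
	return 0;
}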
@@ -847,7 +825,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
 
 	qdio_kick_handler(q);
 
-	if (!tiqdio_inbound_q_done(q)) {
+	if (!qdio_inbound_q_done(q)) {
 		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
 		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
 			tasklet_schedule(&q->tasklet);
@@ -858,7 +836,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
 	 * We need to check again to not lose initiative after
 	 * resetting the ACK state.
 	 */
-	if (!tiqdio_inbound_q_done(q)) {
+	if (!qdio_inbound_q_done(q)) {
 		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
 		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
 			tasklet_schedule(&q->tasklet);
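
The two call-site renames are mechanical, but the comment above this second check states the real invariant: a buffer primed between the first done-check and the ACK reset raises no interrupt, so the tasklet must probe once more or the queue stalls. A hedged userspace sketch of that window; inbound_processing(), tasklet_reschedule(), and the counters are stand-ins for the kernel machinery, with the race injected explicitly:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel machinery; none of these names are kernel API. */
static int primed_buffers;	/* buffers the adapter has handed over */
static bool irq_armed;		/* will the adapter interrupt us again? */

static bool inbound_q_done(void)	/* models qdio_inbound_q_done() */
{
	return primed_buffers == 0;
}

static void tasklet_reschedule(void)	/* models tasklet_schedule() */
{
	printf("rescheduled: %d buffer(s) still pending\n", primed_buffers);
}

static void inbound_processing(void)
{
	if (!inbound_q_done()) {
		tasklet_reschedule();
		return;
	}

	/* inject the race: the adapter primes a buffer after the first
	 * check but before the ACK reset; with the ACK still held it
	 * raises no interrupt */
	primed_buffers++;

	/* queue looked done: reset the ACK state, re-arming the interrupt */
	irq_armed = true;

	/* check again to not lose initiative: this second probe is the
	 * only chance anything has to notice the freshly primed buffer */
	if (!inbound_q_done())
		tasklet_reschedule();
}

int main(void)
{
	inbound_processing();
	printf("irq re-armed: %d\n", irq_armed);
	return 0;
}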