path: root/drivers/s390/cio/qdio_main.c
Diffstat (limited to 'drivers/s390/cio/qdio_main.c')
-rw-r--r--  drivers/s390/cio/qdio_main.c | 144
1 files changed, 74 insertions, 70 deletions
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d79cf5bf0e62..0038750ad945 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -231,8 +231,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 	return i;
 }
 
-inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
 			 unsigned char *state, int auto_ack)
 {
 	return get_buf_states(q, bufnr, state, 1, auto_ack);
 }
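
Note: get_buf_state() is now a static single-buffer wrapper around get_buf_states(). A minimal sketch of how such a wrapper is typically used inside this file (the helper below is hypothetical and not part of the patch; SLSB_P_INPUT_PRIMED comes from the qdio headers):

	/* Hypothetical helper: report whether one inbound buffer is primed. */
	static int buffer_is_primed(struct qdio_q *q, unsigned int bufnr)
	{
		unsigned char state;

		get_buf_state(q, bufnr, &state, 0);	/* no auto-ack */
		return state == SLSB_P_INPUT_PRIMED;
	}
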
@@ -276,7 +276,7 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
 			      QDIO_MAX_BUFFERS_PER_Q);
 }
 
-static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
 			  unsigned int input)
 {
 	int cc;
@@ -293,7 +293,7 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
 	return cc;
 }
 
-inline int qdio_siga_sync_q(struct qdio_q *q)
+static inline int qdio_siga_sync_q(struct qdio_q *q)
 {
 	if (q->is_input_q)
 		return qdio_siga_sync(q, 0, q->mask);
@@ -358,8 +358,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
 	return cc;
 }
 
-/* called from thinint inbound handler */
-void qdio_sync_after_thinint(struct qdio_q *q)
+static inline void qdio_sync_after_thinint(struct qdio_q *q)
 {
 	if (pci_out_supported(q)) {
 		if (need_siga_sync_thinint(q))
@@ -370,7 +369,14 @@ void qdio_sync_after_thinint(struct qdio_q *q)
 		qdio_siga_sync_q(q);
 }
 
-inline void qdio_stop_polling(struct qdio_q *q)
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+			unsigned char *state)
+{
+	qdio_siga_sync_q(q);
+	return get_buf_states(q, bufnr, state, 1, 0);
+}
+
+static inline void qdio_stop_polling(struct qdio_q *q)
 {
 	if (!q->u.in.polling)
 		return;
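
debug_get_buf_state() is the one helper here that stays non-static: it forces a SIGA sync before reading a single SLSB state, which points at use from the qdio debug code rather than the fast path. A hedged sketch of a possible consumer (the declaration placement and the dump function are assumptions, not taken from this patch):

	/* Assumed declaration, e.g. in a shared qdio header: */
	int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state);

	/* Hypothetical debug dump of one buffer's state: */
	static void dump_buf_state(struct qdio_q *q, unsigned int bufnr)
	{
		unsigned char state;

		debug_get_buf_state(q, bufnr, &state);
		pr_debug("qdio: buffer %u state %x\n", bufnr, state);
	}
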
@@ -449,13 +455,6 @@ static inline void inbound_primed(struct qdio_q *q, int count)
 		count--;
 	if (!count)
 		return;
-
-	/*
-	 * Need to change all PRIMED buffers to NOT_INIT, otherwise
-	 * we're loosing initiative in the thinint code.
-	 */
-	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
-		       count);
 }
 
 static int get_inbound_buffer_frontier(struct qdio_q *q)
@@ -470,19 +469,13 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 	stop = add_buf(q->first_to_check, count);
 
-	/*
-	 * No siga sync here, as a PCI or we after a thin interrupt
-	 * will sync the queues.
-	 */
-
-	/* need to set count to 1 for non-qebsm */
-	if (!is_qebsm(q))
-		count = 1;
-
-check_next:
 	if (q->first_to_check == stop)
 		goto out;
 
+	/*
+	 * No siga sync here, as a PCI or we after a thin interrupt
+	 * already sync'ed the queues.
+	 */
 	count = get_buf_states(q, q->first_to_check, &state, count, 1);
 	if (!count)
 		goto out;
@@ -490,14 +483,9 @@ check_next:
 	switch (state) {
 	case SLSB_P_INPUT_PRIMED:
 		inbound_primed(q, count);
-		/*
-		 * No siga-sync needed for non-qebsm here, as the inbound queue
-		 * will be synced on the next siga-r, resp.
-		 * tiqdio_is_inbound_q_done will do the siga-sync.
-		 */
 		q->first_to_check = add_buf(q->first_to_check, count);
 		atomic_sub(count, &q->nr_buf_used);
-		goto check_next;
+		break;
 	case SLSB_P_INPUT_ERROR:
 		announce_buffer_error(q, count);
 		/* process the buffer, the upper layer will take care of it */
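
With the check_next loop and the non-QEBSM count clamp gone, the inbound frontier scan becomes a single pass: fetch up to count buffer states at once and advance by whatever get_buf_states() reports. In outline (a paraphrase of the resulting code, not new logic):

	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	/* error and not-init cases follow unchanged */
	}
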
@@ -516,7 +504,7 @@ out:
 	return q->first_to_check;
 }
 
-int qdio_inbound_q_moved(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q)
 {
 	int bufnr;
 
@@ -524,35 +512,32 @@ int qdio_inbound_q_moved(struct qdio_q *q)
 
 	if ((bufnr != q->last_move) || q->qdio_error) {
 		q->last_move = bufnr;
-		if (!need_siga_sync(q) && !pci_out_supported(q))
+		if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
 			q->u.in.timestamp = get_usecs();
-
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
 		return 1;
 	} else
 		return 0;
 }
 
-static int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q)
 {
 	unsigned char state = 0;
 
 	if (!atomic_read(&q->nr_buf_used))
 		return 1;
 
-	/*
-	 * We need that one for synchronization with the adapter, as it
-	 * does a kind of PCI avoidance.
-	 */
 	qdio_siga_sync_q(q);
-
 	get_buf_state(q, q->first_to_check, &state, 0);
+
 	if (state == SLSB_P_INPUT_PRIMED)
-		/* we got something to do */
+		/* more work coming */
 		return 0;
 
-	/* on VM, we don't poll, so the q is always done here */
-	if (need_siga_sync(q) || pci_out_supported(q))
+	if (is_thinint_irq(q->irq_ptr))
+		return 1;
+
+	/* don't poll under z/VM */
+	if (MACHINE_IS_VM)
 		return 1;
 
 	/*
@@ -563,14 +548,11 @@ static int qdio_inbound_q_done(struct qdio_q *q)
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
 			      q->first_to_check);
 		return 1;
-	} else {
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
-			      q->first_to_check);
+	} else
 		return 0;
-	}
 }
 
-void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q)
 {
 	int start = q->first_to_kick;
 	int end = q->first_to_check;
@@ -619,7 +601,6 @@ again:
 	goto again;
 }
 
-/* inbound tasklet */
 void qdio_inbound_processing(unsigned long data)
 {
 	struct qdio_q *q = (struct qdio_q *)data;
@@ -642,11 +623,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 	stop = add_buf(q->first_to_check, count);
 
-	/* need to set count to 1 for non-qebsm */
-	if (!is_qebsm(q))
-		count = 1;
-
-check_next:
 	if (q->first_to_check == stop)
 		return q->first_to_check;
 
@@ -661,13 +637,7 @@ check_next:
 
 		atomic_sub(count, &q->nr_buf_used);
 		q->first_to_check = add_buf(q->first_to_check, count);
-		/*
-		 * We fetch all buffer states at once. get_buf_states may
-		 * return count < stop. For QEBSM we do not loop.
-		 */
-		if (is_qebsm(q))
-			break;
-		goto check_next;
+		break;
 	case SLSB_P_OUTPUT_ERROR:
 		announce_buffer_error(q, count);
 		/* process the buffer, the upper layer will take care of it */
@@ -797,8 +767,7 @@ void qdio_outbound_timer(unsigned long data)
 	tasklet_schedule(&q->tasklet);
 }
 
-/* called from thinint inbound tasklet */
-void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
 {
 	struct qdio_q *out;
 	int i;
@@ -811,6 +780,46 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q)
 		tasklet_schedule(&out->tasklet);
 }
 
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+	qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+	qdio_sync_after_thinint(q);
+
+	/*
+	 * The interrupt could be caused by a PCI request. Check the
+	 * PCI capable outbound queues.
+	 */
+	qdio_check_outbound_after_thinint(q);
+
+	if (!qdio_inbound_q_moved(q))
+		return;
+
+	qdio_kick_handler(q);
+
+	if (!qdio_inbound_q_done(q)) {
+		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+			tasklet_schedule(&q->tasklet);
+	}
+
+	qdio_stop_polling(q);
+	/*
+	 * We need to check again to not lose initiative after
+	 * resetting the ACK state.
+	 */
+	if (!qdio_inbound_q_done(q)) {
+		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+			tasklet_schedule(&q->tasklet);
+	}
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+	struct qdio_q *q = (struct qdio_q *)data;
+	__tiqdio_inbound_processing(q);
+}
+
 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
 				  enum qdio_irq_states state)
 {
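
tiqdio_inbound_processing() keeps the plain unsigned long tasklet signature, so the thinint input queues can register it through the standard tasklet API; a sketch (where exactly this setup lives is an assumption, tasklet_init() itself is the regular kernel interface):

	/* During thinint input-queue setup (location assumed): */
	tasklet_init(&q->tasklet, tiqdio_inbound_processing,
		     (unsigned long)q);
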
@@ -1488,18 +1497,13 @@ out:
  * @count: how many buffers to process
  */
 int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
-	    int q_nr, int bufnr, int count)
+	    int q_nr, unsigned int bufnr, unsigned int count)
 {
 	struct qdio_irq *irq_ptr;
 
-	if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
-	    (count > QDIO_MAX_BUFFERS_PER_Q) ||
-	    (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
+	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
 		return -EINVAL;
 
-	if (!count)
-		return 0;
-
 	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
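
After this change do_QDIO() only rejects an out-of-range bufnr or count; the q_nr check and the count == 0 early return are dropped, so callers are expected to pass sensible values. A hedged caller sketch (the device variable and buffer range are made up for illustration; QDIO_FLAG_SYNC_INPUT comes from asm/qdio.h):

	/* Hand buffers 0..3 of input queue 0 back to the adapter: */
	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, 4);
	if (rc)
		pr_warn("do_QDIO failed: %d\n", rc);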