author		Tejun Heo <tj@kernel.org>	2009-07-03 18:13:18 -0400
committer	Tejun Heo <tj@kernel.org>	2009-07-03 18:13:18 -0400
commit		c43768cbb7655ea5ff782ae250f6e2ef4297cf98 (patch)
tree		3982e41dde3eecaa3739a5d1a8ed18d04bd74f01 /drivers/s390/cio
parent		1a8dd307cc0a2119be4e578c517795464e6dabba (diff)
parent		746a99a5af60ee676afa2ba469ccd1373493c7e7 (diff)
Merge branch 'master' into for-next
Pull linus#master to merge PER_CPU_DEF_ATTRIBUTES and alpha build fix
changes. As alpha in percpu tree uses 'weak' attribute instead of
inline assembly, there's no need for __used attribute.
Conflicts:
	arch/alpha/include/asm/percpu.h
	arch/mn10300/kernel/vmlinux.lds.S
	include/linux/percpu-defs.h
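[Editorial note, not part of the patch: a minimal sketch, in plain GCC C, of the attribute interplay the message describes. The macro and variable names below are hypothetical; only the __used and weak attributes come from the commit message. A definition referenced solely from inline assembly is invisible to the compiler and needs __used to survive; a weak definition has external linkage and is always emitted, so __used becomes redundant.]

	/* hypothetical sketch -- not actual percpu-defs.h code */

	/* before: the per-cpu symbol is referenced only from inline assembly,
	 * which gcc cannot see, so __used must keep the definition alive */
	#define DEFINE_PER_CPU_ASM(type, name) \
		__attribute__((__used__, __section__(".data.percpu"))) \
		__typeof__(type) per_cpu__##name

	/* after: a weak definition has external linkage and is always
	 * emitted, so the __used attribute is no longer needed */
	#define DEFINE_PER_CPU_WEAK(type, name) \
		__attribute__((__weak__, __section__(".data.percpu"))) \
		__typeof__(type) per_cpu__##name

	/* usage: emits a weak, section-placed definition without __used */
	DEFINE_PER_CPU_WEAK(int, example_counter);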
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--	drivers/s390/cio/qdio.h		11
-rw-r--r--	drivers/s390/cio/qdio_debug.c	3
-rw-r--r--	drivers/s390/cio/qdio_main.c	144
-rw-r--r--	drivers/s390/cio/qdio_thinint.c	114
4 files changed, 94 insertions, 178 deletions
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 13bcb8114388..b1241f8fae88 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -351,15 +351,6 @@ static inline unsigned long long get_usecs(void)
 	((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
 
 /* prototypes for thin interrupt */
-void qdio_sync_after_thinint(struct qdio_q *q);
-int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
-		  int auto_ack);
-void qdio_check_outbound_after_thinint(struct qdio_q *q);
-int qdio_inbound_q_moved(struct qdio_q *q);
-void qdio_kick_handler(struct qdio_q *q);
-void qdio_stop_polling(struct qdio_q *q);
-int qdio_siga_sync_q(struct qdio_q *q);
-
 void qdio_setup_thinint(struct qdio_irq *irq_ptr);
 int qdio_establish_thinint(struct qdio_irq *irq_ptr);
 void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
@@ -392,4 +383,6 @@ void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
 int qdio_setup_init(void);
 void qdio_setup_exit(void);
 
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+			unsigned char *state);
 #endif /* _CIO_QDIO_H */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e3434b34f86c..b8626d4df116 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -70,9 +70,8 @@ static int qstat_show(struct seq_file *m, void *v)
 	seq_printf(m, "slsb buffer states:\n");
 	seq_printf(m, "|0      |8      |16     |24     |32     |40     |48     |56  63|\n");
 
-	qdio_siga_sync_q(q);
 	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
-		get_buf_state(q, i, &state, 0);
+		debug_get_buf_state(q, i, &state);
 		switch (state) {
 		case SLSB_P_INPUT_NOT_INIT:
 		case SLSB_P_OUTPUT_NOT_INIT:
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d79cf5bf0e62..0038750ad945 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -231,8 +231,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 	return i;
 }
 
-inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
-		unsigned char *state, int auto_ack)
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+				unsigned char *state, int auto_ack)
 {
 	return get_buf_states(q, bufnr, state, 1, auto_ack);
 }
@@ -276,7 +276,7 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
 			QDIO_MAX_BUFFERS_PER_Q);
 }
 
-static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
 			  unsigned int input)
 {
 	int cc;
@@ -293,7 +293,7 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
 	return cc;
 }
 
-inline int qdio_siga_sync_q(struct qdio_q *q)
+static inline int qdio_siga_sync_q(struct qdio_q *q)
 {
 	if (q->is_input_q)
 		return qdio_siga_sync(q, 0, q->mask);
@@ -358,8 +358,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
 	return cc;
 }
 
-/* called from thinint inbound handler */
-void qdio_sync_after_thinint(struct qdio_q *q)
+static inline void qdio_sync_after_thinint(struct qdio_q *q)
 {
 	if (pci_out_supported(q)) {
 		if (need_siga_sync_thinint(q))
@@ -370,7 +369,14 @@ void qdio_sync_after_thinint(struct qdio_q *q)
 		qdio_siga_sync_q(q);
 }
 
-inline void qdio_stop_polling(struct qdio_q *q)
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+		unsigned char *state)
+{
+	qdio_siga_sync_q(q);
+	return get_buf_states(q, bufnr, state, 1, 0);
+}
+
+static inline void qdio_stop_polling(struct qdio_q *q)
 {
 	if (!q->u.in.polling)
 		return;
@@ -449,13 +455,6 @@ static inline void inbound_primed(struct qdio_q *q, int count)
 	count--;
 	if (!count)
 		return;
-
-	/*
-	 * Need to change all PRIMED buffers to NOT_INIT, otherwise
-	 * we're loosing initiative in the thinint code.
-	 */
-	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
-		       count);
 }
 
 static int get_inbound_buffer_frontier(struct qdio_q *q)
@@ -470,19 +469,13 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 	stop = add_buf(q->first_to_check, count);
 
-	/*
-	 * No siga sync here, as a PCI or we after a thin interrupt
-	 * will sync the queues.
-	 */
-
-	/* need to set count to 1 for non-qebsm */
-	if (!is_qebsm(q))
-		count = 1;
-
-check_next:
 	if (q->first_to_check == stop)
 		goto out;
 
+	/*
+	 * No siga sync here, as a PCI or we after a thin interrupt
+	 * already sync'ed the queues.
+	 */
 	count = get_buf_states(q, q->first_to_check, &state, count, 1);
 	if (!count)
 		goto out;
@@ -490,14 +483,9 @@ check_next:
 	switch (state) {
 	case SLSB_P_INPUT_PRIMED:
 		inbound_primed(q, count);
-		/*
-		 * No siga-sync needed for non-qebsm here, as the inbound queue
-		 * will be synced on the next siga-r, resp.
-		 * tiqdio_is_inbound_q_done will do the siga-sync.
-		 */
 		q->first_to_check = add_buf(q->first_to_check, count);
 		atomic_sub(count, &q->nr_buf_used);
-		goto check_next;
+		break;
 	case SLSB_P_INPUT_ERROR:
 		announce_buffer_error(q, count);
 		/* process the buffer, the upper layer will take care of it */
@@ -516,7 +504,7 @@ out:
 	return q->first_to_check;
 }
 
-int qdio_inbound_q_moved(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q)
 {
 	int bufnr;
 
@@ -524,35 +512,32 @@ int qdio_inbound_q_moved(struct qdio_q *q)
 
 	if ((bufnr != q->last_move) || q->qdio_error) {
 		q->last_move = bufnr;
-		if (!need_siga_sync(q) && !pci_out_supported(q))
+		if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
 			q->u.in.timestamp = get_usecs();
-
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
 		return 1;
 	} else
 		return 0;
 }
 
-static int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q)
 {
 	unsigned char state = 0;
 
 	if (!atomic_read(&q->nr_buf_used))
 		return 1;
 
-	/*
-	 * We need that one for synchronization with the adapter, as it
-	 * does a kind of PCI avoidance.
-	 */
 	qdio_siga_sync_q(q);
-
 	get_buf_state(q, q->first_to_check, &state, 0);
+
 	if (state == SLSB_P_INPUT_PRIMED)
-		/* we got something to do */
+		/* more work coming */
 		return 0;
 
-	/* on VM, we don't poll, so the q is always done here */
-	if (need_siga_sync(q) || pci_out_supported(q))
+	if (is_thinint_irq(q->irq_ptr))
+		return 1;
+
+	/* don't poll under z/VM */
+	if (MACHINE_IS_VM)
 		return 1;
 
 	/*
@@ -563,14 +548,11 @@ static int qdio_inbound_q_done(struct qdio_q *q)
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
 			      q->first_to_check);
 		return 1;
-	} else {
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
-			      q->first_to_check);
+	} else
 		return 0;
-	}
 }
 
-void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q)
 {
 	int start = q->first_to_kick;
 	int end = q->first_to_check;
@@ -619,7 +601,6 @@ again:
 		goto again;
 	}
 
-/* inbound tasklet */
 void qdio_inbound_processing(unsigned long data)
 {
 	struct qdio_q *q = (struct qdio_q *)data;
@@ -642,11 +623,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 	stop = add_buf(q->first_to_check, count);
 
-	/* need to set count to 1 for non-qebsm */
-	if (!is_qebsm(q))
-		count = 1;
-
-check_next:
 	if (q->first_to_check == stop)
 		return q->first_to_check;
 
@@ -661,13 +637,7 @@ check_next:
 
 		atomic_sub(count, &q->nr_buf_used);
 		q->first_to_check = add_buf(q->first_to_check, count);
-		/*
-		 * We fetch all buffer states at once. get_buf_states may
-		 * return count < stop. For QEBSM we do not loop.
-		 */
-		if (is_qebsm(q))
-			break;
-		goto check_next;
+		break;
 	case SLSB_P_OUTPUT_ERROR:
 		announce_buffer_error(q, count);
 		/* process the buffer, the upper layer will take care of it */
@@ -797,8 +767,7 @@ void qdio_outbound_timer(unsigned long data)
 	tasklet_schedule(&q->tasklet);
 }
 
-/* called from thinint inbound tasklet */
-void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
 {
 	struct qdio_q *out;
 	int i;
@@ -811,6 +780,46 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q)
 			tasklet_schedule(&out->tasklet);
 }
 
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+	qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+	qdio_sync_after_thinint(q);
+
+	/*
+	 * The interrupt could be caused by a PCI request. Check the
+	 * PCI capable outbound queues.
+	 */
+	qdio_check_outbound_after_thinint(q);
+
+	if (!qdio_inbound_q_moved(q))
+		return;
+
+	qdio_kick_handler(q);
+
+	if (!qdio_inbound_q_done(q)) {
+		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+			tasklet_schedule(&q->tasklet);
+	}
+
+	qdio_stop_polling(q);
+	/*
+	 * We need to check again to not lose initiative after
+	 * resetting the ACK state.
+	 */
+	if (!qdio_inbound_q_done(q)) {
+		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+			tasklet_schedule(&q->tasklet);
+	}
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+	struct qdio_q *q = (struct qdio_q *)data;
+	__tiqdio_inbound_processing(q);
+}
+
 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
 				  enum qdio_irq_states state)
 {
@@ -1488,18 +1497,13 @@ out:
  * @count: how many buffers to process
  */
 int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
-	    int q_nr, int bufnr, int count)
+	    int q_nr, unsigned int bufnr, unsigned int count)
 {
 	struct qdio_irq *irq_ptr;
 
-	if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
-	    (count > QDIO_MAX_BUFFERS_PER_Q) ||
-	    (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
+	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
 		return -EINVAL;
 
-	if (!count)
-		return 0;
-
 	irq_ptr = cdev->private->qdio_data;
 	if (!irq_ptr)
 		return -ENODEV;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index c655d011a78d..981a77ea7ee2 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -43,9 +43,6 @@ struct indicator_t {
 };
 static struct indicator_t *q_indicators;
 
-static void tiqdio_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
-
 static int css_qdio_omit_svs;
 
 static inline unsigned long do_clear_global_summary(void)
@@ -103,11 +100,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
 	xchg(irq_ptr->dsci, 1);
 }
 
-/*
- * we cannot stop the tiqdio tasklet here since it is for all
- * thinint qdio devices and it must run as long as there is a
- * thinint device left
- */
 void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
 {
 	struct qdio_q *q;
@@ -126,79 +118,39 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
 	}
 }
 
-static inline int tiqdio_inbound_q_done(struct qdio_q *q)
-{
-	unsigned char state = 0;
-
-	if (!atomic_read(&q->nr_buf_used))
-		return 1;
-
-	qdio_siga_sync_q(q);
-	get_buf_state(q, q->first_to_check, &state, 0);
-
-	if (state == SLSB_P_INPUT_PRIMED)
-		/* more work coming */
-		return 0;
-	return 1;
-}
-
 static inline int shared_ind(struct qdio_irq *irq_ptr)
 {
 	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
 }
 
-static void __tiqdio_inbound_processing(struct qdio_q *q)
+/**
+ * tiqdio_thinint_handler - thin interrupt handler for qdio
+ * @ind: pointer to adapter local summary indicator
+ * @drv_data: NULL
+ */
+static void tiqdio_thinint_handler(void *ind, void *drv_data)
 {
-	qdio_perf_stat_inc(&perf_stats.thinint_inbound);
-	qdio_sync_after_thinint(q);
+	struct qdio_q *q;
+
+	qdio_perf_stat_inc(&perf_stats.thin_int);
 
 	/*
-	 * Maybe we have work on our outbound queues... at least
-	 * we have to check the PCI capable queues.
+	 * SVS only when needed: issue SVS to benefit from iqdio interrupt
+	 * avoidance (SVS clears adapter interrupt suppression overwrite)
 	 */
-	qdio_check_outbound_after_thinint(q);
-
-	if (!qdio_inbound_q_moved(q))
-		return;
-
-	qdio_kick_handler(q);
-
-	if (!tiqdio_inbound_q_done(q)) {
-		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
-			tasklet_schedule(&q->tasklet);
-	}
+	if (!css_qdio_omit_svs)
+		do_clear_global_summary();
 
-	qdio_stop_polling(q);
 	/*
-	 * We need to check again to not lose initiative after
-	 * resetting the ACK state.
+	 * reset local summary indicator (tiqdio_alsi) to stop adapter
+	 * interrupts for now
 	 */
-	if (!tiqdio_inbound_q_done(q)) {
-		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
-		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
-			tasklet_schedule(&q->tasklet);
-	}
-}
-
-void tiqdio_inbound_processing(unsigned long data)
-{
-	struct qdio_q *q = (struct qdio_q *)data;
-
-	__tiqdio_inbound_processing(q);
-}
-
-/* check for work on all inbound thinint queues */
-static void tiqdio_tasklet_fn(unsigned long data)
-{
-	struct qdio_q *q;
-
-	qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
-again:
+	xchg((u8 *)ind, 0);
 
 	/* protect tiq_list entries, only changed in activate or shutdown */
 	rcu_read_lock();
 
+	/* check for work on all inbound thinint queues */
 	list_for_each_entry_rcu(q, &tiq_list, entry)
 		/* only process queues from changed sets */
 		if (*q->irq_ptr->dsci) {
@@ -226,37 +178,6 @@ again:
 		if (*tiqdio_alsi)
 			xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
 	}
-
-	/* check for more work */
-	if (*tiqdio_alsi) {
-		xchg(tiqdio_alsi, 0);
-		qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
-		goto again;
-	}
-}
-
-/**
- * tiqdio_thinint_handler - thin interrupt handler for qdio
- * @ind: pointer to adapter local summary indicator
- * @drv_data: NULL
- */
-static void tiqdio_thinint_handler(void *ind, void *drv_data)
-{
-	qdio_perf_stat_inc(&perf_stats.thin_int);
-
-	/*
-	 * SVS only when needed: issue SVS to benefit from iqdio interrupt
-	 * avoidance (SVS clears adapter interrupt suppression overwrite)
-	 */
-	if (!css_qdio_omit_svs)
-		do_clear_global_summary();
-
-	/*
-	 * reset local summary indicator (tiqdio_alsi) to stop adapter
-	 * interrupts for now, the tasklet will clean all dsci's
-	 */
-	xchg((u8 *)ind, 0);
-	tasklet_hi_schedule(&tiqdio_tasklet);
 }
 
 static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
@@ -376,5 +297,4 @@ void __exit tiqdio_unregister_thinints(void)
 		s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
 		isc_unregister(QDIO_AIRQ_ISC);
 	}
-	tasklet_kill(&tiqdio_tasklet);
 }