author      Jan Glauber <jang@linux.vnet.ibm.com>          2009-06-22 06:08:10 -0400
committer   Martin Schwidefsky <schwidefsky@de.ibm.com>    2009-06-22 06:08:19 -0400
commit      60b5df2f12f2ab54bfa7c1f0f0ce3f5953e73c0b (patch)
tree        3135f3e560340cc8e419921a6f146f62df8bd635 /drivers/s390/cio/qdio_main.c
parent      f3dfa86caa4a54aceb2b235bf28a6f6ad73b2716 (diff)
[S390] qdio: move adapter interrupt tasklet code
Move the adapter interrupt tasklet function to the qdio main code
since all the functions used by the tasklet are located there.
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
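Note: with the adapter interrupt tasklet body moved into qdio_main.c, the helpers it calls can become static; the tasklet functions themselves, qdio_inbound_processing() and tiqdio_inbound_processing(), stay non-static so the qdio setup code can attach them to each queue's tasklet. The following is a rough sketch of that registration pattern, not part of this patch: the helper name setup_inbound_tasklet() and the use_thinint flag are invented for illustration, while tasklet_init() and the two handlers are the real kernel interfaces.

#include <linux/interrupt.h>
#include "qdio.h"   /* private qdio definitions: struct qdio_q, handler prototypes */

/* Hypothetical helper: pick the inbound tasklet handler for a queue. */
static void setup_inbound_tasklet(struct qdio_q *q, int use_thinint)
{
        if (use_thinint)
                /* queue driven by the adapter (thin) interrupt */
                tasklet_init(&q->tasklet, tiqdio_inbound_processing,
                             (unsigned long) q);
        else
                /* queue driven by classic I/O interrupts */
                tasklet_init(&q->tasklet, qdio_inbound_processing,
                             (unsigned long) q);
}

A later tasklet_schedule(&q->tasklet), as seen in the diff below, then runs whichever handler was registered.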
Diffstat (limited to 'drivers/s390/cio/qdio_main.c')
-rw-r--r--   drivers/s390/cio/qdio_main.c   84
1 file changed, 72 insertions(+), 12 deletions(-)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d79cf5bf0e62..377d881385cf 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -231,8 +231,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
         return i;
 }
 
-inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
                          unsigned char *state, int auto_ack)
 {
         return get_buf_states(q, bufnr, state, 1, auto_ack);
 }
@@ -276,7 +276,7 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
                         QDIO_MAX_BUFFERS_PER_Q);
 }
 
-static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
                           unsigned int input)
 {
         int cc;
@@ -293,7 +293,7 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
         return cc;
 }
 
-inline int qdio_siga_sync_q(struct qdio_q *q)
+static inline int qdio_siga_sync_q(struct qdio_q *q)
 {
         if (q->is_input_q)
                 return qdio_siga_sync(q, 0, q->mask);
@@ -358,8 +358,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
         return cc;
 }
 
-/* called from thinint inbound handler */
-void qdio_sync_after_thinint(struct qdio_q *q)
+static inline void qdio_sync_after_thinint(struct qdio_q *q)
 {
         if (pci_out_supported(q)) {
                 if (need_siga_sync_thinint(q))
@@ -370,7 +369,14 @@ void qdio_sync_after_thinint(struct qdio_q *q)
                 qdio_siga_sync_q(q);
 }
 
-inline void qdio_stop_polling(struct qdio_q *q)
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+                        unsigned char *state)
+{
+        qdio_siga_sync_q(q);
+        return get_buf_states(q, bufnr, state, 1, 0);
+}
+
+static inline void qdio_stop_polling(struct qdio_q *q)
 {
         if (!q->u.in.polling)
                 return;
@@ -516,7 +522,7 @@ out:
         return q->first_to_check;
 }
 
-int qdio_inbound_q_moved(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q)
 {
         int bufnr;
 
@@ -570,7 +576,23 @@ static int qdio_inbound_q_done(struct qdio_q *q)
         }
 }
 
-void qdio_kick_handler(struct qdio_q *q)
+static inline int tiqdio_inbound_q_done(struct qdio_q *q)
+{
+        unsigned char state = 0;
+
+        if (!atomic_read(&q->nr_buf_used))
+                return 1;
+
+        qdio_siga_sync_q(q);
+        get_buf_state(q, q->first_to_check, &state, 0);
+
+        if (state == SLSB_P_INPUT_PRIMED)
+                /* more work coming */
+                return 0;
+        return 1;
+}
+
+static void qdio_kick_handler(struct qdio_q *q)
 {
         int start = q->first_to_kick;
         int end = q->first_to_check;
@@ -619,7 +641,6 @@ again:
                 goto again;
 }
 
-/* inbound tasklet */
 void qdio_inbound_processing(unsigned long data)
 {
         struct qdio_q *q = (struct qdio_q *)data;
@@ -797,8 +818,7 @@ void qdio_outbound_timer(unsigned long data)
         tasklet_schedule(&q->tasklet);
 }
 
-/* called from thinint inbound tasklet */
-void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
 {
         struct qdio_q *out;
         int i;
@@ -811,6 +831,46 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q)
                         tasklet_schedule(&out->tasklet);
 }
 
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+        qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+        qdio_sync_after_thinint(q);
+
+        /*
+         * The interrupt could be caused by a PCI request. Check the
+         * PCI capable outbound queues.
+         */
+        qdio_check_outbound_after_thinint(q);
+
+        if (!qdio_inbound_q_moved(q))
+                return;
+
+        qdio_kick_handler(q);
+
+        if (!tiqdio_inbound_q_done(q)) {
+                qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+                if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+                        tasklet_schedule(&q->tasklet);
+        }
+
+        qdio_stop_polling(q);
+        /*
+         * We need to check again to not lose initiative after
+         * resetting the ACK state.
+         */
+        if (!tiqdio_inbound_q_done(q)) {
+                qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+                if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+                        tasklet_schedule(&q->tasklet);
+        }
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+        struct qdio_q *q = (struct qdio_q *)data;
+        __tiqdio_inbound_processing(q);
+}
+
 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
                                   enum qdio_irq_states state)
 {