about summary refs log tree commit diff stats
path: root/drivers/s390
diff options
context:
space:
mode:
author	Jan Glauber <jang@linux.vnet.ibm.com>	2009-06-22 06:08:10 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-06-22 06:08:19 -0400
commit	60b5df2f12f2ab54bfa7c1f0f0ce3f5953e73c0b (patch)
tree	3135f3e560340cc8e419921a6f146f62df8bd635 /drivers/s390
parent	f3dfa86caa4a54aceb2b235bf28a6f6ad73b2716 (diff)
[S390] qdio: move adapter interrupt tasklet code
Move the adapter interrupt tasklet function to the qdio main code since all
the functions used by the tasklet are located there.

Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390')
-rw-r--r--	drivers/s390/cio/qdio.h	| 11
-rw-r--r--	drivers/s390/cio/qdio_debug.c	| 3
-rw-r--r--	drivers/s390/cio/qdio_main.c	| 84
-rw-r--r--	drivers/s390/cio/qdio_thinint.c	| 57
4 files changed, 75 insertions(+), 80 deletions(-)
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 13bcb8114388..b1241f8fae88 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -351,15 +351,6 @@ static inline unsigned long long get_usecs(void)
351 ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) 351 ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
352 352
353/* prototypes for thin interrupt */ 353/* prototypes for thin interrupt */
354void qdio_sync_after_thinint(struct qdio_q *q);
355int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
356 int auto_ack);
357void qdio_check_outbound_after_thinint(struct qdio_q *q);
358int qdio_inbound_q_moved(struct qdio_q *q);
359void qdio_kick_handler(struct qdio_q *q);
360void qdio_stop_polling(struct qdio_q *q);
361int qdio_siga_sync_q(struct qdio_q *q);
362
363void qdio_setup_thinint(struct qdio_irq *irq_ptr); 354void qdio_setup_thinint(struct qdio_irq *irq_ptr);
364int qdio_establish_thinint(struct qdio_irq *irq_ptr); 355int qdio_establish_thinint(struct qdio_irq *irq_ptr);
365void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); 356void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
@@ -392,4 +383,6 @@ void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
392int qdio_setup_init(void); 383int qdio_setup_init(void);
393void qdio_setup_exit(void); 384void qdio_setup_exit(void);
394 385
386int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
387 unsigned char *state);
395#endif /* _CIO_QDIO_H */ 388#endif /* _CIO_QDIO_H */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e3434b34f86c..b8626d4df116 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -70,9 +70,8 @@ static int qstat_show(struct seq_file *m, void *v)
70 seq_printf(m, "slsb buffer states:\n"); 70 seq_printf(m, "slsb buffer states:\n");
71 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); 71 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
72 72
73 qdio_siga_sync_q(q);
74 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { 73 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
75 get_buf_state(q, i, &state, 0); 74 debug_get_buf_state(q, i, &state);
76 switch (state) { 75 switch (state) {
77 case SLSB_P_INPUT_NOT_INIT: 76 case SLSB_P_INPUT_NOT_INIT:
78 case SLSB_P_OUTPUT_NOT_INIT: 77 case SLSB_P_OUTPUT_NOT_INIT:
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d79cf5bf0e62..377d881385cf 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -231,8 +231,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
231 return i; 231 return i;
232} 232}
233 233
234inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, 234static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
235 unsigned char *state, int auto_ack) 235 unsigned char *state, int auto_ack)
236{ 236{
237 return get_buf_states(q, bufnr, state, 1, auto_ack); 237 return get_buf_states(q, bufnr, state, 1, auto_ack);
238} 238}
@@ -276,7 +276,7 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
276 QDIO_MAX_BUFFERS_PER_Q); 276 QDIO_MAX_BUFFERS_PER_Q);
277} 277}
278 278
279static int qdio_siga_sync(struct qdio_q *q, unsigned int output, 279static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
280 unsigned int input) 280 unsigned int input)
281{ 281{
282 int cc; 282 int cc;
@@ -293,7 +293,7 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
293 return cc; 293 return cc;
294} 294}
295 295
296inline int qdio_siga_sync_q(struct qdio_q *q) 296static inline int qdio_siga_sync_q(struct qdio_q *q)
297{ 297{
298 if (q->is_input_q) 298 if (q->is_input_q)
299 return qdio_siga_sync(q, 0, q->mask); 299 return qdio_siga_sync(q, 0, q->mask);
@@ -358,8 +358,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
358 return cc; 358 return cc;
359} 359}
360 360
361/* called from thinint inbound handler */ 361static inline void qdio_sync_after_thinint(struct qdio_q *q)
362void qdio_sync_after_thinint(struct qdio_q *q)
363{ 362{
364 if (pci_out_supported(q)) { 363 if (pci_out_supported(q)) {
365 if (need_siga_sync_thinint(q)) 364 if (need_siga_sync_thinint(q))
@@ -370,7 +369,14 @@ void qdio_sync_after_thinint(struct qdio_q *q)
370 qdio_siga_sync_q(q); 369 qdio_siga_sync_q(q);
371} 370}
372 371
373inline void qdio_stop_polling(struct qdio_q *q) 372int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
373 unsigned char *state)
374{
375 qdio_siga_sync_q(q);
376 return get_buf_states(q, bufnr, state, 1, 0);
377}
378
379static inline void qdio_stop_polling(struct qdio_q *q)
374{ 380{
375 if (!q->u.in.polling) 381 if (!q->u.in.polling)
376 return; 382 return;
@@ -516,7 +522,7 @@ out:
516 return q->first_to_check; 522 return q->first_to_check;
517} 523}
518 524
519int qdio_inbound_q_moved(struct qdio_q *q) 525static int qdio_inbound_q_moved(struct qdio_q *q)
520{ 526{
521 int bufnr; 527 int bufnr;
522 528
@@ -570,7 +576,23 @@ static int qdio_inbound_q_done(struct qdio_q *q)
570 } 576 }
571} 577}
572 578
573void qdio_kick_handler(struct qdio_q *q) 579static inline int tiqdio_inbound_q_done(struct qdio_q *q)
580{
581 unsigned char state = 0;
582
583 if (!atomic_read(&q->nr_buf_used))
584 return 1;
585
586 qdio_siga_sync_q(q);
587 get_buf_state(q, q->first_to_check, &state, 0);
588
589 if (state == SLSB_P_INPUT_PRIMED)
590 /* more work coming */
591 return 0;
592 return 1;
593}
594
595static void qdio_kick_handler(struct qdio_q *q)
574{ 596{
575 int start = q->first_to_kick; 597 int start = q->first_to_kick;
576 int end = q->first_to_check; 598 int end = q->first_to_check;
@@ -619,7 +641,6 @@ again:
619 goto again; 641 goto again;
620} 642}
621 643
622/* inbound tasklet */
623void qdio_inbound_processing(unsigned long data) 644void qdio_inbound_processing(unsigned long data)
624{ 645{
625 struct qdio_q *q = (struct qdio_q *)data; 646 struct qdio_q *q = (struct qdio_q *)data;
@@ -797,8 +818,7 @@ void qdio_outbound_timer(unsigned long data)
797 tasklet_schedule(&q->tasklet); 818 tasklet_schedule(&q->tasklet);
798} 819}
799 820
800/* called from thinint inbound tasklet */ 821static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
801void qdio_check_outbound_after_thinint(struct qdio_q *q)
802{ 822{
803 struct qdio_q *out; 823 struct qdio_q *out;
804 int i; 824 int i;
@@ -811,6 +831,46 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q)
811 tasklet_schedule(&out->tasklet); 831 tasklet_schedule(&out->tasklet);
812} 832}
813 833
834static void __tiqdio_inbound_processing(struct qdio_q *q)
835{
836 qdio_perf_stat_inc(&perf_stats.thinint_inbound);
837 qdio_sync_after_thinint(q);
838
839 /*
840 * The interrupt could be caused by a PCI request. Check the
841 * PCI capable outbound queues.
842 */
843 qdio_check_outbound_after_thinint(q);
844
845 if (!qdio_inbound_q_moved(q))
846 return;
847
848 qdio_kick_handler(q);
849
850 if (!tiqdio_inbound_q_done(q)) {
851 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
852 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
853 tasklet_schedule(&q->tasklet);
854 }
855
856 qdio_stop_polling(q);
857 /*
858 * We need to check again to not lose initiative after
859 * resetting the ACK state.
860 */
861 if (!tiqdio_inbound_q_done(q)) {
862 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
863 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
864 tasklet_schedule(&q->tasklet);
865 }
866}
867
868void tiqdio_inbound_processing(unsigned long data)
869{
870 struct qdio_q *q = (struct qdio_q *)data;
871 __tiqdio_inbound_processing(q);
872}
873
814static inline void qdio_set_state(struct qdio_irq *irq_ptr, 874static inline void qdio_set_state(struct qdio_irq *irq_ptr,
815 enum qdio_irq_states state) 875 enum qdio_irq_states state)
816{ 876{
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index c655d011a78d..e122f780f5ee 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -126,68 +126,11 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
126 } 126 }
127} 127}
128 128
129static inline int tiqdio_inbound_q_done(struct qdio_q *q)
130{
131 unsigned char state = 0;
132
133 if (!atomic_read(&q->nr_buf_used))
134 return 1;
135
136 qdio_siga_sync_q(q);
137 get_buf_state(q, q->first_to_check, &state, 0);
138
139 if (state == SLSB_P_INPUT_PRIMED)
140 /* more work coming */
141 return 0;
142 return 1;
143}
144
145static inline int shared_ind(struct qdio_irq *irq_ptr) 129static inline int shared_ind(struct qdio_irq *irq_ptr)
146{ 130{
147 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; 131 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
148} 132}
149 133
150static void __tiqdio_inbound_processing(struct qdio_q *q)
151{
152 qdio_perf_stat_inc(&perf_stats.thinint_inbound);
153 qdio_sync_after_thinint(q);
154
155 /*
156 * Maybe we have work on our outbound queues... at least
157 * we have to check the PCI capable queues.
158 */
159 qdio_check_outbound_after_thinint(q);
160
161 if (!qdio_inbound_q_moved(q))
162 return;
163
164 qdio_kick_handler(q);
165
166 if (!tiqdio_inbound_q_done(q)) {
167 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
168 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
169 tasklet_schedule(&q->tasklet);
170 }
171
172 qdio_stop_polling(q);
173 /*
174 * We need to check again to not lose initiative after
175 * resetting the ACK state.
176 */
177 if (!tiqdio_inbound_q_done(q)) {
178 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
179 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
180 tasklet_schedule(&q->tasklet);
181 }
182}
183
184void tiqdio_inbound_processing(unsigned long data)
185{
186 struct qdio_q *q = (struct qdio_q *)data;
187
188 __tiqdio_inbound_processing(q);
189}
190
191/* check for work on all inbound thinint queues */ 134/* check for work on all inbound thinint queues */
192static void tiqdio_tasklet_fn(unsigned long data) 135static void tiqdio_tasklet_fn(unsigned long data)
193{ 136{