Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/cio/Makefile          |    2
-rw-r--r--  drivers/s390/cio/qdio.c            | 3929
-rw-r--r--  drivers/s390/cio/qdio.h            |  835
-rw-r--r--  drivers/s390/cio/qdio_debug.c      |  240
-rw-r--r--  drivers/s390/cio/qdio_debug.h      |   91
-rw-r--r--  drivers/s390/cio/qdio_main.c       | 1755
-rw-r--r--  drivers/s390/cio/qdio_perf.c       |  151
-rw-r--r--  drivers/s390/cio/qdio_perf.h       |   54
-rw-r--r--  drivers/s390/cio/qdio_setup.c      |  521
-rw-r--r--  drivers/s390/cio/qdio_thinint.c    |  380
-rw-r--r--  drivers/s390/net/qeth_core.h       |   12
-rw-r--r--  drivers/s390/net/qeth_core_main.c  |   87
-rw-r--r--  drivers/s390/net/qeth_l2_main.c    |   26
-rw-r--r--  drivers/s390/net/qeth_l3_main.c    |   25
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c       |   12
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h       |    2
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h       |    5
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c      |   42
18 files changed, 3561 insertions(+), 4608 deletions(-)
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index 91e9e3f3073a..bd79bd165396 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -9,4 +9,6 @@ ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o
 obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
 obj-$(CONFIG_CCWGROUP) += ccwgroup.o
+
+qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
 obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
deleted file mode 100644
index 2bf36e14b102..000000000000
--- a/drivers/s390/cio/qdio.c
+++ /dev/null
@@ -1,3929 +0,0 @@
1/*
2 *
3 * linux/drivers/s390/cio/qdio.c
4 *
5 * Linux for S/390 QDIO base support, Hipersocket base support
6 * version 2
7 *
8 * Copyright 2000,2002 IBM Corporation
9 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
10 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
11 *
12 * Restriction: only 63 iqdio subchannels will have their own indicator;
13 * after that, subsequent subchannels share one indicator
14 *
15 *
16 *
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 */
32
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/delay.h>
36#include <linux/slab.h>
37#include <linux/kernel.h>
38#include <linux/proc_fs.h>
39#include <linux/timer.h>
40#include <linux/mempool.h>
41#include <linux/semaphore.h>
42
43#include <asm/ccwdev.h>
44#include <asm/io.h>
45#include <asm/atomic.h>
46#include <asm/timex.h>
47
48#include <asm/debug.h>
49#include <asm/s390_rdev.h>
50#include <asm/qdio.h>
51#include <asm/airq.h>
52
53#include "cio.h"
54#include "css.h"
55#include "device.h"
56#include "qdio.h"
57#include "ioasm.h"
58#include "chsc.h"
59
60/****************** MODULE PARAMETER VARIABLES ********************/
61MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
62MODULE_DESCRIPTION("QDIO base support version 2, " \
63 "Copyright 2000 IBM Corporation");
64MODULE_LICENSE("GPL");
65
66/******************** HERE WE GO ***********************************/
67
68static const char version[] = "QDIO base support version 2";
69
70static int qdio_performance_stats = 0;
71static int proc_perf_file_registration;
72static struct qdio_perf_stats perf_stats;
73
74static int hydra_thinints;
75static int is_passthrough = 0;
76static int omit_svs;
77
78static int indicator_used[INDICATORS_PER_CACHELINE];
79static __u32 * volatile indicators;
80static __u32 volatile spare_indicator;
81static atomic_t spare_indicator_usecount;
82#define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
83static mempool_t *qdio_mempool_scssc;
84static struct kmem_cache *qdio_q_cache;
85
86static debug_info_t *qdio_dbf_setup;
87static debug_info_t *qdio_dbf_sbal;
88static debug_info_t *qdio_dbf_trace;
89static debug_info_t *qdio_dbf_sense;
90#ifdef CONFIG_QDIO_DEBUG
91static debug_info_t *qdio_dbf_slsb_out;
92static debug_info_t *qdio_dbf_slsb_in;
93#endif /* CONFIG_QDIO_DEBUG */
94
95/* iQDIO stuff: */
96static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
97 during a while loop */
98static DEFINE_SPINLOCK(ttiq_list_lock);
99static void *tiqdio_ind;
100static void tiqdio_tl(unsigned long);
101static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
102
103/* not a macro, as one of the arguments is atomic_read */
104static inline int
105qdio_min(int a,int b)
106{
107 if (a<b)
108 return a;
109 else
110 return b;
111}
112
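The "not a macro" remark above is about double evaluation: a macro version of min would expand each argument twice, and atomic_read must be sampled exactly once. A self-contained sketch of the hazard (MIN is a hypothetical macro, not anything defined in this driver):

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		int n = 0;
		/* n++ is evaluated twice: m becomes 1 and n ends at 2 */
		int m = MIN(n++, 10);
		return m + n;	/* 3; a single-evaluation min would give 1 */
	}
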
113/***************** SCRUBBER HELPER ROUTINES **********************/
114#ifdef CONFIG_64BIT
115static inline void qdio_perf_stat_inc(atomic64_t *count)
116{
117 if (qdio_performance_stats)
118 atomic64_inc(count);
119}
120
121static inline void qdio_perf_stat_dec(atomic64_t *count)
122{
123 if (qdio_performance_stats)
124 atomic64_dec(count);
125}
126#else /* CONFIG_64BIT */
127static inline void qdio_perf_stat_inc(atomic_t *count)
128{
129 if (qdio_performance_stats)
130 atomic_inc(count);
131}
132
133static inline void qdio_perf_stat_dec(atomic_t *count)
134{
135 if (qdio_performance_stats)
136 atomic_dec(count);
137}
138#endif /* CONFIG_64BIT */
139
140static inline __u64
141qdio_get_micros(void)
142{
143 return (get_clock() >> 12); /* time>>12 is microseconds */
144}
145
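The shift by 12 relies on the S/390 TOD-clock format, where bit 51 of the 64-bit clock value ticks once per microsecond, i.e. one microsecond equals 1 << 12 TOD units. A minimal sketch of the conversion in both directions, with uint64_t standing in for the kernel's __u64:

	#include <stdint.h>

	/* 1 microsecond == 1 << 12 TOD-clock units */
	static inline uint64_t tod_to_micros(uint64_t tod) { return tod >> 12; }
	static inline uint64_t micros_to_tod(uint64_t us) { return us << 12; }
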
146/*
147 * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
148 * the q in any case, so that we'll not be interrupted when we are in
149 * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost
150 * always works (famous last words)
151 */
152static inline int
153qdio_reserve_q(struct qdio_q *q)
154{
155 return atomic_add_return(1,&q->use_count) - 1;
156}
157
158static inline void
159qdio_release_q(struct qdio_q *q)
160{
161 atomic_dec(&q->use_count);
162}
163
164/* check ccq */
165static int
166qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
167{
168 char dbf_text[15];
169
170 if (ccq == 0 || ccq == 32)
171 return 0;
172 if (ccq == 96 || ccq == 97)
173 return 1;
174 /*notify devices immediately*/
175 sprintf(dbf_text,"%d", ccq);
176 QDIO_DBF_TEXT2(1,trace,dbf_text);
177 return -EIO;
178}
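The magic numbers tested here are condition-code qualifiers returned by the QEBSM instructions (EQBS/SQBS below). A hedged reading as an enum — the labels are illustrative, only the grouping into success/retry/error is taken from the code itself:

	enum qebsm_ccq {
		CCQ_OK		= 0,	/* treated as success */
		CCQ_OK_GROUP	= 32,	/* treated as success */
		CCQ_RETRY_96	= 96,	/* retried; partial progress counts
					 * as done in qdio_do_eqbs below */
		CCQ_RETRY_97	= 97,	/* retried */
	};				/* any other value -> -EIO */
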
179/* EQBS: extract buffer states */
180static int
181qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
182 unsigned int *start, unsigned int *cnt)
183{
184 struct qdio_irq *irq;
185 unsigned int tmp_cnt, q_no, ccq;
186 int rc;
187 char dbf_text[15];
188
189 ccq = 0;
190 tmp_cnt = *cnt;
191 irq = (struct qdio_irq*)q->irq_ptr;
192 q_no = q->q_no;
193 if(!q->is_input_q)
194 q_no += irq->no_input_qs;
195again:
196 ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
197 rc = qdio_check_ccq(q, ccq);
198 if ((ccq == 96) && (tmp_cnt != *cnt))
199 rc = 0;
200 if (rc == 1) {
201 QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
202 goto again;
203 }
204 if (rc < 0) {
205 QDIO_DBF_TEXT2(1,trace,"eqberr");
206 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
207 QDIO_DBF_TEXT2(1,trace,dbf_text);
208 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
209 QDIO_STATUS_LOOK_FOR_ERROR,
210 0, 0, 0, -1, -1, q->int_parm);
211 return 0;
212 }
213 return (tmp_cnt - *cnt);
214}
215
216/* SQBS: set buffer states */
217static int
218qdio_do_sqbs(struct qdio_q *q, unsigned char state,
219 unsigned int *start, unsigned int *cnt)
220{
221 struct qdio_irq *irq;
222 unsigned int tmp_cnt, q_no, ccq;
223 int rc;
224 char dbf_text[15];
225
226 ccq = 0;
227 tmp_cnt = *cnt;
228 irq = (struct qdio_irq*)q->irq_ptr;
229 q_no = q->q_no;
230 if(!q->is_input_q)
231 q_no += irq->no_input_qs;
232again:
233 ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
234 rc = qdio_check_ccq(q, ccq);
235 if (rc == 1) {
236 QDIO_DBF_TEXT5(1,trace,"sqAGAIN");
237 goto again;
238 }
239 if (rc < 0) {
240 QDIO_DBF_TEXT3(1,trace,"sqberr");
241 sprintf(dbf_text,"%2x,%2x",tmp_cnt,*cnt);
242 QDIO_DBF_TEXT3(1,trace,dbf_text);
243 sprintf(dbf_text,"%d,%d",ccq,q_no);
244 QDIO_DBF_TEXT3(1,trace,dbf_text);
245 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
246 QDIO_STATUS_LOOK_FOR_ERROR,
247 0, 0, 0, -1, -1, q->int_parm);
248 return 0;
249 }
250 return (tmp_cnt - *cnt);
251}
252
253static inline int
254qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
255 unsigned char state, unsigned int *count)
256{
257 volatile char *slsb;
258 struct qdio_irq *irq;
259
260 irq = (struct qdio_irq*)q->irq_ptr;
261 if (!irq->is_qebsm) {
262 slsb = (char *)&q->slsb.acc.val[(*bufno)];
263 xchg(slsb, state);
264 return 1;
265 }
266 return qdio_do_sqbs(q, state, bufno, count);
267}
268
269#ifdef CONFIG_QDIO_DEBUG
270static inline void
271qdio_trace_slsb(struct qdio_q *q)
272{
273 if (q->queue_type==QDIO_TRACE_QTYPE) {
274 if (q->is_input_q)
275 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
276 QDIO_MAX_BUFFERS_PER_Q);
277 else
278 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
279 QDIO_MAX_BUFFERS_PER_Q);
280 }
281}
282#endif
283
284static inline int
285set_slsb(struct qdio_q *q, unsigned int *bufno,
286 unsigned char state, unsigned int *count)
287{
288 int rc;
289#ifdef CONFIG_QDIO_DEBUG
290 qdio_trace_slsb(q);
291#endif
292 rc = qdio_set_slsb(q, bufno, state, count);
293#ifdef CONFIG_QDIO_DEBUG
294 qdio_trace_slsb(q);
295#endif
296 return rc;
297}
298static inline int
299qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
300 unsigned int gpr3)
301{
302 int cc;
303
304 QDIO_DBF_TEXT4(0,trace,"sigasync");
305 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
306
307 qdio_perf_stat_inc(&perf_stats.siga_syncs);
308
309 cc = do_siga_sync(q->schid, gpr2, gpr3);
310 if (cc)
311 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
312
313 return cc;
314}
315
316static inline int
317qdio_siga_sync_q(struct qdio_q *q)
318{
319 if (q->is_input_q)
320 return qdio_siga_sync(q, 0, q->mask);
321 return qdio_siga_sync(q, q->mask, 0);
322}
323
324static int
325__do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
326{
327 struct qdio_irq *irq;
328 unsigned int fc = 0;
329 unsigned long schid;
330
331 irq = (struct qdio_irq *) q->irq_ptr;
332 if (!irq->is_qebsm)
333 schid = *((u32 *)&q->schid);
334 else {
335 schid = irq->sch_token;
336 fc |= 0x80;
337 }
338 return do_siga_output(schid, q->mask, busy_bit, fc);
339}
340
341/*
342 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
343 * an access exception
344 */
345static int
346qdio_siga_output(struct qdio_q *q)
347{
348 int cc;
349 __u32 busy_bit;
350 __u64 start_time=0;
351
352 qdio_perf_stat_inc(&perf_stats.siga_outs);
353
354 QDIO_DBF_TEXT4(0,trace,"sigaout");
355 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
356
357 for (;;) {
358 cc = __do_siga_output(q, &busy_bit);
359//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
360 if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
361 if (!start_time)
362 start_time=NOW;
363 if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
364 break;
365 } else
366 break;
367 }
368
369 if ((cc==2) && (busy_bit))
370 cc |= QDIO_SIGA_ERROR_B_BIT_SET;
371
372 if (cc)
373 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
374
375 return cc;
376}
377
378static int
379qdio_siga_input(struct qdio_q *q)
380{
381 int cc;
382
383 QDIO_DBF_TEXT4(0,trace,"sigain");
384 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
385
386 qdio_perf_stat_inc(&perf_stats.siga_ins);
387
388 cc = do_siga_input(q->schid, q->mask);
389
390 if (cc)
391 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
392
393 return cc;
394}
395
396/* locked by the locks in qdio_activate and qdio_cleanup */
397static __u32 *
398qdio_get_indicator(void)
399{
400 int i;
401
402 for (i = 0; i < INDICATORS_PER_CACHELINE; i++)
403 if (!indicator_used[i]) {
404 indicator_used[i]=1;
405 return indicators+i;
406 }
407 atomic_inc(&spare_indicator_usecount);
408 return (__u32 * volatile) &spare_indicator;
409}
410
411/* locked by the locks in qdio_activate and qdio_cleanup */
412static void
413qdio_put_indicator(__u32 *addr)
414{
415 int i;
416
417 if ( (addr) && (addr!=&spare_indicator) ) {
418 i=addr-indicators;
419 indicator_used[i]=0;
420 }
421 if (addr == &spare_indicator)
422 atomic_dec(&spare_indicator_usecount);
423}
424
425static inline void
426tiqdio_clear_summary_bit(__u32 *location)
427{
428 QDIO_DBF_TEXT5(0,trace,"clrsummb");
429 QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
430
431 xchg(location,0);
432}
433
434static inline void
435tiqdio_set_summary_bit(__u32 *location)
436{
437 QDIO_DBF_TEXT5(0,trace,"setsummb");
438 QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
439
440 xchg(location,-1);
441}
442
443static inline void
444tiqdio_sched_tl(void)
445{
446 tasklet_hi_schedule(&tiqdio_tasklet);
447}
448
449static void
450qdio_mark_tiq(struct qdio_q *q)
451{
452 unsigned long flags;
453
454 QDIO_DBF_TEXT4(0,trace,"mark iq");
455 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
456
457 spin_lock_irqsave(&ttiq_list_lock,flags);
458 if (unlikely(atomic_read(&q->is_in_shutdown)))
459 goto out_unlock;
460
461 if (!q->is_input_q)
462 goto out_unlock;
463
464 if ((q->list_prev) || (q->list_next))
465 goto out_unlock;
466
467 if (!tiq_list) {
468 tiq_list=q;
469 q->list_prev=q;
470 q->list_next=q;
471 } else {
472 q->list_next=tiq_list;
473 q->list_prev=tiq_list->list_prev;
474 tiq_list->list_prev->list_next=q;
475 tiq_list->list_prev=q;
476 }
477 spin_unlock_irqrestore(&ttiq_list_lock,flags);
478
479 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
480 tiqdio_sched_tl();
481 return;
482out_unlock:
483 spin_unlock_irqrestore(&ttiq_list_lock,flags);
484 return;
485}
486
487static inline void
488qdio_mark_q(struct qdio_q *q)
489{
490 QDIO_DBF_TEXT4(0,trace,"mark q");
491 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
492
493 if (unlikely(atomic_read(&q->is_in_shutdown)))
494 return;
495
496 tasklet_schedule(&q->tasklet);
497}
498
499static int
500qdio_stop_polling(struct qdio_q *q)
501{
502#ifdef QDIO_USE_PROCESSING_STATE
503 unsigned int tmp, gsf, count = 1;
504 unsigned char state = 0;
505 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
506
507 if (!atomic_xchg(&q->polling,0))
508 return 1;
509
510 QDIO_DBF_TEXT4(0,trace,"stoppoll");
511 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
512
513 /* show the card that we are not polling anymore */
514 if (!q->is_input_q)
515 return 1;
516
517 tmp = gsf = GET_SAVED_FRONTIER(q);
518 tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
519 set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
520
521 /*
522 * we don't issue this SYNC_MEMORY, as we trust Rick T and
523 * moreover will not use the PROCESSING state under VM, so
524 * q->polling was 0 anyway
525 */
526 /*SYNC_MEMORY;*/
527 if (irq->is_qebsm) {
528 count = 1;
529 qdio_do_eqbs(q, &state, &gsf, &count);
530 } else
531 state = q->slsb.acc.val[gsf];
532 if (state != SLSB_P_INPUT_PRIMED)
533 return 1;
534 /*
535 * set our summary bit again, as otherwise there is a
536 * small window we can miss between resetting it and
537 * checking for PRIMED state
538 */
539 if (q->is_thinint_q)
540 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
541 return 0;
542
543#else /* QDIO_USE_PROCESSING_STATE */
544 return 1;
545#endif /* QDIO_USE_PROCESSING_STATE */
546}
547
548/*
549 * see the comment in do_QDIO and before qdio_reserve_q about the
550 * sophisticated locking outside of unmark_q, so that we don't need to
551 * disable the interrupts :-)
552*/
553static void
554qdio_unmark_q(struct qdio_q *q)
555{
556 unsigned long flags;
557
558 QDIO_DBF_TEXT4(0,trace,"unmark q");
559 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
560
561 if ((!q->list_prev)||(!q->list_next))
562 return;
563
564 if ((q->is_thinint_q)&&(q->is_input_q)) {
565 /* iQDIO */
566 spin_lock_irqsave(&ttiq_list_lock,flags);
567 /* in case cleanup has done this already and simultaneously
568 * qdio_unmark_q is called from the interrupt handler, we've
569 * got to check this in this specific case again */
570 if ((!q->list_prev)||(!q->list_next))
571 goto out;
572 if (q->list_next==q) {
573 /* q was the only interesting q */
574 tiq_list=NULL;
575 q->list_next=NULL;
576 q->list_prev=NULL;
577 } else {
578 q->list_next->list_prev=q->list_prev;
579 q->list_prev->list_next=q->list_next;
580 tiq_list=q->list_next;
581 q->list_next=NULL;
582 q->list_prev=NULL;
583 }
584out:
585 spin_unlock_irqrestore(&ttiq_list_lock,flags);
586 }
587}
588
589static inline unsigned long
590tiqdio_clear_global_summary(void)
591{
592 unsigned long time;
593
594 QDIO_DBF_TEXT5(0,trace,"clrglobl");
595
596 time = do_clear_global_summary();
597
598 QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));
599
600 return time;
601}
602
603
604/************************* OUTBOUND ROUTINES *******************************/
605static int
606qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
607{
608 struct qdio_irq *irq;
609 unsigned char state;
610 unsigned int cnt, count, ftc;
611
612 irq = (struct qdio_irq *) q->irq_ptr;
613 if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
614 SYNC_MEMORY;
615
616 ftc = q->first_to_check;
617 count = qdio_min(atomic_read(&q->number_of_buffers_used),
618 (QDIO_MAX_BUFFERS_PER_Q-1));
619 if (count == 0)
620 return q->first_to_check;
621 cnt = qdio_do_eqbs(q, &state, &ftc, &count);
622 if (cnt == 0)
623 return q->first_to_check;
624 switch (state) {
625 case SLSB_P_OUTPUT_ERROR:
626 QDIO_DBF_TEXT3(0,trace,"outperr");
627 atomic_sub(cnt , &q->number_of_buffers_used);
628 if (q->qdio_error)
629 q->error_status_flags |=
630 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
631 q->qdio_error = SLSB_P_OUTPUT_ERROR;
632 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
633 q->first_to_check = ftc;
634 break;
635 case SLSB_P_OUTPUT_EMPTY:
636 QDIO_DBF_TEXT5(0,trace,"outpempt");
637 atomic_sub(cnt, &q->number_of_buffers_used);
638 q->first_to_check = ftc;
639 break;
640 case SLSB_CU_OUTPUT_PRIMED:
641 /* all buffers primed */
642 QDIO_DBF_TEXT5(0,trace,"outpprim");
643 break;
644 default:
645 break;
646 }
647 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
648 return q->first_to_check;
649}
650
651static int
652qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
653{
654 struct qdio_irq *irq;
655 unsigned char state;
656 int tmp, ftc, count, cnt;
657 char dbf_text[15];
658
659
660 irq = (struct qdio_irq *) q->irq_ptr;
661 ftc = q->first_to_check;
662 count = qdio_min(atomic_read(&q->number_of_buffers_used),
663 (QDIO_MAX_BUFFERS_PER_Q-1));
664 if (count == 0)
665 return q->first_to_check;
666 cnt = qdio_do_eqbs(q, &state, &ftc, &count);
667 if (cnt == 0)
668 return q->first_to_check;
669 switch (state) {
670 case SLSB_P_INPUT_ERROR :
671#ifdef CONFIG_QDIO_DEBUG
672 QDIO_DBF_TEXT3(1,trace,"inperr");
673 sprintf(dbf_text,"%2x,%2x",ftc,count);
674 QDIO_DBF_TEXT3(1,trace,dbf_text);
675#endif /* CONFIG_QDIO_DEBUG */
676 if (q->qdio_error)
677 q->error_status_flags |=
678 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
679 q->qdio_error = SLSB_P_INPUT_ERROR;
680 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
681 atomic_sub(cnt, &q->number_of_buffers_used);
682 q->first_to_check = ftc;
683 break;
684 case SLSB_P_INPUT_PRIMED :
685 QDIO_DBF_TEXT3(0,trace,"inptprim");
686 sprintf(dbf_text,"%2x,%2x",ftc,count);
687 QDIO_DBF_TEXT3(1,trace,dbf_text);
688 tmp = 0;
689 ftc = q->first_to_check;
690#ifdef QDIO_USE_PROCESSING_STATE
691 if (cnt > 1) {
692 cnt -= 1;
693 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
694 if (!tmp)
695 break;
696 }
697 cnt = 1;
698 tmp += set_slsb(q, &ftc,
699 SLSB_P_INPUT_PROCESSING, &cnt);
700 atomic_set(&q->polling, 1);
701#else
702 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
703#endif
704 atomic_sub(tmp, &q->number_of_buffers_used);
705 q->first_to_check = ftc;
706 break;
707 case SLSB_CU_INPUT_EMPTY:
708 case SLSB_P_INPUT_NOT_INIT:
709 case SLSB_P_INPUT_PROCESSING:
710 QDIO_DBF_TEXT5(0,trace,"inpnipro");
711 break;
712 default:
713 break;
714 }
715 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
716 return q->first_to_check;
717}
718
719static int
720qdio_get_outbound_buffer_frontier(struct qdio_q *q)
721{
722 struct qdio_irq *irq;
723 volatile char *slsb;
724 unsigned int count = 1;
725 int first_not_to_check, f, f_mod_no;
726 char dbf_text[15];
727
728 QDIO_DBF_TEXT4(0,trace,"getobfro");
729 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
730
731 irq = (struct qdio_irq *) q->irq_ptr;
732 if (irq->is_qebsm)
733 return qdio_qebsm_get_outbound_buffer_frontier(q);
734
735 slsb=&q->slsb.acc.val[0];
736 f_mod_no=f=q->first_to_check;
737 /*
738 * f points to already processed elements, so f+no_used is correct...
739 * ... but: we don't check 128 buffers, as otherwise
740 * qdio_has_outbound_q_moved would return 0
741 */
742 first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
743 (QDIO_MAX_BUFFERS_PER_Q-1));
744
745 if (((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) ||
746 (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH))
747 SYNC_MEMORY;
748
749check_next:
750 if (f==first_not_to_check)
751 goto out;
752
753 switch(slsb[f_mod_no]) {
754
755 /* the adapter has not fetched the output yet */
756 case SLSB_CU_OUTPUT_PRIMED:
757 QDIO_DBF_TEXT5(0,trace,"outpprim");
758 break;
759
760 /* the adapter got it */
761 case SLSB_P_OUTPUT_EMPTY:
762 atomic_dec(&q->number_of_buffers_used);
763 f++;
764 f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
765 QDIO_DBF_TEXT5(0,trace,"outpempt");
766 goto check_next;
767
768 case SLSB_P_OUTPUT_ERROR:
769 QDIO_DBF_TEXT3(0,trace,"outperr");
770 sprintf(dbf_text,"%x-%x-%x",f_mod_no,
771 q->sbal[f_mod_no]->element[14].sbalf.value,
772 q->sbal[f_mod_no]->element[15].sbalf.value);
773 QDIO_DBF_TEXT3(1,trace,dbf_text);
774 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
775
776 /* kind of process the buffer */
777 set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
778
779 /*
780 * we increment the frontier, as this buffer
781 * was processed obviously
782 */
783 atomic_dec(&q->number_of_buffers_used);
784 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
785
786 if (q->qdio_error)
787 q->error_status_flags|=
788 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
789 q->qdio_error=SLSB_P_OUTPUT_ERROR;
790 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
791
792 break;
793
794 /* no new buffers */
795 default:
796 QDIO_DBF_TEXT5(0,trace,"outpni");
797 }
798out:
799 return (q->first_to_check=f_mod_no);
800}
801
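Taken together, the two outbound frontier scanners encode the outbound buffer lifecycle as this driver sees it; roughly, and only for the states acted on here (priming happens when the upper layer submits buffers via do_QDIO, outside this excerpt):

	/*
	 * SLSB_P_OUTPUT_NOT_INIT --do_QDIO--> SLSB_CU_OUTPUT_PRIMED
	 * SLSB_CU_OUTPUT_PRIMED --adapter--> SLSB_P_OUTPUT_EMPTY (sent)
	 * SLSB_CU_OUTPUT_PRIMED --adapter--> SLSB_P_OUTPUT_ERROR (failed;
	 *			the scanner resets the buffer to NOT_INIT)
	 */
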
802/* all buffers are processed */
803static int
804qdio_is_outbound_q_done(struct qdio_q *q)
805{
806 int no_used;
807#ifdef CONFIG_QDIO_DEBUG
808 char dbf_text[15];
809#endif
810
811 no_used=atomic_read(&q->number_of_buffers_used);
812
813#ifdef CONFIG_QDIO_DEBUG
814 if (no_used) {
815 sprintf(dbf_text,"oqisnt%02x",no_used);
816 QDIO_DBF_TEXT4(0,trace,dbf_text);
817 } else {
818 QDIO_DBF_TEXT4(0,trace,"oqisdone");
819 }
820 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
821#endif /* CONFIG_QDIO_DEBUG */
822 return (no_used==0);
823}
824
825static int
826qdio_has_outbound_q_moved(struct qdio_q *q)
827{
828 int i;
829
830 i=qdio_get_outbound_buffer_frontier(q);
831
832 if ( (i!=GET_SAVED_FRONTIER(q)) ||
833 (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
834 SAVE_FRONTIER(q,i);
835 QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
836 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
837 return 1;
838 } else {
839 QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
840 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
841 return 0;
842 }
843}
844
845static void
846qdio_kick_outbound_q(struct qdio_q *q)
847{
848 int result;
849#ifdef CONFIG_QDIO_DEBUG
850 char dbf_text[15];
851
852 QDIO_DBF_TEXT4(0,trace,"kickoutq");
853 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
854#endif /* CONFIG_QDIO_DEBUG */
855
856 if (!q->siga_out)
857 return;
858
859 /* here's the story with cc=2 and busy bit set (thanks, Rick):
860 * VM's CP could present us cc=2 and busy bit set on SIGA-write
861 * during reconfiguration of their Guest LAN (only in HIPERS mode,
862 * QDIO mode is asynchronous -- cc=2 and busy bit there will take
863 * the queues down immediately; and not being under VM we have a
864 * problem on cc=2 and busy bit set right away).
865 *
866 * Therefore qdio_siga_output will retry constantly for a short time
867 * if such a condition occurs. If it doesn't change, it will
868 * increase the busy_siga_counter and save the timestamp, and
869 * schedule the queue for later processing (via mark_q, using the
870 * queue tasklet). __qdio_outbound_processing will check out the
871 * counter. If non-zero, it will call qdio_kick_outbound_q as often
872 * as the value of the counter. This will attempt further SIGA
873 * instructions. For each successful SIGA, the counter is
874 * decreased; for failing SIGAs the counter remains
875 * the same.
876 * After some time of no movement, qdio_kick_outbound_q will
877 * finally fail and reflect the corresponding error codes to
878 * the upper layer module, so that it takes the queues down.
879 *
880 * Note that this is a change from the original HiperSockets design
881 * (saying cc=2 and busy bit means take the queues down), but in
882 * those days Guest LAN didn't exist... excessive cc=2 with busy bit
883 * conditions will still take the queues down, but the threshold is
884 * higher due to the Guest LAN environment.
885 */
886
887
888 result=qdio_siga_output(q);
889
890 switch (result) {
891 case 0:
892 /* went smooth this time, reset timestamp */
893#ifdef CONFIG_QDIO_DEBUG
894 QDIO_DBF_TEXT3(0,trace,"cc2reslv");
895 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
896 atomic_read(&q->busy_siga_counter));
897 QDIO_DBF_TEXT3(0,trace,dbf_text);
898#endif /* CONFIG_QDIO_DEBUG */
899 q->timing.busy_start=0;
900 break;
901 case (2|QDIO_SIGA_ERROR_B_BIT_SET):
902 /* cc=2 and busy bit: */
903 atomic_inc(&q->busy_siga_counter);
904
905 /* if the last siga was successful, save
906 * timestamp here */
907 if (!q->timing.busy_start)
908 q->timing.busy_start=NOW;
909
910 /* if we're in time, don't touch error_status_flags
911 * and siga_error */
912 if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
913 qdio_mark_q(q);
914 break;
915 }
916 QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
917#ifdef CONFIG_QDIO_DEBUG
918 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
919 atomic_read(&q->busy_siga_counter));
920 QDIO_DBF_TEXT3(0,trace,dbf_text);
921#endif /* CONFIG_QDIO_DEBUG */
922 /* else fallthrough and report error */
923 default:
924 /* for plain cc=1, 2 or 3: */
925 if (q->siga_error)
926 q->error_status_flags|=
927 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
928 q->error_status_flags|=
929 QDIO_STATUS_LOOK_FOR_ERROR;
930 q->siga_error=result;
931 }
932}
933
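Stripped of the driver specifics, the cc=2/busy-bit policy described in the long comment above is a persistence threshold on a saved timestamp. A self-contained userspace sketch — the names and the threshold value are illustrative stand-ins, not the driver's own:

	#include <stdint.h>
	#include <time.h>

	#define GIVE_UP_US 100000ULL	/* stand-in for QDIO_BUSY_BIT_GIVE_UP */

	static uint64_t now_us(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
	}

	/* 0: success, 1: busy but still in time (reschedule), -1: give up */
	static int kick_once(int (*siga_busy)(void), uint64_t *busy_start)
	{
		if (!siga_busy()) {
			*busy_start = 0;	/* went smooth, reset timestamp */
			return 0;
		}
		if (!*busy_start)		/* first busy hit: save timestamp */
			*busy_start = now_us();
		if (now_us() - *busy_start < GIVE_UP_US)
			return 1;		/* in time: retry later */
		return -1;			/* persisted too long: report error */
	}
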
934static void
935qdio_kick_outbound_handler(struct qdio_q *q)
936{
937 int start, end, real_end, count;
938#ifdef CONFIG_QDIO_DEBUG
939 char dbf_text[15];
940#endif
941
942 start = q->first_element_to_kick;
943 /* last_move_ftc was just updated */
944 real_end = GET_SAVED_FRONTIER(q);
945 end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
946 (QDIO_MAX_BUFFERS_PER_Q-1);
947 count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
948 (QDIO_MAX_BUFFERS_PER_Q-1);
949
950#ifdef CONFIG_QDIO_DEBUG
951 QDIO_DBF_TEXT4(0,trace,"kickouth");
952 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
953
954 sprintf(dbf_text,"s=%2xc=%2x",start,count);
955 QDIO_DBF_TEXT4(0,trace,dbf_text);
956#endif /* CONFIG_QDIO_DEBUG */
957
958 if (q->state==QDIO_IRQ_STATE_ACTIVE)
959 q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
960 q->error_status_flags,
961 q->qdio_error,q->siga_error,q->q_no,start,count,
962 q->int_parm);
963
964 /* for the next time: */
965 q->first_element_to_kick=real_end;
966 q->qdio_error=0;
967 q->siga_error=0;
968 q->error_status_flags=0;
969}
970
971static void
972__qdio_outbound_processing(struct qdio_q *q)
973{
974 int siga_attempts;
975
976 QDIO_DBF_TEXT4(0,trace,"qoutproc");
977 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
978
979 if (unlikely(qdio_reserve_q(q))) {
980 qdio_release_q(q);
981 qdio_perf_stat_inc(&perf_stats.outbound_tl_runs_resched);
982 /* as we're sissies, we'll check next time */
983 if (likely(!atomic_read(&q->is_in_shutdown))) {
984 qdio_mark_q(q);
985 QDIO_DBF_TEXT4(0,trace,"busy,agn");
986 }
987 return;
988 }
989 qdio_perf_stat_inc(&perf_stats.outbound_tl_runs);
990 qdio_perf_stat_inc(&perf_stats.tl_runs);
991
992 /* see comment in qdio_kick_outbound_q */
993 siga_attempts=atomic_read(&q->busy_siga_counter);
994 while (siga_attempts) {
995 atomic_dec(&q->busy_siga_counter);
996 qdio_kick_outbound_q(q);
997 siga_attempts--;
998 }
999
1000 if (qdio_has_outbound_q_moved(q))
1001 qdio_kick_outbound_handler(q);
1002
1003 if (q->queue_type == QDIO_ZFCP_QFMT) {
1004 if ((!q->hydra_gives_outbound_pcis) &&
1005 (!qdio_is_outbound_q_done(q)))
1006 qdio_mark_q(q);
1007 }
1008 else if (((!q->is_iqdio_q) && (!q->is_pci_out)) ||
1009 (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) {
1010 /*
1011 * make sure buffer switch from PRIMED to EMPTY is noticed
1012 * and outbound_handler is called
1013 */
1014 if (qdio_is_outbound_q_done(q)) {
1015 del_timer(&q->timer);
1016 } else {
1017 if (!timer_pending(&q->timer))
1018 mod_timer(&q->timer, jiffies +
1019 QDIO_FORCE_CHECK_TIMEOUT);
1020 }
1021 }
1022
1023 qdio_release_q(q);
1024}
1025
1026static void
1027qdio_outbound_processing(unsigned long q)
1028{
1029 __qdio_outbound_processing((struct qdio_q *) q);
1030}
1031
1032/************************* INBOUND ROUTINES *******************************/
1033
1034
1035static int
1036qdio_get_inbound_buffer_frontier(struct qdio_q *q)
1037{
1038 struct qdio_irq *irq;
1039 int f,f_mod_no;
1040 volatile char *slsb;
1041 unsigned int count = 1;
1042 int first_not_to_check;
1043#ifdef CONFIG_QDIO_DEBUG
1044 char dbf_text[15];
1045#endif /* CONFIG_QDIO_DEBUG */
1046#ifdef QDIO_USE_PROCESSING_STATE
1047 int last_position=-1;
1048#endif /* QDIO_USE_PROCESSING_STATE */
1049
1050 QDIO_DBF_TEXT4(0,trace,"getibfro");
1051 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1052
1053 irq = (struct qdio_irq *) q->irq_ptr;
1054 if (irq->is_qebsm)
1055 return qdio_qebsm_get_inbound_buffer_frontier(q);
1056
1057 slsb=&q->slsb.acc.val[0];
1058 f_mod_no=f=q->first_to_check;
1059 /*
1060 * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
1061 * would return 0
1062 */
1063 first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
1064 (QDIO_MAX_BUFFERS_PER_Q-1));
1065
1066 /*
1067 * we don't use this one, as either a PCI or we ourselves after a
1068 * thin interrupt will sync the queues
1069 */
1070 /* SYNC_MEMORY;*/
1071
1072check_next:
1073 f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
1074 if (f==first_not_to_check)
1075 goto out;
1076 switch (slsb[f_mod_no]) {
1077
1078 /* CU_EMPTY means frontier is reached */
1079 case SLSB_CU_INPUT_EMPTY:
1080 QDIO_DBF_TEXT5(0,trace,"inptempt");
1081 break;
1082
1083 /* P_PRIMED means set slsb to P_PROCESSING and move on */
1084 case SLSB_P_INPUT_PRIMED:
1085 QDIO_DBF_TEXT5(0,trace,"inptprim");
1086
1087#ifdef QDIO_USE_PROCESSING_STATE
1088 /*
1089 * when running under VM, polling the input queues would
1090 * kill VM in terms of CP overhead
1091 */
1092 if (q->siga_sync) {
1093 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1094 } else {
1095 /* set the previous buffer to NOT_INIT. The current
1096 * buffer will be set to PROCESSING at the end of
1097 * this function to avoid further interrupts. */
1098 if (last_position>=0)
1099 set_slsb(q, &last_position,
1100 SLSB_P_INPUT_NOT_INIT, &count);
1101 atomic_set(&q->polling,1);
1102 last_position=f_mod_no;
1103 }
1104#else /* QDIO_USE_PROCESSING_STATE */
1105 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1106#endif /* QDIO_USE_PROCESSING_STATE */
1107 /*
1108 * not needed, as the inbound queue will be synced on the next
1109 * siga-r; alternatively, tiqdio_is_inbound_q_done will do the siga-s
1110 */
1111 /*SYNC_MEMORY;*/
1112 f++;
1113 atomic_dec(&q->number_of_buffers_used);
1114 goto check_next;
1115
1116 case SLSB_P_INPUT_NOT_INIT:
1117 case SLSB_P_INPUT_PROCESSING:
1118 QDIO_DBF_TEXT5(0,trace,"inpnipro");
1119 break;
1120
1121 /* P_ERROR means frontier is reached, break and report error */
1122 case SLSB_P_INPUT_ERROR:
1123#ifdef CONFIG_QDIO_DEBUG
1124 sprintf(dbf_text,"inperr%2x",f_mod_no);
1125 QDIO_DBF_TEXT3(1,trace,dbf_text);
1126#endif /* CONFIG_QDIO_DEBUG */
1127 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
1128
1129 /* kind of process the buffer */
1130 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1131
1132 if (q->qdio_error)
1133 q->error_status_flags|=
1134 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
1135 q->qdio_error=SLSB_P_INPUT_ERROR;
1136 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
1137
1138 /* we increment the frontier, as this buffer
1139 * has obviously been processed */
1140 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1141 atomic_dec(&q->number_of_buffers_used);
1142
1143#ifdef QDIO_USE_PROCESSING_STATE
1144 last_position=-1;
1145#endif /* QDIO_USE_PROCESSING_STATE */
1146
1147 break;
1148
1149 /* everything else means frontier not changed (HALTED or so) */
1150 default:
1151 break;
1152 }
1153out:
1154 q->first_to_check=f_mod_no;
1155
1156#ifdef QDIO_USE_PROCESSING_STATE
1157 if (last_position>=0)
1158 set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
1159#endif /* QDIO_USE_PROCESSING_STATE */
1160
1161 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
1162
1163 return q->first_to_check;
1164}
1165
1166static int
1167qdio_has_inbound_q_moved(struct qdio_q *q)
1168{
1169 int i;
1170
1171 i=qdio_get_inbound_buffer_frontier(q);
1172 if ( (i!=GET_SAVED_FRONTIER(q)) ||
1173 (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
1174 SAVE_FRONTIER(q,i);
1175 if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
1176 SAVE_TIMESTAMP(q);
1177
1178 QDIO_DBF_TEXT4(0,trace,"inhasmvd");
1179 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1180 return 1;
1181 } else {
1182 QDIO_DBF_TEXT4(0,trace,"inhsntmv");
1183 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1184 return 0;
1185 }
1186}
1187
1188/* means no more buffers to be filled */
1189static int
1190tiqdio_is_inbound_q_done(struct qdio_q *q)
1191{
1192 int no_used;
1193 unsigned int start_buf, count;
1194 unsigned char state = 0;
1195 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1196
1197#ifdef CONFIG_QDIO_DEBUG
1198 char dbf_text[15];
1199#endif
1200
1201 no_used=atomic_read(&q->number_of_buffers_used);
1202
1203 /* propagate the change from 82 to 80 through VM */
1204 SYNC_MEMORY;
1205
1206#ifdef CONFIG_QDIO_DEBUG
1207 if (no_used) {
1208 sprintf(dbf_text,"iqisnt%02x",no_used);
1209 QDIO_DBF_TEXT4(0,trace,dbf_text);
1210 } else {
1211 QDIO_DBF_TEXT4(0,trace,"iniqisdo");
1212 }
1213 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1214#endif /* CONFIG_QDIO_DEBUG */
1215
1216 if (!no_used)
1217 return 1;
1218 if (irq->is_qebsm) {
1219 count = 1;
1220 start_buf = q->first_to_check;
1221 qdio_do_eqbs(q, &state, &start_buf, &count);
1222 } else
1223 state = q->slsb.acc.val[q->first_to_check];
1224 if (state != SLSB_P_INPUT_PRIMED)
1225 /*
1226 * nothing more to do if the next buffer is not PRIMED.
1227 * note that we did a SYNC_MEMORY before, so there
1228 * has been a synchronization.
1229 * we will return 0 below, as there is nothing to do
1230 * (stop_polling is not necessary, as we have not been
1231 * using the PROCESSING state)
1232 */
1233 return 0;
1234
1235 /*
1236 * ok, the next input buffer is primed. that means that device state
1237 * change indicator and adapter local summary are set, so we will find
1238 * it next time.
1239 * we will return 0 below, as there is nothing to do, except scheduling
1240 * ourselves for the next time.
1241 */
1242 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1243 tiqdio_sched_tl();
1244 return 0;
1245}
1246
1247static int
1248qdio_is_inbound_q_done(struct qdio_q *q)
1249{
1250 int no_used;
1251 unsigned int start_buf, count;
1252 unsigned char state = 0;
1253 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1254
1255#ifdef CONFIG_QDIO_DEBUG
1256 char dbf_text[15];
1257#endif
1258
1259 no_used=atomic_read(&q->number_of_buffers_used);
1260
1261 /*
1262 * we need that one for synchronization with the adapter, as it
1263 * does a kind of PCI avoidance
1264 */
1265 SYNC_MEMORY;
1266
1267 if (!no_used) {
1268 QDIO_DBF_TEXT4(0,trace,"inqisdnA");
1269 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1270 return 1;
1271 }
1272 if (irq->is_qebsm) {
1273 count = 1;
1274 start_buf = q->first_to_check;
1275 qdio_do_eqbs(q, &state, &start_buf, &count);
1276 } else
1277 state = q->slsb.acc.val[q->first_to_check];
1278 if (state == SLSB_P_INPUT_PRIMED) {
1279 /* we got something to do */
1280 QDIO_DBF_TEXT4(0,trace,"inqisntA");
1281 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1282 return 0;
1283 }
1284
1285 /* on VM, we don't poll, so the q is always done here */
1286 if (q->siga_sync)
1287 return 1;
1288 if (q->hydra_gives_outbound_pcis)
1289 return 1;
1290
1291 /*
1292 * at this point we know, that inbound first_to_check
1293 * has (probably) not moved (see qdio_inbound_processing)
1294 */
1295 if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
1296#ifdef CONFIG_QDIO_DEBUG
1297 QDIO_DBF_TEXT4(0,trace,"inqisdon");
1298 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1299 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1300 QDIO_DBF_TEXT4(0,trace,dbf_text);
1301#endif /* CONFIG_QDIO_DEBUG */
1302 return 1;
1303 } else {
1304#ifdef CONFIG_QDIO_DEBUG
1305 QDIO_DBF_TEXT4(0,trace,"inqisntd");
1306 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1307 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1308 QDIO_DBF_TEXT4(0,trace,dbf_text);
1309#endif /* CONFIG_QDIO_DEBUG */
1310 return 0;
1311 }
1312}
1313
1314static void
1315qdio_kick_inbound_handler(struct qdio_q *q)
1316{
1317 int count, start, end, real_end, i;
1318#ifdef CONFIG_QDIO_DEBUG
1319 char dbf_text[15];
1320#endif
1321
1322 QDIO_DBF_TEXT4(0,trace,"kickinh");
1323 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1324
1325 start=q->first_element_to_kick;
1326 real_end=q->first_to_check;
1327 end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1328
1329 i=start;
1330 count=0;
1331 while (1) {
1332 count++;
1333 if (i==end)
1334 break;
1335 i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1336 }
1337
1338#ifdef CONFIG_QDIO_DEBUG
1339 sprintf(dbf_text,"s=%2xc=%2x",start,count);
1340 QDIO_DBF_TEXT4(0,trace,dbf_text);
1341#endif /* CONFIG_QDIO_DEBUG */
1342
1343 if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
1344 q->handler(q->cdev,
1345 QDIO_STATUS_INBOUND_INT|q->error_status_flags,
1346 q->qdio_error,q->siga_error,q->q_no,start,count,
1347 q->int_parm);
1348
1349 /* for the next time: */
1350 q->first_element_to_kick=real_end;
1351 q->qdio_error=0;
1352 q->siga_error=0;
1353 q->error_status_flags=0;
1354
1355 qdio_perf_stat_inc(&perf_stats.inbound_cnt);
1356}
1357
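The counting loop above is just modular distance on the 128-slot ring, plus one because the endpoint is inclusive; assuming the same wrap mask, it collapses to the closed form:

	count = ((end - start) & (QDIO_MAX_BUFFERS_PER_Q - 1)) + 1;

For example, start == end yields 1, and wrapping from buffer 126 to buffer 1 yields ((1 - 126) & 127) + 1 = 4 (buffers 126, 127, 0, 1).
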
1358static void
1359__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1360{
1361 struct qdio_irq *irq_ptr;
1362 struct qdio_q *oq;
1363 int i;
1364
1365 QDIO_DBF_TEXT4(0,trace,"iqinproc");
1366 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1367
1368 /*
1369 * we first want to reserve the q, so that we know that we don't
1370 * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
1371 * be set
1372 */
1373 if (unlikely(qdio_reserve_q(q))) {
1374 qdio_release_q(q);
1375 qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
1376 /*
1377 * as we might just be about to stop polling, we make
1378 * sure that we check again at least once more
1379 */
1380 tiqdio_sched_tl();
1381 return;
1382 }
1383 qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs);
1384 if (unlikely(atomic_read(&q->is_in_shutdown))) {
1385 qdio_unmark_q(q);
1386 goto out;
1387 }
1388
1389 /*
1390 * we reset spare_ind_was_set when the queue does not use the
1391 * spare indicator
1392 */
1393 if (spare_ind_was_set)
1394 spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);
1395
1396 if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
1397 goto out;
1398 /*
1399 * q->dev_st_chg_ind is the indicator, be it shared or not.
1400 * only clear it, if indicator is non-shared
1401 */
1402 if (q->dev_st_chg_ind != &spare_indicator)
1403 tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
1404
1405 if (q->hydra_gives_outbound_pcis) {
1406 if (!q->siga_sync_done_on_thinints) {
1407 SYNC_MEMORY_ALL;
1408 } else if (!q->siga_sync_done_on_outb_tis) {
1409 SYNC_MEMORY_ALL_OUTB;
1410 }
1411 } else {
1412 SYNC_MEMORY;
1413 }
1414 /*
1415 * maybe we have to do work on our outbound queues... at least
1416 * we have to check the outbound-int-capable thinint-capable
1417 * queues
1418 */
1419 if (q->hydra_gives_outbound_pcis) {
1420 irq_ptr = (struct qdio_irq*)q->irq_ptr;
1421 for (i=0;i<irq_ptr->no_output_qs;i++) {
1422 oq = irq_ptr->output_qs[i];
1423 if (!qdio_is_outbound_q_done(oq)) {
1424 qdio_perf_stat_dec(&perf_stats.tl_runs);
1425 __qdio_outbound_processing(oq);
1426 }
1427 }
1428 }
1429
1430 if (!qdio_has_inbound_q_moved(q))
1431 goto out;
1432
1433 qdio_kick_inbound_handler(q);
1434 if (tiqdio_is_inbound_q_done(q))
1435 if (!qdio_stop_polling(q)) {
1436 /*
1437 * we set the flags to get into the stuff next time,
1438 * see also comment in qdio_stop_polling
1439 */
1440 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1441 tiqdio_sched_tl();
1442 }
1443out:
1444 qdio_release_q(q);
1445}
1446
1447static void
1448tiqdio_inbound_processing(unsigned long q)
1449{
1450 __tiqdio_inbound_processing((struct qdio_q *) q,
1451 atomic_read(&spare_indicator_usecount));
1452}
1453
1454static void
1455__qdio_inbound_processing(struct qdio_q *q)
1456{
1457 int q_laps=0;
1458
1459 QDIO_DBF_TEXT4(0,trace,"qinproc");
1460 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1461
1462 if (unlikely(qdio_reserve_q(q))) {
1463 qdio_release_q(q);
1464 qdio_perf_stat_inc(&perf_stats.inbound_tl_runs_resched);
1465 /* as we're sissies, we'll check next time */
1466 if (likely(!atomic_read(&q->is_in_shutdown))) {
1467 qdio_mark_q(q);
1468 QDIO_DBF_TEXT4(0,trace,"busy,agn");
1469 }
1470 return;
1471 }
1472 qdio_perf_stat_inc(&perf_stats.inbound_tl_runs);
1473 qdio_perf_stat_inc(&perf_stats.tl_runs);
1474
1475again:
1476 if (qdio_has_inbound_q_moved(q)) {
1477 qdio_kick_inbound_handler(q);
1478 if (!qdio_stop_polling(q)) {
1479 q_laps++;
1480 if (q_laps<QDIO_Q_LAPS)
1481 goto again;
1482 }
1483 qdio_mark_q(q);
1484 } else {
1485 if (!qdio_is_inbound_q_done(q))
1486 /* means poll time is not yet over */
1487 qdio_mark_q(q);
1488 }
1489
1490 qdio_release_q(q);
1491}
1492
1493static void
1494qdio_inbound_processing(unsigned long q)
1495{
1496 __qdio_inbound_processing((struct qdio_q *) q);
1497}
1498
1499/************************* MAIN ROUTINES *******************************/
1500
1501#ifdef QDIO_USE_PROCESSING_STATE
1502static int
1503tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1504{
1505 if (!q) {
1506 tiqdio_sched_tl();
1507 return 0;
1508 }
1509
1510 /*
1511 * under VM, we have not used the PROCESSING state, so no
1512 * need to stop polling
1513 */
1514 if (q->siga_sync)
1515 return 2;
1516
1517 if (unlikely(qdio_reserve_q(q))) {
1518 qdio_release_q(q);
1519 qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
1520 /*
1521 * as we might just be about to stop polling, we make
1522 * sure that we check again at least once more
1523 */
1524
1525 /*
1526 * sanity -- we'd get here without setting the
1527 * dev st chg ind
1528 */
1529 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1530 tiqdio_sched_tl();
1531 return 0;
1532 }
1533 if (qdio_stop_polling(q)) {
1534 qdio_release_q(q);
1535 return 2;
1536 }
1537 if (q_laps<QDIO_Q_LAPS-1) {
1538 qdio_release_q(q);
1539 return 3;
1540 }
1541 /*
1542 * we set the flags to get into the stuff
1543 * next time, see also comment in qdio_stop_polling
1544 */
1545 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1546 tiqdio_sched_tl();
1547 qdio_release_q(q);
1548 return 1;
1549
1550}
1551#endif /* QDIO_USE_PROCESSING_STATE */
1552
1553static void
1554tiqdio_inbound_checks(void)
1555{
1556 struct qdio_q *q;
1557 int spare_ind_was_set=0;
1558#ifdef QDIO_USE_PROCESSING_STATE
1559 int q_laps=0;
1560#endif /* QDIO_USE_PROCESSING_STATE */
1561
1562 QDIO_DBF_TEXT4(0,trace,"iqdinbck");
1563 QDIO_DBF_TEXT5(0,trace,"iqlocsum");
1564
1565#ifdef QDIO_USE_PROCESSING_STATE
1566again:
1567#endif /* QDIO_USE_PROCESSING_STATE */
1568
1569 /* when the spare indicator is used and set, save that and clear it */
1570 if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
1571 spare_ind_was_set = 1;
1572 tiqdio_clear_summary_bit((__u32*)&spare_indicator);
1573 }
1574
1575 q=(struct qdio_q*)tiq_list;
1576 do {
1577 if (!q)
1578 break;
1579 __tiqdio_inbound_processing(q, spare_ind_was_set);
1580 q=(struct qdio_q*)q->list_next;
1581 } while (q!=(struct qdio_q*)tiq_list);
1582
1583#ifdef QDIO_USE_PROCESSING_STATE
1584 q=(struct qdio_q*)tiq_list;
1585 do {
1586 int ret;
1587
1588 ret = tiqdio_reset_processing_state(q, q_laps);
1589 switch (ret) {
1590 case 0:
1591 return;
1592 case 1:
1593 q_laps++;
1594 case 2:
1595 q = (struct qdio_q*)q->list_next;
1596 break;
1597 default:
1598 q_laps++;
1599 goto again;
1600 }
1601 } while (q!=(struct qdio_q*)tiq_list);
1602#endif /* QDIO_USE_PROCESSING_STATE */
1603}
1604
1605static void
1606tiqdio_tl(unsigned long data)
1607{
1608 QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
1609
1610 qdio_perf_stat_inc(&perf_stats.tl_runs);
1611
1612 tiqdio_inbound_checks();
1613}
1614
1615/********************* GENERAL HELPER_ROUTINES ***********************/
1616
1617static void
1618qdio_release_irq_memory(struct qdio_irq *irq_ptr)
1619{
1620 int i;
1621 struct qdio_q *q;
1622
1623 for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
1624 q = irq_ptr->input_qs[i];
1625 if (q) {
1626 free_page((unsigned long) q->slib);
1627 kmem_cache_free(qdio_q_cache, q);
1628 }
1629 q = irq_ptr->output_qs[i];
1630 if (q) {
1631 free_page((unsigned long) q->slib);
1632 kmem_cache_free(qdio_q_cache, q);
1633 }
1634 }
1635 free_page((unsigned long) irq_ptr->qdr);
1636 free_page((unsigned long) irq_ptr);
1637}
1638
1639static void
1640qdio_set_impl_params(struct qdio_irq *irq_ptr,
1641 unsigned int qib_param_field_format,
1642 /* pointer to 128 bytes or NULL, if no param field */
1643 unsigned char *qib_param_field,
1644 /* pointer to no_queues*128 words of data or NULL */
1645 unsigned int no_input_qs,
1646 unsigned int no_output_qs,
1647 unsigned long *input_slib_elements,
1648 unsigned long *output_slib_elements)
1649{
1650 int i,j;
1651
1652 if (!irq_ptr)
1653 return;
1654
1655 irq_ptr->qib.pfmt=qib_param_field_format;
1656 if (qib_param_field)
1657 memcpy(irq_ptr->qib.parm,qib_param_field,
1658 QDIO_MAX_BUFFERS_PER_Q);
1659
1660 if (input_slib_elements)
1661 for (i=0;i<no_input_qs;i++) {
1662 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1663 irq_ptr->input_qs[i]->slib->slibe[j].parms=
1664 input_slib_elements[
1665 i*QDIO_MAX_BUFFERS_PER_Q+j];
1666 }
1667 if (output_slib_elements)
1668 for (i=0;i<no_output_qs;i++) {
1669 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1670 irq_ptr->output_qs[i]->slib->slibe[j].parms=
1671 output_slib_elements[
1672 i*QDIO_MAX_BUFFERS_PER_Q+j];
1673 }
1674}
1675
1676static int
1677qdio_alloc_qs(struct qdio_irq *irq_ptr,
1678 int no_input_qs, int no_output_qs)
1679{
1680 int i;
1681 struct qdio_q *q;
1682
1683 for (i = 0; i < no_input_qs; i++) {
1684 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
1685 if (!q)
1686 return -ENOMEM;
1687 memset(q, 0, sizeof(*q));
1688
1689 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
1690 if (!q->slib) {
1691 kmem_cache_free(qdio_q_cache, q);
1692 return -ENOMEM;
1693 }
1694 irq_ptr->input_qs[i]=q;
1695 }
1696
1697 for (i = 0; i < no_output_qs; i++) {
1698 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
1699 if (!q)
1700 return -ENOMEM;
1701 memset(q, 0, sizeof(*q));
1702
1703 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
1704 if (!q->slib) {
1705 kmem_cache_free(qdio_q_cache, q);
1706 return -ENOMEM;
1707 }
1708 irq_ptr->output_qs[i]=q;
1709 }
1710 return 0;
1711}
1712
1713static void
1714qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1715 int no_input_qs, int no_output_qs,
1716 qdio_handler_t *input_handler,
1717 qdio_handler_t *output_handler,
1718 unsigned long int_parm,int q_format,
1719 unsigned long flags,
1720 void **inbound_sbals_array,
1721 void **outbound_sbals_array)
1722{
1723 struct qdio_q *q;
1724 int i,j;
1725 char dbf_text[20]; /* see qdio_initialize */
1726 void *ptr;
1727 int available;
1728
1729 sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no);
1730 QDIO_DBF_TEXT0(0,setup,dbf_text);
1731 for (i=0;i<no_input_qs;i++) {
1732 q=irq_ptr->input_qs[i];
1733
1734 memset(q,0,((char*)&q->slib)-((char*)q));
1735 sprintf(dbf_text,"in-q%4x",i);
1736 QDIO_DBF_TEXT0(0,setup,dbf_text);
1737 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
1738
1739 memset(q->slib,0,PAGE_SIZE);
1740 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
1741
1742 available=0;
1743
1744 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1745 q->sbal[j]=*(inbound_sbals_array++);
1746
1747 q->queue_type=q_format;
1748 q->int_parm=int_parm;
1749 q->schid = irq_ptr->schid;
1750 q->irq_ptr = irq_ptr;
1751 q->cdev = cdev;
1752 q->mask=1<<(31-i);
1753 q->q_no=i;
1754 q->is_input_q=1;
1755 q->first_to_check=0;
1756 q->last_move_ftc=0;
1757 q->handler=input_handler;
1758 q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;
1759
1760 /* q->is_thinint_q isn't valid at this time, but
1761 * irq_ptr->is_thinint_irq is
1762 */
1763 if (irq_ptr->is_thinint_irq)
1764 tasklet_init(&q->tasklet, tiqdio_inbound_processing,
1765 (unsigned long) q);
1766 else
1767 tasklet_init(&q->tasklet, qdio_inbound_processing,
1768 (unsigned long) q);
1769
1770 /* actually this is not used for inbound queues. yet. */
1771 atomic_set(&q->busy_siga_counter,0);
1772 q->timing.busy_start=0;
1773
1774/* for (j=0;j<QDIO_STATS_NUMBER;j++)
1775 q->timing.last_transfer_times[j]=(qdio_get_micros()/
1776 QDIO_STATS_NUMBER)*j;
1777 q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
1778*/
1779
1780 /* fill in slib */
1781 if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
1782 (unsigned long)(q->slib);
1783 q->slib->sla=(unsigned long)(q->sl);
1784 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
1785
1786 /* fill in sl */
1787 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1788 q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
1789
1790 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
1791 ptr=(void*)q->sl;
1792 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1793 ptr=(void*)&q->slsb;
1794 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1795 ptr=(void*)q->sbal[0];
1796 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1797
1798 /* fill in slsb */
1799 if (!irq_ptr->is_qebsm) {
1800 unsigned int count = 1;
1801 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1802 set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
1803 }
1804 }
1805
1806 for (i=0;i<no_output_qs;i++) {
1807 q=irq_ptr->output_qs[i];
1808 memset(q,0,((char*)&q->slib)-((char*)q));
1809
1810 sprintf(dbf_text,"outq%4x",i);
1811 QDIO_DBF_TEXT0(0,setup,dbf_text);
1812 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
1813
1814 memset(q->slib,0,PAGE_SIZE);
1815 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
1816
1817 available=0;
1818
1819 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1820 q->sbal[j]=*(outbound_sbals_array++);
1821
1822 q->queue_type=q_format;
1823 if ((q->queue_type == QDIO_IQDIO_QFMT) &&
1824 (no_output_qs > 1) &&
1825 (i == no_output_qs-1))
1826 q->queue_type = QDIO_IQDIO_QFMT_ASYNCH;
1827 q->int_parm=int_parm;
1828 q->is_input_q=0;
1829 q->is_pci_out = 0;
1830 q->schid = irq_ptr->schid;
1831 q->cdev = cdev;
1832 q->irq_ptr = irq_ptr;
1833 q->mask=1<<(31-i);
1834 q->q_no=i;
1835 q->first_to_check=0;
1836 q->last_move_ftc=0;
1837 q->handler=output_handler;
1838
1839 tasklet_init(&q->tasklet, qdio_outbound_processing,
1840 (unsigned long) q);
1841 setup_timer(&q->timer, qdio_outbound_processing,
1842 (unsigned long) q);
1843
1844 atomic_set(&q->busy_siga_counter,0);
1845 q->timing.busy_start=0;
1846
1847 /* fill in slib */
1848 if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
1849 (unsigned long)(q->slib);
1850 q->slib->sla=(unsigned long)(q->sl);
1851 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
1852
1853 /* fill in sl */
1854 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1855 q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
1856
1857 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
1858 ptr=(void*)q->sl;
1859 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1860 ptr=(void*)&q->slsb;
1861 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1862 ptr=(void*)q->sbal[0];
1863 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1864
1865 /* fill in slsb */
1866 if (!irq_ptr->is_qebsm) {
1867 unsigned int count = 1;
1868 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1869 set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
1870 }
1871 }
1872}
1873
1874static void
1875qdio_fill_thresholds(struct qdio_irq *irq_ptr,
1876 unsigned int no_input_qs,
1877 unsigned int no_output_qs,
1878 unsigned int min_input_threshold,
1879 unsigned int max_input_threshold,
1880 unsigned int min_output_threshold,
1881 unsigned int max_output_threshold)
1882{
1883 int i;
1884 struct qdio_q *q;
1885
1886 for (i=0;i<no_input_qs;i++) {
1887 q=irq_ptr->input_qs[i];
1888 q->timing.threshold=max_input_threshold;
1889/* for (j=0;j<QDIO_STATS_CLASSES;j++) {
1890 q->threshold_classes[j].threshold=
1891 min_input_threshold+
1892 (max_input_threshold-min_input_threshold)/
1893 QDIO_STATS_CLASSES;
1894 }
1895 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1896 }
1897 for (i=0;i<no_output_qs;i++) {
1898 q=irq_ptr->output_qs[i];
1899 q->timing.threshold=max_output_threshold;
1900/* for (j=0;j<QDIO_STATS_CLASSES;j++) {
1901 q->threshold_classes[j].threshold=
1902 min_output_threshold+
1903 (max_output_threshold-min_output_threshold)/
1904 QDIO_STATS_CLASSES;
1905 }
1906 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1907 }
1908}
1909
1910static void tiqdio_thinint_handler(void *ind, void *drv_data)
1911{
1912 QDIO_DBF_TEXT4(0,trace,"thin_int");
1913
1914 qdio_perf_stat_inc(&perf_stats.thinints);
1915
1916 /* SVS only when needed:
1917 * issue SVS to benefit from iqdio interrupt avoidance
1918 * (SVS clears AISOI)*/
1919 if (!omit_svs)
1920 tiqdio_clear_global_summary();
1921
1922 tiqdio_inbound_checks();
1923}
1924
1925static void
1926qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1927{
1928 int i;
1929#ifdef CONFIG_QDIO_DEBUG
1930 char dbf_text[15];
1931
1932 QDIO_DBF_TEXT5(0,trace,"newstate");
1933 sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
1934 QDIO_DBF_TEXT5(0,trace,dbf_text);
1935#endif /* CONFIG_QDIO_DEBUG */
1936
1937 irq_ptr->state=state;
1938 for (i=0;i<irq_ptr->no_input_qs;i++)
1939 irq_ptr->input_qs[i]->state=state;
1940 for (i=0;i<irq_ptr->no_output_qs;i++)
1941 irq_ptr->output_qs[i]->state=state;
1942 mb();
1943}
1944
1945static void
1946qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1947{
1948 char dbf_text[15];
1949
1950 if (irb->esw.esw0.erw.cons) {
1951 sprintf(dbf_text,"sens%4x",schid.sch_no);
1952 QDIO_DBF_TEXT2(1,trace,dbf_text);
1953 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
1954
1955 QDIO_PRINT_WARN("sense data available on qdio channel.\n");
1956 QDIO_HEXDUMP16(WARN,"irb: ",irb);
1957 QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
1958 }
1959
1960}
1961
1962static void
1963qdio_handle_pci(struct qdio_irq *irq_ptr)
1964{
1965 int i;
1966 struct qdio_q *q;
1967
1968 qdio_perf_stat_inc(&perf_stats.pcis);
1969 for (i=0;i<irq_ptr->no_input_qs;i++) {
1970 q=irq_ptr->input_qs[i];
1971 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
1972 qdio_mark_q(q);
1973 else {
1974 qdio_perf_stat_dec(&perf_stats.tl_runs);
1975 __qdio_inbound_processing(q);
1976 }
1977 }
1978 if (!irq_ptr->hydra_gives_outbound_pcis)
1979 return;
1980 for (i=0;i<irq_ptr->no_output_qs;i++) {
1981 q=irq_ptr->output_qs[i];
1982 if (qdio_is_outbound_q_done(q))
1983 continue;
1984 qdio_perf_stat_dec(&perf_stats.tl_runs);
1985 if (!irq_ptr->sync_done_on_outb_pcis)
1986 SYNC_MEMORY;
1987 __qdio_outbound_processing(q);
1988 }
1989}
1990
1991static void qdio_establish_handle_irq(struct ccw_device*, int, int);
1992
1993static void
1994qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
1995 int cstat, int dstat)
1996{
1997 struct qdio_irq *irq_ptr;
1998 struct qdio_q *q;
1999 char dbf_text[15];
2000
2001 irq_ptr = cdev->private->qdio_data;
2002
2003 QDIO_DBF_TEXT2(1, trace, "ick2");
2004 sprintf(dbf_text,"%s", cdev->dev.bus_id);
2005 QDIO_DBF_TEXT2(1,trace,dbf_text);
2006 QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
2007 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2008 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2009 QDIO_PRINT_ERR("received check condition on activate " \
2010 "queues on device %s (cs=x%x, ds=x%x).\n",
2011 cdev->dev.bus_id, cstat, dstat);
2012 if (irq_ptr->no_input_qs) {
2013 q=irq_ptr->input_qs[0];
2014 } else if (irq_ptr->no_output_qs) {
2015 q=irq_ptr->output_qs[0];
2016 } else {
2017 QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
2018 cdev->dev.bus_id);
2019 goto omit_handler_call;
2020 }
2021 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
2022 QDIO_STATUS_LOOK_FOR_ERROR,
2023 0,0,0,-1,-1,q->int_parm);
2024omit_handler_call:
2025 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);
2026
2027}
2028
2029static void
2030qdio_call_shutdown(struct work_struct *work)
2031{
2032 struct ccw_device_private *priv;
2033 struct ccw_device *cdev;
2034
2035 priv = container_of(work, struct ccw_device_private, kick_work);
2036 cdev = priv->cdev;
2037 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
2038 put_device(&cdev->dev);
2039}
2040
2041static void
2042qdio_timeout_handler(struct ccw_device *cdev)
2043{
2044 struct qdio_irq *irq_ptr;
2045 char dbf_text[15];
2046
2047 QDIO_DBF_TEXT2(0, trace, "qtoh");
2048 sprintf(dbf_text, "%s", cdev->dev.bus_id);
2049 QDIO_DBF_TEXT2(0, trace, dbf_text);
2050
2051 irq_ptr = cdev->private->qdio_data;
2052 sprintf(dbf_text, "state:%d", irq_ptr->state);
2053 QDIO_DBF_TEXT2(0, trace, dbf_text);
2054
2055 switch (irq_ptr->state) {
2056 case QDIO_IRQ_STATE_INACTIVE:
2057 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
2058 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2059 QDIO_DBF_TEXT2(1,setup,"eq:timeo");
2060 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2061 break;
2062 case QDIO_IRQ_STATE_CLEANUP:
2063 QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
2064 "irq=0.%x.%x.\n",
2065 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2066 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2067 break;
2068 case QDIO_IRQ_STATE_ESTABLISHED:
2069 case QDIO_IRQ_STATE_ACTIVE:
2070 /* I/O has been terminated by common I/O layer. */
2071 QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
2072 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2073 QDIO_DBF_TEXT2(1, trace, "cio:term");
2074 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
2075 if (get_device(&cdev->dev)) {
2076 /* Can't call shutdown from interrupt context. */
2077 PREPARE_WORK(&cdev->private->kick_work,
2078 qdio_call_shutdown);
2079 queue_work(ccw_device_work, &cdev->private->kick_work);
2080 }
2081 break;
2082 default:
2083 BUG();
2084 }
2085 wake_up(&cdev->private->wait_q);
2086}
2087
2088static void
2089qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2090{
2091 struct qdio_irq *irq_ptr;
2092 int cstat,dstat;
2093 char dbf_text[15];
2094
2095#ifdef CONFIG_QDIO_DEBUG
2096 QDIO_DBF_TEXT4(0, trace, "qint");
2097 sprintf(dbf_text, "%s", cdev->dev.bus_id);
2098 QDIO_DBF_TEXT4(0, trace, dbf_text);
2099#endif /* CONFIG_QDIO_DEBUG */
2100
2101 if (!intparm) {
2102 QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
2103 "handler, device %s\n", cdev->dev.bus_id);
2104 return;
2105 }
2106
2107 irq_ptr = cdev->private->qdio_data;
2108 if (!irq_ptr) {
2109 QDIO_DBF_TEXT2(1, trace, "uint");
2110 sprintf(dbf_text,"%s", cdev->dev.bus_id);
2111 QDIO_DBF_TEXT2(1,trace,dbf_text);
2112 QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
2113 cdev->dev.bus_id);
2114 return;
2115 }
2116
2117 if (IS_ERR(irb)) {
2118 /* Currently running i/o is in error. */
2119 switch (PTR_ERR(irb)) {
2120 case -EIO:
2121 QDIO_PRINT_ERR("i/o error on device %s\n",
2122 cdev->dev.bus_id);
2123 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2124 wake_up(&cdev->private->wait_q);
2125 return;
2126 case -ETIMEDOUT:
2127 qdio_timeout_handler(cdev);
2128 return;
2129 default:
2130 QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
2131 PTR_ERR(irb), cdev->dev.bus_id);
2132 return;
2133 }
2134 }
2135
2136 qdio_irq_check_sense(irq_ptr->schid, irb);
2137
2138#ifdef CONFIG_QDIO_DEBUG
2139 sprintf(dbf_text, "state:%d", irq_ptr->state);
2140 QDIO_DBF_TEXT4(0, trace, dbf_text);
2141#endif /* CONFIG_QDIO_DEBUG */
2142
2143 cstat = irb->scsw.cmd.cstat;
2144 dstat = irb->scsw.cmd.dstat;
2145
2146 switch (irq_ptr->state) {
2147 case QDIO_IRQ_STATE_INACTIVE:
2148 qdio_establish_handle_irq(cdev, cstat, dstat);
2149 break;
2150
2151 case QDIO_IRQ_STATE_CLEANUP:
2152 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2153 break;
2154
2155 case QDIO_IRQ_STATE_ESTABLISHED:
2156 case QDIO_IRQ_STATE_ACTIVE:
2157 if (cstat & SCHN_STAT_PCI) {
2158 qdio_handle_pci(irq_ptr);
2159 break;
2160 }
2161
2162 if ((cstat&~SCHN_STAT_PCI)||dstat) {
2163 qdio_handle_activate_check(cdev, intparm, cstat, dstat);
2164 break;
2165 }
2166 default:
2167 QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
2168 "device %s?!\n",
2169 irq_ptr->state, cdev->dev.bus_id);
2170 }
2171 wake_up(&cdev->private->wait_q);
2172
2173}
2174
2175int
2176qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
2177 unsigned int queue_number)
2178{
2179 int cc = 0;
2180 struct qdio_q *q;
2181 struct qdio_irq *irq_ptr;
2182 void *ptr;
2183#ifdef CONFIG_QDIO_DEBUG
2184 char dbf_text[15]="SyncXXXX";
2185#endif
2186
2187 irq_ptr = cdev->private->qdio_data;
2188 if (!irq_ptr)
2189 return -ENODEV;
2190
2191#ifdef CONFIG_QDIO_DEBUG
2192 *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
2193 QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
2194 *((int*)(&dbf_text[0]))=flags;
2195 *((int*)(&dbf_text[4]))=queue_number;
2196 QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
2197#endif /* CONFIG_QDIO_DEBUG */
2198
2199 if (flags&QDIO_FLAG_SYNC_INPUT) {
2200 q=irq_ptr->input_qs[queue_number];
2201 if (!q)
2202 return -EINVAL;
2203 if (!(irq_ptr->is_qebsm))
2204 cc = do_siga_sync(q->schid, 0, q->mask);
2205 } else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
2206 q=irq_ptr->output_qs[queue_number];
2207 if (!q)
2208 return -EINVAL;
2209 if (!(irq_ptr->is_qebsm))
2210 cc = do_siga_sync(q->schid, q->mask, 0);
2211 } else
2212 return -EINVAL;
2213
2214 ptr=&cc;
2215 if (cc)
2216 QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));
2217
2218 return cc;
2219}
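
/*
 * A minimal, hypothetical caller-side sketch for qdio_synchronize():
 * an upper-layer driver that wants the SLSB state of its first input
 * queue made visible before inspecting it could issue
 *
 *	int rc = qdio_synchronize(cdev, QDIO_FLAG_SYNC_INPUT, 0);
 *
 * where a non-zero rc is either the SIGA condition code or one of
 * the -ENODEV/-EINVAL cases rejected above.
 */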
2220
2221static int
2222qdio_get_ssqd_information(struct subchannel_id *schid,
2223 struct qdio_chsc_ssqd **ssqd_area)
2224{
2225 int result;
2226
2227 QDIO_DBF_TEXT0(0, setup, "getssqd");
2228 *ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2229	if (!*ssqd_area) {
2230 QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n",
2231 schid->sch_no);
2232 return -ENOMEM;
2233 }
2234
2235 (*ssqd_area)->request = (struct chsc_header) {
2236 .length = 0x0010,
2237 .code = 0x0024,
2238 };
2239 (*ssqd_area)->first_sch = schid->sch_no;
2240 (*ssqd_area)->last_sch = schid->sch_no;
2241 (*ssqd_area)->ssid = schid->ssid;
2242 result = chsc(*ssqd_area);
2243
2244 if (result) {
2245 QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n",
2246 result, schid->ssid, schid->sch_no);
2247 goto out;
2248 }
2249
2250 if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2251 QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n",
2252 (*ssqd_area)->response.code,
2253 schid->ssid, schid->sch_no);
2254 goto out;
2255 }
2256 if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2257 !((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) ||
2258 ((*ssqd_area)->sch != schid->sch_no)) {
2259 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2260 "using all SIGAs.\n",
2261 schid->ssid, schid->sch_no);
2262 goto out;
2263 }
2264 return 0;
2265out:
2266 return -EINVAL;
2267}
2268
2269int
2270qdio_get_ssqd_pct(struct ccw_device *cdev)
2271{
2272 struct qdio_chsc_ssqd *ssqd_area;
2273 struct subchannel_id schid;
2274 char dbf_text[15];
2275 int rc;
2276 int pct = 0;
2277
2278 QDIO_DBF_TEXT0(0, setup, "getpct");
2279 schid = ccw_device_get_subchannel_id(cdev);
2280 rc = qdio_get_ssqd_information(&schid, &ssqd_area);
2281 if (!rc)
2282 pct = (int)ssqd_area->pct;
2283 if (rc != -ENOMEM)
2284 mempool_free(ssqd_area, qdio_mempool_scssc);
2285 sprintf(dbf_text, "pct: %d", pct);
2286 QDIO_DBF_TEXT2(0, setup, dbf_text);
2287 return pct;
2288}
2289EXPORT_SYMBOL(qdio_get_ssqd_pct);
2290
2291static void
2292qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token)
2293{
2294 struct qdio_q *q;
2295 int i;
2296 unsigned int count, start_buf;
2297 char dbf_text[15];
2298
2299	/* check if QEBSM is disabled */
2300 if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) {
2301 irq_ptr->is_qebsm = 0;
2302 irq_ptr->sch_token = 0;
2303 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2304 QDIO_DBF_TEXT0(0,setup,"noV=V");
2305 return;
2306 }
2307 irq_ptr->sch_token = token;
2308	/* input queue */
2309 for (i = 0; i < irq_ptr->no_input_qs;i++) {
2310 q = irq_ptr->input_qs[i];
2311 count = QDIO_MAX_BUFFERS_PER_Q;
2312 start_buf = 0;
2313 set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
2314 }
2315 sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
2316 QDIO_DBF_TEXT0(0,setup,dbf_text);
2317 sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
2318 QDIO_DBF_TEXT0(0,setup,dbf_text);
2319	/* output queue */
2320 for (i = 0; i < irq_ptr->no_output_qs; i++) {
2321 q = irq_ptr->output_qs[i];
2322 count = QDIO_MAX_BUFFERS_PER_Q;
2323 start_buf = 0;
2324 set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
2325 }
2326}
2327
2328static void
2329qdio_get_ssqd_siga(struct qdio_irq *irq_ptr)
2330{
2331 int rc;
2332 struct qdio_chsc_ssqd *ssqd_area;
2333
2334 QDIO_DBF_TEXT0(0,setup,"getssqd");
2335 irq_ptr->qdioac = 0;
2336 rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area);
2337 if (rc) {
2338		QDIO_PRINT_WARN("using all SIGAs for sch x%x.\n",
2339 irq_ptr->schid.sch_no);
2340 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2341 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2342 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2343 irq_ptr->is_qebsm = 0;
2344 } else
2345 irq_ptr->qdioac = ssqd_area->qdioac1;
2346
2347 qdio_check_subchannel_qebsm(irq_ptr, ssqd_area->sch_token);
2348 if (rc != -ENOMEM)
2349 mempool_free(ssqd_area, qdio_mempool_scssc);
2350}
2351
2352static unsigned int
2353tiqdio_check_chsc_availability(void)
2354{
2355 char dbf_text[15];
2356
2357 /* Check for bit 41. */
2358 if (!css_general_characteristics.aif) {
2359 QDIO_PRINT_WARN("Adapter interruption facility not " \
2360 "installed.\n");
2361 return -ENOENT;
2362 }
2363
2364 /* Check for bits 107 and 108. */
2365 if (!css_chsc_characteristics.scssc ||
2366 !css_chsc_characteristics.scsscf) {
2367 QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
2368 "not available.\n");
2369 return -ENOENT;
2370 }
2371
2372 /* Check for OSA/FCP thin interrupts (bit 67). */
2373 hydra_thinints = css_general_characteristics.aif_osa;
2374 sprintf(dbf_text,"hydrati%1x", hydra_thinints);
2375 QDIO_DBF_TEXT0(0,setup,dbf_text);
2376
2377#ifdef CONFIG_64BIT
2378 /* Check for QEBSM support in general (bit 58). */
2379 is_passthrough = css_general_characteristics.qebsm;
2380#endif
2381 sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
2382 QDIO_DBF_TEXT0(0,setup,dbf_text);
2383
2384	/* Check for the AIF time delay disablement facility (bit 56). If
2385	 * installed, omit SVS even under LPAR (good point by Rick again). */
2386 omit_svs = css_general_characteristics.aif_tdd;
2387 sprintf(dbf_text,"omitsvs%1x", omit_svs);
2388 QDIO_DBF_TEXT0(0,setup,dbf_text);
2389 return 0;
2390}
2391
2392
2393static unsigned int
2394tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2395{
2396 unsigned long real_addr_local_summary_bit;
2397 unsigned long real_addr_dev_st_chg_ind;
2398 void *ptr;
2399 char dbf_text[15];
2400
2401 unsigned int resp_code;
2402 int result;
2403
2404 struct {
2405 struct chsc_header request;
2406 u16 operation_code;
2407 u16 reserved1;
2408 u32 reserved2;
2409 u32 reserved3;
2410 u64 summary_indicator_addr;
2411 u64 subchannel_indicator_addr;
2412 u32 ks:4;
2413 u32 kc:4;
2414 u32 reserved4:21;
2415 u32 isc:3;
2416 u32 word_with_d_bit;
2417 /* set to 0x10000000 to enable
2418 * time delay disablement facility */
2419 u32 reserved5;
2420 struct subchannel_id schid;
2421 u32 reserved6[1004];
2422 struct chsc_header response;
2423 u32 reserved7;
2424 } *scssc_area;
2425
2426 if (!irq_ptr->is_thinint_irq)
2427 return -ENODEV;
2428
2429 if (reset_to_zero) {
2430 real_addr_local_summary_bit=0;
2431 real_addr_dev_st_chg_ind=0;
2432 } else {
2433 real_addr_local_summary_bit=
2434 virt_to_phys((volatile void *)tiqdio_ind);
2435 real_addr_dev_st_chg_ind=
2436 virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
2437 }
2438
2439 scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2440 if (!scssc_area) {
2441 QDIO_PRINT_WARN("No memory for setting indicators on " \
2442 "subchannel 0.%x.%x.\n",
2443 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2444 return -ENOMEM;
2445 }
2446 scssc_area->request = (struct chsc_header) {
2447 .length = 0x0fe0,
2448 .code = 0x0021,
2449 };
2450 scssc_area->operation_code = 0;
2451
2452 scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
2453 scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
2454 scssc_area->ks = QDIO_STORAGE_KEY;
2455 scssc_area->kc = QDIO_STORAGE_KEY;
2456 scssc_area->isc = TIQDIO_THININT_ISC;
2457 scssc_area->schid = irq_ptr->schid;
2458	/* enable the time delay disablement facility, but only
2459	 * if it is actually installed (bit 56, i.e. aif_tdd,
2460	 * checked right below) */
2461 if (css_general_characteristics.aif_tdd)
2462 scssc_area->word_with_d_bit = 0x10000000;
2463 else
2464 QDIO_PRINT_WARN("Time delay disablement facility " \
2465 "not available\n");
2466
2467 result = chsc(scssc_area);
2468 if (result) {
2469 QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
2470 "cc=%i.\n",
2471 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
2472 result = -EIO;
2473 goto out;
2474 }
2475
2476 resp_code = scssc_area->response.code;
2477 if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
2478 QDIO_PRINT_WARN("response upon setting indicators " \
2479 "is 0x%x.\n",resp_code);
2480 sprintf(dbf_text,"sidR%4x",resp_code);
2481 QDIO_DBF_TEXT1(0,trace,dbf_text);
2482 QDIO_DBF_TEXT1(0,setup,dbf_text);
2483 ptr=&scssc_area->response;
2484 QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
2485 result = -EIO;
2486 goto out;
2487 }
2488
2489 QDIO_DBF_TEXT2(0,setup,"setscind");
2490 QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
2491 sizeof(unsigned long));
2492 QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
2493 result = 0;
2494out:
2495 mempool_free(scssc_area, qdio_mempool_scssc);
2496 return result;
2497
2498}
2499
2500static unsigned int
2501tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2502{
2503 unsigned int resp_code;
2504 int result;
2505 void *ptr;
2506 char dbf_text[15];
2507
2508 struct {
2509 struct chsc_header request;
2510 u16 operation_code;
2511 u16 reserved1;
2512 u32 reserved2;
2513 u32 reserved3;
2514 u32 reserved4[2];
2515 u32 delay_target;
2516 u32 reserved5[1009];
2517 struct chsc_header response;
2518 u32 reserved6;
2519 } *scsscf_area;
2520
2521 if (!irq_ptr->is_thinint_irq)
2522 return -ENODEV;
2523
2524 scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2525 if (!scsscf_area) {
2526 QDIO_PRINT_WARN("No memory for setting delay target on " \
2527 "subchannel 0.%x.%x.\n",
2528 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2529 return -ENOMEM;
2530 }
2531 scsscf_area->request = (struct chsc_header) {
2532 .length = 0x0fe0,
2533 .code = 0x1027,
2534 };
2535
2536 scsscf_area->delay_target = delay_target<<16;
2537
2538 result=chsc(scsscf_area);
2539 if (result) {
2540 QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
2541 "cc=%i. Continuing.\n",
2542 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2543 result);
2544 result = -EIO;
2545 goto out;
2546 }
2547
2548 resp_code = scsscf_area->response.code;
2549 if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
2550 QDIO_PRINT_WARN("response upon setting delay target " \
2551 "is 0x%x. Continuing.\n",resp_code);
2552 sprintf(dbf_text,"sdtR%4x",resp_code);
2553 QDIO_DBF_TEXT1(0,trace,dbf_text);
2554 QDIO_DBF_TEXT1(0,setup,dbf_text);
2555 ptr=&scsscf_area->response;
2556 QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
2557 }
2558 QDIO_DBF_TEXT2(0,trace,"delytrgt");
2559 QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
2560 result = 0; /* not critical */
2561out:
2562 mempool_free(scsscf_area, qdio_mempool_scssc);
2563 return result;
2564}
2565
2566int
2567qdio_cleanup(struct ccw_device *cdev, int how)
2568{
2569 struct qdio_irq *irq_ptr;
2570 char dbf_text[15];
2571 int rc;
2572
2573 irq_ptr = cdev->private->qdio_data;
2574 if (!irq_ptr)
2575 return -ENODEV;
2576
2577 sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
2578 QDIO_DBF_TEXT1(0,trace,dbf_text);
2579 QDIO_DBF_TEXT0(0,setup,dbf_text);
2580
2581 rc = qdio_shutdown(cdev, how);
2582 if ((rc == 0) || (rc == -EINPROGRESS))
2583 rc = qdio_free(cdev);
2584 return rc;
2585}
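
/*
 * Sketch of a typical teardown from an upper-layer driver (purely
 * illustrative); QDIO_FLAG_CLEANUP_USING_CLEAR is the same flag that
 * qdio_call_shutdown() uses on the timeout path:
 *
 *	rc = qdio_cleanup(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *
 * Note that qdio_free() is only reached when qdio_shutdown()
 * returned 0 or -EINPROGRESS.
 */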
2586
2587int
2588qdio_shutdown(struct ccw_device *cdev, int how)
2589{
2590 struct qdio_irq *irq_ptr;
2591 int i;
2592 int result = 0;
2593 int rc;
2594 unsigned long flags;
2595 int timeout;
2596 char dbf_text[15];
2597
2598 irq_ptr = cdev->private->qdio_data;
2599 if (!irq_ptr)
2600 return -ENODEV;
2601
2602 down(&irq_ptr->setting_up_sema);
2603
2604 sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
2605 QDIO_DBF_TEXT1(0,trace,dbf_text);
2606 QDIO_DBF_TEXT0(0,setup,dbf_text);
2607
2608 /* mark all qs as uninteresting */
2609 for (i=0;i<irq_ptr->no_input_qs;i++)
2610 atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1);
2611
2612 for (i=0;i<irq_ptr->no_output_qs;i++)
2613 atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1);
2614
2615 tasklet_kill(&tiqdio_tasklet);
2616
2617 for (i=0;i<irq_ptr->no_input_qs;i++) {
2618 qdio_unmark_q(irq_ptr->input_qs[i]);
2619 tasklet_kill(&irq_ptr->input_qs[i]->tasklet);
2620 wait_event_interruptible_timeout(cdev->private->wait_q,
2621 !atomic_read(&irq_ptr->
2622 input_qs[i]->
2623 use_count),
2624 QDIO_NO_USE_COUNT_TIMEOUT);
2625 if (atomic_read(&irq_ptr->input_qs[i]->use_count))
2626 result=-EINPROGRESS;
2627 }
2628
2629 for (i=0;i<irq_ptr->no_output_qs;i++) {
2630 tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
2631 del_timer(&irq_ptr->output_qs[i]->timer);
2632 wait_event_interruptible_timeout(cdev->private->wait_q,
2633 !atomic_read(&irq_ptr->
2634 output_qs[i]->
2635 use_count),
2636 QDIO_NO_USE_COUNT_TIMEOUT);
2637 if (atomic_read(&irq_ptr->output_qs[i]->use_count))
2638 result=-EINPROGRESS;
2639 }
2640
2641 /* cleanup subchannel */
2642 spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
2643 if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
2644 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
2645 timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
2646 } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
2647 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2648 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2649 } else { /* default behaviour */
2650 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2651 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2652 }
2653 if (rc == -ENODEV) {
2654		/* No need to wait; the device is no longer present. */
2655 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2656 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2657 } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) {
2658 /*
2659		 * Whoever put another handler there has to cope with the
2660		 * interrupt themselves. This might happen if qdio_shutdown
2661		 * was called on queues that were already shut down, but it
2662		 * shouldn't have bad side effects.
2663 */
2664 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2665 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2666 } else if (rc == 0) {
2667 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
2668 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
2669
2670 wait_event_interruptible_timeout(cdev->private->wait_q,
2671 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
2672 irq_ptr->state == QDIO_IRQ_STATE_ERR,
2673 timeout);
2674 } else {
2675 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
2676				"device %s\n", rc, cdev->dev.bus_id);
2677 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2678 result = rc;
2679 goto out;
2680 }
2681 if (irq_ptr->is_thinint_irq) {
2682 qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
2683 tiqdio_set_subchannel_ind(irq_ptr,1);
2684 /* reset adapter interrupt indicators */
2685 }
2686
2687 /* exchange int handlers, if necessary */
2688 if ((void*)cdev->handler == (void*)qdio_handler)
2689 cdev->handler=irq_ptr->original_int_handler;
2690
2691 /* Ignore errors. */
2692 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2693out:
2694 up(&irq_ptr->setting_up_sema);
2695 return result;
2696}
2697
2698int
2699qdio_free(struct ccw_device *cdev)
2700{
2701 struct qdio_irq *irq_ptr;
2702 char dbf_text[15];
2703
2704 irq_ptr = cdev->private->qdio_data;
2705 if (!irq_ptr)
2706 return -ENODEV;
2707
2708 down(&irq_ptr->setting_up_sema);
2709
2710 sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
2711 QDIO_DBF_TEXT1(0,trace,dbf_text);
2712 QDIO_DBF_TEXT0(0,setup,dbf_text);
2713
2714 cdev->private->qdio_data = NULL;
2715
2716 up(&irq_ptr->setting_up_sema);
2717
2718 qdio_release_irq_memory(irq_ptr);
2719 module_put(THIS_MODULE);
2720 return 0;
2721}
2722
2723static void
2724qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2725{
2726	char dbf_text[20]; /* sized generously in case a sprintf emits more than 8 chars */
2727
2728 sprintf(dbf_text,"qfmt:%x",init_data->q_format);
2729 QDIO_DBF_TEXT0(0,setup,dbf_text);
2730 QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
2731 sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
2732 QDIO_DBF_TEXT0(0,setup,dbf_text);
2733 QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
2734 QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
2735 QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
2736 sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
2737 QDIO_DBF_TEXT0(0,setup,dbf_text);
2738 sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
2739 QDIO_DBF_TEXT0(0,setup,dbf_text);
2740 sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
2741 QDIO_DBF_TEXT0(0,setup,dbf_text);
2742 sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
2743 QDIO_DBF_TEXT0(0,setup,dbf_text);
2744 sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
2745 QDIO_DBF_TEXT0(0,setup,dbf_text);
2746 sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
2747 QDIO_DBF_TEXT0(0,setup,dbf_text);
2748 QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
2749 QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
2750 QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
2751 QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
2752 QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
2753 QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
2754}
2755
2756static void
2757qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2758{
2759 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
2760 irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2761
2762 irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib);
2763
2764 irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl);
2765
2766 irq_ptr->qdr->qdf0[i].slsba=
2767 (unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]);
2768
2769 irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY;
2770 irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY;
2771 irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY;
2772 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
2773}
2774
2775static void
2776qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2777 int j, int iqfmt)
2778{
2779 irq_ptr->output_qs[i]->is_iqdio_q = iqfmt;
2780 irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2781
2782 irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib);
2783
2784 irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl);
2785
2786 irq_ptr->qdr->qdf0[i+j].slsba=
2787 (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]);
2788
2789 irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY;
2790 irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY;
2791 irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY;
2792 irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY;
2793}
2794
2795
2796static void
2797qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2798{
2799 int i;
2800
2801 for (i=0;i<irq_ptr->no_input_qs;i++) {
2802 irq_ptr->input_qs[i]->siga_sync=
2803 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2804 irq_ptr->input_qs[i]->siga_in=
2805 irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2806 irq_ptr->input_qs[i]->siga_out=
2807 irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2808 irq_ptr->input_qs[i]->siga_sync_done_on_thinints=
2809 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2810 irq_ptr->input_qs[i]->hydra_gives_outbound_pcis=
2811 irq_ptr->hydra_gives_outbound_pcis;
2812 irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis=
2813 ((irq_ptr->qdioac&
2814 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2815 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2816 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2817 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2818
2819 }
2820}
2821
2822static void
2823qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2824{
2825 int i;
2826
2827 for (i=0;i<irq_ptr->no_output_qs;i++) {
2828 irq_ptr->output_qs[i]->siga_sync=
2829 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2830 irq_ptr->output_qs[i]->siga_in=
2831 irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2832 irq_ptr->output_qs[i]->siga_out=
2833 irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2834 irq_ptr->output_qs[i]->siga_sync_done_on_thinints=
2835 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2836 irq_ptr->output_qs[i]->hydra_gives_outbound_pcis=
2837 irq_ptr->hydra_gives_outbound_pcis;
2838 irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis=
2839 ((irq_ptr->qdioac&
2840 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2841 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2842 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2843 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2844
2845 }
2846}
2847
2848static int
2849qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2850 int dstat)
2851{
2852 char dbf_text[15];
2853 struct qdio_irq *irq_ptr;
2854
2855 irq_ptr = cdev->private->qdio_data;
2856
2857 if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
2858 sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
2859 QDIO_DBF_TEXT2(1,trace,dbf_text);
2860 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2861 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2862 QDIO_PRINT_ERR("received check condition on establish " \
2863 "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
2864 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2865 cstat,dstat);
2866 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
2867 }
2868
2869 if (!(dstat & DEV_STAT_DEV_END)) {
2870 QDIO_DBF_TEXT2(1,setup,"eq:no de");
2871 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2872 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2873 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
2874 "device end: dstat=%02x, cstat=%02x\n",
2875 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2876 dstat, cstat);
2877 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2878 return 1;
2879 }
2880
2881 if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
2882 QDIO_DBF_TEXT2(1,setup,"eq:badio");
2883 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2884 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2885 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
2886 "the following devstat: dstat=%02x, "
2887 "cstat=%02x\n", irq_ptr->schid.ssid,
2888 irq_ptr->schid.sch_no, dstat, cstat);
2889 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2890 return 1;
2891 }
2892 return 0;
2893}
2894
2895static void
2896qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2897{
2898 struct qdio_irq *irq_ptr;
2899 char dbf_text[15];
2900
2901 irq_ptr = cdev->private->qdio_data;
2902
2903 sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no);
2904 QDIO_DBF_TEXT0(0,setup,dbf_text);
2905 QDIO_DBF_TEXT0(0,trace,dbf_text);
2906
2907 if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat))
2908 return;
2909
2910 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
2911}
2912
2913int
2914qdio_initialize(struct qdio_initialize *init_data)
2915{
2916 int rc;
2917 char dbf_text[15];
2918
2919 sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no);
2920 QDIO_DBF_TEXT0(0,setup,dbf_text);
2921 QDIO_DBF_TEXT0(0,trace,dbf_text);
2922
2923 rc = qdio_allocate(init_data);
2924 if (rc == 0) {
2925 rc = qdio_establish(init_data);
2926 if (rc != 0)
2927 qdio_free(init_data->cdev);
2928 }
2929
2930 return rc;
2931}
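
/*
 * A hypothetical skeleton of the init_data an upper layer would pass
 * in; only a subset of the fields consumed by the allocate/establish
 * paths is shown, and the handler/array names (and the QETH format
 * constant) are placeholders:
 *
 *	struct qdio_initialize init_data = {
 *		.cdev			= cdev,
 *		.q_format		= QDIO_QETH_QFMT,
 *		.no_input_qs		= 1,
 *		.no_output_qs		= 1,
 *		.input_handler		= my_input_handler,
 *		.output_handler		= my_output_handler,
 *		.int_parm		= (unsigned long)my_card,
 *		.input_sbal_addr_array	= in_sbals,
 *		.output_sbal_addr_array	= out_sbals,
 *	};
 *	rc = qdio_initialize(&init_data);
 */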
2932
2933
2934int
2935qdio_allocate(struct qdio_initialize *init_data)
2936{
2937 struct qdio_irq *irq_ptr;
2938 char dbf_text[15];
2939
2940 sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no);
2941 QDIO_DBF_TEXT0(0,setup,dbf_text);
2942 QDIO_DBF_TEXT0(0,trace,dbf_text);
2943 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2944 (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2945 ((init_data->no_input_qs) && (!init_data->input_handler)) ||
2946 ((init_data->no_output_qs) && (!init_data->output_handler)) )
2947 return -EINVAL;
2948
2949 if (!init_data->input_sbal_addr_array)
2950 return -EINVAL;
2951
2952 if (!init_data->output_sbal_addr_array)
2953 return -EINVAL;
2954
2955 qdio_allocate_do_dbf(init_data);
2956
2957 /* create irq */
2958 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2959
2960 QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
2961 QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
2962
2963 if (!irq_ptr) {
2964 QDIO_PRINT_ERR("allocation of irq_ptr failed!\n");
2965 return -ENOMEM;
2966 }
2967
2968 init_MUTEX(&irq_ptr->setting_up_sema);
2969
2970 /* QDR must be in DMA area since CCW data address is only 32 bit */
2971 irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA);
2972 if (!(irq_ptr->qdr)) {
2973 free_page((unsigned long) irq_ptr);
2974 QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n");
2975 return -ENOMEM;
2976 }
2977 QDIO_DBF_TEXT0(0,setup,"qdr:");
2978 QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*));
2979
2980 if (qdio_alloc_qs(irq_ptr,
2981 init_data->no_input_qs,
2982 init_data->no_output_qs)) {
2983 QDIO_PRINT_ERR("queue allocation failed!\n");
2984 qdio_release_irq_memory(irq_ptr);
2985 return -ENOMEM;
2986 }
2987
2988 init_data->cdev->private->qdio_data = irq_ptr;
2989
2990 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
2991
2992 return 0;
2993}
2994
2995static int qdio_fill_irq(struct qdio_initialize *init_data)
2996{
2997 int i;
2998 char dbf_text[15];
2999 struct ciw *ciw;
3000 int is_iqdio;
3001 struct qdio_irq *irq_ptr;
3002
3003 irq_ptr = init_data->cdev->private->qdio_data;
3004
3005 memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));
3006
3007 /* wipes qib.ac, required by ar7063 */
3008 memset(irq_ptr->qdr,0,sizeof(struct qdr));
3009
3010 irq_ptr->int_parm=init_data->int_parm;
3011
3012 irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
3013 irq_ptr->no_input_qs=init_data->no_input_qs;
3014 irq_ptr->no_output_qs=init_data->no_output_qs;
3015
3016 if (init_data->q_format==QDIO_IQDIO_QFMT) {
3017 irq_ptr->is_iqdio_irq=1;
3018 irq_ptr->is_thinint_irq=1;
3019 } else {
3020 irq_ptr->is_iqdio_irq=0;
3021 irq_ptr->is_thinint_irq=hydra_thinints;
3022 }
3023 sprintf(dbf_text,"is_i_t%1x%1x",
3024 irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
3025 QDIO_DBF_TEXT2(0,setup,dbf_text);
3026
3027 if (irq_ptr->is_thinint_irq) {
3028 irq_ptr->dev_st_chg_ind = qdio_get_indicator();
3029 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
3030 if (!irq_ptr->dev_st_chg_ind) {
3031 QDIO_PRINT_WARN("no indicator location available " \
3032 "for irq 0.%x.%x\n",
3033 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
3034 qdio_release_irq_memory(irq_ptr);
3035 return -ENOBUFS;
3036 }
3037 }
3038
3039 /* defaults */
3040 irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
3041 irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
3042 irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
3043 irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;
3044
3045 qdio_fill_qs(irq_ptr, init_data->cdev,
3046 init_data->no_input_qs,
3047 init_data->no_output_qs,
3048 init_data->input_handler,
3049 init_data->output_handler,init_data->int_parm,
3050 init_data->q_format,init_data->flags,
3051 init_data->input_sbal_addr_array,
3052 init_data->output_sbal_addr_array);
3053
3054 if (!try_module_get(THIS_MODULE)) {
3055 QDIO_PRINT_CRIT("try_module_get() failed!\n");
3056 qdio_release_irq_memory(irq_ptr);
3057 return -EINVAL;
3058 }
3059
3060 qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
3061 init_data->no_output_qs,
3062 init_data->min_input_threshold,
3063 init_data->max_input_threshold,
3064 init_data->min_output_threshold,
3065 init_data->max_output_threshold);
3066
3067 /* fill in qdr */
3068 irq_ptr->qdr->qfmt=init_data->q_format;
3069 irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
3070 irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
3071 irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
3072 irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;
3073
3074 irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
3075 irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
3076
3077 /* fill in qib */
3078 irq_ptr->is_qebsm = is_passthrough;
3079 if (irq_ptr->is_qebsm)
3080 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
3081
3082 irq_ptr->qib.qfmt=init_data->q_format;
3083 if (init_data->no_input_qs)
3084 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
3085 if (init_data->no_output_qs)
3086 irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
3087 memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);
3088
3089 qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
3090 init_data->qib_param_field,
3091 init_data->no_input_qs,
3092 init_data->no_output_qs,
3093 init_data->input_slib_elements,
3094 init_data->output_slib_elements);
3095
3096 /* first input descriptors, then output descriptors */
3097 is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;
3098 for (i=0;i<init_data->no_input_qs;i++)
3099 qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);
3100
3101 for (i=0;i<init_data->no_output_qs;i++)
3102 qdio_allocate_fill_output_desc(irq_ptr, i,
3103 init_data->no_input_qs,
3104 is_iqdio);
3105
3106 /* qdr, qib, sls, slsbs, slibs, sbales filled. */
3107
3108 /* get qdio commands */
3109 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
3110 if (!ciw) {
3111 QDIO_DBF_TEXT2(1,setup,"no eq");
3112 QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
3113 "Trying to use default.\n");
3114 } else
3115 irq_ptr->equeue = *ciw;
3116 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
3117 if (!ciw) {
3118 QDIO_DBF_TEXT2(1,setup,"no aq");
3119 QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
3120 "Trying to use default.\n");
3121 } else
3122 irq_ptr->aqueue = *ciw;
3123
3124 /* Set new interrupt handler. */
3125 irq_ptr->original_int_handler = init_data->cdev->handler;
3126 init_data->cdev->handler = qdio_handler;
3127
3128 return 0;
3129}
3130
3131int
3132qdio_establish(struct qdio_initialize *init_data)
3133{
3134 struct qdio_irq *irq_ptr;
3135 unsigned long saveflags;
3136 int result, result2;
3137 struct ccw_device *cdev;
3138 char dbf_text[20];
3139
3140 cdev=init_data->cdev;
3141 irq_ptr = cdev->private->qdio_data;
3142 if (!irq_ptr)
3143 return -EINVAL;
3144
3145 if (cdev->private->state != DEV_STATE_ONLINE)
3146 return -EINVAL;
3147
3148 down(&irq_ptr->setting_up_sema);
3149
3150 qdio_fill_irq(init_data);
3151
3152 /* the thinint CHSC stuff */
3153 if (irq_ptr->is_thinint_irq) {
3154
3155 result = tiqdio_set_subchannel_ind(irq_ptr,0);
3156 if (result) {
3157 up(&irq_ptr->setting_up_sema);
3158 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3159 return result;
3160 }
3161 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
3162 }
3163
3164 sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no);
3165 QDIO_DBF_TEXT0(0,setup,dbf_text);
3166 QDIO_DBF_TEXT0(0,trace,dbf_text);
3167
3168 /* establish q */
3169 irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd;
3170 irq_ptr->ccw.flags=CCW_FLAG_SLI;
3171 irq_ptr->ccw.count=irq_ptr->equeue.count;
3172 irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr);
3173
3174 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3175
3176 ccw_device_set_options_mask(cdev, 0);
3177 result = ccw_device_start(cdev, &irq_ptr->ccw,
3178 QDIO_DOING_ESTABLISH, 0, 0);
3179 if (result) {
3180 result2 = ccw_device_start(cdev, &irq_ptr->ccw,
3181 QDIO_DOING_ESTABLISH, 0, 0);
3182 sprintf(dbf_text,"eq:io%4x",result);
3183 QDIO_DBF_TEXT2(1,setup,dbf_text);
3184 if (result2) {
3185			sprintf(dbf_text,"eq:io%4x",result2);
3186 QDIO_DBF_TEXT2(1,setup,dbf_text);
3187 }
3188 QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
3189 "returned %i, next try returned %i\n",
3190 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3191 result, result2);
3192 result=result2;
3193 }
3194
3195 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3196
3197 if (result) {
3198 up(&irq_ptr->setting_up_sema);
3199 qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
3200 return result;
3201 }
3202
3203 wait_event_interruptible_timeout(cdev->private->wait_q,
3204 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
3205 irq_ptr->state == QDIO_IRQ_STATE_ERR,
3206 QDIO_ESTABLISH_TIMEOUT);
3207
3208 if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
3209 result = 0;
3210 else {
3211 up(&irq_ptr->setting_up_sema);
3212 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3213 return -EIO;
3214 }
3215
3216 qdio_get_ssqd_siga(irq_ptr);
3217 /* if this gets set once, we're running under VM and can omit SVSes */
3218 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
3219 omit_svs=1;
3220
3221 sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
3222 QDIO_DBF_TEXT2(0,setup,dbf_text);
3223
3224 sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
3225 QDIO_DBF_TEXT2(0,setup,dbf_text);
3226
3227 irq_ptr->hydra_gives_outbound_pcis=
3228 irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED;
3229 irq_ptr->sync_done_on_outb_pcis=
3230 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS;
3231
3232 qdio_initialize_set_siga_flags_input(irq_ptr);
3233 qdio_initialize_set_siga_flags_output(irq_ptr);
3234
3235 up(&irq_ptr->setting_up_sema);
3236
3237 return result;
3238
3239}
3240
3241int
3242qdio_activate(struct ccw_device *cdev, int flags)
3243{
3244 struct qdio_irq *irq_ptr;
3245 int i,result=0,result2;
3246 unsigned long saveflags;
3247 char dbf_text[20]; /* see qdio_initialize */
3248
3249 irq_ptr = cdev->private->qdio_data;
3250 if (!irq_ptr)
3251 return -ENODEV;
3252
3253 if (cdev->private->state != DEV_STATE_ONLINE)
3254 return -EINVAL;
3255
3256 down(&irq_ptr->setting_up_sema);
3257 if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
3258 result=-EBUSY;
3259 goto out;
3260 }
3261
3262 sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
3263 QDIO_DBF_TEXT2(0,setup,dbf_text);
3264 QDIO_DBF_TEXT2(0,trace,dbf_text);
3265
3266 /* activate q */
3267 irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd;
3268 irq_ptr->ccw.flags=CCW_FLAG_SLI;
3269 irq_ptr->ccw.count=irq_ptr->aqueue.count;
3270 irq_ptr->ccw.cda=QDIO_GET_ADDR(0);
3271
3272 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3273
3274 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
3275 result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
3276 0, DOIO_DENY_PREFETCH);
3277 if (result) {
3278 result2=ccw_device_start(cdev,&irq_ptr->ccw,
3279 QDIO_DOING_ACTIVATE,0,0);
3280 sprintf(dbf_text,"aq:io%4x",result);
3281 QDIO_DBF_TEXT2(1,setup,dbf_text);
3282 if (result2) {
3283			sprintf(dbf_text,"aq:io%4x",result2);
3284 QDIO_DBF_TEXT2(1,setup,dbf_text);
3285 }
3286 QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
3287 "returned %i, next try returned %i\n",
3288 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3289 result, result2);
3290 result=result2;
3291 }
3292
3293 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3294 if (result)
3295 goto out;
3296
3297 for (i=0;i<irq_ptr->no_input_qs;i++) {
3298 if (irq_ptr->is_thinint_irq) {
3299 /*
3300			 * This way we know that, if we get interrupted by
3301			 * tiqdio_inbound_processing, qdio_unmark_q will not
3302			 * be called.
3303 */
3304 qdio_reserve_q(irq_ptr->input_qs[i]);
3305 qdio_mark_tiq(irq_ptr->input_qs[i]);
3306 qdio_release_q(irq_ptr->input_qs[i]);
3307 }
3308 }
3309
3310 if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) {
3311 for (i=0;i<irq_ptr->no_input_qs;i++) {
3312 irq_ptr->input_qs[i]->is_input_q|=
3313 QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT;
3314 }
3315 }
3316
3317 msleep(QDIO_ACTIVATE_TIMEOUT);
3318 switch (irq_ptr->state) {
3319 case QDIO_IRQ_STATE_STOPPED:
3320 case QDIO_IRQ_STATE_ERR:
3321 up(&irq_ptr->setting_up_sema);
3322 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3323 down(&irq_ptr->setting_up_sema);
3324 result = -EIO;
3325 break;
3326 default:
3327 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
3328 result = 0;
3329 }
3330 out:
3331 up(&irq_ptr->setting_up_sema);
3332
3333 return result;
3334}
3335
3336/* buffers filled forwards again to make Rick happy */
3337static void
3338qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3339 unsigned int count, struct qdio_buffer *buffers)
3340{
3341 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3342 int tmp = 0;
3343
3344 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3345 if (irq->is_qebsm) {
3346 while (count) {
3347 tmp = set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3348 if (!tmp)
3349 return;
3350 }
3351 return;
3352 }
3353 for (;;) {
3354 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3355 count--;
3356 if (!count) break;
3357 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3358 }
3359}
3360
3361static void
3362qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3363 unsigned int count, struct qdio_buffer *buffers)
3364{
3365 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3366 int tmp = 0;
3367
3368 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3369 if (irq->is_qebsm) {
3370 while (count) {
3371 tmp = set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3372 if (!tmp)
3373 return;
3374 }
3375 return;
3376 }
3377
3378 for (;;) {
3379 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3380 count--;
3381 if (!count) break;
3382 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3383 }
3384}
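
/*
 * Both fill helpers above rely on QDIO_MAX_BUFFERS_PER_Q being a
 * power of two, so that "& (QDIO_MAX_BUFFERS_PER_Q - 1)" wraps a
 * buffer index the way a modulo would. A minimal sketch of the same
 * idiom (qdio_next_idx is not part of this driver, it just names
 * the wrap-around step used inline above):
 *
 *	static inline unsigned int qdio_next_idx(unsigned int qidx)
 *	{
 *		return (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
 *	}
 */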
3385
3386static void
3387do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3388 unsigned int qidx, unsigned int count,
3389 struct qdio_buffer *buffers)
3390{
3391 int used_elements;
3392
3393 /* This is the inbound handling of queues */
3394 used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3395
3396 qdio_do_qdio_fill_input(q,qidx,count,buffers);
3397
3398 if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
3399 (callflags&QDIO_FLAG_UNDER_INTERRUPT))
3400 atomic_xchg(&q->polling,0);
3401
3402 if (used_elements)
3403 return;
3404 if (callflags&QDIO_FLAG_DONT_SIGA)
3405 return;
3406 if (q->siga_in) {
3407 int result;
3408
3409 result=qdio_siga_input(q);
3410 if (result) {
3411 if (q->siga_error)
3412 q->error_status_flags|=
3413 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
3414 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
3415 q->siga_error=result;
3416 }
3417 }
3418
3419 qdio_mark_q(q);
3420}
3421
3422static void
3423do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3424 unsigned int qidx, unsigned int count,
3425 struct qdio_buffer *buffers)
3426{
3427 int used_elements;
3428 unsigned int cnt, start_buf;
3429 unsigned char state = 0;
3430 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3431
3432 /* This is the outbound handling of queues */
3433 qdio_do_qdio_fill_output(q,qidx,count,buffers);
3434
3435 used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3436
3437 if (callflags&QDIO_FLAG_DONT_SIGA) {
3438 qdio_perf_stat_inc(&perf_stats.outbound_cnt);
3439 return;
3440 }
3441 if (callflags & QDIO_FLAG_PCI_OUT)
3442 q->is_pci_out = 1;
3443 else
3444 q->is_pci_out = 0;
3445 if (q->is_iqdio_q) {
3446 /* one siga for every sbal */
3447 while (count--)
3448 qdio_kick_outbound_q(q);
3449
3450 __qdio_outbound_processing(q);
3451 } else {
3452 /* under VM, we do a SIGA sync unconditionally */
3453 SYNC_MEMORY;
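		/* SYNC_MEMORY hides a brace-less conditional, roughly
		 *
		 *	if (unlikely(q->siga_sync))
		 *		qdio_siga_sync_q(q);
		 *
		 * (see qdio.h for the exact expansion), so the "else"
		 * below legally attaches to that hidden "if". */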
3454 else {
3455 /*
3456			 * without shadow queues (this "else" is the else
3457			 * branch of the if hidden in SYNC_MEMORY :-/ ), we
3458			 * try to fast-requeue buffers
3459 */
3460 if (irq->is_qebsm) {
3461 cnt = 1;
3462 start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
3463 (QDIO_MAX_BUFFERS_PER_Q-1));
3464 qdio_do_eqbs(q, &state, &start_buf, &cnt);
3465 } else
3466 state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
3467 &(QDIO_MAX_BUFFERS_PER_Q-1) ];
3468 if (state != SLSB_CU_OUTPUT_PRIMED) {
3469 qdio_kick_outbound_q(q);
3470 } else {
3471 QDIO_DBF_TEXT3(0,trace, "fast-req");
3472 qdio_perf_stat_inc(&perf_stats.fast_reqs);
3473 }
3474 }
3475 /*
3476		 * only marking the queue could take too long; the
3477		 * upper-layer module could generate a lot of
3478		 * traffic in that time
3479 */
3480 __qdio_outbound_processing(q);
3481 }
3482
3483 qdio_perf_stat_inc(&perf_stats.outbound_cnt);
3484}
3485
3486/* count must be 1 in iqdio */
3487int
3488do_QDIO(struct ccw_device *cdev,unsigned int callflags,
3489 unsigned int queue_number, unsigned int qidx,
3490 unsigned int count,struct qdio_buffer *buffers)
3491{
3492 struct qdio_irq *irq_ptr;
3493#ifdef CONFIG_QDIO_DEBUG
3494 char dbf_text[20];
3495
3496 sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no);
3497 QDIO_DBF_TEXT3(0,trace,dbf_text);
3498#endif /* CONFIG_QDIO_DEBUG */
3499
3500	if ( (qidx>=QDIO_MAX_BUFFERS_PER_Q) ||
3501	     (count>QDIO_MAX_BUFFERS_PER_Q) ||
3502	     (queue_number>=QDIO_MAX_QUEUES_PER_IRQ) )
3503 return -EINVAL;
3504
3505 if (count==0)
3506 return 0;
3507
3508 irq_ptr = cdev->private->qdio_data;
3509 if (!irq_ptr)
3510 return -ENODEV;
3511
3512#ifdef CONFIG_QDIO_DEBUG
3513 if (callflags&QDIO_FLAG_SYNC_INPUT)
3514 QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
3515 sizeof(void*));
3516 else
3517 QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number],
3518 sizeof(void*));
3519 sprintf(dbf_text,"flag%04x",callflags);
3520 QDIO_DBF_TEXT3(0,trace,dbf_text);
3521 sprintf(dbf_text,"qi%02xct%02x",qidx,count);
3522 QDIO_DBF_TEXT3(0,trace,dbf_text);
3523#endif /* CONFIG_QDIO_DEBUG */
3524
3525 if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
3526 return -EBUSY;
3527
3528 if (callflags&QDIO_FLAG_SYNC_INPUT)
3529 do_qdio_handle_inbound(irq_ptr->input_qs[queue_number],
3530 callflags, qidx, count, buffers);
3531 else if (callflags&QDIO_FLAG_SYNC_OUTPUT)
3532 do_qdio_handle_outbound(irq_ptr->output_qs[queue_number],
3533 callflags, qidx, count, buffers);
3534 else {
3535 QDIO_DBF_TEXT3(1,trace,"doQD:inv");
3536 return -EINVAL;
3537 }
3538 return 0;
3539}
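
/*
 * A minimal, hypothetical upper-layer sketch for do_QDIO(): handing
 * one processed buffer back to input queue 0 so the hardware can
 * refill it (first_element and my_buffers are placeholders):
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, first_element,
 *		     1, &my_buffers[first_element]);
 *
 * Outbound transfers work the same way with QDIO_FLAG_SYNC_OUTPUT,
 * optionally or'ed with QDIO_FLAG_PCI_OUT.
 */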
3540
3541static int
3542qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
3543 int buffer_length, int *eof, void *data)
3544{
3545 int c=0;
3546
3547	/* we are always called with buffer_length=4k, so we always
3548	   deliver everything on the first read */
3549 if (offset>0)
3550 return 0;
3551
3552#define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
3553#ifdef CONFIG_64BIT
3554 _OUTP_IT("Number of tasklet runs (total) : %li\n",
3555 (long)atomic64_read(&perf_stats.tl_runs));
3556 _OUTP_IT("Inbound tasklet runs tried/retried : %li/%li\n",
3557 (long)atomic64_read(&perf_stats.inbound_tl_runs),
3558 (long)atomic64_read(&perf_stats.inbound_tl_runs_resched));
3559 _OUTP_IT("Inbound-thin tasklet runs tried/retried : %li/%li\n",
3560 (long)atomic64_read(&perf_stats.inbound_thin_tl_runs),
3561 (long)atomic64_read(&perf_stats.inbound_thin_tl_runs_resched));
3562 _OUTP_IT("Outbound tasklet runs tried/retried : %li/%li\n",
3563 (long)atomic64_read(&perf_stats.outbound_tl_runs),
3564 (long)atomic64_read(&perf_stats.outbound_tl_runs_resched));
3565 _OUTP_IT("\n");
3566 _OUTP_IT("Number of SIGA sync's issued : %li\n",
3567 (long)atomic64_read(&perf_stats.siga_syncs));
3568 _OUTP_IT("Number of SIGA in's issued : %li\n",
3569 (long)atomic64_read(&perf_stats.siga_ins));
3570 _OUTP_IT("Number of SIGA out's issued : %li\n",
3571 (long)atomic64_read(&perf_stats.siga_outs));
3572 _OUTP_IT("Number of PCIs caught : %li\n",
3573 (long)atomic64_read(&perf_stats.pcis));
3574 _OUTP_IT("Number of adapter interrupts caught : %li\n",
3575 (long)atomic64_read(&perf_stats.thinints));
3576 _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %li\n",
3577 (long)atomic64_read(&perf_stats.fast_reqs));
3578 _OUTP_IT("\n");
3579 _OUTP_IT("Number of inbound transfers : %li\n",
3580 (long)atomic64_read(&perf_stats.inbound_cnt));
3581 _OUTP_IT("Number of do_QDIOs outbound : %li\n",
3582 (long)atomic64_read(&perf_stats.outbound_cnt));
3583#else /* CONFIG_64BIT */
3584 _OUTP_IT("Number of tasklet runs (total) : %i\n",
3585 atomic_read(&perf_stats.tl_runs));
3586 _OUTP_IT("Inbound tasklet runs tried/retried : %i/%i\n",
3587 atomic_read(&perf_stats.inbound_tl_runs),
3588 atomic_read(&perf_stats.inbound_tl_runs_resched));
3589 _OUTP_IT("Inbound-thin tasklet runs tried/retried : %i/%i\n",
3590 atomic_read(&perf_stats.inbound_thin_tl_runs),
3591 atomic_read(&perf_stats.inbound_thin_tl_runs_resched));
3592 _OUTP_IT("Outbound tasklet runs tried/retried : %i/%i\n",
3593 atomic_read(&perf_stats.outbound_tl_runs),
3594 atomic_read(&perf_stats.outbound_tl_runs_resched));
3595 _OUTP_IT("\n");
3596 _OUTP_IT("Number of SIGA sync's issued : %i\n",
3597 atomic_read(&perf_stats.siga_syncs));
3598 _OUTP_IT("Number of SIGA in's issued : %i\n",
3599 atomic_read(&perf_stats.siga_ins));
3600 _OUTP_IT("Number of SIGA out's issued : %i\n",
3601 atomic_read(&perf_stats.siga_outs));
3602 _OUTP_IT("Number of PCIs caught : %i\n",
3603 atomic_read(&perf_stats.pcis));
3604 _OUTP_IT("Number of adapter interrupts caught : %i\n",
3605 atomic_read(&perf_stats.thinints));
3606 _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %i\n",
3607 atomic_read(&perf_stats.fast_reqs));
3608 _OUTP_IT("\n");
3609 _OUTP_IT("Number of inbound transfers : %i\n",
3610 atomic_read(&perf_stats.inbound_cnt));
3611 _OUTP_IT("Number of do_QDIOs outbound : %i\n",
3612 atomic_read(&perf_stats.outbound_cnt));
3613#endif /* CONFIG_64BIT */
3614 _OUTP_IT("\n");
3615
3616 return c;
3617}
3618
3619static struct proc_dir_entry *qdio_perf_proc_file;
3620
3621static void
3622qdio_add_procfs_entry(void)
3623{
3624 proc_perf_file_registration=0;
3625 qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
3626 S_IFREG|0444,NULL);
3627 if (qdio_perf_proc_file) {
3628 qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
3629 } else proc_perf_file_registration=-1;
3630
3631 if (proc_perf_file_registration)
3632 QDIO_PRINT_WARN("was not able to register perf. " \
3633 "proc-file (%i).\n",
3634 proc_perf_file_registration);
3635}
3636
3637static void
3638qdio_remove_procfs_entry(void)
3639{
3640 if (!proc_perf_file_registration) /* means if it went ok earlier */
3641 remove_proc_entry(QDIO_PERF,NULL);
3642}
3643
3644/**
3645 * attributes in sysfs
3646 *****************************************************************************/
3647
3648static ssize_t
3649qdio_performance_stats_show(struct bus_type *bus, char *buf)
3650{
3651 return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
3652}
3653
3654static ssize_t
3655qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
3656{
3657 unsigned long i;
3658 int ret;
3659
3660 ret = strict_strtoul(buf, 16, &i);
3661 if (!ret && ((i == 0) || (i == 1))) {
3662 if (i == qdio_performance_stats)
3663 return count;
3664 qdio_performance_stats = i;
3665 if (i==0) {
3666 /* reset perf. stat. info */
3667#ifdef CONFIG_64BIT
3668 atomic64_set(&perf_stats.tl_runs, 0);
3669 atomic64_set(&perf_stats.outbound_tl_runs, 0);
3670 atomic64_set(&perf_stats.inbound_tl_runs, 0);
3671 atomic64_set(&perf_stats.inbound_tl_runs_resched, 0);
3672 atomic64_set(&perf_stats.inbound_thin_tl_runs, 0);
3673 atomic64_set(&perf_stats.inbound_thin_tl_runs_resched,
3674 0);
3675 atomic64_set(&perf_stats.siga_outs, 0);
3676 atomic64_set(&perf_stats.siga_ins, 0);
3677 atomic64_set(&perf_stats.siga_syncs, 0);
3678 atomic64_set(&perf_stats.pcis, 0);
3679 atomic64_set(&perf_stats.thinints, 0);
3680 atomic64_set(&perf_stats.fast_reqs, 0);
3681 atomic64_set(&perf_stats.outbound_cnt, 0);
3682 atomic64_set(&perf_stats.inbound_cnt, 0);
3683#else /* CONFIG_64BIT */
3684 atomic_set(&perf_stats.tl_runs, 0);
3685 atomic_set(&perf_stats.outbound_tl_runs, 0);
3686 atomic_set(&perf_stats.inbound_tl_runs, 0);
3687 atomic_set(&perf_stats.inbound_tl_runs_resched, 0);
3688 atomic_set(&perf_stats.inbound_thin_tl_runs, 0);
3689 atomic_set(&perf_stats.inbound_thin_tl_runs_resched, 0);
3690 atomic_set(&perf_stats.siga_outs, 0);
3691 atomic_set(&perf_stats.siga_ins, 0);
3692 atomic_set(&perf_stats.siga_syncs, 0);
3693 atomic_set(&perf_stats.pcis, 0);
3694 atomic_set(&perf_stats.thinints, 0);
3695 atomic_set(&perf_stats.fast_reqs, 0);
3696 atomic_set(&perf_stats.outbound_cnt, 0);
3697 atomic_set(&perf_stats.inbound_cnt, 0);
3698#endif /* CONFIG_64BIT */
3699 }
3700 } else {
3701 QDIO_PRINT_ERR("QDIO performance_stats: write 0 or 1 to this file!\n");
3702 return -EINVAL;
3703 }
3704 return count;
3705}
3706
3707static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show,
3708 qdio_performance_stats_store);
3709
3710static void
3711tiqdio_register_thinints(void)
3712{
3713 char dbf_text[20];
3714
3715 tiqdio_ind =
3716 s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL,
3717 TIQDIO_THININT_ISC);
3718 if (IS_ERR(tiqdio_ind)) {
3719 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
3720 QDIO_DBF_TEXT0(0,setup,dbf_text);
3721 QDIO_PRINT_ERR("failed to register adapter handler " \
3722 "(rc=%li).\nAdapter interrupts might " \
3723 "not work. Continuing.\n",
3724 PTR_ERR(tiqdio_ind));
3725 tiqdio_ind = NULL;
3726 }
3727}
3728
3729static void
3730tiqdio_unregister_thinints(void)
3731{
3732 if (tiqdio_ind)
3733 s390_unregister_adapter_interrupt(tiqdio_ind,
3734 TIQDIO_THININT_ISC);
3735}
3736
3737static int
3738qdio_get_qdio_memory(void)
3739{
3740 int i;
3741 indicator_used[0]=1;
3742
3743 for (i=1;i<INDICATORS_PER_CACHELINE;i++)
3744 indicator_used[i]=0;
3745 indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
3746 GFP_KERNEL);
3747 if (!indicators)
3748 return -ENOMEM;
3749 return 0;
3750}
3751
3752static void
3753qdio_release_qdio_memory(void)
3754{
3755 kfree(indicators);
3756}
3757
3758static void
3759qdio_unregister_dbf_views(void)
3760{
3761 if (qdio_dbf_setup)
3762 debug_unregister(qdio_dbf_setup);
3763 if (qdio_dbf_sbal)
3764 debug_unregister(qdio_dbf_sbal);
3765 if (qdio_dbf_sense)
3766 debug_unregister(qdio_dbf_sense);
3767 if (qdio_dbf_trace)
3768 debug_unregister(qdio_dbf_trace);
3769#ifdef CONFIG_QDIO_DEBUG
3770 if (qdio_dbf_slsb_out)
3771 debug_unregister(qdio_dbf_slsb_out);
3772 if (qdio_dbf_slsb_in)
3773 debug_unregister(qdio_dbf_slsb_in);
3774#endif /* CONFIG_QDIO_DEBUG */
3775}
3776
3777static int
3778qdio_register_dbf_views(void)
3779{
3780 qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME,
3781 QDIO_DBF_SETUP_PAGES,
3782 QDIO_DBF_SETUP_NR_AREAS,
3783 QDIO_DBF_SETUP_LEN);
3784 if (!qdio_dbf_setup)
3785 goto oom;
3786 debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view);
3787 debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL);
3788
3789 qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME,
3790 QDIO_DBF_SBAL_PAGES,
3791 QDIO_DBF_SBAL_NR_AREAS,
3792 QDIO_DBF_SBAL_LEN);
3793 if (!qdio_dbf_sbal)
3794 goto oom;
3795
3796 debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view);
3797 debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL);
3798
3799 qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME,
3800 QDIO_DBF_SENSE_PAGES,
3801 QDIO_DBF_SENSE_NR_AREAS,
3802 QDIO_DBF_SENSE_LEN);
3803 if (!qdio_dbf_sense)
3804 goto oom;
3805
3806 debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view);
3807 debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL);
3808
3809 qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME,
3810 QDIO_DBF_TRACE_PAGES,
3811 QDIO_DBF_TRACE_NR_AREAS,
3812 QDIO_DBF_TRACE_LEN);
3813 if (!qdio_dbf_trace)
3814 goto oom;
3815
3816 debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
3817 debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);
3818
3819#ifdef CONFIG_QDIO_DEBUG
3820 qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
3821 QDIO_DBF_SLSB_OUT_PAGES,
3822 QDIO_DBF_SLSB_OUT_NR_AREAS,
3823 QDIO_DBF_SLSB_OUT_LEN);
3824 if (!qdio_dbf_slsb_out)
3825 goto oom;
3826 debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view);
3827 debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL);
3828
3829 qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME,
3830 QDIO_DBF_SLSB_IN_PAGES,
3831 QDIO_DBF_SLSB_IN_NR_AREAS,
3832 QDIO_DBF_SLSB_IN_LEN);
3833 if (!qdio_dbf_slsb_in)
3834 goto oom;
3835 debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
3836 debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
3837#endif /* CONFIG_QDIO_DEBUG */
3838 return 0;
3839oom:
3840 QDIO_PRINT_ERR("not enough memory for dbf.\n");
3841 qdio_unregister_dbf_views();
3842 return -ENOMEM;
3843}
3844
3845static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size)
3846{
3847 return (void *) get_zeroed_page(gfp_mask|GFP_DMA);
3848}
3849
3850static void qdio_mempool_free(void *element, void *size)
3851{
3852 free_page((unsigned long) element);
3853}
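/*
 * Editorial sketch (not part of this patch): the two callbacks above
 * plug into the standard mempool API -- see the mempool_create() call
 * in init_QDIO() below -- so the pool always keeps a reserve of
 * zeroed, DMA-capable pages. "my_pool" and the example_* names are
 * hypothetical.
 */
#include <linux/mempool.h>

static mempool_t *my_pool;

static int example_pool_init(void)
{
	/* keep at least 4 elements in reserve; no pool_data needed */
	my_pool = mempool_create(4, qdio_mempool_alloc, qdio_mempool_free,
				 NULL);
	return my_pool ? 0 : -ENOMEM;
}

static void example_pool_use(void)
{
	void *page = mempool_alloc(my_pool, GFP_ATOMIC);

	if (page) {
		/* ... fill in and issue a CHSC request ... */
		mempool_free(page, my_pool);
	}
}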
3854
3855static int __init
3856init_QDIO(void)
3857{
3858 int res;
3859 void *ptr;
3860
3861 printk("qdio: loading %s\n",version);
3862
3863 res=qdio_get_qdio_memory();
3864 if (res)
3865 return res;
3866
3867 qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
3868 256, 0, NULL);
3869 if (!qdio_q_cache) {
3870 qdio_release_qdio_memory();
3871 return -ENOMEM;
3872 }
3873
3874 res = qdio_register_dbf_views();
3875 if (res) {
3876 kmem_cache_destroy(qdio_q_cache);
3877 qdio_release_qdio_memory();
3878 return res;
3879 }
3880
3881 QDIO_DBF_TEXT0(0,setup,"initQDIO");
3882 res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3883
3884 memset((void*)&perf_stats,0,sizeof(perf_stats));
3885 QDIO_DBF_TEXT0(0,setup,"perfstat");
3886 ptr=&perf_stats;
3887 QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
3888
3889 qdio_add_procfs_entry();
3890
3891 qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS,
3892 qdio_mempool_alloc,
3893 qdio_mempool_free, NULL);
3894
3895 isc_register(QDIO_AIRQ_ISC);
3896 if (tiqdio_check_chsc_availability())
3897 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
3898
3899 tiqdio_register_thinints();
3900
3901 return 0;
3902 }
3903
3904static void __exit
3905cleanup_QDIO(void)
3906{
3907 tiqdio_unregister_thinints();
3908 isc_unregister(QDIO_AIRQ_ISC);
3909 qdio_remove_procfs_entry();
3910 qdio_release_qdio_memory();
3911 qdio_unregister_dbf_views();
3912 mempool_destroy(qdio_mempool_scssc);
3913 kmem_cache_destroy(qdio_q_cache);
3914 bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3915 printk("qdio: %s: module removed\n",version);
3916}
3917
3918module_init(init_QDIO);
3919module_exit(cleanup_QDIO);
3920
3921EXPORT_SYMBOL(qdio_allocate);
3922EXPORT_SYMBOL(qdio_establish);
3923EXPORT_SYMBOL(qdio_initialize);
3924EXPORT_SYMBOL(qdio_activate);
3925EXPORT_SYMBOL(do_QDIO);
3926EXPORT_SYMBOL(qdio_shutdown);
3927EXPORT_SYMBOL(qdio_free);
3928EXPORT_SYMBOL(qdio_cleanup);
3929EXPORT_SYMBOL(qdio_synchronize);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 7656081a24d2..c1a70985abfa 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,66 +1,20 @@
1/*
2 * linux/drivers/s390/cio/qdio.h
3 *
4 * Copyright 2000,2008 IBM Corp.
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
6 * Jan Glauber <jang@linux.vnet.ibm.com>
7 */
1#ifndef _CIO_QDIO_H 8#ifndef _CIO_QDIO_H
2#define _CIO_QDIO_H 9#define _CIO_QDIO_H
3 10
4#include <asm/page.h> 11#include <asm/page.h>
5#include <asm/isc.h>
6#include <asm/schid.h> 12#include <asm/schid.h>
13#include "chsc.h"
7 14
8#ifdef CONFIG_QDIO_DEBUG 15#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */
9#define QDIO_VERBOSE_LEVEL 9 16#define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */
10#else /* CONFIG_QDIO_DEBUG */ 17#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
11#define QDIO_VERBOSE_LEVEL 5
12#endif /* CONFIG_QDIO_DEBUG */
13#define QDIO_USE_PROCESSING_STATE
14
15#define QDIO_MINIMAL_BH_RELIEF_TIME 16
16#define QDIO_TIMER_POLL_VALUE 1
17#define IQDIO_TIMER_POLL_VALUE 1
18
19/*
20 * unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as
21 * we never know, whether we'll get initiative again, e.g. to give the
22 * transmit skb's back to the stack, however the stack may be waiting for
23 * them... therefore we define 4 as threshold to start polling (which
24 * will stop as soon as the asynchronous queue catches up)
25 * btw, this only applies to the asynchronous HiperSockets queue
26 */
27#define IQDIO_FILL_LEVEL_TO_POLL 4
28
29#define TIQDIO_THININT_ISC QDIO_AIRQ_ISC
30#define TIQDIO_DELAY_TARGET 0
31#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
32#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
33#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */
34#define IQDIO_GLOBAL_LAPS_INT 1 /* don't global summary */
35#define IQDIO_LOCAL_LAPS 4
36#define IQDIO_LOCAL_LAPS_INT 1
37#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2
38/*#define IQDIO_IQDC_INT_PARM 0x1234*/
39
40#define QDIO_Q_LAPS 5
41
42#define QDIO_STORAGE_KEY PAGE_DEFAULT_KEY
43
44#define L2_CACHELINE_SIZE 256
45#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32))
46
47#define QDIO_PERF "qdio_perf"
48
49/* must be a power of 2 */
50/*#define QDIO_STATS_NUMBER 4
51
52#define QDIO_STATS_CLASSES 2
53#define QDIO_STATS_COUNT_NEEDED 2*/
54
55#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before
56 exiting without having use_count
57 of the queue to 0 */
58
59#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
60#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
61#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
62#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ)
63#define QDIO_ACTIVATE_TIMEOUT (5) /* 5 ms */
64 18
65enum qdio_irq_states { 19enum qdio_irq_states {
66 QDIO_IRQ_STATE_INACTIVE, 20 QDIO_IRQ_STATE_INACTIVE,
@@ -72,565 +26,352 @@ enum qdio_irq_states {
72 NR_QDIO_IRQ_STATES, 26 NR_QDIO_IRQ_STATES,
73}; 27};
74 28
75/* used as intparm in do_IO: */ 29/* used as intparm in do_IO */
76#define QDIO_DOING_SENSEID 0 30#define QDIO_DOING_ESTABLISH 1
77#define QDIO_DOING_ESTABLISH 1 31#define QDIO_DOING_ACTIVATE 2
78#define QDIO_DOING_ACTIVATE 2 32#define QDIO_DOING_CLEANUP 3
79#define QDIO_DOING_CLEANUP 3 33
80 34#define SLSB_STATE_NOT_INIT 0x0
81/************************* DEBUG FACILITY STUFF *********************/ 35#define SLSB_STATE_EMPTY 0x1
82 36#define SLSB_STATE_PRIMED 0x2
83#define QDIO_DBF_HEX(ex,name,level,addr,len) \ 37#define SLSB_STATE_HALTED 0xe
84 do { \ 38#define SLSB_STATE_ERROR 0xf
85 if (ex) \ 39#define SLSB_TYPE_INPUT 0x0
86 debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \ 40#define SLSB_TYPE_OUTPUT 0x20
87 else \ 41#define SLSB_OWNER_PROG 0x80
88 debug_event(qdio_dbf_##name,level,(void*)(addr),len); \ 42#define SLSB_OWNER_CU 0x40
89 } while (0) 43
90#define QDIO_DBF_TEXT(ex,name,level,text) \ 44#define SLSB_P_INPUT_NOT_INIT \
91 do { \ 45 (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT) /* 0x80 */
92 if (ex) \ 46#define SLSB_P_INPUT_ACK \
93 debug_text_exception(qdio_dbf_##name,level,text); \ 47 (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x81 */
94 else \ 48#define SLSB_CU_INPUT_EMPTY \
95 debug_text_event(qdio_dbf_##name,level,text); \ 49 (SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x41 */
96 } while (0) 50#define SLSB_P_INPUT_PRIMED \
97 51 (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED) /* 0x82 */
98 52#define SLSB_P_INPUT_HALTED \
99#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len) 53 (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED) /* 0x8e */
100#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len) 54#define SLSB_P_INPUT_ERROR \
101#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len) 55 (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR) /* 0x8f */
102#ifdef CONFIG_QDIO_DEBUG 56#define SLSB_P_OUTPUT_NOT_INIT \
103#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len) 57 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
104#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len) 58#define SLSB_P_OUTPUT_EMPTY \
105#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len) 59 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */
106#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len) 60#define SLSB_CU_OUTPUT_PRIMED \
107#else /* CONFIG_QDIO_DEBUG */ 61 (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */
108#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0) 62#define SLSB_P_OUTPUT_HALTED \
109#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0) 63 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED) /* 0xae */
110#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0) 64#define SLSB_P_OUTPUT_ERROR \
111#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0) 65 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR) /* 0xaf */
112#endif /* CONFIG_QDIO_DEBUG */ 66
113 67#define SLSB_ERROR_DURING_LOOKUP 0xff
114#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text) 68
115#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text) 69/* additional CIWs returned by extended Sense-ID */
116#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text) 70#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
117#ifdef CONFIG_QDIO_DEBUG 71#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
118#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
119#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
120#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
121#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
122#else /* CONFIG_QDIO_DEBUG */
123#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
124#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
125#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
126#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
127#endif /* CONFIG_QDIO_DEBUG */
128
129#define QDIO_DBF_SETUP_NAME "qdio_setup"
130#define QDIO_DBF_SETUP_LEN 8
131#define QDIO_DBF_SETUP_PAGES 4
132#define QDIO_DBF_SETUP_NR_AREAS 1
133#ifdef CONFIG_QDIO_DEBUG
134#define QDIO_DBF_SETUP_LEVEL 6
135#else /* CONFIG_QDIO_DEBUG */
136#define QDIO_DBF_SETUP_LEVEL 2
137#endif /* CONFIG_QDIO_DEBUG */
138
139#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
140#define QDIO_DBF_SBAL_LEN 256
141#define QDIO_DBF_SBAL_PAGES 4
142#define QDIO_DBF_SBAL_NR_AREAS 2
143#ifdef CONFIG_QDIO_DEBUG
144#define QDIO_DBF_SBAL_LEVEL 6
145#else /* CONFIG_QDIO_DEBUG */
146#define QDIO_DBF_SBAL_LEVEL 2
147#endif /* CONFIG_QDIO_DEBUG */
148
149#define QDIO_DBF_TRACE_NAME "qdio_trace"
150#define QDIO_DBF_TRACE_LEN 8
151#define QDIO_DBF_TRACE_NR_AREAS 2
152#ifdef CONFIG_QDIO_DEBUG
153#define QDIO_DBF_TRACE_PAGES 16
154#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
155#else /* CONFIG_QDIO_DEBUG */
156#define QDIO_DBF_TRACE_PAGES 4
157#define QDIO_DBF_TRACE_LEVEL 2
158#endif /* CONFIG_QDIO_DEBUG */
159
160#define QDIO_DBF_SENSE_NAME "qdio_sense"
161#define QDIO_DBF_SENSE_LEN 64
162#define QDIO_DBF_SENSE_PAGES 2
163#define QDIO_DBF_SENSE_NR_AREAS 1
164#ifdef CONFIG_QDIO_DEBUG
165#define QDIO_DBF_SENSE_LEVEL 6
166#else /* CONFIG_QDIO_DEBUG */
167#define QDIO_DBF_SENSE_LEVEL 2
168#endif /* CONFIG_QDIO_DEBUG */
169
170#ifdef CONFIG_QDIO_DEBUG
171#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
172
173#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
174#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q
175#define QDIO_DBF_SLSB_OUT_PAGES 256
176#define QDIO_DBF_SLSB_OUT_NR_AREAS 1
177#define QDIO_DBF_SLSB_OUT_LEVEL 6
178
179#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in"
180#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q
181#define QDIO_DBF_SLSB_IN_PAGES 256
182#define QDIO_DBF_SLSB_IN_NR_AREAS 1
183#define QDIO_DBF_SLSB_IN_LEVEL 6
184#endif /* CONFIG_QDIO_DEBUG */
185
186#define QDIO_PRINTK_HEADER QDIO_NAME ": "
187
188#if QDIO_VERBOSE_LEVEL>8
189#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
190#else
191#define QDIO_PRINT_STUPID(x...) do { } while (0)
192#endif
193 72
194#if QDIO_VERBOSE_LEVEL>7 73/* flags for st qdio sch data */
195#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) 74#define CHSC_FLAG_QDIO_CAPABILITY 0x80
196#else 75#define CHSC_FLAG_VALIDITY 0x40
197#define QDIO_PRINT_ALL(x...) do { } while (0) 76
198#endif 77/* qdio adapter-characteristics-1 flag */
199 78#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
200#if QDIO_VERBOSE_LEVEL>6 79#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
201#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) 80#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
202#else 81#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */
203#define QDIO_PRINT_INFO(x...) do { } while (0) 82#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */
204#endif 83#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
205 84#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
206#if QDIO_VERBOSE_LEVEL>5
207#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
208#else
209#define QDIO_PRINT_WARN(x...) do { } while (0)
210#endif
211
212#if QDIO_VERBOSE_LEVEL>4
213#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
214#else
215#define QDIO_PRINT_ERR(x...) do { } while (0)
216#endif
217
218#if QDIO_VERBOSE_LEVEL>3
219#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
220#else
221#define QDIO_PRINT_CRIT(x...) do { } while (0)
222#endif
223
224#if QDIO_VERBOSE_LEVEL>2
225#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
226#else
227#define QDIO_PRINT_ALERT(x...) do { } while (0)
228#endif
229 85
230#if QDIO_VERBOSE_LEVEL>1 86#ifdef CONFIG_64BIT
231#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) 87static inline int do_sqbs(u64 token, unsigned char state, int queue,
232#else 88 int *start, int *count)
233#define QDIO_PRINT_EMERG(x...) do { } while (0) 89{
234#endif 90 register unsigned long _ccq asm ("0") = *count;
235 91 register unsigned long _token asm ("1") = token;
236#define QDIO_HEXDUMP16(importance,header,ptr) \ 92 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
237QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
238 "%02x %02x %02x %02x %02x %02x %02x %02x " \
239 "%02x %02x %02x %02x\n",*(((char*)ptr)), \
240 *(((char*)ptr)+1),*(((char*)ptr)+2), \
241 *(((char*)ptr)+3),*(((char*)ptr)+4), \
242 *(((char*)ptr)+5),*(((char*)ptr)+6), \
243 *(((char*)ptr)+7),*(((char*)ptr)+8), \
244 *(((char*)ptr)+9),*(((char*)ptr)+10), \
245 *(((char*)ptr)+11),*(((char*)ptr)+12), \
246 *(((char*)ptr)+13),*(((char*)ptr)+14), \
247 *(((char*)ptr)+15)); \
248QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
249 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
250 *(((char*)ptr)+16),*(((char*)ptr)+17), \
251 *(((char*)ptr)+18),*(((char*)ptr)+19), \
252 *(((char*)ptr)+20),*(((char*)ptr)+21), \
253 *(((char*)ptr)+22),*(((char*)ptr)+23), \
254 *(((char*)ptr)+24),*(((char*)ptr)+25), \
255 *(((char*)ptr)+26),*(((char*)ptr)+27), \
256 *(((char*)ptr)+28),*(((char*)ptr)+29), \
257 *(((char*)ptr)+30),*(((char*)ptr)+31));
258
259/****************** END OF DEBUG FACILITY STUFF *********************/
260 93
261/* 94 asm volatile(
262 * Some instructions as assembly 95 " .insn rsy,0xeb000000008A,%1,0,0(%2)"
263 */ 96 : "+d" (_ccq), "+d" (_queuestart)
97 : "d" ((unsigned long)state), "d" (_token)
98 : "memory", "cc");
99 *count = _ccq & 0xff;
100 *start = _queuestart & 0xff;
264 101
265static inline int 102 return (_ccq >> 32) & 0xff;
266do_sqbs(unsigned long sch, unsigned char state, int queue,
267 unsigned int *start, unsigned int *count)
268{
269#ifdef CONFIG_64BIT
270 register unsigned long _ccq asm ("0") = *count;
271 register unsigned long _sch asm ("1") = sch;
272 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
273
274 asm volatile(
275 " .insn rsy,0xeb000000008A,%1,0,0(%2)"
276 : "+d" (_ccq), "+d" (_queuestart)
277 : "d" ((unsigned long)state), "d" (_sch)
278 : "memory", "cc");
279 *count = _ccq & 0xff;
280 *start = _queuestart & 0xff;
281
282 return (_ccq >> 32) & 0xff;
283#else
284 return 0;
285#endif
286} 103}
287 104
288static inline int 105static inline int do_eqbs(u64 token, unsigned char *state, int queue,
289do_eqbs(unsigned long sch, unsigned char *state, int queue, 106 int *start, int *count)
290 unsigned int *start, unsigned int *count)
291{ 107{
292#ifdef CONFIG_64BIT
293 register unsigned long _ccq asm ("0") = *count; 108 register unsigned long _ccq asm ("0") = *count;
294 register unsigned long _sch asm ("1") = sch; 109 register unsigned long _token asm ("1") = token;
295 unsigned long _queuestart = ((unsigned long)queue << 32) | *start; 110 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
296 unsigned long _state = 0; 111 unsigned long _state = 0;
297 112
298 asm volatile( 113 asm volatile(
299 " .insn rrf,0xB99c0000,%1,%2,0,0" 114 " .insn rrf,0xB99c0000,%1,%2,0,0"
300 : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) 115 : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
301 : "d" (_sch) 116 : "d" (_token)
302 : "memory", "cc" ); 117 : "memory", "cc");
303 *count = _ccq & 0xff; 118 *count = _ccq & 0xff;
304 *start = _queuestart & 0xff; 119 *start = _queuestart & 0xff;
305 *state = _state & 0xff; 120 *state = _state & 0xff;
306 121
307 return (_ccq >> 32) & 0xff; 122 return (_ccq >> 32) & 0xff;
308#else
309 return 0;
310#endif
311}
312
313
314static inline int
315do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
316{
317 register unsigned long reg0 asm ("0") = 2;
318 register struct subchannel_id reg1 asm ("1") = schid;
319 register unsigned long reg2 asm ("2") = mask1;
320 register unsigned long reg3 asm ("3") = mask2;
321 int cc;
322
323 asm volatile(
324 " siga 0\n"
325 " ipm %0\n"
326 " srl %0,28\n"
327 : "=d" (cc)
328 : "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc");
329 return cc;
330}
331
332static inline int
333do_siga_input(struct subchannel_id schid, unsigned int mask)
334{
335 register unsigned long reg0 asm ("0") = 1;
336 register struct subchannel_id reg1 asm ("1") = schid;
337 register unsigned long reg2 asm ("2") = mask;
338 int cc;
339
340 asm volatile(
341 " siga 0\n"
342 " ipm %0\n"
343 " srl %0,28\n"
344 : "=d" (cc)
345 : "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory");
346 return cc;
347}
348
349static inline int
350do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
351 unsigned int fc)
352{
353 register unsigned long __fc asm("0") = fc;
354 register unsigned long __schid asm("1") = schid;
355 register unsigned long __mask asm("2") = mask;
356 int cc;
357
358 asm volatile(
359 " siga 0\n"
360 "0: ipm %0\n"
361 " srl %0,28\n"
362 "1:\n"
363 EX_TABLE(0b,1b)
364 : "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
365 : "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
366 : "cc", "memory");
367 (*bb) = ((unsigned int) __fc) >> 31;
368 return cc;
369}
370
371static inline unsigned long
372do_clear_global_summary(void)
373{
374 register unsigned long __fn asm("1") = 3;
375 register unsigned long __tmp asm("2");
376 register unsigned long __time asm("3");
377
378 asm volatile(
379 " .insn rre,0xb2650000,2,0"
380 : "+d" (__fn), "=d" (__tmp), "=d" (__time));
381 return __time;
382} 123}
383 124#else
384/* 125static inline int do_sqbs(u64 token, unsigned char state, int queue,
385 * QDIO device commands returned by extended Sense-ID 126 int *start, int *count) { return 0; }
386 */ 127static inline int do_eqbs(u64 token, unsigned char *state, int queue,
387#define DEFAULT_ESTABLISH_QS_CMD 0x1b 128 int *start, int *count) { return 0; }
388#define DEFAULT_ESTABLISH_QS_COUNT 0x1000 129#endif /* CONFIG_64BIT */
389#define DEFAULT_ACTIVATE_QS_CMD 0x1f
390#define DEFAULT_ACTIVATE_QS_COUNT 0
391
392/*
393 * additional CIWs returned by extended Sense-ID
394 */
395#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
396#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
397 130
398#define QDIO_CHSC_RESPONSE_CODE_OK 1 131struct qdio_irq;
399/* flags for st qdio sch data */
400#define CHSC_FLAG_QDIO_CAPABILITY 0x80
401#define CHSC_FLAG_VALIDITY 0x40
402 132
403#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40 133struct siga_flag {
404#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20 134 u8 input:1;
405#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10 135 u8 output:1;
406#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 136 u8 sync:1;
407#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 137 u8 no_sync_ti:1;
138 u8 no_sync_out_ti:1;
139 u8 no_sync_out_pci:1;
140 u8:2;
141} __attribute__ ((packed));
408 142
409struct qdio_chsc_ssqd { 143struct chsc_ssqd_area {
410 struct chsc_header request; 144 struct chsc_header request;
411 u16 reserved1:10; 145 u16:10;
412 u16 ssid:2; 146 u8 ssid:2;
413 u16 fmt:4; 147 u8 fmt:4;
414 u16 first_sch; 148 u16 first_sch;
415 u16 reserved2; 149 u16:16;
416 u16 last_sch; 150 u16 last_sch;
417 u32 reserved3; 151 u32:32;
418 struct chsc_header response; 152 struct chsc_header response;
419 u32 reserved4; 153 u32:32;
420 u8 flags; 154 struct qdio_ssqd_desc qdio_ssqd;
421 u8 reserved5; 155} __attribute__ ((packed));
422 u16 sch;
423 u8 qfmt;
424 u8 parm;
425 u8 qdioac1;
426 u8 sch_class;
427 u8 pct;
428 u8 icnt;
429 u8 reserved7;
430 u8 ocnt;
431 u8 reserved8;
432 u8 mbccnt;
433 u16 qdioac2;
434 u64 sch_token;
435};
436 156
437struct qdio_perf_stats { 157struct scssc_area {
438#ifdef CONFIG_64BIT 158 struct chsc_header request;
439 atomic64_t tl_runs; 159 u16 operation_code;
440 atomic64_t outbound_tl_runs; 160 u16:16;
441 atomic64_t outbound_tl_runs_resched; 161 u32:32;
442 atomic64_t inbound_tl_runs; 162 u32:32;
443 atomic64_t inbound_tl_runs_resched; 163 u64 summary_indicator_addr;
444 atomic64_t inbound_thin_tl_runs; 164 u64 subchannel_indicator_addr;
445 atomic64_t inbound_thin_tl_runs_resched; 165 u32 ks:4;
446 166 u32 kc:4;
447 atomic64_t siga_outs; 167 u32:21;
448 atomic64_t siga_ins; 168 u32 isc:3;
449 atomic64_t siga_syncs; 169 u32 word_with_d_bit;
450 atomic64_t pcis; 170 u32:32;
451 atomic64_t thinints; 171 struct subchannel_id schid;
452 atomic64_t fast_reqs; 172 u32 reserved[1004];
453 173 struct chsc_header response;
454 atomic64_t outbound_cnt; 174 u32:32;
455 atomic64_t inbound_cnt; 175} __attribute__ ((packed));
456#else /* CONFIG_64BIT */ 176
457 atomic_t tl_runs; 177struct qdio_input_q {
458 atomic_t outbound_tl_runs; 178 /* input buffer acknowledgement flag */
459 atomic_t outbound_tl_runs_resched; 179 int polling;
460 atomic_t inbound_tl_runs; 180
461 atomic_t inbound_tl_runs_resched; 181 /* last time of noticing incoming data */
462 atomic_t inbound_thin_tl_runs; 182 u64 timestamp;
463 atomic_t inbound_thin_tl_runs_resched; 183
464 184 /* lock for clearing the acknowledgement */
465 atomic_t siga_outs; 185 spinlock_t lock;
466 atomic_t siga_ins;
467 atomic_t siga_syncs;
468 atomic_t pcis;
469 atomic_t thinints;
470 atomic_t fast_reqs;
471
472 atomic_t outbound_cnt;
473 atomic_t inbound_cnt;
474#endif /* CONFIG_64BIT */
475}; 186};
476 187
477/* unlikely as the later the better */ 188struct qdio_output_q {
 478#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q) 189 /* failed siga-w attempts */
479#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \ 190 atomic_t busy_siga_counter;
480 qdio_siga_sync(q,~0U,~0U)
481#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \
482 qdio_siga_sync(q,~0U,0)
483 191
484#define NOW qdio_get_micros() 192 /* start time of busy condition */
485#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW 193 u64 timestamp;
486#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time)
487#define SAVE_FRONTIER(q,val) q->last_move_ftc=val
488#define GET_SAVED_FRONTIER(q) (q->last_move_ftc)
489 194
490#define MY_MODULE_STRING(x) #x 195 /* PCIs are enabled for the queue */
196 int pci_out_enabled;
491 197
492#ifdef CONFIG_64BIT 198 /* timer to check for more outbound work */
493#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) 199 struct timer_list timer;
494#else /* CONFIG_64BIT */ 200};
495#define QDIO_GET_ADDR(x) ((__u32)(long)x)
496#endif /* CONFIG_64BIT */
497 201
498struct qdio_q { 202struct qdio_q {
499 volatile struct slsb slsb; 203 struct slsb slsb;
204 union {
205 struct qdio_input_q in;
206 struct qdio_output_q out;
207 } u;
500 208
501 char unused[QDIO_MAX_BUFFERS_PER_Q]; 209 /* queue number */
210 int nr;
502 211
503 __u32 * dev_st_chg_ind; 212 /* bitmask of queue number */
213 int mask;
504 214
215 /* input or output queue */
505 int is_input_q; 216 int is_input_q;
506 struct subchannel_id schid;
507 struct ccw_device *cdev;
508
509 unsigned int is_iqdio_q;
510 unsigned int is_thinint_q;
511 217
512 /* bit 0 means queue 0, bit 1 means queue 1, ... */ 218 /* list of thinint input queues */
513 unsigned int mask; 219 struct list_head entry;
514 unsigned int q_no;
515 220
221 /* upper-layer program handler */
516 qdio_handler_t (*handler); 222 qdio_handler_t (*handler);
517 223
518 /* points to the next buffer to be checked for having 224 /*
519 * been processed by the card (outbound) 225 * inbound: next buffer the program should check for
520 * or to the next buffer the program should check for (inbound) */ 226 * outbound: next buffer to check for having been processed
521 volatile int first_to_check; 227 * by the card
522 /* and the last time it was: */ 228 */
523 volatile int last_move_ftc; 229 int first_to_check;
524 230
525 atomic_t number_of_buffers_used; 231 /* first_to_check of the last time */
526 atomic_t polling; 232 int last_move_ftc;
527 233
528 unsigned int siga_in; 234 /* beginning position for calling the program */
529 unsigned int siga_out; 235 int first_to_kick;
530 unsigned int siga_sync;
531 unsigned int siga_sync_done_on_thinints;
532 unsigned int siga_sync_done_on_outb_tis;
533 unsigned int hydra_gives_outbound_pcis;
534 236
535 /* used to save beginning position when calling dd_handlers */ 237 /* number of buffers in use by the adapter */
536 int first_element_to_kick; 238 atomic_t nr_buf_used;
537 239
538 atomic_t use_count; 240 struct qdio_irq *irq_ptr;
539 atomic_t is_in_shutdown;
540
541 void *irq_ptr;
542
543 struct timer_list timer;
544#ifdef QDIO_USE_TIMERS_FOR_POLLING
545 atomic_t timer_already_set;
546 spinlock_t timer_lock;
547#else /* QDIO_USE_TIMERS_FOR_POLLING */
548 struct tasklet_struct tasklet; 241 struct tasklet_struct tasklet;
549#endif /* QDIO_USE_TIMERS_FOR_POLLING */
550 242
551 243 /* error condition during a data transfer */
552 enum qdio_irq_states state;
553
554 /* used to store the error condition during a data transfer */
555 unsigned int qdio_error; 244 unsigned int qdio_error;
556 unsigned int siga_error;
557 unsigned int error_status_flags;
558
559 /* list of interesting queues */
560 volatile struct qdio_q *list_next;
561 volatile struct qdio_q *list_prev;
562 245
563 struct sl *sl; 246 struct sl *sl;
564 volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q]; 247 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
565 248
566 struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q]; 249 /*
567 250 * Warning: Leave this member at the end so it won't be cleared in
568 unsigned long int_parm; 251 * qdio_fill_qs. A page is allocated under this pointer and used for
569 252 * slib and sl. slib is 2048 bytes big and sl points to offset
570 /*struct { 253 * PAGE_SIZE / 2.
571 int in_bh_check_limit; 254 */
572 int threshold; 255 struct slib *slib;
573 } threshold_classes[QDIO_STATS_CLASSES];*/
574
575 struct {
576 /* inbound: the time to stop polling
577 outbound: the time to kick peer */
578 int threshold; /* the real value */
579
580 /* outbound: last time of do_QDIO
581 inbound: last time of noticing incoming data */
582 /*__u64 last_transfer_times[QDIO_STATS_NUMBER];
583 int last_transfer_index; */
584
585 __u64 last_transfer_time;
586 __u64 busy_start;
587 } timing;
588 atomic_t busy_siga_counter;
589 unsigned int queue_type;
590 unsigned int is_pci_out;
591
592 /* leave this member at the end. won't be cleared in qdio_fill_qs */
593 struct slib *slib; /* a page is allocated under this pointer,
594 sl points into this page, offset PAGE_SIZE/2
595 (after slib) */
596} __attribute__ ((aligned(256))); 256} __attribute__ ((aligned(256)));
597 257
598struct qdio_irq { 258struct qdio_irq {
599 __u32 * volatile dev_st_chg_ind; 259 struct qib qib;
260 u32 *dsci; /* address of device state change indicator */
261 struct ccw_device *cdev;
600 262
601 unsigned long int_parm; 263 unsigned long int_parm;
602 struct subchannel_id schid; 264 struct subchannel_id schid;
603 265 unsigned long sch_token; /* QEBSM facility */
604 unsigned int is_iqdio_irq;
605 unsigned int is_thinint_irq;
606 unsigned int hydra_gives_outbound_pcis;
607 unsigned int sync_done_on_outb_pcis;
608
609 /* QEBSM facility */
610 unsigned int is_qebsm;
611 unsigned long sch_token;
612 266
613 enum qdio_irq_states state; 267 enum qdio_irq_states state;
614 268
615 unsigned int no_input_qs; 269 struct siga_flag siga_flag; /* siga sync information from qdioac */
616 unsigned int no_output_qs;
617 270
618 unsigned char qdioac; 271 int nr_input_qs;
272 int nr_output_qs;
619 273
620 struct ccw1 ccw; 274 struct ccw1 ccw;
621
622 struct ciw equeue; 275 struct ciw equeue;
623 struct ciw aqueue; 276 struct ciw aqueue;
624 277
625 struct qib qib; 278 struct qdio_ssqd_desc ssqd_desc;
626 279
627 void (*original_int_handler) (struct ccw_device *, 280 void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
628 unsigned long, struct irb *);
629 281
630 /* leave these four members together at the end. won't be cleared in qdio_fill_irq */ 282 /*
283 * Warning: Leave these members together at the end so they won't be
284 * cleared in qdio_setup_irq.
285 */
631 struct qdr *qdr; 286 struct qdr *qdr;
287 unsigned long chsc_page;
288
632 struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; 289 struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
633 struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; 290 struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
634 struct semaphore setting_up_sema; 291
292 struct mutex setup_mutex;
635}; 293};
636#endif 294
295/* helper functions */
296#define queue_type(q) q->irq_ptr->qib.qfmt
297
298#define is_thinint_irq(irq) \
299 (irq->qib.qfmt == QDIO_IQDIO_QFMT || \
300 css_general_characteristics.aif_osa)
301
302/* the highest iqdio queue is used for multicast */
303static inline int multicast_outbound(struct qdio_q *q)
304{
305 return (q->irq_ptr->nr_output_qs > 1) &&
306 (q->nr == q->irq_ptr->nr_output_qs - 1);
307}
308
309static inline unsigned long long get_usecs(void)
310{
311 return monotonic_clock() >> 12;
312}
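/*
 * Editorial note: in the s390 TOD clock format bit 51 corresponds to
 * one microsecond, i.e. 1 us == 2^12 clock units, so shifting the
 * 64-bit clock value right by 12 converts it to microseconds.
 */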
313
314#define pci_out_supported(q) \
315 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
316#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
317
318#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti)
319#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti)
320#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
321#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
322#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync)
323#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci)
324
325#define for_each_input_queue(irq_ptr, q, i) \
326 for (i = 0, q = irq_ptr->input_qs[0]; \
327 i < irq_ptr->nr_input_qs; \
328 q = irq_ptr->input_qs[++i])
329#define for_each_output_queue(irq_ptr, q, i) \
330 for (i = 0, q = irq_ptr->output_qs[0]; \
331 i < irq_ptr->nr_output_qs; \
332 q = irq_ptr->output_qs[++i])
333
334#define prev_buf(bufnr) \
335 ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
336#define next_buf(bufnr) \
337 ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
338#define add_buf(bufnr, inc) \
339 ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
340
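/*
 * Editorial illustration, assuming QDIO_MAX_BUFFERS_PER_Q == 128 and
 * thus QDIO_MAX_BUFFERS_MASK == 127: the three macros above implement
 * wrap-around ring arithmetic, e.g.
 *
 *   next_buf(127) == (127 + 1) & 127 == 0 (wraps to the start)
 *   prev_buf(0) == (0 + 127) & 127 == 127 (wraps to the end)
 *   add_buf(120, 16) == (120 + 16) & 127 == 8 (crosses the wrap point)
 */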
341/* prototypes for thin interrupt */
342void qdio_sync_after_thinint(struct qdio_q *q);
343int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state);
344void qdio_check_outbound_after_thinint(struct qdio_q *q);
345int qdio_inbound_q_moved(struct qdio_q *q);
346void qdio_kick_inbound_handler(struct qdio_q *q);
347void qdio_stop_polling(struct qdio_q *q);
348int qdio_siga_sync_q(struct qdio_q *q);
349
350void qdio_setup_thinint(struct qdio_irq *irq_ptr);
351int qdio_establish_thinint(struct qdio_irq *irq_ptr);
352void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
353void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
354void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
355void tiqdio_inbound_processing(unsigned long q);
356int tiqdio_allocate_memory(void);
357void tiqdio_free_memory(void);
358int tiqdio_register_thinints(void);
359void tiqdio_unregister_thinints(void);
360
361/* prototypes for setup */
362void qdio_inbound_processing(unsigned long data);
363void qdio_outbound_processing(unsigned long data);
364void qdio_outbound_timer(unsigned long data);
365void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
366 struct irb *irb);
367int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
368 int nr_output_qs);
369void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
370int qdio_setup_irq(struct qdio_initialize *init_data);
371void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
372 struct ccw_device *cdev);
373void qdio_release_memory(struct qdio_irq *irq_ptr);
374int qdio_setup_init(void);
375void qdio_setup_exit(void);
376
377#endif /* _CIO_QDIO_H */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
new file mode 100644
index 000000000000..337aa3087a78
--- /dev/null
+++ b/drivers/s390/cio/qdio_debug.c
@@ -0,0 +1,240 @@
1/*
2 * drivers/s390/cio/qdio_debug.c
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#include <linux/proc_fs.h>
9#include <linux/seq_file.h>
10#include <linux/debugfs.h>
11#include <asm/qdio.h>
12#include <asm/debug.h>
13#include "qdio_debug.h"
14#include "qdio.h"
15
16debug_info_t *qdio_dbf_setup;
17debug_info_t *qdio_dbf_trace;
18
19static struct dentry *debugfs_root;
20#define MAX_DEBUGFS_QUEUES 32
21static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
22static DEFINE_MUTEX(debugfs_mutex);
23
24void qdio_allocate_do_dbf(struct qdio_initialize *init_data)
25{
26 char dbf_text[20];
27
28 sprintf(dbf_text, "qfmt:%x", init_data->q_format);
29 QDIO_DBF_TEXT0(0, setup, dbf_text);
30 QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8);
31 sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format);
32 QDIO_DBF_TEXT0(0, setup, dbf_text);
33 QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *));
34 QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *));
35 QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *));
36 sprintf(dbf_text, "niq:%4x", init_data->no_input_qs);
37 QDIO_DBF_TEXT0(0, setup, dbf_text);
38 sprintf(dbf_text, "noq:%4x", init_data->no_output_qs);
39 QDIO_DBF_TEXT0(0, setup, dbf_text);
40 QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *));
41 QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *));
42 QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long));
43 QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long));
44 QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *));
45 QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *));
46}
47
48static void qdio_unregister_dbf_views(void)
49{
50 if (qdio_dbf_setup)
51 debug_unregister(qdio_dbf_setup);
52 if (qdio_dbf_trace)
53 debug_unregister(qdio_dbf_trace);
54}
55
56static int qdio_register_dbf_views(void)
57{
58 qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES,
59 QDIO_DBF_SETUP_NR_AREAS,
60 QDIO_DBF_SETUP_LEN);
61 if (!qdio_dbf_setup)
62 goto oom;
63 debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
64 debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL);
65
66 qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES,
67 QDIO_DBF_TRACE_NR_AREAS,
68 QDIO_DBF_TRACE_LEN);
69 if (!qdio_dbf_trace)
70 goto oom;
71 debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view);
72 debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL);
73 return 0;
74oom:
75 qdio_unregister_dbf_views();
76 return -ENOMEM;
77}
78
79static int qstat_show(struct seq_file *m, void *v)
80{
81 unsigned char state;
82 struct qdio_q *q = m->private;
83 int i;
84
85 if (!q)
86 return 0;
87
88 seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci);
89 seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
90 seq_printf(m, "ftc: %d\n", q->first_to_check);
91 seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc);
92 seq_printf(m, "polling: %d\n", q->u.in.polling);
93 seq_printf(m, "slsb buffer states:\n");
94
95 qdio_siga_sync_q(q);
96 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
97 get_buf_state(q, i, &state);
98 switch (state) {
99 case SLSB_P_INPUT_NOT_INIT:
100 case SLSB_P_OUTPUT_NOT_INIT:
101 seq_printf(m, "N");
102 break;
103 case SLSB_P_INPUT_PRIMED:
104 case SLSB_CU_OUTPUT_PRIMED:
105 seq_printf(m, "+");
106 break;
107 case SLSB_P_INPUT_ACK:
108 seq_printf(m, "A");
109 break;
110 case SLSB_P_INPUT_ERROR:
111 case SLSB_P_OUTPUT_ERROR:
112 seq_printf(m, "x");
113 break;
114 case SLSB_CU_INPUT_EMPTY:
115 case SLSB_P_OUTPUT_EMPTY:
116 seq_printf(m, "-");
117 break;
118 case SLSB_P_INPUT_HALTED:
119 case SLSB_P_OUTPUT_HALTED:
120 seq_printf(m, ".");
121 break;
122 default:
123 seq_printf(m, "?");
124 }
125 if (i == 63)
126 seq_printf(m, "\n");
127 }
128 seq_printf(m, "\n");
129 return 0;
130}
131
132static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
133 size_t count, loff_t *off)
134{
135 struct seq_file *seq = file->private_data;
136 struct qdio_q *q = seq->private;
137
138 if (!q)
139 return 0;
140
141 if (q->is_input_q)
142 xchg(q->irq_ptr->dsci, 1);
143 local_bh_disable();
144 tasklet_schedule(&q->tasklet);
145 local_bh_enable();
146 return count;
147}
148
149static int qstat_seq_open(struct inode *inode, struct file *filp)
150{
151 return single_open(filp, qstat_show,
152 filp->f_path.dentry->d_inode->i_private);
153}
154
155static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
156{
 157 *name = '\0'; /* sizeof(name) here would be the pointer size, not the 40-byte buffer */
158 sprintf(name, "%s", cdev->dev.bus_id);
159 if (q->is_input_q)
160 sprintf(name + strlen(name), "_input");
161 else
162 sprintf(name + strlen(name), "_output");
163 sprintf(name + strlen(name), "_%d", q->nr);
164}
165
166static void remove_debugfs_entry(struct qdio_q *q)
167{
168 int i;
169
170 for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
171 if (!debugfs_queues[i])
172 continue;
173 if (debugfs_queues[i]->d_inode->i_private == q) {
174 debugfs_remove(debugfs_queues[i]);
175 debugfs_queues[i] = NULL;
176 }
177 }
178}
179
180static struct file_operations debugfs_fops = {
181 .owner = THIS_MODULE,
182 .open = qstat_seq_open,
183 .read = seq_read,
184 .write = qstat_seq_write,
185 .llseek = seq_lseek,
186 .release = single_release,
187};
188
189static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
190{
191 int i = 0;
192 char name[40];
193
194 while (debugfs_queues[i] != NULL) {
195 i++;
196 if (i >= MAX_DEBUGFS_QUEUES)
197 return;
198 }
199 get_queue_name(q, cdev, name);
200 debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
201 debugfs_root, q, &debugfs_fops);
202}
203
204void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
205{
206 struct qdio_q *q;
207 int i;
208
209 mutex_lock(&debugfs_mutex);
210 for_each_input_queue(irq_ptr, q, i)
211 setup_debugfs_entry(q, cdev);
212 for_each_output_queue(irq_ptr, q, i)
213 setup_debugfs_entry(q, cdev);
214 mutex_unlock(&debugfs_mutex);
215}
216
217void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
218{
219 struct qdio_q *q;
220 int i;
221
222 mutex_lock(&debugfs_mutex);
223 for_each_input_queue(irq_ptr, q, i)
224 remove_debugfs_entry(q);
225 for_each_output_queue(irq_ptr, q, i)
226 remove_debugfs_entry(q);
227 mutex_unlock(&debugfs_mutex);
228}
229
230int __init qdio_debug_init(void)
231{
232 debugfs_root = debugfs_create_dir("qdio_queues", NULL);
233 return qdio_register_dbf_views();
234}
235
236void qdio_debug_exit(void)
237{
238 debugfs_remove(debugfs_root);
239 qdio_unregister_dbf_views();
240}
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
new file mode 100644
index 000000000000..8484b83698e1
--- /dev/null
+++ b/drivers/s390/cio/qdio_debug.h
@@ -0,0 +1,91 @@
1/*
2 * drivers/s390/cio/qdio_debug.h
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#ifndef QDIO_DEBUG_H
9#define QDIO_DEBUG_H
10
11#include <asm/debug.h>
12#include <asm/qdio.h>
13#include "qdio.h"
14
15#define QDIO_DBF_HEX(ex, name, level, addr, len) \
16 do { \
17 if (ex) \
18 debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \
19 else \
20 debug_event(qdio_dbf_##name, level, (void *)(addr), len); \
21 } while (0)
22#define QDIO_DBF_TEXT(ex, name, level, text) \
23 do { \
24 if (ex) \
25 debug_text_exception(qdio_dbf_##name, level, text); \
26 else \
27 debug_text_event(qdio_dbf_##name, level, text); \
28 } while (0)
29
30#define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len)
31#define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len)
32#define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len)
33
34#ifdef CONFIG_QDIO_DEBUG
35#define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len)
36#define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len)
37#define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len)
38#define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len)
39#else
40#define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0)
41#define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0)
42#define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0)
43#define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0)
44#endif /* CONFIG_QDIO_DEBUG */
45
46#define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text)
47#define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text)
48#define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text)
49
50#ifdef CONFIG_QDIO_DEBUG
51#define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text)
52#define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text)
53#define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text)
54#define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text)
55#else
56#define QDIO_DBF_TEXT3(ex, name, text) do {} while (0)
57#define QDIO_DBF_TEXT4(ex, name, text) do {} while (0)
58#define QDIO_DBF_TEXT5(ex, name, text) do {} while (0)
59#define QDIO_DBF_TEXT6(ex, name, text) do {} while (0)
60#endif /* CONFIG_QDIO_DEBUG */
61
62/* s390dbf views */
63#define QDIO_DBF_SETUP_LEN 8
64#define QDIO_DBF_SETUP_PAGES 4
65#define QDIO_DBF_SETUP_NR_AREAS 1
66
67#define QDIO_DBF_TRACE_LEN 8
68#define QDIO_DBF_TRACE_NR_AREAS 2
69
70#ifdef CONFIG_QDIO_DEBUG
71#define QDIO_DBF_TRACE_PAGES 16
72#define QDIO_DBF_SETUP_LEVEL 6
73#define QDIO_DBF_TRACE_LEVEL 4
74#else /* !CONFIG_QDIO_DEBUG */
75#define QDIO_DBF_TRACE_PAGES 4
76#define QDIO_DBF_SETUP_LEVEL 2
77#define QDIO_DBF_TRACE_LEVEL 2
78#endif /* CONFIG_QDIO_DEBUG */
79
80extern debug_info_t *qdio_dbf_setup;
81extern debug_info_t *qdio_dbf_trace;
82
83void qdio_allocate_do_dbf(struct qdio_initialize *init_data);
84void debug_print_bstat(struct qdio_q *q);
85void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
86 struct ccw_device *cdev);
87void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
88 struct ccw_device *cdev);
89int qdio_debug_init(void);
90void qdio_debug_exit(void);
91#endif
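/*
 * Editorial sketch (hypothetical caller, not part of this patch):
 * trace levels 0-2 are always compiled in, while levels 3-6 expand to
 * empty statements unless CONFIG_QDIO_DEBUG is set.
 */
static void example_trace(struct qdio_q *q)
{
	char dbf_text[15];

	QDIO_DBF_TEXT2(0, trace, "example");	/* always traced */
	sprintf(dbf_text, "q:%4x", q->nr);
	QDIO_DBF_TEXT4(0, trace, dbf_text);	/* no-op without CONFIG_QDIO_DEBUG */
	QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
}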
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
new file mode 100644
index 000000000000..d10c73cc1688
--- /dev/null
+++ b/drivers/s390/cio/qdio_main.c
@@ -0,0 +1,1755 @@
1/*
2 * linux/drivers/s390/cio/qdio_main.c
3 *
4 * Linux for s390 qdio support, buffer handling, qdio API and module support.
5 *
6 * Copyright 2000,2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Jan Glauber <jang@linux.vnet.ibm.com>
9 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
10 */
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/timer.h>
15#include <linux/delay.h>
16#include <asm/atomic.h>
17#include <asm/debug.h>
18#include <asm/qdio.h>
19
20#include "cio.h"
21#include "css.h"
22#include "device.h"
23#include "qdio.h"
24#include "qdio_debug.h"
25#include "qdio_perf.h"
26
27MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
28 "Jan Glauber <jang@linux.vnet.ibm.com>");
29MODULE_DESCRIPTION("QDIO base support");
30MODULE_LICENSE("GPL");
31
32static inline int do_siga_sync(struct subchannel_id schid,
33 unsigned int out_mask, unsigned int in_mask)
34{
35 register unsigned long __fc asm ("0") = 2;
36 register struct subchannel_id __schid asm ("1") = schid;
37 register unsigned long out asm ("2") = out_mask;
38 register unsigned long in asm ("3") = in_mask;
39 int cc;
40
41 asm volatile(
42 " siga 0\n"
43 " ipm %0\n"
44 " srl %0,28\n"
45 : "=d" (cc)
46 : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
47 return cc;
48}
49
50static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
51{
52 register unsigned long __fc asm ("0") = 1;
53 register struct subchannel_id __schid asm ("1") = schid;
54 register unsigned long __mask asm ("2") = mask;
55 int cc;
56
57 asm volatile(
58 " siga 0\n"
59 " ipm %0\n"
60 " srl %0,28\n"
61 : "=d" (cc)
62 : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
63 return cc;
64}
65
66/**
67 * do_siga_output - perform SIGA-w/wt function
68 * @schid: subchannel id or in case of QEBSM the subchannel token
69 * @mask: which output queues to process
70 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
71 * @fc: function code to perform
72 *
73 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
74 * Note: For IQDC unicast queues only the highest priority queue is processed.
75 */
76static inline int do_siga_output(unsigned long schid, unsigned long mask,
77 u32 *bb, unsigned int fc)
78{
79 register unsigned long __fc asm("0") = fc;
80 register unsigned long __schid asm("1") = schid;
81 register unsigned long __mask asm("2") = mask;
82 int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
83
84 asm volatile(
85 " siga 0\n"
86 "0: ipm %0\n"
87 " srl %0,28\n"
88 "1:\n"
89 EX_TABLE(0b, 1b)
90 : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
91 : : "cc", "memory");
92 *bb = ((unsigned int) __fc) >> 31;
93 return cc;
94}
95
96static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
97{
98 char dbf_text[15];
99
100 /* all done or next buffer state different */
101 if (ccq == 0 || ccq == 32)
102 return 0;
103 /* not all buffers processed */
104 if (ccq == 96 || ccq == 97)
105 return 1;
106 /* notify devices immediately */
107 sprintf(dbf_text, "%d", ccq);
108 QDIO_DBF_TEXT2(1, trace, dbf_text);
109 return -EIO;
110}
111
112/**
113 * qdio_do_eqbs - extract buffer states for QEBSM
114 * @q: queue to manipulate
115 * @state: state of the extracted buffers
116 * @start: buffer number to start at
117 * @count: count of buffers to examine
118 *
 119 * Returns the number of successfully extracted equal buffer states.
 120 * Stops processing if a state differs from the last buffer's state.
121 */
122static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
123 int start, int count)
124{
125 unsigned int ccq = 0;
126 int tmp_count = count, tmp_start = start;
127 int nr = q->nr;
128 int rc;
129 char dbf_text[15];
130
131 BUG_ON(!q->irq_ptr->sch_token);
132
133 if (!q->is_input_q)
134 nr += q->irq_ptr->nr_input_qs;
135again:
136 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
137 rc = qdio_check_ccq(q, ccq);
138
139 /* At least one buffer was processed, return and extract the remaining
140 * buffers later.
141 */
142 if ((ccq == 96) && (count != tmp_count))
143 return (count - tmp_count);
144 if (rc == 1) {
145 QDIO_DBF_TEXT5(1, trace, "eqAGAIN");
146 goto again;
147 }
148
149 if (rc < 0) {
150 QDIO_DBF_TEXT2(1, trace, "eqberr");
151 sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr);
152 QDIO_DBF_TEXT2(1, trace, dbf_text);
153 q->handler(q->irq_ptr->cdev,
154 QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
155 0, -1, -1, q->irq_ptr->int_parm);
156 return 0;
157 }
158 return count - tmp_count;
159}
160
161/**
162 * qdio_do_sqbs - set buffer states for QEBSM
163 * @q: queue to manipulate
164 * @state: new state of the buffers
165 * @start: first buffer number to change
166 * @count: how many buffers to change
167 *
168 * Returns the number of successfully changed buffers.
 169 * Retries until the specified count of buffer states is set or an
170 * error occurs.
171 */
172static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
173 int count)
174{
175 unsigned int ccq = 0;
176 int tmp_count = count, tmp_start = start;
177 int nr = q->nr;
178 int rc;
179 char dbf_text[15];
180
181 BUG_ON(!q->irq_ptr->sch_token);
182
183 if (!q->is_input_q)
184 nr += q->irq_ptr->nr_input_qs;
185again:
186 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
187 rc = qdio_check_ccq(q, ccq);
188 if (rc == 1) {
189 QDIO_DBF_TEXT5(1, trace, "sqAGAIN");
190 goto again;
191 }
192 if (rc < 0) {
193 QDIO_DBF_TEXT3(1, trace, "sqberr");
194 sprintf(dbf_text, "%2x,%2x", count, tmp_count);
195 QDIO_DBF_TEXT3(1, trace, dbf_text);
196 sprintf(dbf_text, "%d,%d", ccq, nr);
197 QDIO_DBF_TEXT3(1, trace, dbf_text);
198
199 q->handler(q->irq_ptr->cdev,
200 QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
201 0, -1, -1, q->irq_ptr->int_parm);
202 return 0;
203 }
204 WARN_ON(tmp_count);
205 return count - tmp_count;
206}
207
208/* returns number of examined buffers and their common state in *state */
209static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
210 unsigned char *state, unsigned int count)
211{
212 unsigned char __state = 0;
213 int i;
214
215 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
216 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
217
218 if (is_qebsm(q))
219 return qdio_do_eqbs(q, state, bufnr, count);
220
221 for (i = 0; i < count; i++) {
222 if (!__state)
223 __state = q->slsb.val[bufnr];
224 else if (q->slsb.val[bufnr] != __state)
225 break;
226 bufnr = next_buf(bufnr);
227 }
228 *state = __state;
229 return i;
230}
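/*
 * Editorial illustration of the non-QEBSM scan above: with buffer
 * states { PRIMED, PRIMED, PRIMED, EMPTY, ... } starting at buffer 0,
 * get_buf_states(q, 0, &state, 8) returns 3 and sets state to
 * SLSB_P_INPUT_PRIMED -- three buffers with equal state were found
 * before the state changed at buffer 3.
 */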
231
232inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
233 unsigned char *state)
234{
235 return get_buf_states(q, bufnr, state, 1);
236}
237
238/* wrap-around safe setting of slsb states, returns number of changed buffers */
239static inline int set_buf_states(struct qdio_q *q, int bufnr,
240 unsigned char state, int count)
241{
242 int i;
243
244 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
245 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
246
247 if (is_qebsm(q))
248 return qdio_do_sqbs(q, state, bufnr, count);
249
250 for (i = 0; i < count; i++) {
251 xchg(&q->slsb.val[bufnr], state);
252 bufnr = next_buf(bufnr);
253 }
254 return count;
255}
256
257static inline int set_buf_state(struct qdio_q *q, int bufnr,
258 unsigned char state)
259{
260 return set_buf_states(q, bufnr, state, 1);
261}
262
263/* set slsb states to initial state */
264void qdio_init_buf_states(struct qdio_irq *irq_ptr)
265{
266 struct qdio_q *q;
267 int i;
268
269 for_each_input_queue(irq_ptr, q, i)
270 set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
271 QDIO_MAX_BUFFERS_PER_Q);
272 for_each_output_queue(irq_ptr, q, i)
273 set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
274 QDIO_MAX_BUFFERS_PER_Q);
275}
276
277static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
278 unsigned int input)
279{
280 int cc;
281
282 if (!need_siga_sync(q))
283 return 0;
284
285 qdio_perf_stat_inc(&perf_stats.siga_sync);
286
287 cc = do_siga_sync(q->irq_ptr->schid, output, input);
288 if (cc) {
289 QDIO_DBF_TEXT4(0, trace, "sigasync");
290 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
291 QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
292 }
293 return cc;
294}
295
296inline int qdio_siga_sync_q(struct qdio_q *q)
297{
298 if (q->is_input_q)
299 return qdio_siga_sync(q, 0, q->mask);
300 else
301 return qdio_siga_sync(q, q->mask, 0);
302}
303
304static inline int qdio_siga_sync_out(struct qdio_q *q)
305{
306 return qdio_siga_sync(q, ~0U, 0);
307}
308
309static inline int qdio_siga_sync_all(struct qdio_q *q)
310{
311 return qdio_siga_sync(q, ~0U, ~0U);
312}
313
314static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
315{
316 unsigned int fc = 0;
317 unsigned long schid;
318
319 if (!is_qebsm(q))
320 schid = *((u32 *)&q->irq_ptr->schid);
321 else {
322 schid = q->irq_ptr->sch_token;
323 fc |= 0x80;
324 }
325 return do_siga_output(schid, q->mask, busy_bit, fc);
326}
327
328static int qdio_siga_output(struct qdio_q *q)
329{
330 int cc;
331 u32 busy_bit;
332 u64 start_time = 0;
333
334 QDIO_DBF_TEXT5(0, trace, "sigaout");
335 QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
336
337 qdio_perf_stat_inc(&perf_stats.siga_out);
338again:
339 cc = qdio_do_siga_output(q, &busy_bit);
340 if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
341 if (!start_time)
342 start_time = get_usecs();
343 else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
344 goto again;
345 }
346
347 if (cc == 2 && busy_bit)
348 cc |= QDIO_ERROR_SIGA_BUSY;
349 if (cc)
350 QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
351 return cc;
352}
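/*
 * Editorial note on the retry loop above: for HiperSockets (IQDIO)
 * queues, cc == 2 with the busy bit set is retried for up to
 * QDIO_BUSY_BIT_PATIENCE (100 microseconds, see qdio.h) before
 * QDIO_ERROR_SIGA_BUSY is reported to the caller.
 */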
353
354static inline int qdio_siga_input(struct qdio_q *q)
355{
356 int cc;
357
358 QDIO_DBF_TEXT4(0, trace, "sigain");
359 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
360
361 qdio_perf_stat_inc(&perf_stats.siga_in);
362
363 cc = do_siga_input(q->irq_ptr->schid, q->mask);
364 if (cc)
365 QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
366 return cc;
367}
368
369/* called from thinint inbound handler */
370void qdio_sync_after_thinint(struct qdio_q *q)
371{
372 if (pci_out_supported(q)) {
373 if (need_siga_sync_thinint(q))
374 qdio_siga_sync_all(q);
375 else if (need_siga_sync_out_thinint(q))
376 qdio_siga_sync_out(q);
377 } else
378 qdio_siga_sync_q(q);
379}
380
381inline void qdio_stop_polling(struct qdio_q *q)
382{
383 spin_lock_bh(&q->u.in.lock);
384 if (!q->u.in.polling) {
385 spin_unlock_bh(&q->u.in.lock);
386 return;
387 }
388 q->u.in.polling = 0;
389 qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
390
391 /* show the card that we are not polling anymore */
392 set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
393 spin_unlock_bh(&q->u.in.lock);
394}
395
396static void announce_buffer_error(struct qdio_q *q)
397{
398 char dbf_text[15];
399
400 if (q->is_input_q)
401 QDIO_DBF_TEXT3(1, trace, "inperr");
402 else
403 QDIO_DBF_TEXT3(0, trace, "outperr");
404
405 sprintf(dbf_text, "%x-%x-%x", q->first_to_check,
406 q->sbal[q->first_to_check]->element[14].flags,
407 q->sbal[q->first_to_check]->element[15].flags);
408 QDIO_DBF_TEXT3(1, trace, dbf_text);
409 QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256);
410
411 q->qdio_error = QDIO_ERROR_SLSB_STATE;
412}
413
414static int get_inbound_buffer_frontier(struct qdio_q *q)
415{
416 int count, stop;
417 unsigned char state;
418
419 /*
 420 * If we are still polling, don't update last_move_ftc; keep the
 421 * previously ACKed buffer there.
422 */
423 if (!q->u.in.polling)
424 q->last_move_ftc = q->first_to_check;
425
426 /*
427 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
428 * would return 0.
429 */
430 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
431 stop = add_buf(q->first_to_check, count);
432
433 /*
 434 * No siga-sync here; a PCI interrupt, or our processing after a
 435 * thin interrupt, will sync the queues.
436 */
437
438 /* need to set count to 1 for non-qebsm */
439 if (!is_qebsm(q))
440 count = 1;
441
442check_next:
443 if (q->first_to_check == stop)
444 goto out;
445
446 count = get_buf_states(q, q->first_to_check, &state, count);
447 if (!count)
448 goto out;
449
450 switch (state) {
451 case SLSB_P_INPUT_PRIMED:
452 QDIO_DBF_TEXT5(0, trace, "inptprim");
453
454 /*
455 * Only ACK the first buffer. The ACK will be removed in
456 * qdio_stop_polling.
457 */
458 if (q->u.in.polling)
459 state = SLSB_P_INPUT_NOT_INIT;
460 else {
461 q->u.in.polling = 1;
462 state = SLSB_P_INPUT_ACK;
463 }
464 set_buf_state(q, q->first_to_check, state);
465
466 /*
467 * Need to change all PRIMED buffers to NOT_INIT, otherwise
 468 * we'd lose initiative in the thinint code.
469 */
470 if (count > 1)
471 set_buf_states(q, next_buf(q->first_to_check),
472 SLSB_P_INPUT_NOT_INIT, count - 1);
473
474 /*
475 * No siga-sync needed for non-qebsm here, as the inbound queue
476 * will be synced on the next siga-r, resp.
477 * tiqdio_is_inbound_q_done will do the siga-sync.
478 */
479 q->first_to_check = add_buf(q->first_to_check, count);
480 atomic_sub(count, &q->nr_buf_used);
481 goto check_next;
482 case SLSB_P_INPUT_ERROR:
483 announce_buffer_error(q);
484 /* process the buffer, the upper layer will take care of it */
485 q->first_to_check = add_buf(q->first_to_check, count);
486 atomic_sub(count, &q->nr_buf_used);
487 break;
488 case SLSB_CU_INPUT_EMPTY:
489 case SLSB_P_INPUT_NOT_INIT:
490 case SLSB_P_INPUT_ACK:
491 QDIO_DBF_TEXT5(0, trace, "inpnipro");
492 break;
493 default:
494 BUG();
495 }
496out:
497 QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int));
498 return q->first_to_check;
499}
500
501int qdio_inbound_q_moved(struct qdio_q *q)
502{
503 int bufnr;
504
505 bufnr = get_inbound_buffer_frontier(q);
506
507 if ((bufnr != q->last_move_ftc) || q->qdio_error) {
508 if (!need_siga_sync(q) && !pci_out_supported(q))
509 q->u.in.timestamp = get_usecs();
510
511 QDIO_DBF_TEXT4(0, trace, "inhasmvd");
512 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
513 return 1;
514 } else
515 return 0;
516}
517
518static int qdio_inbound_q_done(struct qdio_q *q)
519{
520 unsigned char state;
521#ifdef CONFIG_QDIO_DEBUG
522 char dbf_text[15];
523#endif
524
525 if (!atomic_read(&q->nr_buf_used))
526 return 1;
527
528 /*
529	 * We need this sync to stay in step with the adapter, as it
530	 * performs a kind of PCI avoidance.
531 */
532 qdio_siga_sync_q(q);
533
534 get_buf_state(q, q->first_to_check, &state);
535 if (state == SLSB_P_INPUT_PRIMED)
536 /* we got something to do */
537 return 0;
538
539 /* on VM, we don't poll, so the q is always done here */
540 if (need_siga_sync(q) || pci_out_supported(q))
541 return 1;
542
543 /*
544	 * At this point we know that inbound first_to_check
545 * has (probably) not moved (see qdio_inbound_processing).
546 */
547 if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
548#ifdef CONFIG_QDIO_DEBUG
549 QDIO_DBF_TEXT4(0, trace, "inqisdon");
550 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
551 sprintf(dbf_text, "pf%02x", q->first_to_check);
552 QDIO_DBF_TEXT4(0, trace, dbf_text);
553#endif /* CONFIG_QDIO_DEBUG */
554 return 1;
555 } else {
556#ifdef CONFIG_QDIO_DEBUG
557 QDIO_DBF_TEXT4(0, trace, "inqisntd");
558 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
559 sprintf(dbf_text, "pf%02x", q->first_to_check);
560 QDIO_DBF_TEXT4(0, trace, dbf_text);
561#endif /* CONFIG_QDIO_DEBUG */
562 return 0;
563 }
564}
565
566void qdio_kick_inbound_handler(struct qdio_q *q)
567{
568 int count, start, end;
569#ifdef CONFIG_QDIO_DEBUG
570 char dbf_text[15];
571#endif
572
573 qdio_perf_stat_inc(&perf_stats.inbound_handler);
574
575 start = q->first_to_kick;
576 end = q->first_to_check;
577 if (end >= start)
578 count = end - start;
579 else
580 count = end + QDIO_MAX_BUFFERS_PER_Q - start;
581
582#ifdef CONFIG_QDIO_DEBUG
583 sprintf(dbf_text, "s=%2xc=%2x", start, count);
584 QDIO_DBF_TEXT4(0, trace, dbf_text);
585#endif /* CONFIG_QDIO_DEBUG */
586
587 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
588 return;
589
590 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
591 start, count, q->irq_ptr->int_parm);
592
593 /* for the next time */
594 q->first_to_kick = q->first_to_check;
595 q->qdio_error = 0;
596}
597
598static void __qdio_inbound_processing(struct qdio_q *q)
599{
600 qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
601again:
602 if (!qdio_inbound_q_moved(q))
603 return;
604
605 qdio_kick_inbound_handler(q);
606
607 if (!qdio_inbound_q_done(q))
608 /* means poll time is not yet over */
609 goto again;
610
611 qdio_stop_polling(q);
612 /*
613 * We need to check again to not lose initiative after
614 * resetting the ACK state.
615 */
616 if (!qdio_inbound_q_done(q))
617 goto again;
618}
619
620/* inbound tasklet */
621void qdio_inbound_processing(unsigned long data)
622{
623 struct qdio_q *q = (struct qdio_q *)data;
624 __qdio_inbound_processing(q);
625}
626
627static int get_outbound_buffer_frontier(struct qdio_q *q)
628{
629 int count, stop;
630 unsigned char state;
631
632 if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
633 (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
634 qdio_siga_sync_q(q);
635
636 /*
637	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
638 * would return 0.
639 */
640 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
641 stop = add_buf(q->first_to_check, count);
642
643 /* need to set count to 1 for non-qebsm */
644 if (!is_qebsm(q))
645 count = 1;
646
647check_next:
648 if (q->first_to_check == stop)
649 return q->first_to_check;
650
651 count = get_buf_states(q, q->first_to_check, &state, count);
652 if (!count)
653 return q->first_to_check;
654
655 switch (state) {
656 case SLSB_P_OUTPUT_EMPTY:
657 /* the adapter got it */
658 QDIO_DBF_TEXT5(0, trace, "outpempt");
659
660 atomic_sub(count, &q->nr_buf_used);
661 q->first_to_check = add_buf(q->first_to_check, count);
662 /*
663 * We fetch all buffer states at once. get_buf_states may
664 * return count < stop. For QEBSM we do not loop.
665 */
666 if (is_qebsm(q))
667 break;
668 goto check_next;
669 case SLSB_P_OUTPUT_ERROR:
670 announce_buffer_error(q);
671 /* process the buffer, the upper layer will take care of it */
672 q->first_to_check = add_buf(q->first_to_check, count);
673 atomic_sub(count, &q->nr_buf_used);
674 break;
675 case SLSB_CU_OUTPUT_PRIMED:
676 /* the adapter has not fetched the output yet */
677 QDIO_DBF_TEXT5(0, trace, "outpprim");
678 break;
679 case SLSB_P_OUTPUT_NOT_INIT:
680 case SLSB_P_OUTPUT_HALTED:
681 break;
682 default:
683 BUG();
684 }
685 return q->first_to_check;
686}
687
688/* all buffers processed? */
689static inline int qdio_outbound_q_done(struct qdio_q *q)
690{
691 return atomic_read(&q->nr_buf_used) == 0;
692}
693
694static inline int qdio_outbound_q_moved(struct qdio_q *q)
695{
696 int bufnr;
697
698 bufnr = get_outbound_buffer_frontier(q);
699
700 if ((bufnr != q->last_move_ftc) || q->qdio_error) {
701 q->last_move_ftc = bufnr;
702 QDIO_DBF_TEXT4(0, trace, "oqhasmvd");
703 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
704 return 1;
705 } else
706 return 0;
707}
708
709/*
710 * VM may present us cc=2 and the busy bit set on SIGA-write
711 * during reconfiguration of its Guest LAN (only in iqdio mode;
712 * otherwise qdio is asynchronous and cc=2 with the busy bit set
713 * would take the queues down immediately).
714 *
715 * Therefore, if such a condition occurs, qdio_siga_output retries
716 * constantly for a short time. If the condition persists, it
717 * increases the busy_siga_counter, saves the timestamp, and
718 * schedules the queue for later processing. qdio_outbound_processing
719 * checks the counter: if non-zero, it calls qdio_kick_outbound_q
720 * as often as the value of the counter, attempting further SIGA
721 * instructions. For each successful SIGA the counter is
722 * decreased; for failing SIGAs it stays the same. After some
723 * time with no movement, qdio_kick_outbound_q finally fails and
724 * reflects a corresponding error code to the upper layer module,
725 * which then takes the queues down.
726 *
727 * Note that this is a change from the original HiperSockets design
728 * (which said cc=2 with the busy bit means take the queues down), but
729 * in those days Guest LAN didn't exist... excessive cc=2/busy-bit
730 * conditions will still take the queues down, only the threshold is
731 * higher due to the Guest LAN environment.
732 *
733 * Called from the outbound tasklet and the do_QDIO handler.
734 */
735static void qdio_kick_outbound_q(struct qdio_q *q)
736{
737 int rc;
738#ifdef CONFIG_QDIO_DEBUG
739 char dbf_text[15];
740
741 QDIO_DBF_TEXT5(0, trace, "kickoutq");
742 QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
743#endif /* CONFIG_QDIO_DEBUG */
744
745 if (!need_siga_out(q))
746 return;
747
748 rc = qdio_siga_output(q);
749 switch (rc) {
750 case 0:
751		/* went smoothly this time, reset the timestamp */
752 q->u.out.timestamp = 0;
753
754 /* TODO: improve error handling for CC=0 case */
755#ifdef CONFIG_QDIO_DEBUG
756 QDIO_DBF_TEXT3(0, trace, "cc2reslv");
757 sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
758 atomic_read(&q->u.out.busy_siga_counter));
759 QDIO_DBF_TEXT3(0, trace, dbf_text);
760#endif /* CONFIG_QDIO_DEBUG */
761 break;
762 /* cc=2 and busy bit */
763 case (2 | QDIO_ERROR_SIGA_BUSY):
764 atomic_inc(&q->u.out.busy_siga_counter);
765
766 /* if the last siga was successful, save timestamp here */
767 if (!q->u.out.timestamp)
768 q->u.out.timestamp = get_usecs();
769
770 /* if we're in time, don't touch qdio_error */
771 if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) {
772 tasklet_schedule(&q->tasklet);
773 break;
774 }
775 QDIO_DBF_TEXT2(0, trace, "cc2REPRT");
776#ifdef CONFIG_QDIO_DEBUG
777 sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
778 atomic_read(&q->u.out.busy_siga_counter));
779 QDIO_DBF_TEXT3(0, trace, dbf_text);
780#endif /* CONFIG_QDIO_DEBUG */
781 default:
782 /* for plain cc=1, 2 or 3 */
783 q->qdio_error = rc;
784 }
785}
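
/*
 * Editor's note: worked example of the retry scheme above, derived from
 * the code as written. Suppose SIGA-w keeps returning
 * 2 | QDIO_ERROR_SIGA_BUSY: the first hit stores u.out.timestamp, every
 * hit bumps busy_siga_counter and reschedules the tasklet, which retries
 * the SIGA that many times. Once get_usecs() - timestamp exceeds
 * QDIO_BUSY_BIT_GIVE_UP, the busy case falls through to the default
 * branch, q->qdio_error is set and the upper layer takes the queues
 * down; a single successful SIGA (rc == 0) resets the timestamp to 0.
 */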
786
787static void qdio_kick_outbound_handler(struct qdio_q *q)
788{
789 int start, end, count;
790#ifdef CONFIG_QDIO_DEBUG
791 char dbf_text[15];
792#endif
793
794 start = q->first_to_kick;
795 end = q->last_move_ftc;
796 if (end >= start)
797 count = end - start;
798 else
799 count = end + QDIO_MAX_BUFFERS_PER_Q - start;
800
801#ifdef CONFIG_QDIO_DEBUG
802 QDIO_DBF_TEXT4(0, trace, "kickouth");
803 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
804
805 sprintf(dbf_text, "s=%2xc=%2x", start, count);
806 QDIO_DBF_TEXT4(0, trace, dbf_text);
807#endif /* CONFIG_QDIO_DEBUG */
808
809 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
810 return;
811
812 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
813 q->irq_ptr->int_parm);
814
815 /* for the next time: */
816 q->first_to_kick = q->last_move_ftc;
817 q->qdio_error = 0;
818}
819
820static void __qdio_outbound_processing(struct qdio_q *q)
821{
822 int siga_attempts;
823
824 qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
825
826 /* see comment in qdio_kick_outbound_q */
827 siga_attempts = atomic_read(&q->u.out.busy_siga_counter);
828 while (siga_attempts--) {
829 atomic_dec(&q->u.out.busy_siga_counter);
830 qdio_kick_outbound_q(q);
831 }
832
833 BUG_ON(atomic_read(&q->nr_buf_used) < 0);
834
835 if (qdio_outbound_q_moved(q))
836 qdio_kick_outbound_handler(q);
837
838 if (queue_type(q) == QDIO_ZFCP_QFMT) {
839 if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
840 tasklet_schedule(&q->tasklet);
841 return;
842 }
843
844 /* bail out for HiperSockets unicast queues */
845 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
846 return;
847
848 if (q->u.out.pci_out_enabled)
849 return;
850
851 /*
852 * Now we know that queue type is either qeth without pci enabled
853 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
854 * EMPTY is noticed and outbound_handler is called after some time.
855 */
856 if (qdio_outbound_q_done(q))
857 del_timer(&q->u.out.timer);
858 else {
859 if (!timer_pending(&q->u.out.timer)) {
860 mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
861 qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
862 }
863 }
864}
865
866/* outbound tasklet */
867void qdio_outbound_processing(unsigned long data)
868{
869 struct qdio_q *q = (struct qdio_q *)data;
870 __qdio_outbound_processing(q);
871}
872
873void qdio_outbound_timer(unsigned long data)
874{
875 struct qdio_q *q = (struct qdio_q *)data;
876 tasklet_schedule(&q->tasklet);
877}
878
879/* called from thinint inbound tasklet */
880void qdio_check_outbound_after_thinint(struct qdio_q *q)
881{
882 struct qdio_q *out;
883 int i;
884
885 if (!pci_out_supported(q))
886 return;
887
888 for_each_output_queue(q->irq_ptr, out, i)
889 if (!qdio_outbound_q_done(out))
890 tasklet_schedule(&out->tasklet);
891}
892
893static inline void qdio_set_state(struct qdio_irq *irq_ptr,
894 enum qdio_irq_states state)
895{
896#ifdef CONFIG_QDIO_DEBUG
897 char dbf_text[15];
898
899 QDIO_DBF_TEXT5(0, trace, "newstate");
900 sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state);
901 QDIO_DBF_TEXT5(0, trace, dbf_text);
902#endif /* CONFIG_QDIO_DEBUG */
903
904 irq_ptr->state = state;
905 mb();
906}
907
908static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
909{
910 char dbf_text[15];
911
912 if (irb->esw.esw0.erw.cons) {
913 sprintf(dbf_text, "sens%4x", schid.sch_no);
914 QDIO_DBF_TEXT2(1, trace, dbf_text);
915 QDIO_DBF_HEX0(0, trace, irb, 64);
916 QDIO_DBF_HEX0(0, trace, irb->ecw, 64);
917 }
918}
919
920/* PCI interrupt handler */
921static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
922{
923 int i;
924 struct qdio_q *q;
925
926 qdio_perf_stat_inc(&perf_stats.pci_int);
927
928 for_each_input_queue(irq_ptr, q, i)
929 tasklet_schedule(&q->tasklet);
930
931 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
932 return;
933
934 for_each_output_queue(irq_ptr, q, i) {
935 if (qdio_outbound_q_done(q))
936 continue;
937
938 if (!siga_syncs_out_pci(q))
939 qdio_siga_sync_q(q);
940
941 tasklet_schedule(&q->tasklet);
942 }
943}
944
945static void qdio_handle_activate_check(struct ccw_device *cdev,
946 unsigned long intparm, int cstat, int dstat)
947{
948 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
949 struct qdio_q *q;
950 char dbf_text[15];
951
952 QDIO_DBF_TEXT2(1, trace, "ick2");
953 sprintf(dbf_text, "%s", cdev->dev.bus_id);
954 QDIO_DBF_TEXT2(1, trace, dbf_text);
955 QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int));
956 QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
957 QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
958
959 if (irq_ptr->nr_input_qs) {
960 q = irq_ptr->input_qs[0];
961 } else if (irq_ptr->nr_output_qs) {
962 q = irq_ptr->output_qs[0];
963 } else {
964 dump_stack();
965 goto no_handler;
966 }
967 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
968 0, -1, -1, irq_ptr->int_parm);
969no_handler:
970 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
971}
972
973static void qdio_call_shutdown(struct work_struct *work)
974{
975 struct ccw_device_private *priv;
976 struct ccw_device *cdev;
977
978 priv = container_of(work, struct ccw_device_private, kick_work);
979 cdev = priv->cdev;
980 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
981 put_device(&cdev->dev);
982}
983
984static void qdio_int_error(struct ccw_device *cdev)
985{
986 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
987
988 switch (irq_ptr->state) {
989 case QDIO_IRQ_STATE_INACTIVE:
990 case QDIO_IRQ_STATE_CLEANUP:
991 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
992 break;
993 case QDIO_IRQ_STATE_ESTABLISHED:
994 case QDIO_IRQ_STATE_ACTIVE:
995 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
996 if (get_device(&cdev->dev)) {
997 /* Can't call shutdown from interrupt context. */
998 PREPARE_WORK(&cdev->private->kick_work,
999 qdio_call_shutdown);
1000 queue_work(ccw_device_work, &cdev->private->kick_work);
1001 }
1002 break;
1003 default:
1004 WARN_ON(1);
1005 }
1006 wake_up(&cdev->private->wait_q);
1007}
1008
1009static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
1010 int dstat)
1011{
1012 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1013
1014 if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
1015 QDIO_DBF_TEXT2(1, setup, "eq:ckcon");
1016 goto error;
1017 }
1018
1019 if (!(dstat & DEV_STAT_DEV_END)) {
1020 QDIO_DBF_TEXT2(1, setup, "eq:no de");
1021 goto error;
1022 }
1023
1024 if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
1025 QDIO_DBF_TEXT2(1, setup, "eq:badio");
1026 goto error;
1027 }
1028 return 0;
1029error:
1030 QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
1031 QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
1032 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1033 return 1;
1034}
1035
1036static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
1037 int dstat)
1038{
1039 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1040 char dbf_text[15];
1041
1042 sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no);
1043 QDIO_DBF_TEXT0(0, setup, dbf_text);
1044 QDIO_DBF_TEXT0(0, trace, dbf_text);
1045
1046 if (!qdio_establish_check_errors(cdev, cstat, dstat))
1047 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
1048}
1049
1050/* qdio interrupt handler */
1051void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
1052 struct irb *irb)
1053{
1054 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1055 int cstat, dstat;
1056 char dbf_text[15];
1057
1058 qdio_perf_stat_inc(&perf_stats.qdio_int);
1059
1060 if (!intparm || !irq_ptr) {
1061 sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no);
1062 QDIO_DBF_TEXT2(1, setup, dbf_text);
1063 return;
1064 }
1065
1066 if (IS_ERR(irb)) {
1067 switch (PTR_ERR(irb)) {
1068 case -EIO:
1069 sprintf(dbf_text, "ierr%4x",
1070 cdev->private->schid.sch_no);
1071 QDIO_DBF_TEXT2(1, setup, dbf_text);
1072 qdio_int_error(cdev);
1073 return;
1074 case -ETIMEDOUT:
1075 sprintf(dbf_text, "qtoh%4x",
1076 cdev->private->schid.sch_no);
1077 QDIO_DBF_TEXT2(1, setup, dbf_text);
1078 qdio_int_error(cdev);
1079 return;
1080 default:
1081 WARN_ON(1);
1082 return;
1083 }
1084 }
1085 qdio_irq_check_sense(irq_ptr->schid, irb);
1086
1087 cstat = irb->scsw.cmd.cstat;
1088 dstat = irb->scsw.cmd.dstat;
1089
1090 switch (irq_ptr->state) {
1091 case QDIO_IRQ_STATE_INACTIVE:
1092 qdio_establish_handle_irq(cdev, cstat, dstat);
1093 break;
1094
1095 case QDIO_IRQ_STATE_CLEANUP:
1096 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1097 break;
1098
1099 case QDIO_IRQ_STATE_ESTABLISHED:
1100 case QDIO_IRQ_STATE_ACTIVE:
1101 if (cstat & SCHN_STAT_PCI) {
1102 qdio_int_handler_pci(irq_ptr);
1103 /* no state change so no need to wake up wait_q */
1104 return;
1105 }
1106 if ((cstat & ~SCHN_STAT_PCI) || dstat) {
1107 qdio_handle_activate_check(cdev, intparm, cstat,
1108 dstat);
1109 break;
1110 }
1111 default:
1112 WARN_ON(1);
1113 }
1114 wake_up(&cdev->private->wait_q);
1115}
1116
1117/**
1118 * qdio_get_ssqd_desc - get qdio subchannel description
1119 * @cdev: ccw device to get description for
1120 *
1121 * Returns a pointer to the saved qdio subchannel description,
1122 * or NULL if qdio is not set up for the device.
1123 */
1124struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev)
1125{
1126 struct qdio_irq *irq_ptr;
1127
1128 QDIO_DBF_TEXT0(0, setup, "getssqd");
1129
1130 irq_ptr = cdev->private->qdio_data;
1131 if (!irq_ptr)
1132 return NULL;
1133
1134 return &irq_ptr->ssqd_desc;
1135}
1136EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
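
/*
 * Editor's note: a minimal usage sketch, not part of this patch. A
 * hypothetical driver checks the cached SSQD to see whether SIGA-w is
 * needed; the qdioac1 field and AC1_SIGA_OUTPUT_NEEDED are the same
 * ones evaluated by qdio_setup_ssqd_info() in qdio_setup.c.
 */
static int my_driver_needs_siga_out(struct ccw_device *cdev)
{
	struct qdio_ssqd_desc *ssqd = qdio_get_ssqd_desc(cdev);

	if (!ssqd)
		return -ENODEV;		/* qdio not set up on this device */
	return (ssqd->qdioac1 & AC1_SIGA_OUTPUT_NEEDED) ? 1 : 0;
}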
1137
1138/**
1139 * qdio_cleanup - shutdown queues and free data structures
1140 * @cdev: associated ccw device
1141 * @how: use halt or clear to shutdown
1142 *
1143 * This function calls qdio_shutdown() for @cdev with method @how
1144 * and on success qdio_free() for @cdev.
1145 */
1146int qdio_cleanup(struct ccw_device *cdev, int how)
1147{
1148 struct qdio_irq *irq_ptr;
1149 char dbf_text[15];
1150 int rc;
1151
1152 irq_ptr = cdev->private->qdio_data;
1153 if (!irq_ptr)
1154 return -ENODEV;
1155
1156 sprintf(dbf_text, "qcln%4x", irq_ptr->schid.sch_no);
1157 QDIO_DBF_TEXT1(0, trace, dbf_text);
1158 QDIO_DBF_TEXT0(0, setup, dbf_text);
1159
1160 rc = qdio_shutdown(cdev, how);
1161 if (rc == 0)
1162 rc = qdio_free(cdev);
1163 return rc;
1164}
1165EXPORT_SYMBOL_GPL(qdio_cleanup);
1166
1167static void qdio_shutdown_queues(struct ccw_device *cdev)
1168{
1169 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1170 struct qdio_q *q;
1171 int i;
1172
1173 for_each_input_queue(irq_ptr, q, i)
1174 tasklet_disable(&q->tasklet);
1175
1176 for_each_output_queue(irq_ptr, q, i) {
1177 tasklet_disable(&q->tasklet);
1178 del_timer(&q->u.out.timer);
1179 }
1180}
1181
1182/**
1183 * qdio_shutdown - shut down a qdio subchannel
1184 * @cdev: associated ccw device
1185 * @how: use halt or clear to shutdown
1186 */
1187int qdio_shutdown(struct ccw_device *cdev, int how)
1188{
1189 struct qdio_irq *irq_ptr;
1190 int rc;
1191 unsigned long flags;
1192 char dbf_text[15];
1193
1194 irq_ptr = cdev->private->qdio_data;
1195 if (!irq_ptr)
1196 return -ENODEV;
1197
1198 mutex_lock(&irq_ptr->setup_mutex);
1199 /*
1200 * Subchannel was already shot down. We cannot prevent being called
1201 * twice since cio may trigger a shutdown asynchronously.
1202 */
1203 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1204 mutex_unlock(&irq_ptr->setup_mutex);
1205 return 0;
1206 }
1207
1208 sprintf(dbf_text, "qsqs%4x", irq_ptr->schid.sch_no);
1209 QDIO_DBF_TEXT1(0, trace, dbf_text);
1210 QDIO_DBF_TEXT0(0, setup, dbf_text);
1211
1212 tiqdio_remove_input_queues(irq_ptr);
1213 qdio_shutdown_queues(cdev);
1214 qdio_shutdown_debug_entries(irq_ptr, cdev);
1215
1216 /* cleanup subchannel */
1217 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1218
1219 if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
1220 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
1221 else
1222 /* default behaviour is halt */
1223 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
1224 if (rc) {
1225 sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no);
1226 QDIO_DBF_TEXT0(0, setup, dbf_text);
1227 sprintf(dbf_text, "rc=%d", rc);
1228 QDIO_DBF_TEXT0(0, setup, dbf_text);
1229 goto no_cleanup;
1230 }
1231
1232 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
1233 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1234 wait_event_interruptible_timeout(cdev->private->wait_q,
1235 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
1236 irq_ptr->state == QDIO_IRQ_STATE_ERR,
1237 10 * HZ);
1238 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1239
1240no_cleanup:
1241 qdio_shutdown_thinint(irq_ptr);
1242
1243 /* restore interrupt handler */
1244 if ((void *)cdev->handler == (void *)qdio_int_handler)
1245 cdev->handler = irq_ptr->orig_handler;
1246 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1247
1248 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1249 mutex_unlock(&irq_ptr->setup_mutex);
1250 module_put(THIS_MODULE);
1251 if (rc)
1252 return rc;
1253 return 0;
1254}
1255EXPORT_SYMBOL_GPL(qdio_shutdown);
1256
1257/**
1258 * qdio_free - free data structures for a qdio subchannel
1259 * @cdev: associated ccw device
1260 */
1261int qdio_free(struct ccw_device *cdev)
1262{
1263 struct qdio_irq *irq_ptr;
1264 char dbf_text[15];
1265
1266 irq_ptr = cdev->private->qdio_data;
1267 if (!irq_ptr)
1268 return -ENODEV;
1269
1270 mutex_lock(&irq_ptr->setup_mutex);
1271
1272 sprintf(dbf_text, "qfqs%4x", irq_ptr->schid.sch_no);
1273 QDIO_DBF_TEXT1(0, trace, dbf_text);
1274 QDIO_DBF_TEXT0(0, setup, dbf_text);
1275
1276 cdev->private->qdio_data = NULL;
1277 mutex_unlock(&irq_ptr->setup_mutex);
1278
1279 qdio_release_memory(irq_ptr);
1280 return 0;
1281}
1282EXPORT_SYMBOL_GPL(qdio_free);
1283
1284/**
1285 * qdio_initialize - allocate and establish queues for a qdio subchannel
1286 * @init_data: initialization data
1287 *
1288 * This function first allocates queues via qdio_allocate() and on success
1289 * establishes them via qdio_establish().
1290 */
1291int qdio_initialize(struct qdio_initialize *init_data)
1292{
1293 int rc;
1294 char dbf_text[15];
1295
1296 sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no);
1297 QDIO_DBF_TEXT0(0, setup, dbf_text);
1298 QDIO_DBF_TEXT0(0, trace, dbf_text);
1299
1300 rc = qdio_allocate(init_data);
1301 if (rc)
1302 return rc;
1303
1304 rc = qdio_establish(init_data);
1305 if (rc)
1306 qdio_free(init_data->cdev);
1307 return rc;
1308}
1309EXPORT_SYMBOL_GPL(qdio_initialize);
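
/*
 * Editor's note: a hedged driver-side sketch, not part of this patch.
 * The handler and the SBAL arrays are hypothetical; the field names
 * match struct qdio_initialize as consumed by qdio_setup_irq() and
 * setup_queues() in qdio_setup.c, and the handler signature is assumed
 * from the q->handler() invocations above.
 */
static void my_handler(struct ccw_device *cdev, unsigned int qdio_err,
		       int queue, int first, int count, unsigned long parm)
{
	/* empty or refill buffers first..first+count-1 of queue */
}

static int my_driver_init_qdio(struct ccw_device *cdev, void **in_sbals,
			       void **out_sbals)
{
	struct qdio_initialize init_data = {
		.cdev			= cdev,
		.q_format		= QDIO_QETH_QFMT,
		.no_input_qs		= 1,
		.no_output_qs		= 1,
		.input_handler		= my_handler,
		.output_handler		= my_handler,
		.int_parm		= (unsigned long)cdev,
		.input_sbal_addr_array	= in_sbals,
		.output_sbal_addr_array	= out_sbals,
	};
	memcpy(init_data.adapter_name, "MYDRV   ", 8);

	/* qdio_allocate() + qdio_establish(), freeing again on failure */
	return qdio_initialize(&init_data);
}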
1310
1311/**
1312 * qdio_allocate - allocate qdio queues and associated data
1313 * @init_data: initialization data
1314 */
1315int qdio_allocate(struct qdio_initialize *init_data)
1316{
1317 struct qdio_irq *irq_ptr;
1318 char dbf_text[15];
1319
1320 sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no);
1321 QDIO_DBF_TEXT0(0, setup, dbf_text);
1322 QDIO_DBF_TEXT0(0, trace, dbf_text);
1323
1324 if ((init_data->no_input_qs && !init_data->input_handler) ||
1325 (init_data->no_output_qs && !init_data->output_handler))
1326 return -EINVAL;
1327
1328 if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
1329 (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
1330 return -EINVAL;
1331
1332 if ((!init_data->input_sbal_addr_array) ||
1333 (!init_data->output_sbal_addr_array))
1334 return -EINVAL;
1335
1336 qdio_allocate_do_dbf(init_data);
1337
1338 /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
1339 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1340 if (!irq_ptr)
1341 goto out_err;
1342 QDIO_DBF_TEXT0(0, setup, "irq_ptr:");
1343 QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *));
1344
1345 mutex_init(&irq_ptr->setup_mutex);
1346
1347 /*
1348 * Allocate a page for the chsc calls in qdio_establish.
1349 * Must be pre-allocated since a zfcp recovery will call
1350 * qdio_establish. In case of low memory and swap on a zfcp disk
1351 * we may not be able to allocate memory otherwise.
1352 */
1353 irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
1354 if (!irq_ptr->chsc_page)
1355 goto out_rel;
1356
1357 /* qdr is used in ccw1.cda which is u32 */
1358 irq_ptr->qdr = kzalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
1359 if (!irq_ptr->qdr)
1360 goto out_rel;
1361 WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
1362
1363 QDIO_DBF_TEXT0(0, setup, "qdr:");
1364 QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *));
1365
1366 if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
1367 init_data->no_output_qs))
1368 goto out_rel;
1369
1370 init_data->cdev->private->qdio_data = irq_ptr;
1371 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1372 return 0;
1373out_rel:
1374 qdio_release_memory(irq_ptr);
1375out_err:
1376 return -ENOMEM;
1377}
1378EXPORT_SYMBOL_GPL(qdio_allocate);
1379
1380/**
1381 * qdio_establish - establish queues on a qdio subchannel
1382 * @init_data: initialization data
1383 */
1384int qdio_establish(struct qdio_initialize *init_data)
1385{
1386 char dbf_text[20];
1387 struct qdio_irq *irq_ptr;
1388 struct ccw_device *cdev = init_data->cdev;
1389 unsigned long saveflags;
1390 int rc;
1391
1392 irq_ptr = cdev->private->qdio_data;
1393 if (!irq_ptr)
1394 return -ENODEV;
1395
1396 if (cdev->private->state != DEV_STATE_ONLINE)
1397 return -EINVAL;
1398
1399 if (!try_module_get(THIS_MODULE))
1400 return -EINVAL;
1401
1402 sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no);
1403 QDIO_DBF_TEXT0(0, setup, dbf_text);
1404 QDIO_DBF_TEXT0(0, trace, dbf_text);
1405
1406 mutex_lock(&irq_ptr->setup_mutex);
1407 qdio_setup_irq(init_data);
1408
1409 rc = qdio_establish_thinint(irq_ptr);
1410 if (rc) {
1411 mutex_unlock(&irq_ptr->setup_mutex);
1412 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1413 return rc;
1414 }
1415
1416 /* establish q */
1417 irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
1418 irq_ptr->ccw.flags = CCW_FLAG_SLI;
1419 irq_ptr->ccw.count = irq_ptr->equeue.count;
1420 irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
1421
1422 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1423 ccw_device_set_options_mask(cdev, 0);
1424
1425 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
1426 if (rc) {
1427 sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no);
1428 QDIO_DBF_TEXT2(1, setup, dbf_text);
1429 sprintf(dbf_text, "eq:rc%4x", rc);
1430 QDIO_DBF_TEXT2(1, setup, dbf_text);
1431 }
1432 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1433
1434 if (rc) {
1435 mutex_unlock(&irq_ptr->setup_mutex);
1436 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1437 return rc;
1438 }
1439
1440 wait_event_interruptible_timeout(cdev->private->wait_q,
1441 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
1442 irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
1443
1444 if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
1445 mutex_unlock(&irq_ptr->setup_mutex);
1446 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1447 return -EIO;
1448 }
1449
1450 qdio_setup_ssqd_info(irq_ptr);
1451 sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac);
1452 QDIO_DBF_TEXT2(0, setup, dbf_text);
1453
1454 /* qebsm is now setup if available, initialize buffer states */
1455 qdio_init_buf_states(irq_ptr);
1456
1457 mutex_unlock(&irq_ptr->setup_mutex);
1458 qdio_print_subchannel_info(irq_ptr, cdev);
1459 qdio_setup_debug_entries(irq_ptr, cdev);
1460 return 0;
1461}
1462EXPORT_SYMBOL_GPL(qdio_establish);
1463
1464/**
1465 * qdio_activate - activate queues on a qdio subchannel
1466 * @cdev: associated cdev
1467 */
1468int qdio_activate(struct ccw_device *cdev)
1469{
1470 struct qdio_irq *irq_ptr;
1471 int rc;
1472 unsigned long saveflags;
1473 char dbf_text[20];
1474
1475 irq_ptr = cdev->private->qdio_data;
1476 if (!irq_ptr)
1477 return -ENODEV;
1478
1479 if (cdev->private->state != DEV_STATE_ONLINE)
1480 return -EINVAL;
1481
1482 mutex_lock(&irq_ptr->setup_mutex);
1483 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1484 rc = -EBUSY;
1485 goto out;
1486 }
1487
1488 sprintf(dbf_text, "qact%4x", irq_ptr->schid.sch_no);
1489 QDIO_DBF_TEXT2(0, setup, dbf_text);
1490 QDIO_DBF_TEXT2(0, trace, dbf_text);
1491
1492 irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
1493 irq_ptr->ccw.flags = CCW_FLAG_SLI;
1494 irq_ptr->ccw.count = irq_ptr->aqueue.count;
1495 irq_ptr->ccw.cda = 0;
1496
1497 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1498 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
1499
1500 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
1501 0, DOIO_DENY_PREFETCH);
1502 if (rc) {
1503 sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no);
1504 QDIO_DBF_TEXT2(1, setup, dbf_text);
1505 sprintf(dbf_text, "aq:rc%4x", rc);
1506 QDIO_DBF_TEXT2(1, setup, dbf_text);
1507 }
1508 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1509
1510 if (rc)
1511 goto out;
1512
1513 if (is_thinint_irq(irq_ptr))
1514 tiqdio_add_input_queues(irq_ptr);
1515
1516 /* wait for subchannel to become active */
1517 msleep(5);
1518
1519 switch (irq_ptr->state) {
1520 case QDIO_IRQ_STATE_STOPPED:
1521 case QDIO_IRQ_STATE_ERR:
1522 mutex_unlock(&irq_ptr->setup_mutex);
1523 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1524 return -EIO;
1525 default:
1526 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
1527 rc = 0;
1528 }
1529out:
1530 mutex_unlock(&irq_ptr->setup_mutex);
1531 return rc;
1532}
1533EXPORT_SYMBOL_GPL(qdio_activate);
1534
1535static inline int buf_in_between(int bufnr, int start, int count)
1536{
1537 int end = add_buf(start, count);
1538
1539 if (end > start) {
1540 if (bufnr >= start && bufnr < end)
1541 return 1;
1542 else
1543 return 0;
1544 }
1545
1546 /* wrap-around case */
1547 if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1548 (bufnr < end))
1549 return 1;
1550 else
1551 return 0;
1552}
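
/*
 * Editor's note: worked example of the wrap-around case above. With
 * QDIO_MAX_BUFFERS_PER_Q == 128, start = 120 and count = 16 give
 * end = add_buf(120, 16) = 8; buffers 120..127 and 0..7 are "in
 * between", so buf_in_between(125, 120, 16) and buf_in_between(3, 120,
 * 16) both return 1, while buf_in_between(50, 120, 16) returns 0.
 */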
1553
1554/**
1555 * handle_inbound - reset processed input buffers
1556 * @q: queue containing the buffers
1557 * @callflags: flags
1558 * @bufnr: first buffer to process
1559 * @count: how many buffers are emptied
1560 */
1561static void handle_inbound(struct qdio_q *q, unsigned int callflags,
1562 int bufnr, int count)
1563{
1564 unsigned long flags;
1565 int used, rc;
1566
1567 /*
1568	 * do_QDIO could run in parallel with the queue tasklet, so the
1569	 * upper-layer program could empty the ACKed buffer here.
1570 * If that happens we must clear the polling flag, otherwise
1571 * qdio_stop_polling() could set the buffer to NOT_INIT after
1572 * it was set to EMPTY which would kill us.
1573 */
1574 spin_lock_irqsave(&q->u.in.lock, flags);
1575 if (q->u.in.polling)
1576 if (buf_in_between(q->last_move_ftc, bufnr, count))
1577 q->u.in.polling = 0;
1578
1579 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
1580 spin_unlock_irqrestore(&q->u.in.lock, flags);
1581
1582 used = atomic_add_return(count, &q->nr_buf_used) - count;
1583 BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
1584
1585 /* no need to signal as long as the adapter had free buffers */
1586 if (used)
1587 return;
1588
1589 if (need_siga_in(q)) {
1590 rc = qdio_siga_input(q);
1591 if (rc)
1592 q->qdio_error = rc;
1593 }
1594}
1595
1596/**
1597 * handle_outbound - process filled outbound buffers
1598 * @q: queue containing the buffers
1599 * @callflags: flags
1600 * @bufnr: first buffer to process
1601 * @count: how many buffers are filled
1602 */
1603static void handle_outbound(struct qdio_q *q, unsigned int callflags,
1604 int bufnr, int count)
1605{
1606 unsigned char state;
1607 int used;
1608
1609 qdio_perf_stat_inc(&perf_stats.outbound_handler);
1610
1611 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1612 used = atomic_add_return(count, &q->nr_buf_used);
1613 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1614
1615 if (callflags & QDIO_FLAG_PCI_OUT)
1616 q->u.out.pci_out_enabled = 1;
1617 else
1618 q->u.out.pci_out_enabled = 0;
1619
1620 if (queue_type(q) == QDIO_IQDIO_QFMT) {
1621 if (multicast_outbound(q))
1622 qdio_kick_outbound_q(q);
1623 else
1624 /*
1625 * One siga-w per buffer required for unicast
1626 * HiperSockets.
1627 */
1628 while (count--)
1629 qdio_kick_outbound_q(q);
1630 goto out;
1631 }
1632
1633 if (need_siga_sync(q)) {
1634 qdio_siga_sync_q(q);
1635 goto out;
1636 }
1637
1638 /* try to fast requeue buffers */
1639 get_buf_state(q, prev_buf(bufnr), &state);
1640 if (state != SLSB_CU_OUTPUT_PRIMED)
1641 qdio_kick_outbound_q(q);
1642 else {
1643 QDIO_DBF_TEXT5(0, trace, "fast-req");
1644 qdio_perf_stat_inc(&perf_stats.fast_requeue);
1645 }
1646out:
1647 /* Fixme: could wait forever if called from process context */
1648 tasklet_schedule(&q->tasklet);
1649}
1650
1651/**
1652 * do_QDIO - process input or output buffers
1653 * @cdev: associated ccw_device for the qdio subchannel
1654 * @callflags: input or output and special flags from the program
1655 * @q_nr: queue number
1656 * @bufnr: buffer number
1657 * @count: how many buffers to process
1658 */
1659int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1660 int q_nr, int bufnr, int count)
1661{
1662 struct qdio_irq *irq_ptr;
1663#ifdef CONFIG_QDIO_DEBUG
1664 char dbf_text[20];
1665
1666 sprintf(dbf_text, "doQD%04x", cdev->private->schid.sch_no);
1667 QDIO_DBF_TEXT3(0, trace, dbf_text);
1668#endif /* CONFIG_QDIO_DEBUG */
1669
1670 if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
1671 (count > QDIO_MAX_BUFFERS_PER_Q) ||
1672 (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
1673 return -EINVAL;
1674
1675 if (!count)
1676 return 0;
1677
1678 irq_ptr = cdev->private->qdio_data;
1679 if (!irq_ptr)
1680 return -ENODEV;
1681
1682#ifdef CONFIG_QDIO_DEBUG
1683 if (callflags & QDIO_FLAG_SYNC_INPUT)
1684 QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr],
1685 sizeof(void *));
1686 else
1687 QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr],
1688 sizeof(void *));
1689
1690 sprintf(dbf_text, "flag%04x", callflags);
1691 QDIO_DBF_TEXT3(0, trace, dbf_text);
1692 sprintf(dbf_text, "qi%02xct%02x", bufnr, count);
1693 QDIO_DBF_TEXT3(0, trace, dbf_text);
1694#endif /* CONFIG_QDIO_DEBUG */
1695
1696 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1697 return -EBUSY;
1698
1699 if (callflags & QDIO_FLAG_SYNC_INPUT)
1700 handle_inbound(irq_ptr->input_qs[q_nr],
1701 callflags, bufnr, count);
1702 else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1703 handle_outbound(irq_ptr->output_qs[q_nr],
1704 callflags, bufnr, count);
1705 else {
1706 QDIO_DBF_TEXT3(1, trace, "doQD:inv");
1707 return -EINVAL;
1708 }
1709 return 0;
1710}
1711EXPORT_SYMBOL_GPL(do_QDIO);
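
/*
 * Editor's note: a hypothetical caller, not part of this patch. After
 * its input handler has emptied count buffers starting at bufnr, a
 * driver returns them to the adapter on input queue 0:
 */
static void my_driver_requeue_inbound(struct ccw_device *cdev,
				      int bufnr, int count)
{
	int rc;

	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
	if (rc)
		dev_err(&cdev->dev, "do_QDIO returned %d\n", rc);
}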
1712
1713static int __init init_QDIO(void)
1714{
1715 int rc;
1716
1717 rc = qdio_setup_init();
1718 if (rc)
1719 return rc;
1720 rc = tiqdio_allocate_memory();
1721 if (rc)
1722 goto out_cache;
1723 rc = qdio_debug_init();
1724 if (rc)
1725 goto out_ti;
1726 rc = qdio_setup_perf_stats();
1727 if (rc)
1728 goto out_debug;
1729 rc = tiqdio_register_thinints();
1730 if (rc)
1731 goto out_perf;
1732 return 0;
1733
1734out_perf:
1735 qdio_remove_perf_stats();
1736out_debug:
1737 qdio_debug_exit();
1738out_ti:
1739 tiqdio_free_memory();
1740out_cache:
1741 qdio_setup_exit();
1742 return rc;
1743}
1744
1745static void __exit exit_QDIO(void)
1746{
1747 tiqdio_unregister_thinints();
1748 tiqdio_free_memory();
1749 qdio_remove_perf_stats();
1750 qdio_debug_exit();
1751 qdio_setup_exit();
1752}
1753
1754module_init(init_QDIO);
1755module_exit(exit_QDIO);
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
new file mode 100644
index 000000000000..ea01b85b1cc9
--- /dev/null
+++ b/drivers/s390/cio/qdio_perf.c
@@ -0,0 +1,151 @@
1/*
2 * drivers/s390/cio/qdio_perf.c
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#include <linux/kernel.h>
9#include <linux/proc_fs.h>
10#include <linux/seq_file.h>
11#include <asm/ccwdev.h>
12
13#include "cio.h"
14#include "css.h"
15#include "device.h"
16#include "ioasm.h"
17#include "chsc.h"
18#include "qdio_debug.h"
19#include "qdio_perf.h"
20
21int qdio_performance_stats;
22struct qdio_perf_stats perf_stats;
23
24#ifdef CONFIG_PROC_FS
25static struct proc_dir_entry *qdio_perf_pde;
26#endif
27
28inline void qdio_perf_stat_inc(atomic_long_t *count)
29{
30 if (qdio_performance_stats)
31 atomic_long_inc(count);
32}
33
34inline void qdio_perf_stat_dec(atomic_long_t *count)
35{
36 if (qdio_performance_stats)
37 atomic_long_dec(count);
38}
39
40/*
41 * procfs functions
42 */
43static int qdio_perf_proc_show(struct seq_file *m, void *v)
44{
45 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
46 (long)atomic_long_read(&perf_stats.qdio_int));
47 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
48 (long)atomic_long_read(&perf_stats.pci_int));
49 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
50 (long)atomic_long_read(&perf_stats.thin_int));
51 seq_printf(m, "\n");
52 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
53 (long)atomic_long_read(&perf_stats.tasklet_inbound));
54 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
55 (long)atomic_long_read(&perf_stats.tasklet_outbound));
56 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
57 (long)atomic_long_read(&perf_stats.tasklet_thinint),
58 (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
59 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
60 (long)atomic_long_read(&perf_stats.thinint_inbound),
61 (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
62 seq_printf(m, "\n");
63 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
64 (long)atomic_long_read(&perf_stats.siga_in));
65 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
66 (long)atomic_long_read(&perf_stats.siga_out));
67 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
68 (long)atomic_long_read(&perf_stats.siga_sync));
69 seq_printf(m, "\n");
70 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
71 (long)atomic_long_read(&perf_stats.inbound_handler));
72 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
73 (long)atomic_long_read(&perf_stats.outbound_handler));
74 seq_printf(m, "\n");
75 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
76 (long)atomic_long_read(&perf_stats.fast_requeue));
77 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
78 (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
79 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
80 (long)atomic_long_read(&perf_stats.debug_stop_polling));
81 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
82 (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
83 seq_printf(m, "\n");
84 return 0;
85}
86static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
87{
88 return single_open(filp, qdio_perf_proc_show, NULL);
89}
90
91static struct file_operations qdio_perf_proc_fops = {
92 .owner = THIS_MODULE,
93 .open = qdio_perf_seq_open,
94 .read = seq_read,
95 .llseek = seq_lseek,
96 .release = single_release,
97};
98
99/*
100 * sysfs functions
101 */
102static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
103{
104 return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
105}
106
107static ssize_t qdio_perf_stats_store(struct bus_type *bus,
108 const char *buf, size_t count)
109{
110 unsigned long i;
111
112 if (strict_strtoul(buf, 16, &i) != 0)
113 return -EINVAL;
114 if ((i != 0) && (i != 1))
115 return -EINVAL;
116 if (i == qdio_performance_stats)
117 return count;
118
119 qdio_performance_stats = i;
120 /* reset performance statistics */
121 if (i == 0)
122 memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
123 return count;
124}
125
126static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
127 qdio_perf_stats_store);
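
/*
 * Editor's note: with the bus attribute above (created on ccw_bus_type
 * in qdio_setup_perf_stats() below), the counters can be driven from
 * user space; the sysfs path is assumed from the bus name:
 *
 *   echo 1 > /sys/bus/ccw/qdio_performance_stats    (enable counting)
 *   echo 0 > /sys/bus/ccw/qdio_performance_stats    (disable and reset)
 *   cat /proc/qdio_perf                             (dump the counters)
 */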
128
129int __init qdio_setup_perf_stats(void)
130{
131 int rc;
132
133 rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
134 if (rc)
135 return rc;
136
137#ifdef CONFIG_PROC_FS
138 memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
139 qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
140 NULL, &qdio_perf_proc_fops);
141#endif
142 return 0;
143}
144
145void __exit qdio_remove_perf_stats(void)
146{
147#ifdef CONFIG_PROC_FS
148 remove_proc_entry("qdio_perf", NULL);
149#endif
150 bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
151}
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
new file mode 100644
index 000000000000..5c406a8b7387
--- /dev/null
+++ b/drivers/s390/cio/qdio_perf.h
@@ -0,0 +1,54 @@
1/*
2 * drivers/s390/cio/qdio_perf.h
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */
8#ifndef QDIO_PERF_H
9#define QDIO_PERF_H
10
11#include <linux/types.h>
12#include <linux/device.h>
13#include <asm/atomic.h>
14
15struct qdio_perf_stats {
16 /* interrupt handler calls */
17 atomic_long_t qdio_int;
18 atomic_long_t pci_int;
19 atomic_long_t thin_int;
20
21 /* tasklet runs */
22 atomic_long_t tasklet_inbound;
23 atomic_long_t tasklet_outbound;
24 atomic_long_t tasklet_thinint;
25 atomic_long_t tasklet_thinint_loop;
26 atomic_long_t thinint_inbound;
27 atomic_long_t thinint_inbound_loop;
28 atomic_long_t thinint_inbound_loop2;
29
30 /* signal adapter calls */
31 atomic_long_t siga_out;
32 atomic_long_t siga_in;
33 atomic_long_t siga_sync;
34
35 /* misc */
36 atomic_long_t inbound_handler;
37 atomic_long_t outbound_handler;
38 atomic_long_t fast_requeue;
39
40 /* for debugging */
41 atomic_long_t debug_tl_out_timer;
42 atomic_long_t debug_stop_polling;
43};
44
45extern struct qdio_perf_stats perf_stats;
46extern int qdio_performance_stats;
47
48int qdio_setup_perf_stats(void);
49void qdio_remove_perf_stats(void);
50
51extern void qdio_perf_stat_inc(atomic_long_t *count);
52extern void qdio_perf_stat_dec(atomic_long_t *count);
53
54#endif
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
new file mode 100644
index 000000000000..f0923a8aceda
--- /dev/null
+++ b/drivers/s390/cio/qdio_setup.c
@@ -0,0 +1,521 @@
1/*
2 * drivers/s390/cio/qdio_setup.c
3 *
4 * qdio queue initialization
5 *
6 * Copyright (C) IBM Corp. 2008
7 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
8 */
9#include <linux/kernel.h>
10#include <linux/slab.h>
11#include <asm/qdio.h>
12
13#include "cio.h"
14#include "css.h"
15#include "device.h"
16#include "ioasm.h"
17#include "chsc.h"
18#include "qdio.h"
19#include "qdio_debug.h"
20
21static struct kmem_cache *qdio_q_cache;
22
23/*
24 * qebsm is only available on 64-bit, but the adapter sets the feature
25 * flag anyway, so we manually override it on 31-bit.
26 */
27static inline int qebsm_possible(void)
28{
29#ifdef CONFIG_64BIT
30 return css_general_characteristics.qebsm;
31#endif
32 return 0;
33}
34
35/*
36 * qib_param_field: pointer to 128 bytes or NULL, if no param field
37 * nr_input_qs: pointer to nr_queues*128 words of data or NULL
38 */
39static void set_impl_params(struct qdio_irq *irq_ptr,
40 unsigned int qib_param_field_format,
41 unsigned char *qib_param_field,
42 unsigned long *input_slib_elements,
43 unsigned long *output_slib_elements)
44{
45 struct qdio_q *q;
46 int i, j;
47
48 if (!irq_ptr)
49 return;
50
51 WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
52 irq_ptr->qib.pfmt = qib_param_field_format;
53 if (qib_param_field)
54 memcpy(irq_ptr->qib.parm, qib_param_field,
55 QDIO_MAX_BUFFERS_PER_Q);
56
57 if (!input_slib_elements)
58 goto output;
59
60 for_each_input_queue(irq_ptr, q, i) {
61 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
62 q->slib->slibe[j].parms =
63 input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
64 }
65output:
66 if (!output_slib_elements)
67 return;
68
69 for_each_output_queue(irq_ptr, q, i) {
70 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
71 q->slib->slibe[j].parms =
72 output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
73 }
74}
75
76static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
77{
78 struct qdio_q *q;
79 int i;
80
81 for (i = 0; i < nr_queues; i++) {
82 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
83 if (!q)
84 return -ENOMEM;
85 WARN_ON((unsigned long)q & 0xff);
86
87 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
88 if (!q->slib) {
89 kmem_cache_free(qdio_q_cache, q);
90 return -ENOMEM;
91 }
92 WARN_ON((unsigned long)q->slib & 0x7ff);
93 irq_ptr_qs[i] = q;
94 }
95 return 0;
96}
97
98int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
99{
100 int rc;
101
102 rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
103 if (rc)
104 return rc;
105 rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
106 return rc;
107}
108
109static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
110 qdio_handler_t *handler, int i)
111{
112 /* must be cleared by every qdio_establish */
113 memset(q, 0, ((char *)&q->slib) - ((char *)q));
114 memset(q->slib, 0, PAGE_SIZE);
115
116 q->irq_ptr = irq_ptr;
117 q->mask = 1 << (31 - i);
118 q->nr = i;
119 q->handler = handler;
120}
121
122static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
123 void **sbals_array, char *dbf_text, int i)
124{
125 struct qdio_q *prev;
126 int j;
127
128 QDIO_DBF_TEXT0(0, setup, dbf_text);
129 QDIO_DBF_HEX0(0, setup, &q, sizeof(void *));
130
131 q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
132
133 /* fill in sbal */
134 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
135 q->sbal[j] = *sbals_array++;
136 WARN_ON((unsigned long)q->sbal[j] & 0xff);
137 }
138
139 /* fill in slib */
140 if (i > 0) {
141 prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
142 : irq_ptr->output_qs[i - 1];
143 prev->slib->nsliba = (unsigned long)q->slib;
144 }
145
146 q->slib->sla = (unsigned long)q->sl;
147 q->slib->slsba = (unsigned long)&q->slsb.val[0];
148
149 /* fill in sl */
150 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
151 q->sl->element[j].sbal = (unsigned long)q->sbal[j];
152
153 QDIO_DBF_TEXT2(0, setup, "sl-sb-b0");
154 QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *));
155 QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *));
156 QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *));
157}
158
159static void setup_queues(struct qdio_irq *irq_ptr,
160 struct qdio_initialize *qdio_init)
161{
162 char dbf_text[20];
163 struct qdio_q *q;
164 void **input_sbal_array = qdio_init->input_sbal_addr_array;
165 void **output_sbal_array = qdio_init->output_sbal_addr_array;
166 int i;
167
168 sprintf(dbf_text, "qfqs%4x", qdio_init->cdev->private->schid.sch_no);
169 QDIO_DBF_TEXT0(0, setup, dbf_text);
170
171 for_each_input_queue(irq_ptr, q, i) {
172 sprintf(dbf_text, "in-q%4x", i);
173 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
174
175 q->is_input_q = 1;
176 spin_lock_init(&q->u.in.lock);
177 setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i);
178 input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
179
180 if (is_thinint_irq(irq_ptr))
181 tasklet_init(&q->tasklet, tiqdio_inbound_processing,
182 (unsigned long) q);
183 else
184 tasklet_init(&q->tasklet, qdio_inbound_processing,
185 (unsigned long) q);
186 }
187
188 for_each_output_queue(irq_ptr, q, i) {
189 sprintf(dbf_text, "outq%4x", i);
190 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
191
192 q->is_input_q = 0;
193 setup_storage_lists(q, irq_ptr, output_sbal_array,
194 dbf_text, i);
195 output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
196
197 tasklet_init(&q->tasklet, qdio_outbound_processing,
198 (unsigned long) q);
199 setup_timer(&q->u.out.timer, (void(*)(unsigned long))
200 &qdio_outbound_timer, (unsigned long)q);
201 }
202}
203
204static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
205{
206 if (qdioac & AC1_SIGA_INPUT_NEEDED)
207 irq_ptr->siga_flag.input = 1;
208 if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
209 irq_ptr->siga_flag.output = 1;
210 if (qdioac & AC1_SIGA_SYNC_NEEDED)
211 irq_ptr->siga_flag.sync = 1;
212 if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT)
213 irq_ptr->siga_flag.no_sync_ti = 1;
214 if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI)
215 irq_ptr->siga_flag.no_sync_out_pci = 1;
216
217 if (irq_ptr->siga_flag.no_sync_out_pci &&
218 irq_ptr->siga_flag.no_sync_ti)
219 irq_ptr->siga_flag.no_sync_out_ti = 1;
220}
221
222static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
223 unsigned char qdioac, unsigned long token)
224{
225 char dbf_text[15];
226
227 if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
228 goto no_qebsm;
229 if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
230 (!(qdioac & AC1_SC_QEBSM_ENABLED)))
231 goto no_qebsm;
232
233 irq_ptr->sch_token = token;
234
235 QDIO_DBF_TEXT0(0, setup, "V=V:1");
236 sprintf(dbf_text, "%8lx", irq_ptr->sch_token);
237 QDIO_DBF_TEXT0(0, setup, dbf_text);
238 return;
239
240no_qebsm:
241 irq_ptr->sch_token = 0;
242 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
243 QDIO_DBF_TEXT0(0, setup, "noV=V");
244}
245
246static int __get_ssqd_info(struct qdio_irq *irq_ptr)
247{
248 struct chsc_ssqd_area *ssqd;
249 int rc;
250
251 QDIO_DBF_TEXT0(0, setup, "getssqd");
252 ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
253 memset(ssqd, 0, PAGE_SIZE);
254
255 ssqd->request = (struct chsc_header) {
256 .length = 0x0010,
257 .code = 0x0024,
258 };
259 ssqd->first_sch = irq_ptr->schid.sch_no;
260 ssqd->last_sch = irq_ptr->schid.sch_no;
261 ssqd->ssid = irq_ptr->schid.ssid;
262
263 if (chsc(ssqd))
264 return -EIO;
265 rc = chsc_error_from_response(ssqd->response.code);
266 if (rc)
267 return rc;
268
269 if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
270 !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
271 (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no))
272 return -EINVAL;
273
274 memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
275 sizeof(struct qdio_ssqd_desc));
276 return 0;
277}
278
279void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
280{
281 unsigned char qdioac;
282 char dbf_text[15];
283 int rc;
284
285 rc = __get_ssqd_info(irq_ptr);
286 if (rc) {
287 QDIO_DBF_TEXT2(0, setup, "ssqdasig");
288 sprintf(dbf_text, "schno%x", irq_ptr->schid.sch_no);
289 QDIO_DBF_TEXT2(0, setup, dbf_text);
290 sprintf(dbf_text, "rc:%d", rc);
291 QDIO_DBF_TEXT2(0, setup, dbf_text);
292 /* all flags set, worst case */
293 qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
294 AC1_SIGA_SYNC_NEEDED;
295 } else
296 qdioac = irq_ptr->ssqd_desc.qdioac1;
297
298 check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
299 process_ac_flags(irq_ptr, qdioac);
300
301 sprintf(dbf_text, "qdioac%2x", qdioac);
302 QDIO_DBF_TEXT2(0, setup, dbf_text);
303}
304
305void qdio_release_memory(struct qdio_irq *irq_ptr)
306{
307 struct qdio_q *q;
308 int i;
309
310 /*
311	 * Must check the queue arrays manually since irq_ptr->nr_input_qs /
312	 * irq_ptr->nr_output_qs may not yet be set.
313 */
314 for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
315 q = irq_ptr->input_qs[i];
316 if (q) {
317 free_page((unsigned long) q->slib);
318 kmem_cache_free(qdio_q_cache, q);
319 }
320 }
321 for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
322 q = irq_ptr->output_qs[i];
323 if (q) {
324 free_page((unsigned long) q->slib);
325 kmem_cache_free(qdio_q_cache, q);
326 }
327 }
328 kfree(irq_ptr->qdr);
329 free_page(irq_ptr->chsc_page);
330 free_page((unsigned long) irq_ptr);
331}
332
333static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
334 struct qdio_q **irq_ptr_qs,
335 int i, int nr)
336{
337 irq_ptr->qdr->qdf0[i + nr].sliba =
338 (unsigned long)irq_ptr_qs[i]->slib;
339
340 irq_ptr->qdr->qdf0[i + nr].sla =
341 (unsigned long)irq_ptr_qs[i]->sl;
342
343 irq_ptr->qdr->qdf0[i + nr].slsba =
344 (unsigned long)&irq_ptr_qs[i]->slsb.val[0];
345
346 irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY;
347 irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY;
348 irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY;
349 irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY;
350}
351
352static void setup_qdr(struct qdio_irq *irq_ptr,
353 struct qdio_initialize *qdio_init)
354{
355 int i;
356
357 irq_ptr->qdr->qfmt = qdio_init->q_format;
358 irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
359 irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
360 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
361 irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
362 irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
363 irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY;
364
365 for (i = 0; i < qdio_init->no_input_qs; i++)
366 __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
367
368 for (i = 0; i < qdio_init->no_output_qs; i++)
369 __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
370 qdio_init->no_input_qs);
371}
372
373static void setup_qib(struct qdio_irq *irq_ptr,
374 struct qdio_initialize *init_data)
375{
376 if (qebsm_possible())
377 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
378
379 irq_ptr->qib.qfmt = init_data->q_format;
380 if (init_data->no_input_qs)
381 irq_ptr->qib.isliba =
382 (unsigned long)(irq_ptr->input_qs[0]->slib);
383 if (init_data->no_output_qs)
384 irq_ptr->qib.osliba =
385 (unsigned long)(irq_ptr->output_qs[0]->slib);
386 memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
387}
388
389int qdio_setup_irq(struct qdio_initialize *init_data)
390{
391 struct ciw *ciw;
392 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
393 int rc;
394
395 memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr));
396 /* wipes qib.ac, required by ar7063 */
397 memset(irq_ptr->qdr, 0, sizeof(struct qdr));
398
399 irq_ptr->int_parm = init_data->int_parm;
400 irq_ptr->nr_input_qs = init_data->no_input_qs;
401 irq_ptr->nr_output_qs = init_data->no_output_qs;
402
403 irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
404 irq_ptr->cdev = init_data->cdev;
405 setup_queues(irq_ptr, init_data);
406
407 setup_qib(irq_ptr, init_data);
408 qdio_setup_thinint(irq_ptr);
409 set_impl_params(irq_ptr, init_data->qib_param_field_format,
410 init_data->qib_param_field,
411 init_data->input_slib_elements,
412 init_data->output_slib_elements);
413
414 /* fill input and output descriptors */
415 setup_qdr(irq_ptr, init_data);
416
417 /* qdr, qib, sls, slsbs, slibs, sbales are filled now */
418
419 /* get qdio commands */
420 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
421 if (!ciw) {
422 QDIO_DBF_TEXT2(1, setup, "no eq");
423 rc = -EINVAL;
424 goto out_err;
425 }
426 irq_ptr->equeue = *ciw;
427
428 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
429 if (!ciw) {
430 QDIO_DBF_TEXT2(1, setup, "no aq");
431 rc = -EINVAL;
432 goto out_err;
433 }
434 irq_ptr->aqueue = *ciw;
435
436 /* set new interrupt handler */
437 irq_ptr->orig_handler = init_data->cdev->handler;
438 init_data->cdev->handler = qdio_int_handler;
439 return 0;
440out_err:
441 qdio_release_memory(irq_ptr);
442 return rc;
443}
444
445void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
446 struct ccw_device *cdev)
447{
448 char s[80];
449
450 sprintf(s, "%s ", cdev->dev.bus_id);
451
452 switch (irq_ptr->qib.qfmt) {
453 case QDIO_QETH_QFMT:
454 sprintf(s + strlen(s), "OSADE ");
455 break;
456 case QDIO_ZFCP_QFMT:
457 sprintf(s + strlen(s), "ZFCP ");
458 break;
459 case QDIO_IQDIO_QFMT:
460 sprintf(s + strlen(s), "HiperSockets ");
461 break;
462 }
463 sprintf(s + strlen(s), "using: ");
464
465 if (!is_thinint_irq(irq_ptr))
466 sprintf(s + strlen(s), "no");
467 sprintf(s + strlen(s), "AdapterInterrupts ");
469	if (!irq_ptr->sch_token)
469 sprintf(s + strlen(s), "no");
470 sprintf(s + strlen(s), "QEBSM ");
471 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
472 sprintf(s + strlen(s), "no");
473 sprintf(s + strlen(s), "OutboundPCI ");
474 if (!css_general_characteristics.aif_tdd)
475 sprintf(s + strlen(s), "no");
476 sprintf(s + strlen(s), "TDD\n");
477 printk(KERN_INFO "qdio: %s", s);
478
479 memset(s, 0, sizeof(s));
480 sprintf(s, "%s SIGA required: ", cdev->dev.bus_id);
481 if (irq_ptr->siga_flag.input)
482 sprintf(s + strlen(s), "Read ");
483 if (irq_ptr->siga_flag.output)
484 sprintf(s + strlen(s), "Write ");
485 if (irq_ptr->siga_flag.sync)
486 sprintf(s + strlen(s), "Sync ");
487 if (!irq_ptr->siga_flag.no_sync_ti)
488 sprintf(s + strlen(s), "SyncAI ");
489 if (!irq_ptr->siga_flag.no_sync_out_ti)
490 sprintf(s + strlen(s), "SyncOutAI ");
491 if (!irq_ptr->siga_flag.no_sync_out_pci)
492 sprintf(s + strlen(s), "SyncOutPCI");
493 sprintf(s + strlen(s), "\n");
494 printk(KERN_INFO "qdio: %s", s);
495}
496
497int __init qdio_setup_init(void)
498{
499 char dbf_text[15];
500
501 qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
502 256, 0, NULL);
503 if (!qdio_q_cache)
504 return -ENOMEM;
505
506 /* Check for OSA/FCP thin interrupts (bit 67). */
507 sprintf(dbf_text, "thini%1x",
508 (css_general_characteristics.aif_osa) ? 1 : 0);
509 QDIO_DBF_TEXT0(0, setup, dbf_text);
510
511 /* Check for QEBSM support in general (bit 58). */
512 sprintf(dbf_text, "cssQBS:%1x",
513 (qebsm_possible()) ? 1 : 0);
514 QDIO_DBF_TEXT0(0, setup, dbf_text);
515 return 0;
516}
517
518void __exit qdio_setup_exit(void)
519{
520 kmem_cache_destroy(qdio_q_cache);
521}
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
new file mode 100644
index 000000000000..9291a771d812
--- /dev/null
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -0,0 +1,380 @@
1/*
2 * linux/drivers/s390/cio/qdio_thinint.c
3 *
4 * thin interrupt support for qdio
5 *
6 * Copyright 2000-2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Cornelia Huck <cornelia.huck@de.ibm.com>
9 * Jan Glauber <jang@linux.vnet.ibm.com>
10 */
11#include <linux/io.h>
12#include <asm/atomic.h>
13#include <asm/debug.h>
14#include <asm/qdio.h>
15#include <asm/airq.h>
16#include <asm/isc.h>
17
18#include "cio.h"
19#include "ioasm.h"
20#include "qdio.h"
21#include "qdio_debug.h"
22#include "qdio_perf.h"
23
24/*
25 * Restriction: only 63 iqdio subchannels can have their own indicator;
26 * all subchannels after that share one indicator
27 */
28#define TIQDIO_NR_NONSHARED_IND 63
29#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
30#define TIQDIO_SHARED_IND 63
31
32/* list of thin interrupt input queues */
33static LIST_HEAD(tiq_list);
34
35/* adapter local summary indicator */
36static unsigned char *tiqdio_alsi;
37
38/* device state change indicators */
39struct indicator_t {
40 u32 ind; /* u32 because of compare-and-swap performance */
41 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
42};
43static struct indicator_t *q_indicators;
44
45static void tiqdio_tasklet_fn(unsigned long data);
46static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
47
48static int css_qdio_omit_svs;
49
50static inline unsigned long do_clear_global_summary(void)
51{
52 register unsigned long __fn asm("1") = 3;
53 register unsigned long __tmp asm("2");
54 register unsigned long __time asm("3");
55
56 asm volatile(
57 " .insn rre,0xb2650000,2,0"
58 : "+d" (__fn), "=d" (__tmp), "=d" (__time));
59 return __time;
60}
61
62/* returns addr for the device state change indicator */
63static u32 *get_indicator(void)
64{
65 int i;
66
67 for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
68 if (!atomic_read(&q_indicators[i].count)) {
69 atomic_set(&q_indicators[i].count, 1);
70 return &q_indicators[i].ind;
71 }
72
73 /* use the shared indicator */
74 atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
75 return &q_indicators[TIQDIO_SHARED_IND].ind;
76}
77
78static void put_indicator(u32 *addr)
79{
80 int i;
81
82 if (!addr)
83 return;
84 i = ((unsigned long)addr - (unsigned long)q_indicators) /
85 sizeof(struct indicator_t);
86 atomic_dec(&q_indicators[i].count);
87}
88
89void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
90{
91 struct qdio_q *q;
92 int i;
93
94 /* No TDD facility? If we must use SIGA-s we can also omit SVS. */
95 if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
96 css_qdio_omit_svs = 1;
97
98 for_each_input_queue(irq_ptr, q, i) {
99 list_add_rcu(&q->entry, &tiq_list);
100 synchronize_rcu();
101 }
102 xchg(irq_ptr->dsci, 1);
103 tasklet_schedule(&tiqdio_tasklet);
104}
105
106/*
107 * we cannot stop the tiqdio tasklet here since it is for all
108 * thinint qdio devices and it must run as long as there is a
109 * thinint device left
110 */
111void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
112{
113 struct qdio_q *q;
114 int i;
115
116 for_each_input_queue(irq_ptr, q, i) {
117 list_del_rcu(&q->entry);
118 synchronize_rcu();
119 }
120}
121
122static inline int tiqdio_inbound_q_done(struct qdio_q *q)
123{
124 unsigned char state;
125
126 if (!atomic_read(&q->nr_buf_used))
127 return 1;
128
129 qdio_siga_sync_q(q);
130 get_buf_state(q, q->first_to_check, &state);
131
132 if (state == SLSB_P_INPUT_PRIMED)
133 /* more work coming */
134 return 0;
135 return 1;
136}
137
138static inline int shared_ind(struct qdio_irq *irq_ptr)
139{
140 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
141}
142
143static void __tiqdio_inbound_processing(struct qdio_q *q)
144{
145 qdio_perf_stat_inc(&perf_stats.thinint_inbound);
146 qdio_sync_after_thinint(q);
147
148 /*
149 * Maybe we have work on our outbound queues... at least
150 * we have to check the PCI capable queues.
151 */
152 qdio_check_outbound_after_thinint(q);
153
154again:
155 if (!qdio_inbound_q_moved(q))
156 return;
157
158 qdio_kick_inbound_handler(q);
159
160 if (!tiqdio_inbound_q_done(q)) {
161 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
162 goto again;
163 }
164
165 qdio_stop_polling(q);
166 /*
167 * We need to check again to not lose initiative after
168 * resetting the ACK state.
169 */
170 if (!tiqdio_inbound_q_done(q)) {
171 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
172 goto again;
173 }
174}
175
176void tiqdio_inbound_processing(unsigned long data)
177{
178 struct qdio_q *q = (struct qdio_q *)data;
179
180 __tiqdio_inbound_processing(q);
181}
182
183/* check for work on all inbound thinint queues */
184static void tiqdio_tasklet_fn(unsigned long data)
185{
186 struct qdio_q *q;
187
188 qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
189again:
190
191 /* protect tiq_list entries, only changed in activate or shutdown */
192 rcu_read_lock();
193
194 list_for_each_entry_rcu(q, &tiq_list, entry)
195 /* only process queues from changed sets */
196 if (*q->irq_ptr->dsci) {
197
198 /* only clear it if the indicator is non-shared */
199 if (!shared_ind(q->irq_ptr))
200 xchg(q->irq_ptr->dsci, 0);
201 /*
202 * don't call inbound processing directly since
203 * that could starve other thinint queues
204 */
205 tasklet_schedule(&q->tasklet);
206 }
207
208 rcu_read_unlock();
209
210 /*
211 * if we used the shared indicator clear it now after all queues
212 * were processed
213 */
214 if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
215 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
216
217 /* prevent racing */
218 if (*tiqdio_alsi)
219 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
220 }
221
222 /* check for more work */
223 if (*tiqdio_alsi) {
224 xchg(tiqdio_alsi, 0);
225 qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
226 goto again;
227 }
228}
229
230/**
231 * tiqdio_thinint_handler - thin interrupt handler for qdio
232 * @ind: pointer to adapter local summary indicator
233 * @drv_data: NULL
234 */
235static void tiqdio_thinint_handler(void *ind, void *drv_data)
236{
237 qdio_perf_stat_inc(&perf_stats.thin_int);
238
239 /*
240 * SVS only when needed: issue SVS to benefit from iqdio interrupt
241 * avoidance (SVS clears adapter interrupt suppression overwrite)
242 */
243 if (!css_qdio_omit_svs)
244 do_clear_global_summary();
245
246 /*
247 * reset local summary indicator (tiqdio_alsi) to stop adapter
248 * interrupts for now, the tasklet will clean all dsci's
249 */
250 xchg((u8 *)ind, 0);
251 tasklet_hi_schedule(&tiqdio_tasklet);
252}
253
254static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
255{
256 struct scssc_area *scssc_area;
257 char dbf_text[15];
258 void *ptr;
259 int rc;
260
261 scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
262 memset(scssc_area, 0, PAGE_SIZE);
263
264 if (reset) {
265 scssc_area->summary_indicator_addr = 0;
266 scssc_area->subchannel_indicator_addr = 0;
267 } else {
268 scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
269 scssc_area->subchannel_indicator_addr =
270 virt_to_phys(irq_ptr->dsci);
271 }
272
273 scssc_area->request = (struct chsc_header) {
274 .length = 0x0fe0,
275 .code = 0x0021,
276 };
277 scssc_area->operation_code = 0;
278 scssc_area->ks = PAGE_DEFAULT_KEY;
279 scssc_area->kc = PAGE_DEFAULT_KEY;
280 scssc_area->isc = QDIO_AIRQ_ISC;
281 scssc_area->schid = irq_ptr->schid;
282
283 /* enable the time delay disablement facility */
284 if (css_general_characteristics.aif_tdd)
285 scssc_area->word_with_d_bit = 0x10000000;
286
287 rc = chsc(scssc_area);
288 if (rc)
289 return -EIO;
290
291 rc = chsc_error_from_response(scssc_area->response.code);
292 if (rc) {
293 sprintf(dbf_text, "sidR%4x", scssc_area->response.code);
294 QDIO_DBF_TEXT1(0, trace, dbf_text);
295 QDIO_DBF_TEXT1(0, setup, dbf_text);
296 ptr = &scssc_area->response;
297 QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN);
298 return rc;
299 }
300
301 QDIO_DBF_TEXT2(0, setup, "setscind");
302 QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr,
303 sizeof(unsigned long));
304 QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr,
305 sizeof(unsigned long));
306 return 0;
307}
308
309/* allocate non-shared indicators and shared indicator */
310int __init tiqdio_allocate_memory(void)
311{
312 q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
313 GFP_KERNEL);
314 if (!q_indicators)
315 return -ENOMEM;
316 return 0;
317}
318
319void tiqdio_free_memory(void)
320{
321 kfree(q_indicators);
322}
323
324int __init tiqdio_register_thinints(void)
325{
326 char dbf_text[20];
327
328 isc_register(QDIO_AIRQ_ISC);
329 tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
330 NULL, QDIO_AIRQ_ISC);
331 if (IS_ERR(tiqdio_alsi)) {
332 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi));
333 QDIO_DBF_TEXT0(0, setup, dbf_text);
334 tiqdio_alsi = NULL;
335 isc_unregister(QDIO_AIRQ_ISC);
336 return -ENOMEM;
337 }
338 return 0;
339}
340
341int qdio_establish_thinint(struct qdio_irq *irq_ptr)
342{
343 if (!is_thinint_irq(irq_ptr))
344 return 0;
345
346 /* Check for aif time delay disablement. If installed,
347 * omit SVS even under LPAR
348 */
349 if (css_general_characteristics.aif_tdd)
350 css_qdio_omit_svs = 1;
351 return set_subchannel_ind(irq_ptr, 0);
352}
353
354void qdio_setup_thinint(struct qdio_irq *irq_ptr)
355{
356 if (!is_thinint_irq(irq_ptr))
357 return;
358 irq_ptr->dsci = get_indicator();
359 QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *));
360}
361
362void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
363{
364 if (!is_thinint_irq(irq_ptr))
365 return;
366
367 /* reset adapter interrupt indicators */
368 put_indicator(irq_ptr->dsci);
369 set_subchannel_ind(irq_ptr, 1);
370}
371
372void __exit tiqdio_unregister_thinints(void)
373{
374 tasklet_disable(&tiqdio_tasklet);
375
376 if (tiqdio_alsi) {
377 s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
378 isc_unregister(QDIO_AIRQ_ISC);
379 }
380}
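Stepping back, the entry points in this new file pair up around the lifetime of the device state change indicator. A sketch of the call order implied by this page (qdio_setup_irq() above calls qdio_setup_thinint(); the establish/shutdown callers live in qdio_main.c, also added by this commit):

	/* illustrative order for one thinint-capable subchannel */
	qdio_setup_thinint(irq_ptr);       /* get_indicator(): own dsci, shared after 63 users */
	qdio_establish_thinint(irq_ptr);   /* CHSC: bind tiqdio_alsi and the dsci to the subchannel */
	tiqdio_add_input_queues(irq_ptr);  /* input queues become visible to tiqdio_tasklet_fn() */
	/* ... running: tiqdio_thinint_handler() drives the tasklet ... */
	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_thinint(irq_ptr);    /* put_indicator() + CHSC reset */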
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 699ac11debd8..1895dbb553cd 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -239,11 +239,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
239/*not used unless the microcode gets patched*/ 239/*not used unless the microcode gets patched*/
240#define QETH_PCI_TIMER_VALUE(card) 3 240#define QETH_PCI_TIMER_VALUE(card) 3
241 241
242#define QETH_MIN_INPUT_THRESHOLD 1
243#define QETH_MAX_INPUT_THRESHOLD 500
244#define QETH_MIN_OUTPUT_THRESHOLD 1
245#define QETH_MAX_OUTPUT_THRESHOLD 300
246
247/* priority queing */ 242/* priority queing */
248#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING 243#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
249#define QETH_DEFAULT_QUEUE 2 244#define QETH_DEFAULT_QUEUE 2
@@ -811,17 +806,14 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
811struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, 806struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
812 enum qeth_ipa_cmds, enum qeth_prot_versions); 807 enum qeth_ipa_cmds, enum qeth_prot_versions);
813int qeth_query_setadapterparms(struct qeth_card *); 808int qeth_query_setadapterparms(struct qeth_card *);
814int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, 809int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *);
815 unsigned int, const char *);
816void qeth_queue_input_buffer(struct qeth_card *, int); 810void qeth_queue_input_buffer(struct qeth_card *, int);
817struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, 811struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
818 struct qdio_buffer *, struct qdio_buffer_element **, int *, 812 struct qdio_buffer *, struct qdio_buffer_element **, int *,
819 struct qeth_hdr **); 813 struct qeth_hdr **);
820void qeth_schedule_recovery(struct qeth_card *); 814void qeth_schedule_recovery(struct qeth_card *);
821void qeth_qdio_output_handler(struct ccw_device *, unsigned int, 815void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
822 unsigned int, unsigned int, 816 int, int, int, unsigned long);
823 unsigned int, int, int,
824 unsigned long);
825void qeth_clear_ipacmd_list(struct qeth_card *); 817void qeth_clear_ipacmd_list(struct qeth_card *);
826int qeth_qdio_clear_card(struct qeth_card *, int); 818int qeth_qdio_clear_card(struct qeth_card *, int);
827void qeth_clear_working_pool_list(struct qeth_card *); 819void qeth_clear_working_pool_list(struct qeth_card *);
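The prototype shrink above is the core of the qeth side of this patch: the separate status and siga_error words are folded into a single qdio_error. A sketch of an input handler with the reduced signature, modelled on the qeth_l2/l3 hunks below (function and variable names hypothetical):

	static void example_input_handler(struct ccw_device *cdev,
			unsigned int qdio_err, unsigned int queue,
			int first_element, int count, unsigned long card_ptr)
	{
		if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
			/* fatal: trigger device recovery, as the qeth handlers do */
			return;
		}
		/* otherwise process buffers first_element .. first_element + count - 1,
		 * checking qdio_err per buffer (cf. qeth_check_qdio_errors()) */
	}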
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 0ac54dc638c2..c3ad89e302bd 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2073,7 +2073,7 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2073static int qeth_qdio_activate(struct qeth_card *card) 2073static int qeth_qdio_activate(struct qeth_card *card)
2074{ 2074{
2075 QETH_DBF_TEXT(SETUP, 3, "qdioact"); 2075 QETH_DBF_TEXT(SETUP, 3, "qdioact");
2076 return qdio_activate(CARD_DDEV(card), 0); 2076 return qdio_activate(CARD_DDEV(card));
2077} 2077}
2078 2078
2079static int qeth_dm_act(struct qeth_card *card) 2079static int qeth_dm_act(struct qeth_card *card)
@@ -2349,16 +2349,11 @@ int qeth_init_qdio_queues(struct qeth_card *card)
2349 card->qdio.in_q->next_buf_to_init = 2349 card->qdio.in_q->next_buf_to_init =
2350 card->qdio.in_buf_pool.buf_count - 1; 2350 card->qdio.in_buf_pool.buf_count - 1;
2351 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, 2351 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2352 card->qdio.in_buf_pool.buf_count - 1, NULL); 2352 card->qdio.in_buf_pool.buf_count - 1);
2353 if (rc) { 2353 if (rc) {
2354 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 2354 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2355 return rc; 2355 return rc;
2356 } 2356 }
2357 rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
2358 if (rc) {
2359 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2360 return rc;
2361 }
2362 /* outbound queue */ 2357 /* outbound queue */
2363 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2358 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2364 memset(card->qdio.out_qs[i]->qdio_bufs, 0, 2359 memset(card->qdio.out_qs[i]->qdio_bufs, 0,
@@ -2559,9 +2554,9 @@ int qeth_query_setadapterparms(struct qeth_card *card)
2559EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); 2554EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
2560 2555
2561int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, 2556int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2562 unsigned int siga_error, const char *dbftext) 2557 const char *dbftext)
2563{ 2558{
2564 if (qdio_error || siga_error) { 2559 if (qdio_error) {
2565 QETH_DBF_TEXT(TRACE, 2, dbftext); 2560 QETH_DBF_TEXT(TRACE, 2, dbftext);
2566 QETH_DBF_TEXT(QERR, 2, dbftext); 2561 QETH_DBF_TEXT(QERR, 2, dbftext);
2567 QETH_DBF_TEXT_(QERR, 2, " F15=%02X", 2562 QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
@@ -2569,7 +2564,6 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2569 QETH_DBF_TEXT_(QERR, 2, " F14=%02X", 2564 QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
2570 buf->element[14].flags & 0xff); 2565 buf->element[14].flags & 0xff);
2571 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); 2566 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
2572 QETH_DBF_TEXT_(QERR, 2, " serr=%X", siga_error);
2573 return 1; 2567 return 1;
2574 } 2568 }
2575 return 0; 2569 return 0;
@@ -2622,9 +2616,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
2622 card->perf_stats.inbound_do_qdio_start_time = 2616 card->perf_stats.inbound_do_qdio_start_time =
2623 qeth_get_micros(); 2617 qeth_get_micros();
2624 } 2618 }
2625 rc = do_QDIO(CARD_DDEV(card), 2619 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
2626 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, 2620 queue->next_buf_to_init, count);
2627 0, queue->next_buf_to_init, count, NULL);
2628 if (card->options.performance_stats) 2621 if (card->options.performance_stats)
2629 card->perf_stats.inbound_do_qdio_time += 2622 card->perf_stats.inbound_do_qdio_time +=
2630 qeth_get_micros() - 2623 qeth_get_micros() -
@@ -2643,14 +2636,13 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
2643EXPORT_SYMBOL_GPL(qeth_queue_input_buffer); 2636EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
2644 2637
2645static int qeth_handle_send_error(struct qeth_card *card, 2638static int qeth_handle_send_error(struct qeth_card *card,
2646 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err, 2639 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
2647 unsigned int siga_err)
2648{ 2640{
2649 int sbalf15 = buffer->buffer->element[15].flags & 0xff; 2641 int sbalf15 = buffer->buffer->element[15].flags & 0xff;
2650 int cc = siga_err & 3; 2642 int cc = qdio_err & 3;
2651 2643
2652 QETH_DBF_TEXT(TRACE, 6, "hdsnderr"); 2644 QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
2653 qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr"); 2645 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
2654 switch (cc) { 2646 switch (cc) {
2655 case 0: 2647 case 0:
2656 if (qdio_err) { 2648 if (qdio_err) {
@@ -2662,7 +2654,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
2662 } 2654 }
2663 return QETH_SEND_ERROR_NONE; 2655 return QETH_SEND_ERROR_NONE;
2664 case 2: 2656 case 2:
2665 if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) { 2657 if (qdio_err & QDIO_ERROR_SIGA_BUSY) {
2666 QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B"); 2658 QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B");
2667 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); 2659 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
2668 return QETH_SEND_ERROR_KICK_IT; 2660 return QETH_SEND_ERROR_KICK_IT;
@@ -2758,8 +2750,8 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2758 return 0; 2750 return 0;
2759} 2751}
2760 2752
2761static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, 2753static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2762 int index, int count) 2754 int count)
2763{ 2755{
2764 struct qeth_qdio_out_buffer *buf; 2756 struct qeth_qdio_out_buffer *buf;
2765 int rc; 2757 int rc;
@@ -2807,12 +2799,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2807 qeth_get_micros(); 2799 qeth_get_micros();
2808 } 2800 }
2809 qdio_flags = QDIO_FLAG_SYNC_OUTPUT; 2801 qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
2810 if (under_int)
2811 qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
2812 if (atomic_read(&queue->set_pci_flags_count)) 2802 if (atomic_read(&queue->set_pci_flags_count))
2813 qdio_flags |= QDIO_FLAG_PCI_OUT; 2803 qdio_flags |= QDIO_FLAG_PCI_OUT;
2814 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, 2804 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
2815 queue->queue_no, index, count, NULL); 2805 queue->queue_no, index, count);
2816 if (queue->card->options.performance_stats) 2806 if (queue->card->options.performance_stats)
2817 queue->card->perf_stats.outbound_do_qdio_time += 2807 queue->card->perf_stats.outbound_do_qdio_time +=
2818 qeth_get_micros() - 2808 qeth_get_micros() -
@@ -2866,16 +2856,15 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2866 queue->card->perf_stats.bufs_sent_pack += 2856 queue->card->perf_stats.bufs_sent_pack +=
2867 flush_cnt; 2857 flush_cnt;
2868 if (flush_cnt) 2858 if (flush_cnt)
2869 qeth_flush_buffers(queue, 1, index, flush_cnt); 2859 qeth_flush_buffers(queue, index, flush_cnt);
2870 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 2860 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2871 } 2861 }
2872 } 2862 }
2873} 2863}
2874 2864
2875void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, 2865void qeth_qdio_output_handler(struct ccw_device *ccwdev,
2876 unsigned int qdio_error, unsigned int siga_error, 2866 unsigned int qdio_error, int __queue, int first_element,
2877 unsigned int __queue, int first_element, int count, 2867 int count, unsigned long card_ptr)
2878 unsigned long card_ptr)
2879{ 2868{
2880 struct qeth_card *card = (struct qeth_card *) card_ptr; 2869 struct qeth_card *card = (struct qeth_card *) card_ptr;
2881 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 2870 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
@@ -2883,15 +2872,12 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
2883 int i; 2872 int i;
2884 2873
2885 QETH_DBF_TEXT(TRACE, 6, "qdouhdl"); 2874 QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
2886 if (status & QDIO_STATUS_LOOK_FOR_ERROR) { 2875 if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
2887 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { 2876 QETH_DBF_TEXT(TRACE, 2, "achkcond");
2888 QETH_DBF_TEXT(TRACE, 2, "achkcond"); 2877 QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
2889 QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); 2878 netif_stop_queue(card->dev);
2890 QETH_DBF_TEXT_(TRACE, 2, "%08x", status); 2879 qeth_schedule_recovery(card);
2891 netif_stop_queue(card->dev); 2880 return;
2892 qeth_schedule_recovery(card);
2893 return;
2894 }
2895 } 2881 }
2896 if (card->options.performance_stats) { 2882 if (card->options.performance_stats) {
2897 card->perf_stats.outbound_handler_cnt++; 2883 card->perf_stats.outbound_handler_cnt++;
@@ -2901,8 +2887,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
2901 for (i = first_element; i < (first_element + count); ++i) { 2887 for (i = first_element; i < (first_element + count); ++i) {
2902 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 2888 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2903 /*we only handle the KICK_IT error by doing a recovery */ 2889 /*we only handle the KICK_IT error by doing a recovery */
2904 if (qeth_handle_send_error(card, buffer, 2890 if (qeth_handle_send_error(card, buffer, qdio_error)
2905 qdio_error, siga_error)
2906 == QETH_SEND_ERROR_KICK_IT){ 2891 == QETH_SEND_ERROR_KICK_IT){
2907 netif_stop_queue(card->dev); 2892 netif_stop_queue(card->dev);
2908 qeth_schedule_recovery(card); 2893 qeth_schedule_recovery(card);
@@ -3164,11 +3149,11 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
3164 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3149 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3165 if (ctx == NULL) { 3150 if (ctx == NULL) {
3166 qeth_fill_buffer(queue, buffer, skb); 3151 qeth_fill_buffer(queue, buffer, skb);
3167 qeth_flush_buffers(queue, 0, index, 1); 3152 qeth_flush_buffers(queue, index, 1);
3168 } else { 3153 } else {
3169 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index); 3154 flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
3170 WARN_ON(buffers_needed != flush_cnt); 3155 WARN_ON(buffers_needed != flush_cnt);
3171 qeth_flush_buffers(queue, 0, index, flush_cnt); 3156 qeth_flush_buffers(queue, index, flush_cnt);
3172 } 3157 }
3173 return 0; 3158 return 0;
3174out: 3159out:
@@ -3221,8 +3206,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3221 * again */ 3206 * again */
3222 if (atomic_read(&buffer->state) != 3207 if (atomic_read(&buffer->state) !=
3223 QETH_QDIO_BUF_EMPTY){ 3208 QETH_QDIO_BUF_EMPTY){
3224 qeth_flush_buffers(queue, 0, 3209 qeth_flush_buffers(queue, start_index,
3225 start_index, flush_count); 3210 flush_count);
3226 atomic_set(&queue->state, 3211 atomic_set(&queue->state,
3227 QETH_OUT_Q_UNLOCKED); 3212 QETH_OUT_Q_UNLOCKED);
3228 return -EBUSY; 3213 return -EBUSY;
@@ -3253,7 +3238,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3253 flush_count += tmp; 3238 flush_count += tmp;
3254out: 3239out:
3255 if (flush_count) 3240 if (flush_count)
3256 qeth_flush_buffers(queue, 0, start_index, flush_count); 3241 qeth_flush_buffers(queue, start_index, flush_count);
3257 else if (!atomic_read(&queue->set_pci_flags_count)) 3242 else if (!atomic_read(&queue->set_pci_flags_count))
3258 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); 3243 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
3259 /* 3244 /*
@@ -3274,7 +3259,7 @@ out:
3274 if (!flush_count && !atomic_read(&queue->set_pci_flags_count)) 3259 if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
3275 flush_count += qeth_flush_buffers_on_no_pci(queue); 3260 flush_count += qeth_flush_buffers_on_no_pci(queue);
3276 if (flush_count) 3261 if (flush_count)
3277 qeth_flush_buffers(queue, 0, start_index, flush_count); 3262 qeth_flush_buffers(queue, start_index, flush_count);
3278 } 3263 }
3279 /* at this point the queue is UNLOCKED again */ 3264 /* at this point the queue is UNLOCKED again */
3280 if (queue->card->options.performance_stats && do_pack) 3265 if (queue->card->options.performance_stats && do_pack)
@@ -3686,10 +3671,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
3686 init_data.q_format = qeth_get_qdio_q_format(card); 3671 init_data.q_format = qeth_get_qdio_q_format(card);
3687 init_data.qib_param_field_format = 0; 3672 init_data.qib_param_field_format = 0;
3688 init_data.qib_param_field = qib_param_field; 3673 init_data.qib_param_field = qib_param_field;
3689 init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
3690 init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
3691 init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
3692 init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
3693 init_data.no_input_qs = 1; 3674 init_data.no_input_qs = 1;
3694 init_data.no_output_qs = card->qdio.no_out_queues; 3675 init_data.no_output_qs = card->qdio.no_out_queues;
3695 init_data.input_handler = card->discipline.input_handler; 3676 init_data.input_handler = card->discipline.input_handler;
@@ -3751,8 +3732,9 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3751 3732
3752int qeth_core_hardsetup_card(struct qeth_card *card) 3733int qeth_core_hardsetup_card(struct qeth_card *card)
3753{ 3734{
3735 struct qdio_ssqd_desc *qdio_ssqd;
3754 int retries = 3; 3736 int retries = 3;
3755 int mpno; 3737 int mpno = 0;
3756 int rc; 3738 int rc;
3757 3739
3758 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 3740 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
@@ -3784,7 +3766,10 @@ retry:
3784 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3766 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3785 return rc; 3767 return rc;
3786 } 3768 }
3787 mpno = qdio_get_ssqd_pct(CARD_DDEV(card)); 3769
3770 qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card));
3771 if (qdio_ssqd)
3772 mpno = qdio_ssqd->pcnt;
3788 if (mpno) 3773 if (mpno)
3789 mpno = min(mpno - 1, QETH_MAX_PORTNO); 3774 mpno = min(mpno - 1, QETH_MAX_PORTNO);
3790 if (card->info.portno > mpno) { 3775 if (card->info.portno > mpno) {
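The last hunk replaces qdio_get_ssqd_pct() with a lookup through the subchannel's SSQD descriptor; condensed, the new pattern is:

	struct qdio_ssqd_desc *ssqd = qdio_get_ssqd_desc(cdev);	/* may be NULL */
	int ports = ssqd ? ssqd->pcnt : 0;	/* pcnt replaces the old pct return value */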
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f682f7b14480..3fbc3bdec0c5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -726,8 +726,7 @@ tx_drop:
726} 726}
727 727
728static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, 728static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
729 unsigned int status, unsigned int qdio_err, 729 unsigned int qdio_err, unsigned int queue,
730 unsigned int siga_err, unsigned int queue,
731 int first_element, int count, unsigned long card_ptr) 730 int first_element, int count, unsigned long card_ptr)
732{ 731{
733 struct net_device *net_dev; 732 struct net_device *net_dev;
@@ -742,23 +741,20 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
742 card->perf_stats.inbound_cnt++; 741 card->perf_stats.inbound_cnt++;
743 card->perf_stats.inbound_start_time = qeth_get_micros(); 742 card->perf_stats.inbound_start_time = qeth_get_micros();
744 } 743 }
745 if (status & QDIO_STATUS_LOOK_FOR_ERROR) { 744 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
746 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { 745 QETH_DBF_TEXT(TRACE, 1, "qdinchk");
747 QETH_DBF_TEXT(TRACE, 1, "qdinchk"); 746 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
748 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); 747 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
749 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, 748 count);
750 count); 749 QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
751 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); 750 qeth_schedule_recovery(card);
752 qeth_schedule_recovery(card); 751 return;
753 return;
754 }
755 } 752 }
756 for (i = first_element; i < (first_element + count); ++i) { 753 for (i = first_element; i < (first_element + count); ++i) {
757 index = i % QDIO_MAX_BUFFERS_PER_Q; 754 index = i % QDIO_MAX_BUFFERS_PER_Q;
758 buffer = &card->qdio.in_q->bufs[index]; 755 buffer = &card->qdio.in_q->bufs[index];
759 if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && 756 if (!(qdio_err &&
760 qeth_check_qdio_errors(buffer->buffer, 757 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
761 qdio_err, siga_err, "qinerr")))
762 qeth_l2_process_inbound_buffer(card, buffer, index); 758 qeth_l2_process_inbound_buffer(card, buffer, index);
763 /* clear buffer and give back to hardware */ 759 /* clear buffer and give back to hardware */
764 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 760 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 06deaee50f6d..22f64aa6dd1f 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2939,8 +2939,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
2939} 2939}
2940 2940
2941static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, 2941static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
2942 unsigned int status, unsigned int qdio_err, 2942 unsigned int qdio_err, unsigned int queue, int first_element,
2943 unsigned int siga_err, unsigned int queue, int first_element,
2944 int count, unsigned long card_ptr) 2943 int count, unsigned long card_ptr)
2945{ 2944{
2946 struct net_device *net_dev; 2945 struct net_device *net_dev;
@@ -2955,23 +2954,21 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
2955 card->perf_stats.inbound_cnt++; 2954 card->perf_stats.inbound_cnt++;
2956 card->perf_stats.inbound_start_time = qeth_get_micros(); 2955 card->perf_stats.inbound_start_time = qeth_get_micros();
2957 } 2956 }
2958 if (status & QDIO_STATUS_LOOK_FOR_ERROR) { 2957 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
2959 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { 2958 QETH_DBF_TEXT(TRACE, 1, "qdinchk");
2960 QETH_DBF_TEXT(TRACE, 1, "qdinchk"); 2959 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
2961 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); 2960 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
2962 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", 2961 first_element, count);
2963 first_element, count); 2962 QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
2964 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); 2963 qeth_schedule_recovery(card);
2965 qeth_schedule_recovery(card); 2964 return;
2966 return;
2967 }
2968 } 2965 }
2969 for (i = first_element; i < (first_element + count); ++i) { 2966 for (i = first_element; i < (first_element + count); ++i) {
2970 index = i % QDIO_MAX_BUFFERS_PER_Q; 2967 index = i % QDIO_MAX_BUFFERS_PER_Q;
2971 buffer = &card->qdio.in_q->bufs[index]; 2968 buffer = &card->qdio.in_q->bufs[index];
2972 if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && 2969 if (!(qdio_err &&
2973 qeth_check_qdio_errors(buffer->buffer, 2970 qeth_check_qdio_errors(buffer->buffer,
2974 qdio_err, siga_err, "qinerr"))) 2971 qdio_err, "qinerr")))
2975 qeth_l3_process_inbound_buffer(card, buffer, index); 2972 qeth_l3_process_inbound_buffer(card, buffer, index);
2976 /* clear buffer and give back to hardware */ 2973 /* clear buffer and give back to hardware */
2977 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 2974 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 36169c6944fd..fca48b88fc53 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -297,15 +297,13 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
297/** 297/**
298 * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure 298 * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure
299 * @adapter: adapter affected by this QDIO related event 299 * @adapter: adapter affected by this QDIO related event
300 * @status: as passed by qdio module
301 * @qdio_error: as passed by qdio module 300 * @qdio_error: as passed by qdio module
302 * @siga_error: as passed by qdio module
303 * @sbal_index: first buffer with error condition, as passed by qdio module 301 * @sbal_index: first buffer with error condition, as passed by qdio module
304 * @sbal_count: number of buffers affected, as passed by qdio module 302 * @sbal_count: number of buffers affected, as passed by qdio module
305 */ 303 */
306void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, 304void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
307 unsigned int qdio_error, unsigned int siga_error, 305 unsigned int qdio_error, int sbal_index,
308 int sbal_index, int sbal_count) 306 int sbal_count)
309{ 307{
310 struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf; 308 struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
311 unsigned long flags; 309 unsigned long flags;
@@ -313,9 +311,7 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
313 spin_lock_irqsave(&adapter->hba_dbf_lock, flags); 311 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
314 memset(r, 0, sizeof(*r)); 312 memset(r, 0, sizeof(*r));
315 strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); 313 strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE);
316 r->u.qdio.status = status;
317 r->u.qdio.qdio_error = qdio_error; 314 r->u.qdio.qdio_error = qdio_error;
318 r->u.qdio.siga_error = siga_error;
319 r->u.qdio.sbal_index = sbal_index; 315 r->u.qdio.sbal_index = sbal_index;
320 r->u.qdio.sbal_count = sbal_count; 316 r->u.qdio.sbal_count = sbal_count;
321 debug_event(adapter->hba_dbf, 0, r, sizeof(*r)); 317 debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
@@ -398,9 +394,7 @@ static void zfcp_hba_dbf_view_status(char **p,
398 394
399static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r) 395static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
400{ 396{
401 zfcp_dbf_out(p, "status", "0x%08x", r->status);
402 zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error); 397 zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
403 zfcp_dbf_out(p, "siga_error", "0x%08x", r->siga_error);
404 zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index); 398 zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
405 zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); 399 zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
406} 400}
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index d04aea604974..0ddb18449d11 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -139,9 +139,7 @@ struct zfcp_hba_dbf_record_status {
139} __attribute__ ((packed)); 139} __attribute__ ((packed));
140 140
141struct zfcp_hba_dbf_record_qdio { 141struct zfcp_hba_dbf_record_qdio {
142 u32 status;
143 u32 qdio_error; 142 u32 qdio_error;
144 u32 siga_error;
145 u8 sbal_index; 143 u8 sbal_index;
146 u8 sbal_count; 144 u8 sbal_count;
147} __attribute__ ((packed)); 145} __attribute__ ((packed));
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 8065b2b224b7..edfdb21591f3 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -48,9 +48,8 @@ extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *);
48extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); 48extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
49extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, 49extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
50 struct fsf_status_read_buffer *); 50 struct fsf_status_read_buffer *);
51extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, 51extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
52 unsigned int, unsigned int, unsigned int, 52 int);
53 int, int);
54extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); 53extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
55extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); 54extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
56extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); 55extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 72e3094796d4..d6dbd653fde9 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -74,17 +74,15 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
74 } 74 }
75} 75}
76 76
77static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int status, 77static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
78 unsigned int qdio_err, unsigned int siga_err, 78 int queue_no, int first, int count,
79 unsigned int queue_no, int first, int count,
80 unsigned long parm) 79 unsigned long parm)
81{ 80{
82 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; 81 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
83 struct zfcp_qdio_queue *queue = &adapter->req_q; 82 struct zfcp_qdio_queue *queue = &adapter->req_q;
84 83
85 if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { 84 if (unlikely(qdio_err)) {
86 zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err, 85 zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
87 first, count);
88 zfcp_qdio_handler_error(adapter, 140); 86 zfcp_qdio_handler_error(adapter, 140);
89 return; 87 return;
90 } 88 }
@@ -129,8 +127,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
129 127
130 count = atomic_read(&queue->count) + processed; 128 count = atomic_read(&queue->count) + processed;
131 129
132 retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, 130 retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
133 0, start, count, NULL);
134 131
135 if (unlikely(retval)) { 132 if (unlikely(retval)) {
136 atomic_set(&queue->count, count); 133 atomic_set(&queue->count, count);
@@ -142,9 +139,8 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
142 } 139 }
143} 140}
144 141
145static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status, 142static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
146 unsigned int qdio_err, unsigned int siga_err, 143 int queue_no, int first, int count,
147 unsigned int queue_no, int first, int count,
148 unsigned long parm) 144 unsigned long parm)
149{ 145{
150 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; 146 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
@@ -152,9 +148,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status,
152 volatile struct qdio_buffer_element *sbale; 148 volatile struct qdio_buffer_element *sbale;
153 int sbal_idx, sbale_idx, sbal_no; 149 int sbal_idx, sbale_idx, sbal_no;
154 150
155 if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { 151 if (unlikely(qdio_err)) {
156 zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err, 152 zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
157 first, count);
158 zfcp_qdio_handler_error(adapter, 147); 153 zfcp_qdio_handler_error(adapter, 147);
159 return; 154 return;
160 } 155 }
@@ -362,7 +357,7 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
362 } 357 }
363 358
364 retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first, 359 retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
365 count, NULL); 360 count);
366 if (unlikely(retval)) { 361 if (unlikely(retval)) {
367 zfcp_qdio_zero_sbals(req_q->sbal, first, count); 362 zfcp_qdio_zero_sbals(req_q->sbal, first, count);
368 return retval; 363 return retval;
@@ -400,10 +395,6 @@ int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
400 init_data->qib_param_field = NULL; 395 init_data->qib_param_field = NULL;
401 init_data->input_slib_elements = NULL; 396 init_data->input_slib_elements = NULL;
402 init_data->output_slib_elements = NULL; 397 init_data->output_slib_elements = NULL;
403 init_data->min_input_threshold = 1;
404 init_data->max_input_threshold = 5000;
405 init_data->min_output_threshold = 1;
406 init_data->max_output_threshold = 1000;
407 init_data->no_input_qs = 1; 398 init_data->no_input_qs = 1;
408 init_data->no_output_qs = 1; 399 init_data->no_output_qs = 1;
409 init_data->input_handler = zfcp_qdio_int_resp; 400 init_data->input_handler = zfcp_qdio_int_resp;
@@ -436,9 +427,7 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
436 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 427 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
437 spin_unlock(&req_q->lock); 428 spin_unlock(&req_q->lock);
438 429
439 while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR) 430 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
440 == -EINPROGRESS)
441 ssleep(1);
442 431
443 /* cleanup used outbound sbals */ 432 /* cleanup used outbound sbals */
444 count = atomic_read(&req_q->count); 433 count = atomic_read(&req_q->count);
@@ -473,7 +462,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
473 return -EIO; 462 return -EIO;
474 } 463 }
475 464
476 if (qdio_activate(adapter->ccw_device, 0)) { 465 if (qdio_activate(adapter->ccw_device)) {
477 dev_err(&adapter->ccw_device->dev, 466 dev_err(&adapter->ccw_device->dev,
478 "Activate of QDIO queues failed.\n"); 467 "Activate of QDIO queues failed.\n");
479 goto failed_qdio; 468 goto failed_qdio;
@@ -487,7 +476,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
487 } 476 }
488 477
489 if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, 478 if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
490 QDIO_MAX_BUFFERS_PER_Q, NULL)) { 479 QDIO_MAX_BUFFERS_PER_Q)) {
491 dev_err(&adapter->ccw_device->dev, 480 dev_err(&adapter->ccw_device->dev,
492 "Init of QDIO response queue failed.\n"); 481 "Init of QDIO response queue failed.\n");
493 goto failed_qdio; 482 goto failed_qdio;
@@ -501,9 +490,6 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
501 return 0; 490 return 0;
502 491
503failed_qdio: 492failed_qdio:
504 while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR) 493 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
505 == -EINPROGRESS)
506 ssleep(1);
507
508 return -EIO; 494 return -EIO;
509} 495}
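Taken together, the zfcp hunks condense the user-visible API change of this whole series into one place; side by side, with arguments exactly as they appear in this diff:

	/* before: status + siga_error words, QEBSM parameter, polling shutdown */
	do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
		0, start, count, NULL);
	while (qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
		ssleep(1);

	/* after: one qdio_error word in the handlers, five-argument do_QDIO,
	 * synchronous qdio_shutdown() */
	do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);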