author    frank.blaschka@de.ibm.com <frank.blaschka@de.ibm.com>  2011-08-07 21:33:55 -0400
committer David S. Miller <davem@davemloft.net>                  2011-08-13 04:10:16 -0400
commit    104ea556ee7f40039c9c635d0c267b1fde084a81 (patch)
tree      5b4af497551a3f2e2cb2f24030d028392aae07e0 /drivers/s390/cio
parent    3881ac441f642d56503818123446f7298442236b (diff)
qdio: support asynchronous delivery of storage blocks
This patch introduces support for asynchronous delivery of storage
blocks for HiperSockets. Upper layers may exploit this functionality
to reuse SBALs for which the delivery status is still pending.

Signed-off-by: Einar Lueck <elelueck@de.ibm.com>
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
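As an illustration of how an upper layer might consume the new state (a sketch, not code from this patch): a driver that hands qdio an output_sbal_state_array can inspect the per-SBAL flags in its output handler and must defer reuse of any buffer whose delivery is still pending. The driver structures and helpers below (my_card, out_sbal_states, my_defer_buffer_reuse, my_release_buffer) are invented; only struct qdio_outbuf_state, its flags field, and QDIO_OUTBUF_STATE_FLAG_PENDING come from this patch.

static void my_output_handler(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue, int first, int count,
			      unsigned long int_parm)
{
	struct my_card *card = (struct my_card *) int_parm;	/* hypothetical */
	int i;

	for (i = first; i < first + count; i++) {
		int bufnr = i % QDIO_MAX_BUFFERS_PER_Q;
		/* array previously handed to qdio_establish() via
		 * qdio_initialize->output_sbal_state_array */
		struct qdio_outbuf_state *state = &card->out_sbal_states[bufnr];

		if (state->flags & QDIO_OUTBUF_STATE_FLAG_PENDING)
			/* delivery still pending: do not refill this SBAL;
			 * park it until the completion is reported */
			my_defer_buffer_reuse(card, bufnr);	/* hypothetical */
		else
			my_release_buffer(card, bufnr);		/* hypothetical */
	}
}

The hardware marks such buffers SLSB_P_OUTPUT_PENDING; qdio_handle_aobs() in the patch below translates that into QDIO_OUTBUF_STATE_FLAG_PENDING before the output handler runs, and the final delivery notification arrives via the additional completion-queue input queue that qdio_detect_hsicq() configures.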
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/qdio.h          29
-rw-r--r--  drivers/s390/cio/qdio_debug.c     3
-rw-r--r--  drivers/s390/cio/qdio_main.c    203
-rw-r--r--  drivers/s390/cio/qdio_setup.c    83
-rw-r--r--  drivers/s390/cio/qdio_thinint.c  88
5 files changed, 337 insertions(+), 69 deletions(-)
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index e5c966462c5a..2b21f65a8950 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -44,6 +44,7 @@ enum qdio_irq_states {
 #define SLSB_STATE_NOT_INIT	0x0
 #define SLSB_STATE_EMPTY	0x1
 #define SLSB_STATE_PRIMED	0x2
+#define SLSB_STATE_PENDING	0x3
 #define SLSB_STATE_HALTED	0xe
 #define SLSB_STATE_ERROR	0xf
 #define SLSB_TYPE_INPUT		0x0
@@ -67,6 +68,8 @@ enum qdio_irq_states {
 	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
 #define SLSB_P_OUTPUT_EMPTY \
 	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY)   /* 0xa1 */
+#define SLSB_P_OUTPUT_PENDING \
+	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING) /* 0xa3 */
 #define SLSB_CU_OUTPUT_PRIMED \
 	(SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED)    /* 0x62 */
 #define SLSB_P_OUTPUT_HALTED \
@@ -97,6 +100,7 @@ enum qdio_irq_states {
 #define QDIO_SIGA_WRITE		0x00
 #define QDIO_SIGA_READ		0x01
 #define QDIO_SIGA_SYNC		0x02
+#define QDIO_SIGA_WRITEQ	0x04
 #define QDIO_SIGA_QEBSM_FLAG	0x80
 
 #ifdef CONFIG_64BIT
@@ -253,6 +257,12 @@ struct qdio_input_q {
 struct qdio_output_q {
 	/* PCIs are enabled for the queue */
 	int pci_out_enabled;
+	/* cq: use asynchronous output buffers */
+	int use_cq;
+	/* cq: aobs used for particular SBAL */
+	struct qaob **aobs;
+	/* cq: sbal state related to asynchronous operation */
+	struct qdio_outbuf_state *sbal_state;
 	/* timer to check for more outbound work */
 	struct timer_list timer;
 	/* used SBALs before tasklet schedule */
@@ -432,9 +442,20 @@ struct indicator_t {
 
 extern struct indicator_t *q_indicators;
 
-static inline int shared_ind(u32 *dsci)
+static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq)
 {
-	return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+	return irq->nr_input_qs > 1;
+}
+
+static inline int references_shared_dsci(struct qdio_irq *irq)
+{
+	return irq->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static inline int shared_ind(struct qdio_q *q)
+{
+	struct qdio_irq *i = q->irq_ptr;
+	return references_shared_dsci(i) || has_multiple_inq_on_dsci(i);
 }
 
 /* prototypes for thin interrupt */
@@ -449,6 +470,7 @@ void tiqdio_free_memory(void);
 int tiqdio_register_thinints(void);
 void tiqdio_unregister_thinints(void);
 
+
 /* prototypes for setup */
 void qdio_inbound_processing(unsigned long data);
 void qdio_outbound_processing(unsigned long data);
@@ -469,6 +491,9 @@ int qdio_setup_create_sysfs(struct ccw_device *cdev);
 void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
 int qdio_setup_init(void);
 void qdio_setup_exit(void);
+int qdio_enable_async_operation(struct qdio_output_q *q);
+void qdio_disable_async_operation(struct qdio_output_q *q);
+struct qaob *qdio_allocate_aob(void);
 
 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
 			unsigned char *state);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 0e615cb912d0..aaf7f935bfd3 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -76,6 +76,9 @@ static int qstat_show(struct seq_file *m, void *v)
 		case SLSB_P_OUTPUT_NOT_INIT:
 			seq_printf(m, "N");
 			break;
+		case SLSB_P_OUTPUT_PENDING:
+			seq_printf(m, "P");
+			break;
 		case SLSB_P_INPUT_PRIMED:
 		case SLSB_CU_OUTPUT_PRIMED:
 			seq_printf(m, "+");
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 288c9140290e..a7153f2f3aff 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -14,6 +14,7 @@
 #include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
+#include <linux/io.h>
 #include <linux/kernel_stat.h>
 #include <linux/atomic.h>
 #include <asm/debug.h>
@@ -77,11 +78,13 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
  * Note: For IQDC unicast queues only the highest priority queue is processed.
  */
 static inline int do_siga_output(unsigned long schid, unsigned long mask,
-				 unsigned int *bb, unsigned int fc)
+				 unsigned int *bb, unsigned int fc,
+				 unsigned long aob)
 {
 	register unsigned long __fc asm("0") = fc;
 	register unsigned long __schid asm("1") = schid;
 	register unsigned long __mask asm("2") = mask;
+	register unsigned long __aob asm("3") = aob;
 	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
 
 	asm volatile(
@@ -90,7 +93,8 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask,
90 " srl %0,28\n" 93 " srl %0,28\n"
91 "1:\n" 94 "1:\n"
92 EX_TABLE(0b, 1b) 95 EX_TABLE(0b, 1b)
93 : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask) 96 : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask),
97 "+d" (__aob)
94 : : "cc", "memory"); 98 : : "cc", "memory");
95 *bb = ((unsigned int) __fc) >> 31; 99 *bb = ((unsigned int) __fc) >> 31;
96 return cc; 100 return cc;
@@ -212,7 +216,7 @@ again:
 /* returns number of examined buffers and their common state in *state */
 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 				 unsigned char *state, unsigned int count,
-				 int auto_ack)
+				 int auto_ack, int merge_pending)
 {
 	unsigned char __state = 0;
 	int i;
@@ -224,9 +228,14 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
 
 	for (i = 0; i < count; i++) {
-		if (!__state)
+		if (!__state) {
 			__state = q->slsb.val[bufnr];
-		else if (q->slsb.val[bufnr] != __state)
+			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+				__state = SLSB_P_OUTPUT_EMPTY;
+		} else if (merge_pending) {
+			if ((q->slsb.val[bufnr] & __state) != __state)
+				break;
+		} else if (q->slsb.val[bufnr] != __state)
 			break;
 		bufnr = next_buf(bufnr);
 	}
@@ -237,7 +246,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
 				unsigned char *state, int auto_ack)
 {
-	return get_buf_states(q, bufnr, state, 1, auto_ack);
+	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
 }
 
 /* wrap-around safe setting of slsb states, returns number of changed buffers */
@@ -308,19 +317,28 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
 	return qdio_siga_sync(q, q->mask, 0);
 }
 
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
+static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
+			    unsigned long aob)
 {
 	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
 	unsigned int fc = QDIO_SIGA_WRITE;
 	u64 start_time = 0;
 	int retries = 0, cc;
+	unsigned long laob = 0;
+
+	if (q->u.out.use_cq && aob != 0) {
+		fc = QDIO_SIGA_WRITEQ;
+		laob = aob;
+	}
 
 	if (is_qebsm(q)) {
 		schid = q->irq_ptr->sch_token;
 		fc |= QDIO_SIGA_QEBSM_FLAG;
 	}
 again:
-	cc = do_siga_output(schid, q->mask, busy_bit, fc);
+	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
+		(aob && fc != QDIO_SIGA_WRITEQ));
+	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
 
 	/* hipersocket busy condition */
 	if (unlikely(*busy_bit)) {
@@ -379,7 +397,7 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
 {
 	if (need_siga_sync(q))
 		qdio_siga_sync_q(q);
-	return get_buf_states(q, bufnr, state, 1, 0);
+	return get_buf_states(q, bufnr, state, 1, 0, 0);
 }
 
 static inline void qdio_stop_polling(struct qdio_q *q)
@@ -507,7 +525,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	 * No siga sync here, as a PCI or we after a thin interrupt
 	 * already sync'ed the queues.
 	 */
-	count = get_buf_states(q, q->first_to_check, &state, count, 1);
+	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
 	if (!count)
 		goto out;
 
@@ -590,6 +608,107 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
 	return 0;
 }
 
+static inline int contains_aobs(struct qdio_q *q)
+{
+	return !q->is_input_q && q->u.out.use_cq;
+}
+
+static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
+				  int i, struct qaob *aob)
+{
+	int tmp;
+
+	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
+		      (unsigned long) virt_to_phys(aob));
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
+		      (unsigned long) aob->res0[0]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
+		      (unsigned long) aob->res0[1]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
+		      (unsigned long) aob->res0[2]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
+		      (unsigned long) aob->res0[3]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
+		      (unsigned long) aob->res0[4]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
+		      (unsigned long) aob->res0[5]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
+	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
+	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
+	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
+	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
+	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
+		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
+			      (unsigned long) aob->sba[tmp]);
+		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
+			      (unsigned long) q->sbal[i]->element[tmp].addr);
+		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
+		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
+			      q->sbal[i]->element[tmp].length);
+	}
+	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
+	for (tmp = 0; tmp < 2; ++tmp) {
+		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
+			      (unsigned long) aob->res4[tmp]);
+	}
+	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
+	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
+}
+
+static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
+{
+	unsigned char state = 0;
+	int j, b = start;
+
+	if (!contains_aobs(q))
+		return;
+
+	for (j = 0; j < count; ++j) {
+		get_buf_state(q, b, &state, 0);
+		if (state == SLSB_P_OUTPUT_PENDING) {
+			struct qaob *aob = q->u.out.aobs[b];
+			if (aob == NULL)
+				continue;
+
+			BUG_ON(q->u.out.sbal_state == NULL);
+			q->u.out.sbal_state[b].flags |=
+				QDIO_OUTBUF_STATE_FLAG_PENDING;
+			q->u.out.aobs[b] = NULL;
+		} else if (state == SLSB_P_OUTPUT_EMPTY) {
+			BUG_ON(q->u.out.sbal_state == NULL);
+			q->u.out.sbal_state[b].aob = NULL;
+		}
+		b = next_buf(b);
+	}
+}
+
+static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
+						int bufnr)
+{
+	unsigned long phys_aob = 0;
+
+	if (!q->use_cq)
+		goto out;
+
+	if (!q->aobs[bufnr]) {
+		struct qaob *aob = qdio_allocate_aob();
+		q->aobs[bufnr] = aob;
+	}
+	if (q->aobs[bufnr]) {
+		BUG_ON(q->sbal_state == NULL);
+		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
+		q->sbal_state[bufnr].aob = q->aobs[bufnr];
+		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
+		phys_aob = virt_to_phys(q->aobs[bufnr]);
+		BUG_ON(phys_aob & 0xFF);
+	}
+
+out:
+	return phys_aob;
+}
+
 static void qdio_kick_handler(struct qdio_q *q)
 {
 	int start = q->first_to_kick;
@@ -610,6 +729,8 @@ static void qdio_kick_handler(struct qdio_q *q)
 			      start, count);
 	}
 
+	qdio_handle_aobs(q, start, count);
+
 	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
 		   q->irq_ptr->int_parm);
 
@@ -672,23 +793,26 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	 */
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 	stop = add_buf(q->first_to_check, count);
-
 	if (q->first_to_check == stop)
-		return q->first_to_check;
+		goto out;
 
-	count = get_buf_states(q, q->first_to_check, &state, count, 0);
+	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
 	if (!count)
-		return q->first_to_check;
+		goto out;
 
 	switch (state) {
+	case SLSB_P_OUTPUT_PENDING:
+		BUG();
 	case SLSB_P_OUTPUT_EMPTY:
 		/* the adapter got it */
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
+			"out empty:%1d %02x", q->nr, count);
 
 		atomic_sub(count, &q->nr_buf_used);
 		q->first_to_check = add_buf(q->first_to_check, count);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals(q, count);
+
 		break;
 	case SLSB_P_OUTPUT_ERROR:
 		process_buffer_error(q, count);
@@ -701,7 +825,8 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 		/* the adapter has not fetched the output yet */
 		if (q->irq_ptr->perf_stat_enabled)
 			q->q_stats.nr_sbal_nop++;
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
+			      q->nr);
 		break;
 	case SLSB_P_OUTPUT_NOT_INIT:
 	case SLSB_P_OUTPUT_HALTED:
@@ -709,6 +834,8 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	default:
 		BUG();
 	}
+
+out:
 	return q->first_to_check;
 }
 
@@ -732,7 +859,7 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
 		return 0;
 }
 
-static int qdio_kick_outbound_q(struct qdio_q *q)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
 {
 	int retries = 0, cc;
 	unsigned int busy_bit;
@@ -744,7 +871,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
 retry:
 	qperf_inc(q, siga_write);
 
-	cc = qdio_siga_output(q, &busy_bit);
+	cc = qdio_siga_output(q, &busy_bit, aob);
 	switch (cc) {
 	case 0:
 		break;
@@ -921,8 +1048,9 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 			}
 			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
 						 q->irq_ptr->int_parm);
-		} else
+		} else {
 			tasklet_schedule(&q->tasklet);
+		}
 	}
 
 	if (!pci_out_supported(q))
@@ -1236,6 +1364,26 @@ out_err:
 }
 EXPORT_SYMBOL_GPL(qdio_allocate);
 
+static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
+{
+	struct qdio_q *q = irq_ptr->input_qs[0];
+	int i, use_cq = 0;
+
+	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
+		use_cq = 1;
+
+	for_each_output_queue(irq_ptr, q, i) {
+		if (use_cq) {
+			if (qdio_enable_async_operation(&q->u.out) < 0) {
+				use_cq = 0;
+				continue;
+			}
+		} else
+			qdio_disable_async_operation(&q->u.out);
+	}
+	DBF_EVENT("use_cq:%d", use_cq);
+}
+
 /**
  * qdio_establish - establish queues on a qdio subchannel
  * @init_data: initialization data
@@ -1301,6 +1449,8 @@ int qdio_establish(struct qdio_initialize *init_data)
 	qdio_setup_ssqd_info(irq_ptr);
 	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
 
+	qdio_detect_hsicq(irq_ptr);
+
 	/* qebsm is now setup if available, initialize buffer states */
 	qdio_init_buf_states(irq_ptr);
 
@@ -1480,17 +1630,21 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 		q->u.out.pci_out_enabled = 0;
 
 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
-		/* One SIGA-W per buffer required for unicast HiperSockets. */
+		unsigned long phys_aob = 0;
+
+		/* One SIGA-W per buffer required for unicast HSI */
 		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
 
-		rc = qdio_kick_outbound_q(q);
+		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+
+		rc = qdio_kick_outbound_q(q, phys_aob);
 	} else if (need_siga_sync(q)) {
 		rc = qdio_siga_sync_q(q);
 	} else {
 		/* try to fast requeue buffers */
 		get_buf_state(q, prev_buf(bufnr), &state, 0);
 		if (state != SLSB_CU_OUTPUT_PRIMED)
-			rc = qdio_kick_outbound_q(q);
+			rc = qdio_kick_outbound_q(q, 0);
 		else
 			qperf_inc(q, fast_requeue);
 	}
@@ -1518,6 +1672,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 {
 	struct qdio_irq *irq_ptr;
 
+
 	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
 		return -EINVAL;
 
@@ -1562,7 +1717,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
 
 	WARN_ON(queue_irqs_enabled(q));
 
-	if (!shared_ind(q->irq_ptr->dsci))
+	if (!shared_ind(q))
 		xchg(q->irq_ptr->dsci, 0);
 
 	qdio_stop_polling(q);
@@ -1572,7 +1727,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
 	 * We need to check again to not lose initiative after
 	 * resetting the ACK state.
 	 */
-	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
+	if (!shared_ind(q) && *q->irq_ptr->dsci)
 		goto rescan;
 	if (!qdio_inbound_q_done(q))
 		goto rescan;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 89107d0938c4..dd8bd670a6b8 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -19,6 +19,22 @@
19#include "qdio_debug.h" 19#include "qdio_debug.h"
20 20
21static struct kmem_cache *qdio_q_cache; 21static struct kmem_cache *qdio_q_cache;
22static struct kmem_cache *qdio_aob_cache;
23
24struct qaob *qdio_allocate_aob()
25{
26 struct qaob *aob;
27
28 aob = kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
29 return aob;
30}
31EXPORT_SYMBOL_GPL(qdio_allocate_aob);
32
33void qdio_release_aob(struct qaob *aob)
34{
35 kmem_cache_free(qdio_aob_cache, aob);
36}
37EXPORT_SYMBOL_GPL(qdio_release_aob);
22 38
23/* 39/*
24 * qebsm is only available under 64bit but the adapter sets the feature 40 * qebsm is only available under 64bit but the adapter sets the feature
@@ -154,29 +170,36 @@ static void setup_queues(struct qdio_irq *irq_ptr,
 	struct qdio_q *q;
 	void **input_sbal_array = qdio_init->input_sbal_addr_array;
 	void **output_sbal_array = qdio_init->output_sbal_addr_array;
+	struct qdio_outbuf_state *output_sbal_state_array =
+				  qdio_init->output_sbal_state_array;
 	int i;
 
 	for_each_input_queue(irq_ptr, q, i) {
-		DBF_EVENT("in-q:%1d", i);
+		DBF_EVENT("inq:%1d", i);
 		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
 
 		q->is_input_q = 1;
-		q->u.in.queue_start_poll = qdio_init->queue_start_poll;
+		q->u.in.queue_start_poll = qdio_init->queue_start_poll[i];
+
 		setup_storage_lists(q, irq_ptr, input_sbal_array, i);
 		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
 
-		if (is_thinint_irq(irq_ptr))
+		if (is_thinint_irq(irq_ptr)) {
 			tasklet_init(&q->tasklet, tiqdio_inbound_processing,
 				     (unsigned long) q);
-		else
+		} else {
 			tasklet_init(&q->tasklet, qdio_inbound_processing,
 				     (unsigned long) q);
+		}
 	}
 
 	for_each_output_queue(irq_ptr, q, i) {
 		DBF_EVENT("outq:%1d", i);
 		setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
 
+		q->u.out.sbal_state = output_sbal_state_array;
+		output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
+
 		q->is_input_q = 0;
 		q->u.out.scan_threshold = qdio_init->scan_threshold;
 		setup_storage_lists(q, irq_ptr, output_sbal_array, i);
@@ -311,6 +334,19 @@ void qdio_release_memory(struct qdio_irq *irq_ptr)
 	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
 		q = irq_ptr->output_qs[i];
 		if (q) {
+			if (q->u.out.use_cq) {
+				int n;
+
+				for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
+					struct qaob *aob = q->u.out.aobs[n];
+					if (aob) {
+						qdio_release_aob(aob);
+						q->u.out.aobs[n] = NULL;
+					}
+				}
+
+				qdio_disable_async_operation(&q->u.out);
+			}
 			free_page((unsigned long) q->slib);
 			kmem_cache_free(qdio_q_cache, q);
 		}
@@ -465,23 +501,60 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
 	printk(KERN_INFO "%s", s);
 }
 
+int qdio_enable_async_operation(struct qdio_output_q *outq)
+{
+	outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
+			     GFP_ATOMIC);
+	if (!outq->aobs) {
+		outq->use_cq = 0;
+		return -ENOMEM;
+	}
+	outq->use_cq = 1;
+	return 0;
+}
+
+void qdio_disable_async_operation(struct qdio_output_q *q)
+{
+	kfree(q->aobs);
+	q->aobs = NULL;
+	q->use_cq = 0;
+}
+
 int __init qdio_setup_init(void)
 {
+	int rc;
+
 	qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
 					 256, 0, NULL);
 	if (!qdio_q_cache)
 		return -ENOMEM;
 
+	qdio_aob_cache = kmem_cache_create("qdio_aob",
+					sizeof(struct qaob),
+					sizeof(struct qaob),
+					0,
+					NULL);
+	if (!qdio_aob_cache) {
+		rc = -ENOMEM;
+		goto free_qdio_q_cache;
+	}
+
 	/* Check for OSA/FCP thin interrupts (bit 67). */
 	DBF_EVENT("thinint:%1d",
 		  (css_general_characteristics.aif_osa) ? 1 : 0);
 
 	/* Check for QEBSM support in general (bit 58). */
 	DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
-	return 0;
+	rc = 0;
+out:
+	return rc;
+free_qdio_q_cache:
+	kmem_cache_destroy(qdio_q_cache);
+	goto out;
 }
 
 void qdio_setup_exit(void)
 {
+	kmem_cache_destroy(qdio_aob_cache);
 	kmem_cache_destroy(qdio_q_cache);
 }
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 2a1d4dfaf859..a3e3949d7b69 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -67,12 +67,9 @@ static void put_indicator(u32 *addr)
 
 void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
 {
-	struct qdio_q *q;
-	int i;
-
 	mutex_lock(&tiq_list_lock);
-	for_each_input_queue(irq_ptr, q, i)
-		list_add_rcu(&q->entry, &tiq_list);
+	BUG_ON(irq_ptr->nr_input_qs < 1);
+	list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
 	mutex_unlock(&tiq_list_lock);
 	xchg(irq_ptr->dsci, 1 << 7);
 }
@@ -80,19 +77,17 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
 void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
 {
 	struct qdio_q *q;
-	int i;
 
-	for (i = 0; i < irq_ptr->nr_input_qs; i++) {
-		q = irq_ptr->input_qs[i];
-		/* if establish triggered an error */
-		if (!q || !q->entry.prev || !q->entry.next)
-			continue;
+	BUG_ON(irq_ptr->nr_input_qs < 1);
+	q = irq_ptr->input_qs[0];
+	/* if establish triggered an error */
+	if (!q || !q->entry.prev || !q->entry.next)
+		return;
 
-		mutex_lock(&tiq_list_lock);
-		list_del_rcu(&q->entry);
-		mutex_unlock(&tiq_list_lock);
-		synchronize_rcu();
-	}
+	mutex_lock(&tiq_list_lock);
+	list_del_rcu(&q->entry);
+	mutex_unlock(&tiq_list_lock);
+	synchronize_rcu();
 }
 
 static inline u32 clear_shared_ind(void)
@@ -102,6 +97,40 @@ static inline u32 clear_shared_ind(void)
 	return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
 }
 
+static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
+{
+	struct qdio_q *q;
+	int i;
+
+	for_each_input_queue(irq, q, i) {
+		if (!references_shared_dsci(irq) &&
+		    has_multiple_inq_on_dsci(irq))
+			xchg(q->irq_ptr->dsci, 0);
+
+		if (q->u.in.queue_start_poll) {
+			/* skip if polling is enabled or already in work */
+			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+					     &q->u.in.queue_irq_state)) {
+				qperf_inc(q, int_discarded);
+				continue;
+			}
+
+			/* avoid dsci clear here, done after processing */
+			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+						 q->irq_ptr->int_parm);
+		} else {
+			if (!shared_ind(q))
+				xchg(q->irq_ptr->dsci, 0);
+
+			/*
+			 * Call inbound processing but not directly
+			 * since that could starve other thinint queues.
+			 */
+			tasklet_schedule(&q->tasklet);
+		}
+	}
+}
+
 /**
  * tiqdio_thinint_handler - thin interrupt handler for qdio
  * @alsi: pointer to adapter local summary indicator
@@ -120,35 +149,18 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
 
 	/* check for work on all inbound thinint queues */
 	list_for_each_entry_rcu(q, &tiq_list, entry) {
+		struct qdio_irq *irq;
 
 		/* only process queues from changed sets */
-		if (unlikely(shared_ind(q->irq_ptr->dsci))) {
+		irq = q->irq_ptr;
+		if (unlikely(references_shared_dsci(irq))) {
 			if (!si_used)
 				continue;
-		} else if (!*q->irq_ptr->dsci)
+		} else if (!*irq->dsci)
 			continue;
 
-		if (q->u.in.queue_start_poll) {
-			/* skip if polling is enabled or already in work */
-			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-				     &q->u.in.queue_irq_state)) {
-				qperf_inc(q, int_discarded);
-				continue;
-			}
+		tiqdio_call_inq_handlers(irq);
 
-			/* avoid dsci clear here, done after processing */
-			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
-						 q->irq_ptr->int_parm);
-		} else {
-			/* only clear it if the indicator is non-shared */
-			if (!shared_ind(q->irq_ptr->dsci))
-				xchg(q->irq_ptr->dsci, 0);
-			/*
-			 * Call inbound processing but not directly
-			 * since that could starve other thinint queues.
-			 */
-			tasklet_schedule(&q->tasklet);
-		}
 		qperf_inc(q, adapter_int);
 	}
 	rcu_read_unlock();