Diffstat (limited to 'drivers/s390/cio/qdio_main.c')
-rw-r--r--	drivers/s390/cio/qdio_main.c	203
1 file changed, 179 insertions, 24 deletions
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 288c9140290e..a7153f2f3aff 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -14,6 +14,7 @@
 #include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
+#include <linux/io.h>
 #include <linux/kernel_stat.h>
 #include <linux/atomic.h>
 #include <asm/debug.h>
@@ -77,11 +78,13 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
  * Note: For IQDC unicast queues only the highest priority queue is processed.
  */
 static inline int do_siga_output(unsigned long schid, unsigned long mask,
-				 unsigned int *bb, unsigned int fc)
+				 unsigned int *bb, unsigned int fc,
+				 unsigned long aob)
 {
 	register unsigned long __fc asm("0") = fc;
 	register unsigned long __schid asm("1") = schid;
 	register unsigned long __mask asm("2") = mask;
+	register unsigned long __aob asm("3") = aob;
 	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
 
 	asm volatile(
@@ -90,7 +93,8 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask,
 		"	srl	%0,28\n"
 		"1:\n"
 		EX_TABLE(0b, 1b)
-		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
+		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask),
+		  "+d" (__aob)
 		: : "cc", "memory");
 	*bb = ((unsigned int) __fc) >> 31;
 	return cc;
@@ -212,7 +216,7 @@ again:
 /* returns number of examined buffers and their common state in *state */
 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 				 unsigned char *state, unsigned int count,
-				 int auto_ack)
+				 int auto_ack, int merge_pending)
 {
 	unsigned char __state = 0;
 	int i;
@@ -224,9 +228,14 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
 
 	for (i = 0; i < count; i++) {
-		if (!__state)
+		if (!__state) {
 			__state = q->slsb.val[bufnr];
-		else if (q->slsb.val[bufnr] != __state)
+			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+				__state = SLSB_P_OUTPUT_EMPTY;
+		} else if (merge_pending) {
+			if ((q->slsb.val[bufnr] & __state) != __state)
+				break;
+		} else if (q->slsb.val[bufnr] != __state)
 			break;
 		bufnr = next_buf(bufnr);
 	}
@@ -237,7 +246,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
 				unsigned char *state, int auto_ack)
 {
-	return get_buf_states(q, bufnr, state, 1, auto_ack);
+	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
 }
 
 /* wrap-around safe setting of slsb states, returns number of changed buffers */
@@ -308,19 +317,28 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
 	return qdio_siga_sync(q, q->mask, 0);
 }
 
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
+static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
+	unsigned long aob)
 {
 	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
 	unsigned int fc = QDIO_SIGA_WRITE;
 	u64 start_time = 0;
 	int retries = 0, cc;
+	unsigned long laob = 0;
+
+	if (q->u.out.use_cq && aob != 0) {
+		fc = QDIO_SIGA_WRITEQ;
+		laob = aob;
+	}
 
 	if (is_qebsm(q)) {
 		schid = q->irq_ptr->sch_token;
 		fc |= QDIO_SIGA_QEBSM_FLAG;
 	}
 again:
-	cc = do_siga_output(schid, q->mask, busy_bit, fc);
+	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
+		(aob && fc != QDIO_SIGA_WRITEQ));
+	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
 
 	/* hipersocket busy condition */
 	if (unlikely(*busy_bit)) {
@@ -379,7 +397,7 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
 {
 	if (need_siga_sync(q))
 		qdio_siga_sync_q(q);
-	return get_buf_states(q, bufnr, state, 1, 0);
+	return get_buf_states(q, bufnr, state, 1, 0, 0);
 }
 
 static inline void qdio_stop_polling(struct qdio_q *q)
@@ -507,7 +525,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	 * No siga sync here, as a PCI or we after a thin interrupt
 	 * already sync'ed the queues.
 	 */
-	count = get_buf_states(q, q->first_to_check, &state, count, 1);
+	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
 	if (!count)
 		goto out;
 
@@ -590,6 +608,107 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
 	return 0;
 }
 
+static inline int contains_aobs(struct qdio_q *q)
+{
+	return !q->is_input_q && q->u.out.use_cq;
+}
+
+static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
+				int i, struct qaob *aob)
+{
+	int tmp;
+
+	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
+			(unsigned long) virt_to_phys(aob));
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
+			(unsigned long) aob->res0[0]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
+			(unsigned long) aob->res0[1]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
+			(unsigned long) aob->res0[2]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
+			(unsigned long) aob->res0[3]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
+			(unsigned long) aob->res0[4]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
+			(unsigned long) aob->res0[5]);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
+	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
+	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
+	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
+	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
+	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
+	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
+		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
+				(unsigned long) aob->sba[tmp]);
+		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
+				(unsigned long) q->sbal[i]->element[tmp].addr);
+		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
+		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
+				q->sbal[i]->element[tmp].length);
+	}
+	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
+	for (tmp = 0; tmp < 2; ++tmp) {
+		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
+			(unsigned long) aob->res4[tmp]);
+	}
+	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
+	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
+}
+
+static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
+{
+	unsigned char state = 0;
+	int j, b = start;
+
+	if (!contains_aobs(q))
+		return;
+
+	for (j = 0; j < count; ++j) {
+		get_buf_state(q, b, &state, 0);
+		if (state == SLSB_P_OUTPUT_PENDING) {
+			struct qaob *aob = q->u.out.aobs[b];
+			if (aob == NULL)
+				continue;
+
+			BUG_ON(q->u.out.sbal_state == NULL);
+			q->u.out.sbal_state[b].flags |=
+				QDIO_OUTBUF_STATE_FLAG_PENDING;
+			q->u.out.aobs[b] = NULL;
+		} else if (state == SLSB_P_OUTPUT_EMPTY) {
+			BUG_ON(q->u.out.sbal_state == NULL);
+			q->u.out.sbal_state[b].aob = NULL;
+		}
+		b = next_buf(b);
+	}
+}
+
+static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
+					int bufnr)
+{
+	unsigned long phys_aob = 0;
+
+	if (!q->use_cq)
+		goto out;
+
+	if (!q->aobs[bufnr]) {
+		struct qaob *aob = qdio_allocate_aob();
+		q->aobs[bufnr] = aob;
+	}
+	if (q->aobs[bufnr]) {
+		BUG_ON(q->sbal_state == NULL);
+		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
+		q->sbal_state[bufnr].aob = q->aobs[bufnr];
+		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
+		phys_aob = virt_to_phys(q->aobs[bufnr]);
+		BUG_ON(phys_aob & 0xFF);
+	}
+
+out:
+	return phys_aob;
+}
+
 static void qdio_kick_handler(struct qdio_q *q)
 {
 	int start = q->first_to_kick;
@@ -610,6 +729,8 @@ static void qdio_kick_handler(struct qdio_q *q)
 			      start, count);
 	}
 
+	qdio_handle_aobs(q, start, count);
+
 	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
 		   q->irq_ptr->int_parm);
 
@@ -672,23 +793,26 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	 */
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 	stop = add_buf(q->first_to_check, count);
-
 	if (q->first_to_check == stop)
-		return q->first_to_check;
+		goto out;
 
-	count = get_buf_states(q, q->first_to_check, &state, count, 0);
+	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
 	if (!count)
-		return q->first_to_check;
+		goto out;
 
 	switch (state) {
+	case SLSB_P_OUTPUT_PENDING:
+		BUG();
 	case SLSB_P_OUTPUT_EMPTY:
 		/* the adapter got it */
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
+			"out empty:%1d %02x", q->nr, count);
 
 		atomic_sub(count, &q->nr_buf_used);
 		q->first_to_check = add_buf(q->first_to_check, count);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals(q, count);
+
 		break;
 	case SLSB_P_OUTPUT_ERROR:
 		process_buffer_error(q, count);
@@ -701,7 +825,8 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 		/* the adapter has not fetched the output yet */
 		if (q->irq_ptr->perf_stat_enabled)
 			q->q_stats.nr_sbal_nop++;
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
+			q->nr);
 		break;
 	case SLSB_P_OUTPUT_NOT_INIT:
 	case SLSB_P_OUTPUT_HALTED:
@@ -709,6 +834,8 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	default:
 		BUG();
 	}
+
+out:
 	return q->first_to_check;
 }
 
@@ -732,7 +859,7 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
 		return 0;
 }
 
-static int qdio_kick_outbound_q(struct qdio_q *q)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
 {
 	int retries = 0, cc;
 	unsigned int busy_bit;
@@ -744,7 +871,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
 retry:
 	qperf_inc(q, siga_write);
 
-	cc = qdio_siga_output(q, &busy_bit);
+	cc = qdio_siga_output(q, &busy_bit, aob);
 	switch (cc) {
 	case 0:
 		break;
@@ -921,8 +1048,9 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 			}
 			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
 						 q->irq_ptr->int_parm);
-		} else
+		} else {
 			tasklet_schedule(&q->tasklet);
+		}
 	}
 
 	if (!pci_out_supported(q))
@@ -1236,6 +1364,26 @@ out_err:
 }
 EXPORT_SYMBOL_GPL(qdio_allocate);
 
+static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
+{
+	struct qdio_q *q = irq_ptr->input_qs[0];
+	int i, use_cq = 0;
+
+	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
+		use_cq = 1;
+
+	for_each_output_queue(irq_ptr, q, i) {
+		if (use_cq) {
+			if (qdio_enable_async_operation(&q->u.out) < 0) {
+				use_cq = 0;
+				continue;
+			}
+		} else
+			qdio_disable_async_operation(&q->u.out);
+	}
+	DBF_EVENT("use_cq:%d", use_cq);
+}
+
 /**
  * qdio_establish - establish queues on a qdio subchannel
  * @init_data: initialization data
@@ -1301,6 +1449,8 @@ int qdio_establish(struct qdio_initialize *init_data)
 	qdio_setup_ssqd_info(irq_ptr);
 	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
 
+	qdio_detect_hsicq(irq_ptr);
+
 	/* qebsm is now setup if available, initialize buffer states */
 	qdio_init_buf_states(irq_ptr);
 
@@ -1480,17 +1630,21 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 		q->u.out.pci_out_enabled = 0;
 
 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
-		/* One SIGA-W per buffer required for unicast HiperSockets. */
+		unsigned long phys_aob = 0;
+
+		/* One SIGA-W per buffer required for unicast HSI */
 		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
 
-		rc = qdio_kick_outbound_q(q);
+		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+
+		rc = qdio_kick_outbound_q(q, phys_aob);
 	} else if (need_siga_sync(q)) {
 		rc = qdio_siga_sync_q(q);
 	} else {
 		/* try to fast requeue buffers */
 		get_buf_state(q, prev_buf(bufnr), &state, 0);
 		if (state != SLSB_CU_OUTPUT_PRIMED)
-			rc = qdio_kick_outbound_q(q);
+			rc = qdio_kick_outbound_q(q, 0);
 		else
 			qperf_inc(q, fast_requeue);
 	}
@@ -1518,6 +1672,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 {
 	struct qdio_irq *irq_ptr;
 
+
 	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
 		return -EINVAL;
 
@@ -1562,7 +1717,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
 
 	WARN_ON(queue_irqs_enabled(q));
 
-	if (!shared_ind(q->irq_ptr->dsci))
+	if (!shared_ind(q))
 		xchg(q->irq_ptr->dsci, 0);
 
 	qdio_stop_polling(q);
@@ -1572,7 +1727,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
 	 * We need to check again to not lose initiative after
 	 * resetting the ACK state.
 	 */
-	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
+	if (!shared_ind(q) && *q->irq_ptr->dsci)
 		goto rescan;
 	if (!qdio_inbound_q_done(q))
 		goto rescan;