author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-25 07:25:22 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-25 07:25:22 -0400
commit     8a9ea3237e7eb5c25f09e429ad242ae5a3d5ea22 (patch)
tree       a0a63398a9983667d52cbbbf4e2405b4f22b1d83 /drivers/s390
parent     1be025d3cb40cd295123af2c394f7229ef9b30ca (diff)
parent     8b3408f8ee994973869d8ba32c5bf482bc4ddca4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1745 commits)
  dp83640: free packet queues on remove
  dp83640: use proper function to free transmit time stamping packets
  ipv6: Do not use routes from locally generated RAs
  |PATCH net-next] tg3: add tx_dropped counter
  be2net: don't create multiple RX/TX rings in multi channel mode
  be2net: don't create multiple TXQs in BE2
  be2net: refactor VF setup/teardown code into be_vf_setup/clear()
  be2net: add vlan/rx-mode/flow-control config to be_setup()
  net_sched: cls_flow: use skb_header_pointer()
  ipv4: avoid useless call of the function check_peer_pmtu
  TCP: remove TCP_DEBUG
  net: Fix driver name for mdio-gpio.c
  ipv4: tcp: fix TOS value in ACK messages sent from TIME_WAIT
  rtnetlink: Add missing manual netlink notification in dev_change_net_namespaces
  ipv4: fix ipsec forward performance regression
  jme: fix irq storm after suspend/resume
  route: fix ICMP redirect validation
  net: hold sock reference while processing tx timestamps
  tcp: md5: add more const attributes
  Add ethtool -g support to virtio_net
  ...

Fix up conflicts in:
 - drivers/net/Kconfig: The split-up generated a trivial conflict with
   removal of a stale reference to Documentation/networking/net-modules.txt.
   Remove it from the new location instead.
 - fs/sysfs/dir.c: Fairly nasty conflicts with the sysfs rb-tree usage,
   conflicting with Eric Biederman's changes for tagged directories.
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/cio/qdio.h            |  38
-rw-r--r--  drivers/s390/cio/qdio_debug.c      |   3
-rw-r--r--  drivers/s390/cio/qdio_main.c       | 208
-rw-r--r--  drivers/s390/cio/qdio_setup.c      |  83
-rw-r--r--  drivers/s390/cio/qdio_thinint.c    |  88
-rw-r--r--  drivers/s390/net/lcs.c             |   2
-rw-r--r--  drivers/s390/net/qeth_core.h       |  50
-rw-r--r--  drivers/s390/net/qeth_core_main.c  | 780
-rw-r--r--  drivers/s390/net/qeth_l2_main.c    |   4
-rw-r--r--  drivers/s390/net/qeth_l3.h         |   4
-rw-r--r--  drivers/s390/net/qeth_l3_main.c    |  92
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c     | 110
12 files changed, 1251 insertions, 211 deletions
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index e5c966462c5a..3dd86441da3d 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -44,6 +44,7 @@ enum qdio_irq_states {
 #define SLSB_STATE_NOT_INIT    0x0
 #define SLSB_STATE_EMPTY       0x1
 #define SLSB_STATE_PRIMED      0x2
+#define SLSB_STATE_PENDING     0x3
 #define SLSB_STATE_HALTED      0xe
 #define SLSB_STATE_ERROR       0xf
 #define SLSB_TYPE_INPUT        0x0
@@ -67,6 +68,8 @@ enum qdio_irq_states {
         (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
 #define SLSB_P_OUTPUT_EMPTY \
         (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY)    /* 0xa1 */
+#define SLSB_P_OUTPUT_PENDING \
+        (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING)  /* 0xa3 */
 #define SLSB_CU_OUTPUT_PRIMED \
         (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED)     /* 0x62 */
 #define SLSB_P_OUTPUT_HALTED \
@@ -84,19 +87,11 @@ enum qdio_irq_states {
 #define CHSC_FLAG_QDIO_CAPABILITY      0x80
 #define CHSC_FLAG_VALIDITY             0x40

-/* qdio adapter-characteristics-1 flag */
-#define AC1_SIGA_INPUT_NEEDED          0x40 /* process input queues */
-#define AC1_SIGA_OUTPUT_NEEDED         0x20 /* process output queues */
-#define AC1_SIGA_SYNC_NEEDED           0x10 /* ask hypervisor to sync */
-#define AC1_AUTOMATIC_SYNC_ON_THININT  0x08 /* set by hypervisor */
-#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI  0x04 /* set by hypervisor */
-#define AC1_SC_QEBSM_AVAILABLE         0x02 /* available for subchannel */
-#define AC1_SC_QEBSM_ENABLED           0x01 /* enabled for subchannel */
-
 /* SIGA flags */
 #define QDIO_SIGA_WRITE                0x00
 #define QDIO_SIGA_READ                 0x01
 #define QDIO_SIGA_SYNC                 0x02
+#define QDIO_SIGA_WRITEQ               0x04
 #define QDIO_SIGA_QEBSM_FLAG           0x80

 #ifdef CONFIG_64BIT
@@ -253,6 +248,12 @@ struct qdio_input_q {
 struct qdio_output_q {
         /* PCIs are enabled for the queue */
         int pci_out_enabled;
+        /* cq: use asynchronous output buffers */
+        int use_cq;
+        /* cq: aobs used for particual SBAL */
+        struct qaob **aobs;
+        /* cq: sbal state related to asynchronous operation */
+        struct qdio_outbuf_state *sbal_state;
         /* timer to check for more outbound work */
         struct timer_list timer;
         /* used SBALs before tasklet schedule */
@@ -432,9 +433,20 @@ struct indicator_t {

 extern struct indicator_t *q_indicators;

-static inline int shared_ind(u32 *dsci)
+static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq)
+{
+        return irq->nr_input_qs > 1;
+}
+
+static inline int references_shared_dsci(struct qdio_irq *irq)
 {
-        return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+        return irq->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static inline int shared_ind(struct qdio_q *q)
+{
+        struct qdio_irq *i = q->irq_ptr;
+        return references_shared_dsci(i) || has_multiple_inq_on_dsci(i);
 }

 /* prototypes for thin interrupt */
@@ -449,6 +461,7 @@ void tiqdio_free_memory(void);
 int tiqdio_register_thinints(void);
 void tiqdio_unregister_thinints(void);

+
 /* prototypes for setup */
 void qdio_inbound_processing(unsigned long data);
 void qdio_outbound_processing(unsigned long data);
@@ -469,6 +482,9 @@ int qdio_setup_create_sysfs(struct ccw_device *cdev);
 void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
 int qdio_setup_init(void);
 void qdio_setup_exit(void);
+int qdio_enable_async_operation(struct qdio_output_q *q);
+void qdio_disable_async_operation(struct qdio_output_q *q);
+struct qaob *qdio_allocate_aob(void);

 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
                         unsigned char *state);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 0e615cb912d0..aaf7f935bfd3 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -76,6 +76,9 @@ static int qstat_show(struct seq_file *m, void *v)
         case SLSB_P_OUTPUT_NOT_INIT:
                 seq_printf(m, "N");
                 break;
+        case SLSB_P_OUTPUT_PENDING:
+                seq_printf(m, "P");
+                break;
         case SLSB_P_INPUT_PRIMED:
         case SLSB_CU_OUTPUT_PRIMED:
                 seq_printf(m, "+");
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 288c9140290e..9a122280246c 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -14,6 +14,7 @@
 #include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
+#include <linux/io.h>
 #include <linux/kernel_stat.h>
 #include <linux/atomic.h>
 #include <asm/debug.h>
@@ -77,11 +78,13 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
  * Note: For IQDC unicast queues only the highest priority queue is processed.
  */
 static inline int do_siga_output(unsigned long schid, unsigned long mask,
-                                 unsigned int *bb, unsigned int fc)
+                                 unsigned int *bb, unsigned int fc,
+                                 unsigned long aob)
 {
         register unsigned long __fc asm("0") = fc;
         register unsigned long __schid asm("1") = schid;
         register unsigned long __mask asm("2") = mask;
+        register unsigned long __aob asm("3") = aob;
         int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

         asm volatile(
@@ -90,7 +93,8 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask,
                 "        srl        %0,28\n"
                 "1:\n"
                 EX_TABLE(0b, 1b)
-                : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
+                : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask),
+                  "+d" (__aob)
                 : : "cc", "memory");
         *bb = ((unsigned int) __fc) >> 31;
         return cc;
@@ -212,7 +216,7 @@ again:
 /* returns number of examined buffers and their common state in *state */
 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
                                  unsigned char *state, unsigned int count,
-                                 int auto_ack)
+                                 int auto_ack, int merge_pending)
 {
         unsigned char __state = 0;
         int i;
@@ -224,9 +228,14 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
                 return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

         for (i = 0; i < count; i++) {
-                if (!__state)
+                if (!__state) {
                         __state = q->slsb.val[bufnr];
-                else if (q->slsb.val[bufnr] != __state)
+                        if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+                                __state = SLSB_P_OUTPUT_EMPTY;
+                } else if (merge_pending) {
+                        if ((q->slsb.val[bufnr] & __state) != __state)
+                                break;
+                } else if (q->slsb.val[bufnr] != __state)
                         break;
                 bufnr = next_buf(bufnr);
         }
@@ -237,7 +246,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
                                 unsigned char *state, int auto_ack)
 {
-        return get_buf_states(q, bufnr, state, 1, auto_ack);
+        return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
 }

 /* wrap-around safe setting of slsb states, returns number of changed buffers */
@@ -308,19 +317,28 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
         return qdio_siga_sync(q, q->mask, 0);
 }

-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
+static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
+                            unsigned long aob)
 {
         unsigned long schid = *((u32 *) &q->irq_ptr->schid);
         unsigned int fc = QDIO_SIGA_WRITE;
         u64 start_time = 0;
         int retries = 0, cc;
+        unsigned long laob = 0;
+
+        if (q->u.out.use_cq && aob != 0) {
+                fc = QDIO_SIGA_WRITEQ;
+                laob = aob;
+        }

         if (is_qebsm(q)) {
                 schid = q->irq_ptr->sch_token;
                 fc |= QDIO_SIGA_QEBSM_FLAG;
         }
 again:
-        cc = do_siga_output(schid, q->mask, busy_bit, fc);
+        WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
+                    (aob && fc != QDIO_SIGA_WRITEQ));
+        cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

         /* hipersocket busy condition */
         if (unlikely(*busy_bit)) {
@@ -379,7 +397,7 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
 {
         if (need_siga_sync(q))
                 qdio_siga_sync_q(q);
-        return get_buf_states(q, bufnr, state, 1, 0);
+        return get_buf_states(q, bufnr, state, 1, 0, 0);
 }

 static inline void qdio_stop_polling(struct qdio_q *q)
@@ -507,7 +525,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
          * No siga sync here, as a PCI or we after a thin interrupt
          * already sync'ed the queues.
          */
-        count = get_buf_states(q, q->first_to_check, &state, count, 1);
+        count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
         if (!count)
                 goto out;

@@ -590,6 +608,107 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
         return 0;
 }

+static inline int contains_aobs(struct qdio_q *q)
+{
+        return !q->is_input_q && q->u.out.use_cq;
+}
+
+static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
+                                  int i, struct qaob *aob)
+{
+        int tmp;
+
+        DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
+                        (unsigned long) virt_to_phys(aob));
+        DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
+                        (unsigned long) aob->res0[0]);
+        DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
+                        (unsigned long) aob->res0[1]);
+        DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
+                        (unsigned long) aob->res0[2]);
+        DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
+                        (unsigned long) aob->res0[3]);
+        DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
+                        (unsigned long) aob->res0[4]);
+        DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
+                        (unsigned long) aob->res0[5]);
+        DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
+        DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
+        DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
+        DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
+        DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
+        DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
+        DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
+        for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
+                DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
+                                (unsigned long) aob->sba[tmp]);
+                DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
+                                (unsigned long) q->sbal[i]->element[tmp].addr);
+                DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
+                DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
+                                q->sbal[i]->element[tmp].length);
+        }
+        DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
+        for (tmp = 0; tmp < 2; ++tmp) {
+                DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
+                        (unsigned long) aob->res4[tmp]);
+        }
+        DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
+        DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
+}
+
+static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
+{
+        unsigned char state = 0;
+        int j, b = start;
+
+        if (!contains_aobs(q))
+                return;
+
+        for (j = 0; j < count; ++j) {
+                get_buf_state(q, b, &state, 0);
+                if (state == SLSB_P_OUTPUT_PENDING) {
+                        struct qaob *aob = q->u.out.aobs[b];
+                        if (aob == NULL)
+                                continue;
+
+                        BUG_ON(q->u.out.sbal_state == NULL);
+                        q->u.out.sbal_state[b].flags |=
+                                QDIO_OUTBUF_STATE_FLAG_PENDING;
+                        q->u.out.aobs[b] = NULL;
+                } else if (state == SLSB_P_OUTPUT_EMPTY) {
+                        BUG_ON(q->u.out.sbal_state == NULL);
+                        q->u.out.sbal_state[b].aob = NULL;
+                }
+                b = next_buf(b);
+        }
+}
+
+static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
+                                        int bufnr)
+{
+        unsigned long phys_aob = 0;
+
+        if (!q->use_cq)
+                goto out;
+
+        if (!q->aobs[bufnr]) {
+                struct qaob *aob = qdio_allocate_aob();
+                q->aobs[bufnr] = aob;
+        }
+        if (q->aobs[bufnr]) {
+                BUG_ON(q->sbal_state == NULL);
+                q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
+                q->sbal_state[bufnr].aob = q->aobs[bufnr];
+                q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
+                phys_aob = virt_to_phys(q->aobs[bufnr]);
+                BUG_ON(phys_aob & 0xFF);
+        }
+
+out:
+        return phys_aob;
+}
+
 static void qdio_kick_handler(struct qdio_q *q)
 {
         int start = q->first_to_kick;
@@ -610,6 +729,8 @@ static void qdio_kick_handler(struct qdio_q *q)
                            start, count);
         }

+        qdio_handle_aobs(q, start, count);
+
         q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
                    q->irq_ptr->int_parm);

@@ -672,23 +793,26 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
          */
         count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
         stop = add_buf(q->first_to_check, count);
-
         if (q->first_to_check == stop)
-                return q->first_to_check;
+                goto out;

-        count = get_buf_states(q, q->first_to_check, &state, count, 0);
+        count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
         if (!count)
-                return q->first_to_check;
+                goto out;

         switch (state) {
+        case SLSB_P_OUTPUT_PENDING:
+                BUG();
         case SLSB_P_OUTPUT_EMPTY:
                 /* the adapter got it */
-                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
+                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
+                        "out empty:%1d %02x", q->nr, count);

                 atomic_sub(count, &q->nr_buf_used);
                 q->first_to_check = add_buf(q->first_to_check, count);
                 if (q->irq_ptr->perf_stat_enabled)
                         account_sbals(q, count);
+
                 break;
         case SLSB_P_OUTPUT_ERROR:
                 process_buffer_error(q, count);
@@ -701,7 +825,8 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
                 /* the adapter has not fetched the output yet */
                 if (q->irq_ptr->perf_stat_enabled)
                         q->q_stats.nr_sbal_nop++;
-                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
+                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
+                        q->nr);
                 break;
         case SLSB_P_OUTPUT_NOT_INIT:
         case SLSB_P_OUTPUT_HALTED:
@@ -709,6 +834,8 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
         default:
                 BUG();
         }
+
+out:
         return q->first_to_check;
 }

@@ -732,7 +859,7 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
         return 0;
 }

-static int qdio_kick_outbound_q(struct qdio_q *q)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
 {
         int retries = 0, cc;
         unsigned int busy_bit;
@@ -744,7 +871,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
 retry:
         qperf_inc(q, siga_write);

-        cc = qdio_siga_output(q, &busy_bit);
+        cc = qdio_siga_output(q, &busy_bit, aob);
         switch (cc) {
         case 0:
                 break;
@@ -921,8 +1048,9 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
                         }
                         q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
                                                  q->irq_ptr->int_parm);
-                } else
+                } else {
                         tasklet_schedule(&q->tasklet);
+                }
         }

         if (!pci_out_supported(q))
@@ -1236,6 +1364,26 @@ out_err:
 }
 EXPORT_SYMBOL_GPL(qdio_allocate);

+static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
+{
+        struct qdio_q *q = irq_ptr->input_qs[0];
+        int i, use_cq = 0;
+
+        if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
+                use_cq = 1;
+
+        for_each_output_queue(irq_ptr, q, i) {
+                if (use_cq) {
+                        if (qdio_enable_async_operation(&q->u.out) < 0) {
+                                use_cq = 0;
+                                continue;
+                        }
+                } else
+                        qdio_disable_async_operation(&q->u.out);
+        }
+        DBF_EVENT("use_cq:%d", use_cq);
+}
+
 /**
  * qdio_establish - establish queues on a qdio subchannel
  * @init_data: initialization data
@@ -1301,6 +1449,8 @@ int qdio_establish(struct qdio_initialize *init_data)
         qdio_setup_ssqd_info(irq_ptr);
         DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

+        qdio_detect_hsicq(irq_ptr);
+
         /* qebsm is now setup if available, initialize buffer states */
         qdio_init_buf_states(irq_ptr);

@@ -1442,12 +1592,9 @@ set:
         used = atomic_add_return(count, &q->nr_buf_used) - count;
         BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

-        /* no need to signal as long as the adapter had free buffers */
-        if (used)
-                return 0;
-
         if (need_siga_in(q))
                 return qdio_siga_input(q);
+
         return 0;
 }

@@ -1480,17 +1627,21 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
         q->u.out.pci_out_enabled = 0;

         if (queue_type(q) == QDIO_IQDIO_QFMT) {
-                /* One SIGA-W per buffer required for unicast HiperSockets. */
+                unsigned long phys_aob = 0;
+
+                /* One SIGA-W per buffer required for unicast HSI */
                 WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

-                rc = qdio_kick_outbound_q(q);
+                phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+
+                rc = qdio_kick_outbound_q(q, phys_aob);
         } else if (need_siga_sync(q)) {
                 rc = qdio_siga_sync_q(q);
         } else {
                 /* try to fast requeue buffers */
                 get_buf_state(q, prev_buf(bufnr), &state, 0);
                 if (state != SLSB_CU_OUTPUT_PRIMED)
-                        rc = qdio_kick_outbound_q(q);
+                        rc = qdio_kick_outbound_q(q, 0);
                 else
                         qperf_inc(q, fast_requeue);
         }
@@ -1518,6 +1669,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 {
         struct qdio_irq *irq_ptr;

+
         if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
                 return -EINVAL;

@@ -1562,7 +1714,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)

         WARN_ON(queue_irqs_enabled(q));

-        if (!shared_ind(q->irq_ptr->dsci))
+        if (!shared_ind(q))
                 xchg(q->irq_ptr->dsci, 0);

         qdio_stop_polling(q);
@@ -1572,7 +1724,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
          * We need to check again to not lose initiative after
          * resetting the ACK state.
          */
-        if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
+        if (!shared_ind(q) && *q->irq_ptr->dsci)
                 goto rescan;
         if (!qdio_inbound_q_done(q))
                 goto rescan;
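
The qdio_main.c changes above thread an optional AOB (asynchronous operation block) address from do_QDIO() down to the SIGA-W instruction. A minimal sketch of the resulting call path, for illustration only -- both helpers are static to qdio_main.c, so no external caller could actually invoke them, and error handling is omitted:

    /* illustrative sketch only -- not part of the patch */
    static int example_outbound_kick(struct qdio_q *q, int bufnr)
    {
            /* returns 0 unless qdio_detect_hsicq() enabled use_cq for this queue */
            unsigned long phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

            /* a non-zero aob makes qdio_siga_output() switch to QDIO_SIGA_WRITEQ */
            return qdio_kick_outbound_q(q, phys_aob);
    }
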
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 89107d0938c4..dd8bd670a6b8 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -19,6 +19,22 @@
 #include "qdio_debug.h"

 static struct kmem_cache *qdio_q_cache;
+static struct kmem_cache *qdio_aob_cache;
+
+struct qaob *qdio_allocate_aob()
+{
+        struct qaob *aob;
+
+        aob = kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
+        return aob;
+}
+EXPORT_SYMBOL_GPL(qdio_allocate_aob);
+
+void qdio_release_aob(struct qaob *aob)
+{
+        kmem_cache_free(qdio_aob_cache, aob);
+}
+EXPORT_SYMBOL_GPL(qdio_release_aob);

 /*
  * qebsm is only available under 64bit but the adapter sets the feature
@@ -154,29 +170,36 @@ static void setup_queues(struct qdio_irq *irq_ptr,
         struct qdio_q *q;
         void **input_sbal_array = qdio_init->input_sbal_addr_array;
         void **output_sbal_array = qdio_init->output_sbal_addr_array;
+        struct qdio_outbuf_state *output_sbal_state_array =
+                                qdio_init->output_sbal_state_array;
         int i;

         for_each_input_queue(irq_ptr, q, i) {
-                DBF_EVENT("in-q:%1d", i);
+                DBF_EVENT("inq:%1d", i);
                 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

                 q->is_input_q = 1;
-                q->u.in.queue_start_poll = qdio_init->queue_start_poll;
+                q->u.in.queue_start_poll = qdio_init->queue_start_poll[i];
+
                 setup_storage_lists(q, irq_ptr, input_sbal_array, i);
                 input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

-                if (is_thinint_irq(irq_ptr))
+                if (is_thinint_irq(irq_ptr)) {
                         tasklet_init(&q->tasklet, tiqdio_inbound_processing,
                                      (unsigned long) q);
-                else
+                } else {
                         tasklet_init(&q->tasklet, qdio_inbound_processing,
                                      (unsigned long) q);
+                }
         }

         for_each_output_queue(irq_ptr, q, i) {
                 DBF_EVENT("outq:%1d", i);
                 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);

+                q->u.out.sbal_state = output_sbal_state_array;
+                output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
+
                 q->is_input_q = 0;
                 q->u.out.scan_threshold = qdio_init->scan_threshold;
                 setup_storage_lists(q, irq_ptr, output_sbal_array, i);
@@ -311,6 +334,19 @@ void qdio_release_memory(struct qdio_irq *irq_ptr)
         for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
                 q = irq_ptr->output_qs[i];
                 if (q) {
+                        if (q->u.out.use_cq) {
+                                int n;
+
+                                for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
+                                        struct qaob *aob = q->u.out.aobs[n];
+                                        if (aob) {
+                                                qdio_release_aob(aob);
+                                                q->u.out.aobs[n] = NULL;
+                                        }
+                                }
+
+                                qdio_disable_async_operation(&q->u.out);
+                        }
                         free_page((unsigned long) q->slib);
                         kmem_cache_free(qdio_q_cache, q);
                 }
@@ -465,23 +501,60 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
         printk(KERN_INFO "%s", s);
 }

+int qdio_enable_async_operation(struct qdio_output_q *outq)
+{
+        outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
+                             GFP_ATOMIC);
+        if (!outq->aobs) {
+                outq->use_cq = 0;
+                return -ENOMEM;
+        }
+        outq->use_cq = 1;
+        return 0;
+}
+
+void qdio_disable_async_operation(struct qdio_output_q *q)
+{
+        kfree(q->aobs);
+        q->aobs = NULL;
+        q->use_cq = 0;
+}
+
 int __init qdio_setup_init(void)
 {
+        int rc;
+
         qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
                                          256, 0, NULL);
         if (!qdio_q_cache)
                 return -ENOMEM;

+        qdio_aob_cache = kmem_cache_create("qdio_aob",
+                                        sizeof(struct qaob),
+                                        sizeof(struct qaob),
+                                        0,
+                                        NULL);
+        if (!qdio_aob_cache) {
+                rc = -ENOMEM;
+                goto free_qdio_q_cache;
+        }
+
         /* Check for OSA/FCP thin interrupts (bit 67). */
         DBF_EVENT("thinint:%1d",
                   (css_general_characteristics.aif_osa) ? 1 : 0);

         /* Check for QEBSM support in general (bit 58). */
         DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
-        return 0;
+        rc = 0;
+out:
+        return rc;
+free_qdio_q_cache:
+        kmem_cache_destroy(qdio_q_cache);
+        goto out;
 }

 void qdio_setup_exit(void)
 {
+        kmem_cache_destroy(qdio_aob_cache);
         kmem_cache_destroy(qdio_q_cache);
 }
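
qdio_setup.c now owns the AOB life cycle: a dedicated slab, a per-queue pointer array sized QDIO_MAX_BUFFERS_PER_Q, and teardown in qdio_release_memory(). A minimal usage sketch, assuming a hypothetical caller-defined cookie (my_cookie is invented for illustration; only the two AOB helpers come from this patch):

    /* illustrative sketch only -- not part of the patch */
    struct qaob *aob = qdio_allocate_aob();  /* kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC) */
    if (aob) {
            aob->user1 = (u64) my_cookie;    /* my_cookie: hypothetical tag recovered on completion */
            /* SIGA-W(Q) is issued; if the adapter reports the buffer as
             * SLSB_P_OUTPUT_PENDING, the AOB comes back via the completion
             * queue, after which it may be recycled: */
            qdio_release_aob(aob);           /* frees it back into qdio_aob_cache */
    }
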
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 2a1d4dfaf859..a3e3949d7b69 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -67,12 +67,9 @@ static void put_indicator(u32 *addr)

 void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
 {
-        struct qdio_q *q;
-        int i;
-
         mutex_lock(&tiq_list_lock);
-        for_each_input_queue(irq_ptr, q, i)
-                list_add_rcu(&q->entry, &tiq_list);
+        BUG_ON(irq_ptr->nr_input_qs < 1);
+        list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
         mutex_unlock(&tiq_list_lock);
         xchg(irq_ptr->dsci, 1 << 7);
 }
@@ -80,19 +77,17 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
 void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
 {
         struct qdio_q *q;
-        int i;

-        for (i = 0; i < irq_ptr->nr_input_qs; i++) {
-                q = irq_ptr->input_qs[i];
-                /* if establish triggered an error */
-                if (!q || !q->entry.prev || !q->entry.next)
-                        continue;
+        BUG_ON(irq_ptr->nr_input_qs < 1);
+        q = irq_ptr->input_qs[0];
+        /* if establish triggered an error */
+        if (!q || !q->entry.prev || !q->entry.next)
+                return;

-                mutex_lock(&tiq_list_lock);
-                list_del_rcu(&q->entry);
-                mutex_unlock(&tiq_list_lock);
-                synchronize_rcu();
-        }
+        mutex_lock(&tiq_list_lock);
+        list_del_rcu(&q->entry);
+        mutex_unlock(&tiq_list_lock);
+        synchronize_rcu();
 }

 static inline u32 clear_shared_ind(void)
@@ -102,6 +97,40 @@ static inline u32 clear_shared_ind(void)
         return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
 }

+static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
+{
+        struct qdio_q *q;
+        int i;
+
+        for_each_input_queue(irq, q, i) {
+                if (!references_shared_dsci(irq) &&
+                    has_multiple_inq_on_dsci(irq))
+                        xchg(q->irq_ptr->dsci, 0);
+
+                if (q->u.in.queue_start_poll) {
+                        /* skip if polling is enabled or already in work */
+                        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+                                             &q->u.in.queue_irq_state)) {
+                                qperf_inc(q, int_discarded);
+                                continue;
+                        }
+
+                        /* avoid dsci clear here, done after processing */
+                        q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+                                                 q->irq_ptr->int_parm);
+                } else {
+                        if (!shared_ind(q))
+                                xchg(q->irq_ptr->dsci, 0);
+
+                        /*
+                         * Call inbound processing but not directly
+                         * since that could starve other thinint queues.
+                         */
+                        tasklet_schedule(&q->tasklet);
+                }
+        }
+}
+
 /**
  * tiqdio_thinint_handler - thin interrupt handler for qdio
  * @alsi: pointer to adapter local summary indicator
@@ -120,35 +149,18 @@ static void tiqdio_thinint_handler(void *alsi, void *data)

         /* check for work on all inbound thinint queues */
         list_for_each_entry_rcu(q, &tiq_list, entry) {
+                struct qdio_irq *irq;

                 /* only process queues from changed sets */
-                if (unlikely(shared_ind(q->irq_ptr->dsci))) {
+                irq = q->irq_ptr;
+                if (unlikely(references_shared_dsci(irq))) {
                         if (!si_used)
                                 continue;
-                } else if (!*q->irq_ptr->dsci)
+                } else if (!*irq->dsci)
                         continue;

-                if (q->u.in.queue_start_poll) {
-                        /* skip if polling is enabled or already in work */
-                        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-                                             &q->u.in.queue_irq_state)) {
-                                qperf_inc(q, int_discarded);
-                                continue;
-                        }
+                tiqdio_call_inq_handlers(irq);

-                        /* avoid dsci clear here, done after processing */
-                        q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
-                                                 q->irq_ptr->int_parm);
-                } else {
-                        /* only clear it if the indicator is non-shared */
-                        if (!shared_ind(q->irq_ptr->dsci))
-                                xchg(q->irq_ptr->dsci, 0);
-                        /*
-                         * Call inbound processing but not directly
-                         * since that could starve other thinint queues.
-                         */
-                        tasklet_schedule(&q->tasklet);
-                }
                 qperf_inc(q, adapter_int);
         }
         rcu_read_unlock();
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c3b8064a102d..fb246b944b16 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2122,7 +2122,7 @@ static const struct net_device_ops lcs_mc_netdev_ops = {
         .ndo_stop               = lcs_stop_device,
         .ndo_get_stats          = lcs_getstats,
         .ndo_start_xmit         = lcs_start_xmit,
-        .ndo_set_multicast_list = lcs_set_multicast_list,
+        .ndo_set_rx_mode        = lcs_set_multicast_list,
 };

 static int
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 26a4110eeb2d..b77c65ed1381 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -110,6 +110,10 @@ struct qeth_perf_stats {

         unsigned int sc_dp_p;
         unsigned int sc_p_dp;
+        /* qdio_cq_handler: number of times called, time spent in */
+        __u64 cq_start_time;
+        unsigned int cq_cnt;
+        unsigned int cq_time;
         /* qdio_input_handler: number of times called, time spent in */
         __u64 inbound_start_time;
         unsigned int inbound_cnt;
@@ -213,6 +217,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
  */
 #define QETH_TX_TIMEOUT         100 * HZ
 #define QETH_RCD_TIMEOUT        60 * HZ
+#define QETH_RECLAIM_WORK_TIME  HZ
 #define QETH_HEADER_SIZE        32
 #define QETH_MAX_PORTNO         15

@@ -231,7 +236,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 #define QETH_IN_BUF_COUNT_MAX 128
 #define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
 #define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
-        ((card)->qdio.in_buf_pool.buf_count / 2)
+        ((card)->ssqd.qdioac1 & AC1_SIGA_INPUT_NEEDED ? 1 : \
+         ((card)->qdio.in_buf_pool.buf_count / 2))

 /* buffers we have to be behind before we get a PCI */
 #define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
@@ -260,6 +266,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,

 /* large receive scatter gather copy break */
 #define QETH_RX_SG_CB (PAGE_SIZE >> 1)
+#define QETH_RX_PULL_LEN 256

 struct qeth_hdr_layer3 {
         __u8  id;
@@ -375,6 +382,21 @@ enum qeth_qdio_buffer_states {
          * outbound: filled by driver; owned by hardware in order to be sent
          */
         QETH_QDIO_BUF_PRIMED,
+        /*
+         * inbound: not applicable
+         * outbound: identified to be pending in TPQ
+         */
+        QETH_QDIO_BUF_PENDING,
+        /*
+         * inbound: not applicable
+         * outbound: found in completion queue
+         */
+        QETH_QDIO_BUF_IN_CQ,
+        /*
+         * inbound: not applicable
+         * outbound: handled via transfer pending / completion queue
+         */
+        QETH_QDIO_BUF_HANDLED_DELAYED,
 };

 enum qeth_qdio_info_states {
@@ -399,6 +421,7 @@ struct qeth_qdio_buffer {
         struct qdio_buffer *buffer;
         /* the buffer pool entry currently associated to this buffer */
         struct qeth_buffer_pool_entry *pool_entry;
+        struct sk_buff *rx_skb;
 };

 struct qeth_qdio_q {
@@ -412,8 +435,11 @@ struct qeth_qdio_out_buffer {
         atomic_t state;
         int next_element_to_fill;
         struct sk_buff_head skb_list;
-        struct list_head ctx_list;
         int is_header[16];
+
+        struct qaob *aob;
+        struct qeth_qdio_out_q *q;
+        struct qeth_qdio_out_buffer *next_pending;
 };

 struct qeth_card;
@@ -426,7 +452,8 @@ enum qeth_out_q_states {

 struct qeth_qdio_out_q {
         struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
-        struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
+        struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
+        struct qdio_outbuf_state *bufstates; /* convenience pointer */
         int queue_no;
         struct qeth_card *card;
         atomic_t state;
@@ -447,7 +474,9 @@ struct qeth_qdio_out_q {
 struct qeth_qdio_info {
         atomic_t state;
         /* input */
+        int no_in_queues;
         struct qeth_qdio_q *in_q;
+        struct qeth_qdio_q *c_q;
         struct qeth_qdio_buffer_pool in_buf_pool;
         struct qeth_qdio_buffer_pool init_pool;
         int in_buf_size;
@@ -455,6 +484,7 @@ struct qeth_qdio_info {
         /* output */
         int no_out_queues;
         struct qeth_qdio_out_q **out_qs;
+        struct qdio_outbuf_state *out_bufstates;

         /* priority queueing */
         int do_prio_queueing;
@@ -526,6 +556,12 @@ enum qeth_cmd_buffer_state {
         BUF_STATE_PROCESSED,
 };

+enum qeth_cq {
+        QETH_CQ_DISABLED = 0,
+        QETH_CQ_ENABLED = 1,
+        QETH_CQ_NOTAVAILABLE = 2,
+};
+
 struct qeth_ipato {
         int enabled;
         int invert4;
@@ -650,6 +686,8 @@ struct qeth_card_options {
         int rx_sg_cb;
         enum qeth_ipa_isolation_modes isolation;
         int sniffer;
+        enum qeth_cq cq;
+        char hsuid[9];
 };

 /*
@@ -747,6 +785,8 @@ struct qeth_card {
         struct mutex discipline_mutex;
         struct napi_struct napi;
         struct qeth_rx rx;
+        struct delayed_work buffer_reclaim_work;
+        int reclaim_index;
 };

 struct qeth_card_list_struct {
@@ -812,6 +852,7 @@ int qeth_core_create_device_attributes(struct device *);
 void qeth_core_remove_device_attributes(struct device *);
 int qeth_core_create_osn_attributes(struct device *);
 void qeth_core_remove_osn_attributes(struct device *);
+void qeth_buffer_reclaim_work(struct work_struct *);

 /* exports for qeth discipline device drivers */
 extern struct qeth_card_list_struct qeth_core_card_list;
@@ -840,7 +881,7 @@ int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
                 unsigned int, const char *);
 void qeth_queue_input_buffer(struct qeth_card *, int);
 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
-                struct qdio_buffer *, struct qdio_buffer_element **, int *,
+                struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
                 struct qeth_hdr **);
 void qeth_schedule_recovery(struct qeth_card *);
 void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
@@ -887,6 +928,7 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
 int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
 int qeth_set_access_ctrl_online(struct qeth_card *card);
 int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
+int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
 int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
 int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);

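qeth_core.h now exposes the completion-queue mode through enum qeth_cq and the qeth_configure_cq() prototype. A minimal sketch of how a configuration path might drive it; only qeth_configure_cq() and the enum come from this patch, the surrounding checks are invented for illustration:

    /* illustrative sketch only -- not part of the patch */
    static int example_set_cq(struct qeth_card *card, int on)
    {
            enum qeth_cq mode = on ? QETH_CQ_ENABLED : QETH_CQ_DISABLED;

            if (card->options.cq == QETH_CQ_NOTAVAILABLE)
                    return -EOPNOTSUPP;
            return qeth_configure_cq(card, mode);
    }
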
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4550573c25e5..81534437373a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -21,6 +21,7 @@
 #include <linux/mii.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <net/iucv/af_iucv.h>

 #include <asm/ebcdic.h>
 #include <asm/io.h>
@@ -44,6 +45,7 @@ struct qeth_card_list_struct qeth_core_card_list;
 EXPORT_SYMBOL_GPL(qeth_core_card_list);
 struct kmem_cache *qeth_core_header_cache;
 EXPORT_SYMBOL_GPL(qeth_core_header_cache);
+static struct kmem_cache *qeth_qdio_outbuf_cache;

 static struct device *qeth_core_root_dev;
 static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
@@ -56,6 +58,14 @@ static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
 static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
 static void qeth_free_buffer_pool(struct qeth_card *);
 static int qeth_qdio_establish(struct qeth_card *);
+static void qeth_free_qdio_buffers(struct qeth_card *);
+static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
+                struct qeth_qdio_out_buffer *buf,
+                enum iucv_tx_notify notification);
+static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+                struct qeth_qdio_out_buffer *buf,
+                enum qeth_qdio_buffer_states newbufstate);


 static inline const char *qeth_get_cardname(struct qeth_card *card)
@@ -199,7 +209,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card)

         QETH_CARD_TEXT(card, 5, "alocpool");
         for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
-                pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
+                pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
                 if (!pool_entry) {
                         qeth_free_buffer_pool(card);
                         return -ENOMEM;
@@ -239,6 +249,196 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
 }
 EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);

+static inline int qeth_cq_init(struct qeth_card *card)
+{
+        int rc;
+
+        if (card->options.cq == QETH_CQ_ENABLED) {
+                QETH_DBF_TEXT(SETUP, 2, "cqinit");
+                memset(card->qdio.c_q->qdio_bufs, 0,
+                       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
+                card->qdio.c_q->next_buf_to_init = 127;
+                rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
+                             card->qdio.no_in_queues - 1, 0,
+                             127);
+                if (rc) {
+                        QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+                        goto out;
+                }
+        }
+        rc = 0;
+out:
+        return rc;
+}
+
+static inline int qeth_alloc_cq(struct qeth_card *card)
+{
+        int rc;
+
+        if (card->options.cq == QETH_CQ_ENABLED) {
+                int i;
+                struct qdio_outbuf_state *outbuf_states;
+
+                QETH_DBF_TEXT(SETUP, 2, "cqon");
+                card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q),
+                                         GFP_KERNEL);
+                if (!card->qdio.c_q) {
+                        rc = -1;
+                        goto kmsg_out;
+                }
+                QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *));
+
+                for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+                        card->qdio.c_q->bufs[i].buffer =
+                                &card->qdio.c_q->qdio_bufs[i];
+                }
+
+                card->qdio.no_in_queues = 2;
+
+                card->qdio.out_bufstates = (struct qdio_outbuf_state *)
+                        kzalloc(card->qdio.no_out_queues *
+                                QDIO_MAX_BUFFERS_PER_Q *
+                                sizeof(struct qdio_outbuf_state), GFP_KERNEL);
+                outbuf_states = card->qdio.out_bufstates;
+                if (outbuf_states == NULL) {
+                        rc = -1;
+                        goto free_cq_out;
+                }
+                for (i = 0; i < card->qdio.no_out_queues; ++i) {
+                        card->qdio.out_qs[i]->bufstates = outbuf_states;
+                        outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
+                }
+        } else {
+                QETH_DBF_TEXT(SETUP, 2, "nocq");
+                card->qdio.c_q = NULL;
+                card->qdio.no_in_queues = 1;
+        }
+        QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
+        rc = 0;
+out:
+        return rc;
+free_cq_out:
+        kfree(card->qdio.c_q);
+        card->qdio.c_q = NULL;
+kmsg_out:
+        dev_err(&card->gdev->dev, "Failed to create completion queue\n");
+        goto out;
+}
+
+static inline void qeth_free_cq(struct qeth_card *card)
+{
+        if (card->qdio.c_q) {
+                --card->qdio.no_in_queues;
+                kfree(card->qdio.c_q);
+                card->qdio.c_q = NULL;
+        }
+        kfree(card->qdio.out_bufstates);
+        card->qdio.out_bufstates = NULL;
+}
+
+static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
+        int delayed) {
+        enum iucv_tx_notify n;
+
+        switch (sbalf15) {
+        case 0:
+                n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
+                break;
+        case 4:
+        case 16:
+        case 17:
+        case 18:
+                n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
+                        TX_NOTIFY_UNREACHABLE;
+                break;
+        default:
+                n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
+                        TX_NOTIFY_GENERALERROR;
+                break;
+        }
+
+        return n;
+}
+
+static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
+        int bidx, int forced_cleanup)
+{
+        if (q->bufs[bidx]->next_pending != NULL) {
+                struct qeth_qdio_out_buffer *head = q->bufs[bidx];
+                struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
+
+                while (c) {
+                        if (forced_cleanup ||
+                            atomic_read(&c->state) ==
+                              QETH_QDIO_BUF_HANDLED_DELAYED) {
+                                struct qeth_qdio_out_buffer *f = c;
+                                QETH_CARD_TEXT(f->q->card, 5, "fp");
+                                QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
+                                /* release here to avoid interleaving between
+                                   outbound tasklet and inbound tasklet
+                                   regarding notifications and lifecycle */
+                                qeth_release_skbs(c);
+
+                                c = f->next_pending;
+                                BUG_ON(head->next_pending != f);
+                                head->next_pending = c;
+                                kmem_cache_free(qeth_qdio_outbuf_cache, f);
+                        } else {
+                                head = c;
+                                c = c->next_pending;
+                        }
+
+                }
+        }
+}
+
+
+static inline void qeth_qdio_handle_aob(struct qeth_card *card,
+                unsigned long phys_aob_addr) {
+        struct qaob *aob;
+        struct qeth_qdio_out_buffer *buffer;
+        enum iucv_tx_notify notification;
+
+        aob = (struct qaob *) phys_to_virt(phys_aob_addr);
+        QETH_CARD_TEXT(card, 5, "haob");
+        QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
+        buffer = (struct qeth_qdio_out_buffer *) aob->user1;
+        QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
+
+        BUG_ON(buffer == NULL);
+
+        if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
+                           QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
+                notification = TX_NOTIFY_OK;
+        } else {
+                BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
+
+                atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
+                notification = TX_NOTIFY_DELAYED_OK;
+        }
+
+        if (aob->aorc != 0) {
+                QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
+                notification = qeth_compute_cq_notification(aob->aorc, 1);
+        }
+        qeth_notify_skbs(buffer->q, buffer, notification);
+
+        buffer->aob = NULL;
+        qeth_clear_output_buffer(buffer->q, buffer,
+                                 QETH_QDIO_BUF_HANDLED_DELAYED);
+        /* from here on: do not touch buffer anymore */
+        qdio_release_aob(aob);
+}
+
+static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
+{
+        return card->options.cq == QETH_CQ_ENABLED &&
+            card->qdio.c_q != NULL &&
+            queue != 0 &&
+            queue == card->qdio.no_in_queues - 1;
+}
+
+
 static int qeth_issue_next_read(struct qeth_card *card)
 {
         int rc;
@@ -589,7 +789,7 @@ static int qeth_setup_channel(struct qeth_channel *channel)
         QETH_DBF_TEXT(SETUP, 2, "setupch");
         for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
                 channel->iob[cnt].data =
-                        kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
+                        kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
                 if (channel->iob[cnt].data == NULL)
                         break;
                 channel->iob[cnt].state = BUF_STATE_FREE;
@@ -681,6 +881,7 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread);
 void qeth_schedule_recovery(struct qeth_card *card)
 {
         QETH_CARD_TEXT(card, 2, "startrec");
+        WARN_ON(1);
         if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
                 schedule_work(&card->kernel_thread_starter);
 }
@@ -883,22 +1084,60 @@ out:
 	return;
 }
 
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf)
+static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
+		struct qeth_qdio_out_buffer *buf,
+		enum iucv_tx_notify notification)
 {
-	int i;
 	struct sk_buff *skb;
 
-	/* is PCI flag set on buffer? */
-	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
-		atomic_dec(&queue->set_pci_flags_count);
+	if (skb_queue_empty(&buf->skb_list))
+		goto out;
+	skb = skb_peek(&buf->skb_list);
+	while (skb) {
+		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
+		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
+		if (skb->protocol == ETH_P_AF_IUCV) {
+			if (skb->sk) {
+				struct iucv_sock *iucv = iucv_sk(skb->sk);
+				iucv->sk_txnotify(skb, notification);
+			}
+		}
+		if (skb_queue_is_last(&buf->skb_list, skb))
+			skb = NULL;
+		else
+			skb = skb_queue_next(&buf->skb_list, skb);
+	}
+out:
+	return;
+}
+
+static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
+{
+	struct sk_buff *skb;
 
 	skb = skb_dequeue(&buf->skb_list);
 	while (skb) {
+		QETH_CARD_TEXT(buf->q->card, 5, "skbr");
+		QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
 		atomic_dec(&skb->users);
 		dev_kfree_skb_any(skb);
 		skb = skb_dequeue(&buf->skb_list);
 	}
+}
+
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+		struct qeth_qdio_out_buffer *buf,
+		enum qeth_qdio_buffer_states newbufstate)
+{
+	int i;
+
+	/* is PCI flag set on buffer? */
+	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
+		atomic_dec(&queue->set_pci_flags_count);
+
+	if (newbufstate == QETH_QDIO_BUF_EMPTY) {
+		qeth_release_skbs(buf);
+	}
 	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
 		if (buf->buffer->element[i].addr && buf->is_header[i])
 			kmem_cache_free(qeth_core_header_cache,
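
Note the split above: qeth_notify_skbs() walks the queue with
skb_peek()/skb_queue_next() and leaves every skb in place, while
qeth_release_skbs() consumes the list with skb_dequeue(). A toy C
sketch of the two traversal styles over a plain linked list (invented
names, no kernel APIs):

	#include <stdio.h>
	#include <stdlib.h>

	struct node { struct node *next; int id; };

	static void notify_all(struct node *head)	/* peek-style: list intact */
	{
		for (struct node *n = head; n; n = n->next)
			printf("notify %d\n", n->id);
	}

	static void release_all(struct node **head)	/* dequeue-style: consumed */
	{
		struct node *n;
		while ((n = *head) != NULL) {
			*head = n->next;
			printf("free %d\n", n->id);
			free(n);
		}
	}

	int main(void)
	{
		struct node *head = NULL;

		for (int id = 2; id >= 0; id--) {
			struct node *n = malloc(sizeof(*n));
			n->next = head;
			n->id = id;
			head = n;
		}
		notify_all(head);	/* list still usable afterwards */
		release_all(&head);	/* list is gone afterwards */
		return 0;
	}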
@@ -912,21 +1151,36 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 	buf->buffer->element[15].eflags = 0;
 	buf->buffer->element[15].sflags = 0;
 	buf->next_element_to_fill = 0;
-	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
+	atomic_set(&buf->state, newbufstate);
+}
+
+static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
+{
+	int j;
+
+	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+		if (!q->bufs[j])
+			continue;
+		qeth_cleanup_handled_pending(q, j, free);
+		qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
+		if (free) {
+			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
+			q->bufs[j] = NULL;
+		}
+	}
 }
 
 void qeth_clear_qdio_buffers(struct qeth_card *card)
 {
-	int i, j;
+	int i;
 
 	QETH_CARD_TEXT(card, 2, "clearqdbf");
 	/* clear outbound buffers to free skbs */
-	for (i = 0; i < card->qdio.no_out_queues; ++i)
+	for (i = 0; i < card->qdio.no_out_queues; ++i) {
 		if (card->qdio.out_qs[i]) {
-			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
-				qeth_clear_output_buffer(card->qdio.out_qs[i],
-						&card->qdio.out_qs[i]->bufs[j]);
+			qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
 		}
+	}
 }
 EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
 
@@ -950,6 +1204,11 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
 	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
 		QETH_QDIO_UNINITIALIZED)
 		return;
+
+	qeth_free_cq(card);
+	cancel_delayed_work_sync(&card->buffer_reclaim_work);
+	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
+		kfree_skb(card->qdio.in_q->bufs[j].rx_skb);
 	kfree(card->qdio.in_q);
 	card->qdio.in_q = NULL;
 	/* inbound buffer pool */
@@ -957,9 +1216,7 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
 	/* free outbound qdio_qs */
 	if (card->qdio.out_qs) {
 		for (i = 0; i < card->qdio.no_out_queues; ++i) {
-			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
-				qeth_clear_output_buffer(card->qdio.out_qs[i],
-						&card->qdio.out_qs[i]->bufs[j]);
+			qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
 			kfree(card->qdio.out_qs[i]);
 		}
 		kfree(card->qdio.out_qs);
@@ -995,27 +1252,29 @@ static void qeth_get_channel_path_desc(struct qeth_card *card)
 	ccwdev = card->data.ccwdev;
 	chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
 	if (chp_dsc != NULL) {
-		/* CHPP field bit 6 == 1 -> single queue */
-		if ((chp_dsc->chpp & 0x02) == 0x02) {
-			if ((atomic_read(&card->qdio.state) !=
-				QETH_QDIO_UNINITIALIZED) &&
-			    (card->qdio.no_out_queues == 4))
-				/* change from 4 to 1 outbound queues */
-				qeth_free_qdio_buffers(card);
-			card->qdio.no_out_queues = 1;
-			if (card->qdio.default_out_queue != 0)
-				dev_info(&card->gdev->dev,
+		if (card->info.type != QETH_CARD_TYPE_IQD) {
+			/* CHPP field bit 6 == 1 -> single queue */
+			if ((chp_dsc->chpp & 0x02) == 0x02) {
+				if ((atomic_read(&card->qdio.state) !=
+					QETH_QDIO_UNINITIALIZED) &&
+				    (card->qdio.no_out_queues == 4))
+					/* change from 4 to 1 outbound queues */
+					qeth_free_qdio_buffers(card);
+				card->qdio.no_out_queues = 1;
+				if (card->qdio.default_out_queue != 0)
+					dev_info(&card->gdev->dev,
 					"Priority Queueing not supported\n");
 			card->qdio.default_out_queue = 0;
 		} else {
 			if ((atomic_read(&card->qdio.state) !=
 			    QETH_QDIO_UNINITIALIZED) &&
 			    (card->qdio.no_out_queues == 1)) {
 				/* change from 1 to 4 outbound queues */
 				qeth_free_qdio_buffers(card);
 				card->qdio.default_out_queue = 2;
+			}
+			card->qdio.no_out_queues = 4;
 		}
-		card->qdio.no_out_queues = 4;
 		}
 		card->info.func_level = 0x4100 + chp_dsc->desc;
 		kfree(chp_dsc);
@@ -1051,6 +1310,7 @@ static void qeth_set_intial_options(struct qeth_card *card)
 	card->options.performance_stats = 0;
 	card->options.rx_sg_cb = QETH_RX_SG_CB;
 	card->options.isolation = ISOLATION_MODE_NONE;
+	card->options.cq = QETH_CQ_DISABLED;
 }
 
 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
@@ -1119,6 +1379,7 @@ static int qeth_setup_card(struct qeth_card *card)
 	card->ipato.invert6 = 0;
 	/* init QDIO stuff */
 	qeth_init_qdio_info(card);
+	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
 	return 0;
 }
 
@@ -1140,7 +1401,7 @@ static struct qeth_card *qeth_alloc_card(void)
 	if (!card)
 		goto out;
 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
-	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+	card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
 	if (!card->ip_tbd_list) {
 		QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
 		goto out_card;
@@ -1180,6 +1441,7 @@ static int qeth_determine_card_type(struct qeth_card *card)
 		card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
 		card->qdio.no_out_queues =
 			known_devices[i][QETH_QUEUE_NO_IND];
+		card->qdio.no_in_queues = 1;
 		card->info.is_multicast_different =
 			known_devices[i][QETH_MULTICAST_IND];
 		qeth_get_channel_path_desc(card);
@@ -2027,6 +2289,37 @@ static int qeth_ulp_setup(struct qeth_card *card)
 	return rc;
 }
 
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
+{
+	int rc;
+	struct qeth_qdio_out_buffer *newbuf;
+
+	rc = 0;
+	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
+	if (!newbuf) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	newbuf->buffer = &q->qdio_bufs[bidx];
+	skb_queue_head_init(&newbuf->skb_list);
+	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
+	newbuf->q = q;
+	newbuf->aob = NULL;
+	newbuf->next_pending = q->bufs[bidx];
+	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
+	q->bufs[bidx] = newbuf;
+	if (q->bufstates) {
+		q->bufstates[bidx].user = newbuf;
+		QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx);
+		QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf);
+		QETH_CARD_TEXT_(q->card, 2, "%lx",
+				(long) newbuf->next_pending);
+	}
+out:
+	return rc;
+}
+
+
 static int qeth_alloc_qdio_buffers(struct qeth_card *card)
 {
 	int i, j;
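
With qeth_init_qdio_out_buf() in place, outbound buffer descriptors are
individual slab objects rather than members of an embedded array, so a
buffer stuck in the pending state can outlive its ring slot. A sketch
of the underlying kmem_cache idiom, kernel-style and not standalone
(struct my_obj and the function names are invented; the real cache,
qeth_qdio_outbuf_cache, is created in qeth_core_init() further down):

	#include <linux/slab.h>

	struct my_obj { int n; };
	static struct kmem_cache *my_cache;

	static int my_cache_init(void)
	{
		my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
					     0, 0, NULL);
		return my_cache ? 0 : -ENOMEM;
	}

	static struct my_obj *my_obj_alloc(void)
	{
		/* GFP_ATOMIC, as the qeth caller can run in softirq context */
		return kmem_cache_zalloc(my_cache, GFP_ATOMIC);
	}

	static void my_obj_free(struct my_obj *obj)
	{
		kmem_cache_free(my_cache, obj);
	}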
@@ -2037,52 +2330,63 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
 		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
 		return 0;
 
-	card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
+	card->qdio.in_q = kzalloc(sizeof(struct qeth_qdio_q),
 				  GFP_KERNEL);
 	if (!card->qdio.in_q)
 		goto out_nomem;
 	QETH_DBF_TEXT(SETUP, 2, "inq");
 	QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *));
 	memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
 	/* give inbound qeth_qdio_buffers their qdio_buffers */
-	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
 		card->qdio.in_q->bufs[i].buffer =
 			&card->qdio.in_q->qdio_bufs[i];
+		card->qdio.in_q->bufs[i].rx_skb = NULL;
+	}
 	/* inbound buffer pool */
 	if (qeth_alloc_buffer_pool(card))
 		goto out_freeinq;
+
 	/* outbound */
 	card->qdio.out_qs =
-		kmalloc(card->qdio.no_out_queues *
+		kzalloc(card->qdio.no_out_queues *
 			sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
 	if (!card->qdio.out_qs)
 		goto out_freepool;
 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
-		card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
+		card->qdio.out_qs[i] = kzalloc(sizeof(struct qeth_qdio_out_q),
 					       GFP_KERNEL);
 		if (!card->qdio.out_qs[i])
 			goto out_freeoutq;
 		QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
 		QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
-		memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
 		card->qdio.out_qs[i]->queue_no = i;
 		/* give outbound qeth_qdio_buffers their qdio_buffers */
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
-			card->qdio.out_qs[i]->bufs[j].buffer =
-				&card->qdio.out_qs[i]->qdio_bufs[j];
-			skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
-					    skb_list);
-			lockdep_set_class(
-				&card->qdio.out_qs[i]->bufs[j].skb_list.lock,
-				&qdio_out_skb_queue_key);
-			INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
+			BUG_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
+			if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
+				goto out_freeoutqbufs;
 		}
 	}
+
+	/* completion */
+	if (qeth_alloc_cq(card))
+		goto out_freeoutq;
+
 	return 0;
 
+out_freeoutqbufs:
+	while (j > 0) {
+		--j;
+		kmem_cache_free(qeth_qdio_outbuf_cache,
+				card->qdio.out_qs[i]->bufs[j]);
+		card->qdio.out_qs[i]->bufs[j] = NULL;
+	}
 out_freeoutq:
-	while (i > 0)
+	while (i > 0) {
 		kfree(card->qdio.out_qs[--i]);
+		qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+	}
 	kfree(card->qdio.out_qs);
 	card->qdio.out_qs = NULL;
 out_freepool:
@@ -2353,6 +2657,12 @@ static int qeth_init_input_buffer(struct qeth_card *card,
 	struct qeth_buffer_pool_entry *pool_entry;
 	int i;
 
+	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
+		buf->rx_skb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
+		if (!buf->rx_skb)
+			return 1;
+	}
+
 	pool_entry = qeth_find_free_buffer_pool_entry(card);
 	if (!pool_entry)
 		return 1;
@@ -2399,13 +2709,21 @@ int qeth_init_qdio_queues(struct qeth_card *card)
 		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
 		return rc;
 	}
+
+	/* completion */
+	rc = qeth_cq_init(card);
+	if (rc) {
+		return rc;
+	}
+
 	/* outbound queue */
 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
 		memset(card->qdio.out_qs[i]->qdio_bufs, 0,
 		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
 			qeth_clear_output_buffer(card->qdio.out_qs[i],
-					&card->qdio.out_qs[i]->bufs[j]);
+					card->qdio.out_qs[i]->bufs[j],
+					QETH_QDIO_BUF_EMPTY);
 		}
 		card->qdio.out_qs[i]->card = card;
 		card->qdio.out_qs[i]->next_buf_to_fill = 0;
@@ -2734,9 +3052,19 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
 }
 EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
 
+void qeth_buffer_reclaim_work(struct work_struct *work)
+{
+	struct qeth_card *card = container_of(work, struct qeth_card,
+		buffer_reclaim_work.work);
+
+	QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
+	qeth_queue_input_buffer(card, card->reclaim_index);
+}
+
 void qeth_queue_input_buffer(struct qeth_card *card, int index)
 {
 	struct qeth_qdio_q *queue = card->qdio.in_q;
+	struct list_head *lh;
 	int count;
 	int i;
 	int rc;
@@ -2768,6 +3096,20 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
 			atomic_add_unless(&card->force_alloc_skb, -1, 0);
 	}
 
+	if (!count) {
+		i = 0;
+		list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
+			i++;
+		if (i == card->qdio.in_buf_pool.buf_count) {
+			QETH_CARD_TEXT(card, 2, "qsarbw");
+			card->reclaim_index = index;
+			schedule_delayed_work(
+				&card->buffer_reclaim_work,
+				QETH_RECLAIM_WORK_TIME);
+		}
+		return;
+	}
+
 	/*
 	 * according to old code it should be avoided to requeue all
 	 * 128 buffers in order to benefit from PCI avoidance.
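
The new !count branch defers the refill instead of spinning: it records
the index and lets process context retry later. A sketch of the
delayed-work pattern it relies on, kernel-style and not standalone
(struct my_dev and all names are invented; qeth uses
card->buffer_reclaim_work, initialized in qeth_setup_card() above):

	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	struct my_dev {
		struct delayed_work reclaim_work;
		int reclaim_index;
	};

	static void my_reclaim_fn(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev,
						  reclaim_work.work);
		/* retry the refill that could not be done in IRQ context */
		(void)dev->reclaim_index;
	}

	static void my_dev_setup(struct my_dev *dev)
	{
		INIT_DELAYED_WORK(&dev->reclaim_work, my_reclaim_fn);
	}

	static void my_out_of_buffers(struct my_dev *dev, int index)
	{
		dev->reclaim_index = index;
		/* runs my_reclaim_fn() roughly half a second from now */
		schedule_delayed_work(&dev->reclaim_work,
				      msecs_to_jiffies(500));
	}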
@@ -2787,8 +3129,6 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
 				qeth_get_micros() -
 				card->perf_stats.inbound_do_qdio_start_time;
 		if (rc) {
-			dev_warn(&card->gdev->dev,
-				"QDIO reported an error, rc=%i\n", rc);
 			QETH_CARD_TEXT(card, 2, "qinberr");
 		}
 		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
@@ -2862,12 +3202,12 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
 				queue->card->perf_stats.sc_p_dp++;
 			queue->do_pack = 0;
 			/* flush packing buffers */
-			buffer = &queue->bufs[queue->next_buf_to_fill];
+			buffer = queue->bufs[queue->next_buf_to_fill];
 			if ((atomic_read(&buffer->state) ==
 						QETH_QDIO_BUF_EMPTY) &&
 			    (buffer->next_element_to_fill > 0)) {
 				atomic_set(&buffer->state,
 					   QETH_QDIO_BUF_PRIMED);
 				flush_count++;
 				queue->next_buf_to_fill =
 					(queue->next_buf_to_fill + 1) %
@@ -2878,6 +3218,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
 	return flush_count;
 }
 
+
 /*
  * Called to flush a packing buffer if no more pci flags are on the queue.
  * Checks if there is a packing buffer and prepares it to be flushed.
@@ -2887,7 +3228,7 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
 {
 	struct qeth_qdio_out_buffer *buffer;
 
-	buffer = &queue->bufs[queue->next_buf_to_fill];
+	buffer = queue->bufs[queue->next_buf_to_fill];
 	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
 	    (buffer->next_element_to_fill > 0)) {
 		/* it's a packing buffer */
@@ -2908,10 +3249,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 	unsigned int qdio_flags;
 
 	for (i = index; i < index + count; ++i) {
-		buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
+		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		buf = queue->bufs[bidx];
 		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
 				SBAL_EFLAGS_LAST_ENTRY;
 
+		if (queue->bufstates)
+			queue->bufstates[bidx].user = buf;
+
 		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
 			continue;
 
@@ -2963,6 +3308,9 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 		if (rc == QDIO_ERROR_SIGA_TARGET)
 			return;
 		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
+		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
+		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
+		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
 		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
 
 		/* this must not happen under normal circumstances. if it
@@ -3024,14 +3372,120 @@ void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
 
+int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
+{
+	int rc;
+
+	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
+		rc = -1;
+		goto out;
+	} else {
+		if (card->options.cq == cq) {
+			rc = 0;
+			goto out;
+		}
+
+		if (card->state != CARD_STATE_DOWN &&
+		    card->state != CARD_STATE_RECOVER) {
+			rc = -1;
+			goto out;
+		}
+
+		qeth_free_qdio_buffers(card);
+		card->options.cq = cq;
+		rc = 0;
+	}
+out:
+	return rc;
+
+}
+EXPORT_SYMBOL_GPL(qeth_configure_cq);
+
+
+static void qeth_qdio_cq_handler(struct qeth_card *card,
+		unsigned int qdio_err,
+		unsigned int queue, int first_element, int count) {
+	struct qeth_qdio_q *cq = card->qdio.c_q;
+	int i;
+	int rc;
+
+	if (!qeth_is_cq(card, queue))
+		goto out;
+
+	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
+	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
+	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
+
+	if (qdio_err) {
+		netif_stop_queue(card->dev);
+		qeth_schedule_recovery(card);
+		goto out;
+	}
+
+	if (card->options.performance_stats) {
+		card->perf_stats.cq_cnt++;
+		card->perf_stats.cq_start_time = qeth_get_micros();
+	}
+
+	for (i = first_element; i < first_element + count; ++i) {
+		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		struct qdio_buffer *buffer = &cq->qdio_bufs[bidx];
+		int e;
+
+		e = 0;
+		while (buffer->element[e].addr) {
+			unsigned long phys_aob_addr;
+
+			phys_aob_addr = (unsigned long) buffer->element[e].addr;
+			qeth_qdio_handle_aob(card, phys_aob_addr);
+			buffer->element[e].addr = NULL;
+			buffer->element[e].eflags = 0;
+			buffer->element[e].sflags = 0;
+			buffer->element[e].length = 0;
+
+			++e;
+		}
+
+		buffer->element[15].eflags = 0;
+		buffer->element[15].sflags = 0;
+	}
+	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
+		    card->qdio.c_q->next_buf_to_init,
+		    count);
+	if (rc) {
+		dev_warn(&card->gdev->dev,
+			"QDIO reported an error, rc=%i\n", rc);
+		QETH_CARD_TEXT(card, 2, "qcqherr");
+	}
+	card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
+				   + count) % QDIO_MAX_BUFFERS_PER_Q;
+
+	netif_wake_queue(card->dev);
+
+	if (card->options.performance_stats) {
+		int delta_t = qeth_get_micros();
+		delta_t -= card->perf_stats.cq_start_time;
+		card->perf_stats.cq_time += delta_t;
+	}
+out:
+	return;
+}
+
 void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
-		unsigned int queue, int first_element, int count,
+		unsigned int queue, int first_elem, int count,
 		unsigned long card_ptr)
 {
 	struct qeth_card *card = (struct qeth_card *)card_ptr;
 
-	if (qdio_err)
+	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
+	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
+
+	if (qeth_is_cq(card, queue))
+		qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
+	else if (qdio_err)
 		qeth_schedule_recovery(card);
+
+
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
 
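
Both qeth_qdio_cq_handler() above and the output handler below convert
an absolute element counter into a ring slot with
"i % QDIO_MAX_BUFFERS_PER_Q", so a range reported by QDIO may wrap past
the end of the 128-entry ring. A runnable user-space sketch of that
wrap-around walk (the slot count mirrors QDIO's, everything else is
invented):

	#include <stdio.h>

	#define RING_SLOTS 128	/* QDIO_MAX_BUFFERS_PER_Q */

	static void process_range(int first, int count)
	{
		for (int i = first; i < first + count; ++i) {
			int bidx = i % RING_SLOTS;	/* wraps past the end */
			printf("process slot %d\n", bidx);
		}
	}

	int main(void)
	{
		process_range(126, 4);	/* visits slots 126, 127, 0, 1 */
		return 0;
	}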
@@ -3057,9 +3511,45 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 			qeth_get_micros();
 	}
 	for (i = first_element; i < (first_element + count); ++i) {
-		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
+		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+		buffer = queue->bufs[bidx];
 		qeth_handle_send_error(card, buffer, qdio_error);
-		qeth_clear_output_buffer(queue, buffer);
+
+		if (queue->bufstates &&
+		    (queue->bufstates[bidx].flags &
+		     QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
+			BUG_ON(card->options.cq != QETH_CQ_ENABLED);
+
+			if (atomic_cmpxchg(&buffer->state,
+					   QETH_QDIO_BUF_PRIMED,
+					   QETH_QDIO_BUF_PENDING) ==
+				QETH_QDIO_BUF_PRIMED) {
+				qeth_notify_skbs(queue, buffer,
+						 TX_NOTIFY_PENDING);
+			}
+			buffer->aob = queue->bufstates[bidx].aob;
+			QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
+			QETH_CARD_TEXT(queue->card, 5, "aob");
+			QETH_CARD_TEXT_(queue->card, 5, "%lx",
+					virt_to_phys(buffer->aob));
+			BUG_ON(bidx < 0 || bidx >= QDIO_MAX_BUFFERS_PER_Q);
+			if (qeth_init_qdio_out_buf(queue, bidx)) {
+				QETH_CARD_TEXT(card, 2, "outofbuf");
+				qeth_schedule_recovery(card);
+			}
+		} else {
+			if (card->options.cq == QETH_CQ_ENABLED) {
+				enum iucv_tx_notify n;
+
+				n = qeth_compute_cq_notification(
+					buffer->buffer->element[15].sflags, 0);
+				qeth_notify_skbs(queue, buffer, n);
+			}
+
+			qeth_clear_output_buffer(queue, buffer,
+						 QETH_QDIO_BUF_EMPTY);
+		}
+		qeth_cleanup_handled_pending(queue, bidx, 0);
 	}
 	atomic_sub(count, &queue->used_buffers);
 	/* check if we need to do something on this outbound queue */
@@ -3204,7 +3694,8 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
 
 	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
 		frag = &skb_shinfo(skb)->frags[cnt];
-		buffer->element[element].addr = (char *)page_to_phys(frag->page)
+		buffer->element[element].addr = (char *)
+			page_to_phys(skb_frag_page(frag))
 			+ frag->page_offset;
 		buffer->element[element].length = frag->size;
 		buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
@@ -3291,7 +3782,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
 		       QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
 	/* ... now we've got the queue */
 	index = queue->next_buf_to_fill;
-	buffer = &queue->bufs[queue->next_buf_to_fill];
+	buffer = queue->bufs[queue->next_buf_to_fill];
 	/*
 	 * check if buffer is empty to make sure that we do not 'overtake'
 	 * ourselves and try to fill a buffer that is already primed
@@ -3325,7 +3816,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
 			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
 	start_index = queue->next_buf_to_fill;
-	buffer = &queue->bufs[queue->next_buf_to_fill];
+	buffer = queue->bufs[queue->next_buf_to_fill];
 	/*
 	 * check if buffer is empty to make sure that we do not 'overtake'
 	 * ourselves and try to fill a buffer that is already primed
@@ -3347,7 +3838,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 			queue->next_buf_to_fill =
 				(queue->next_buf_to_fill + 1) %
 				QDIO_MAX_BUFFERS_PER_Q;
-			buffer = &queue->bufs[queue->next_buf_to_fill];
+			buffer = queue->bufs[queue->next_buf_to_fill];
 			/* we did a step forward, so check buffer state
 			 * again */
 			if (atomic_read(&buffer->state) !=
@@ -3925,6 +4416,20 @@ static void qeth_determine_capabilities(struct qeth_card *card)
 	if (rc)
 		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
 
+	QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
+	QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac1);
+	QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac3);
+	QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
+	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
+	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
+	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
+		dev_info(&card->gdev->dev,
+			"Completion Queueing supported\n");
+	} else {
+		card->options.cq = QETH_CQ_NOTAVAILABLE;
+	}
+
+
 out_offline:
 	if (ddev_offline == 1)
 		ccw_device_set_offline(ddev);
@@ -3932,11 +4437,30 @@ out:
 	return;
 }
 
+static inline void qeth_qdio_establish_cq(struct qeth_card *card,
+	struct qdio_buffer **in_sbal_ptrs,
+	void (**queue_start_poll) (struct ccw_device *, int, unsigned long)) {
+	int i;
+
+	if (card->options.cq == QETH_CQ_ENABLED) {
+		int offset = QDIO_MAX_BUFFERS_PER_Q *
+			     (card->qdio.no_in_queues - 1);
+		i = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1);
+		for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+			in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
+				virt_to_phys(card->qdio.c_q->bufs[i].buffer);
+		}
+
+		queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
+	}
+}
+
 static int qeth_qdio_establish(struct qeth_card *card)
 {
 	struct qdio_initialize init_data;
 	char *qib_param_field;
 	struct qdio_buffer **in_sbal_ptrs;
+	void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
 	struct qdio_buffer **out_sbal_ptrs;
 	int i, j, k;
 	int rc = 0;
@@ -3945,34 +4469,48 @@ static int qeth_qdio_establish(struct qeth_card *card)
 
 	qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
 				  GFP_KERNEL);
-	if (!qib_param_field)
-		return -ENOMEM;
+	if (!qib_param_field) {
+		rc = -ENOMEM;
+		goto out_free_nothing;
+	}
 
 	qeth_create_qib_param_field(card, qib_param_field);
 	qeth_create_qib_param_field_blkt(card, qib_param_field);
 
-	in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
+	in_sbal_ptrs = kzalloc(card->qdio.no_in_queues *
+			       QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
 			       GFP_KERNEL);
 	if (!in_sbal_ptrs) {
-		kfree(qib_param_field);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto out_free_qib_param;
 	}
-	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
 		in_sbal_ptrs[i] = (struct qdio_buffer *)
 			virt_to_phys(card->qdio.in_q->bufs[i].buffer);
+	}
+
+	queue_start_poll = kzalloc(sizeof(void *) * card->qdio.no_in_queues,
+				   GFP_KERNEL);
+	if (!queue_start_poll) {
+		rc = -ENOMEM;
+		goto out_free_in_sbals;
+	}
+	for (i = 0; i < card->qdio.no_in_queues; ++i)
+		queue_start_poll[i] = card->discipline.start_poll;
+
+	qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
 
 	out_sbal_ptrs =
-		kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
+		kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
 			sizeof(void *), GFP_KERNEL);
 	if (!out_sbal_ptrs) {
-		kfree(in_sbal_ptrs);
-		kfree(qib_param_field);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto out_free_queue_start_poll;
 	}
 	for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
 			out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
-				card->qdio.out_qs[i]->bufs[j].buffer);
+				card->qdio.out_qs[i]->bufs[j]->buffer);
 		}
 
 	memset(&init_data, 0, sizeof(struct qdio_initialize));
@@ -3980,14 +4518,15 @@ static int qeth_qdio_establish(struct qeth_card *card)
 	init_data.q_format = qeth_get_qdio_q_format(card);
 	init_data.qib_param_field_format = 0;
 	init_data.qib_param_field = qib_param_field;
-	init_data.no_input_qs = 1;
+	init_data.no_input_qs = card->qdio.no_in_queues;
 	init_data.no_output_qs = card->qdio.no_out_queues;
 	init_data.input_handler = card->discipline.input_handler;
 	init_data.output_handler = card->discipline.output_handler;
-	init_data.queue_start_poll = card->discipline.start_poll;
+	init_data.queue_start_poll = queue_start_poll;
 	init_data.int_parm = (unsigned long) card;
 	init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
 	init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
+	init_data.output_sbal_state_array = card->qdio.out_bufstates;
 	init_data.scan_threshold =
 		(card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
@@ -4004,10 +4543,26 @@ static int qeth_qdio_establish(struct qeth_card *card)
 			qdio_free(CARD_DDEV(card));
 		}
 	}
+
+	switch (card->options.cq) {
+	case QETH_CQ_ENABLED:
+		dev_info(&card->gdev->dev, "Completion Queue support enabled");
+		break;
+	case QETH_CQ_DISABLED:
+		dev_info(&card->gdev->dev, "Completion Queue support disabled");
+		break;
+	default:
+		break;
+	}
 out:
 	kfree(out_sbal_ptrs);
+out_free_queue_start_poll:
+	kfree(queue_start_poll);
+out_free_in_sbals:
 	kfree(in_sbal_ptrs);
+out_free_qib_param:
 	kfree(qib_param_field);
+out_free_nothing:
 	return rc;
 }
 
@@ -4144,29 +4699,36 @@ out:
 }
 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
 
-static inline int qeth_create_skb_frag(struct qdio_buffer_element *element,
+static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer,
+	struct qdio_buffer_element *element,
 		struct sk_buff **pskb, int offset, int *pfrag, int data_len)
 {
 	struct page *page = virt_to_page(element->addr);
 	if (*pskb == NULL) {
-		/* the upper protocol layers assume that there is data in the
-		 * skb itself. Copy a small amount (64 bytes) to make them
-		 * happy. */
-		*pskb = dev_alloc_skb(64 + ETH_HLEN);
-		if (!(*pskb))
-			return -ENOMEM;
+		if (qethbuffer->rx_skb) {
+			/* only if qeth_card.options.cq == QETH_CQ_ENABLED */
+			*pskb = qethbuffer->rx_skb;
+			qethbuffer->rx_skb = NULL;
+		} else {
+			*pskb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
+			if (!(*pskb))
+				return -ENOMEM;
+		}
+
 		skb_reserve(*pskb, ETH_HLEN);
-		if (data_len <= 64) {
+		if (data_len <= QETH_RX_PULL_LEN) {
 			memcpy(skb_put(*pskb, data_len), element->addr + offset,
 				data_len);
 		} else {
 			get_page(page);
-			memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
-			skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
-					   data_len - 64);
-			(*pskb)->data_len += data_len - 64;
-			(*pskb)->len += data_len - 64;
-			(*pskb)->truesize += data_len - 64;
+			memcpy(skb_put(*pskb, QETH_RX_PULL_LEN),
+			       element->addr + offset, QETH_RX_PULL_LEN);
+			skb_fill_page_desc(*pskb, *pfrag, page,
+					   offset + QETH_RX_PULL_LEN,
+					   data_len - QETH_RX_PULL_LEN);
+			(*pskb)->data_len += data_len - QETH_RX_PULL_LEN;
+			(*pskb)->len += data_len - QETH_RX_PULL_LEN;
+			(*pskb)->truesize += data_len - QETH_RX_PULL_LEN;
 			(*pfrag)++;
 		}
 	} else {
@@ -4177,15 +4739,18 @@ static inline int qeth_create_skb_frag(struct qdio_buffer_element *element,
 		(*pskb)->truesize += data_len;
 		(*pfrag)++;
 	}
+
+
 	return 0;
 }
 
 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
-		struct qdio_buffer *buffer,
+		struct qeth_qdio_buffer *qethbuffer,
 		struct qdio_buffer_element **__element, int *__offset,
 		struct qeth_hdr **hdr)
 {
 	struct qdio_buffer_element *element = *__element;
+	struct qdio_buffer *buffer = qethbuffer->buffer;
 	int offset = *__offset;
 	struct sk_buff *skb = NULL;
 	int skb_len = 0;
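
qeth_create_skb_frag() now pulls only QETH_RX_PULL_LEN bytes into the
linear skb area (the upper layers expect the headers there) and maps
the remainder as a page fragment instead of copying it. A condensed,
kernel-style sketch of that attach step, not standalone (attach_frag
and its parameters are invented):

	#include <linux/mm.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>

	static void attach_frag(struct sk_buff *skb, struct page *page,
				int offset, int len, int pull_len)
	{
		/* copy what the stack wants to find linear ... */
		memcpy(skb_put(skb, pull_len),
		       page_address(page) + offset, pull_len);
		/* ... and reference the rest in place, no second copy */
		get_page(page);
		skb_fill_page_desc(skb, 0, page, offset + pull_len,
				   len - pull_len);
		/* skb_fill_page_desc() does not touch the byte counters */
		skb->data_len += len - pull_len;
		skb->len += len - pull_len;
		skb->truesize += len - pull_len;
	}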
@@ -4230,9 +4795,10 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
 	if (!skb_len)
 		return NULL;
 
-	if ((skb_len >= card->options.rx_sg_cb) &&
+	if (((skb_len >= card->options.rx_sg_cb) &&
 	    (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
-	    (!atomic_read(&card->force_alloc_skb))) {
+	    (!atomic_read(&card->force_alloc_skb))) ||
+	    (card->options.cq == QETH_CQ_ENABLED)) {
 		use_rx_sg = 1;
 	} else {
 		skb = dev_alloc_skb(skb_len + headroom);
@@ -4247,8 +4813,8 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
 		data_len = min(skb_len, (int)(element->length - offset));
 		if (data_len) {
 			if (use_rx_sg) {
-				if (qeth_create_skb_frag(element, &skb, offset,
-				    &frag, data_len))
+				if (qeth_create_skb_frag(qethbuffer, element,
+				    &skb, offset, &frag, data_len))
 					goto no_mem;
 			} else {
 				memcpy(skb_put(skb, data_len), data_ptr,
@@ -4650,6 +5216,8 @@ static struct {
4650 {"tx do_QDIO count"}, 5216 {"tx do_QDIO count"},
4651 {"tx csum"}, 5217 {"tx csum"},
4652 {"tx lin"}, 5218 {"tx lin"},
5219 {"cq handler count"},
5220 {"cq handler time"}
4653}; 5221};
4654 5222
4655int qeth_core_get_sset_count(struct net_device *dev, int stringset) 5223int qeth_core_get_sset_count(struct net_device *dev, int stringset)
@@ -4708,6 +5276,8 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
 	data[32] = card->perf_stats.outbound_do_qdio_cnt;
 	data[33] = card->perf_stats.tx_csum;
 	data[34] = card->perf_stats.tx_lin;
+	data[35] = card->perf_stats.cq_cnt;
+	data[36] = card->perf_stats.cq_time;
 }
 EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
 
@@ -4866,7 +5436,16 @@ static int __init qeth_core_init(void)
 		goto slab_err;
 	}
 
+	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
+			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
+	if (!qeth_qdio_outbuf_cache) {
+		rc = -ENOMEM;
+		goto cqslab_err;
+	}
+
 	return 0;
+cqslab_err:
+	kmem_cache_destroy(qeth_core_header_cache);
 slab_err:
 	root_device_unregister(qeth_core_root_dev);
 register_err:
@@ -4891,6 +5470,7 @@ static void __exit qeth_core_exit(void)
 		&driver_attr_group);
 	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
 	ccw_driver_unregister(&qeth_ccw_driver);
+	kmem_cache_destroy(qeth_qdio_outbuf_cache);
 	kmem_cache_destroy(qeth_core_header_cache);
 	qeth_unregister_dbf_views();
 	pr_info("core functions removed\n");
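
The two ethtool hunks above have to stay in lockstep: the string table
fixes the ordering, and qeth_core_get_ethtool_stats() must fill the
matching indices (35 and 36 here). A runnable sketch of that implicit
contract, reduced to plain C with invented names:

	#include <stdio.h>

	static const char *stat_names[] = {
		"cq handler count",
		"cq handler time",
	};

	struct perf_stats { unsigned long cq_cnt, cq_time; };

	static void get_stats(const struct perf_stats *p, unsigned long *data)
	{
		data[0] = p->cq_cnt;	/* must describe stat_names[0] */
		data[1] = p->cq_time;	/* must describe stat_names[1] */
	}

	int main(void)
	{
		struct perf_stats p = { 3, 42 };
		unsigned long data[2];

		get_stats(&p, data);
		for (int i = 0; i < 2; i++)
			printf("%s: %lu\n", stat_names[i], data[i]);
		return 0;
	}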
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index b70b47fbd6cd..a21ae3d549db 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -409,7 +409,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
 	BUG_ON(!budget);
 	while (budget) {
 		skb = qeth_core_get_next_skb(card,
-			card->qdio.in_q->bufs[card->rx.b_index].buffer,
+			&card->qdio.in_q->bufs[card->rx.b_index],
 			&card->rx.b_element, &card->rx.e_offset, &hdr);
 		if (!skb) {
 			*done = 1;
@@ -925,7 +925,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
 	.ndo_get_stats		= qeth_get_stats,
 	.ndo_start_xmit		= qeth_l2_hard_start_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_multicast_list	= qeth_l2_set_multicast_list,
+	.ndo_set_rx_mode	= qeth_l2_set_multicast_list,
 	.ndo_do_ioctl		= qeth_l2_do_ioctl,
 	.ndo_set_mac_address	= qeth_l2_set_mac_address,
 	.ndo_change_mtu		= qeth_change_mtu,
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 14a43aeb0c2a..e367315a63f0 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -63,5 +63,9 @@ int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
 			const u8 *);
 int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
+struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
+int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
+int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
+void qeth_l3_set_ip_addr_list(struct qeth_card *);
 
 #endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fafb8c299540..ce735204d317 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -29,6 +29,7 @@
 #include <net/ip.h>
 #include <net/arp.h>
 #include <net/ip6_checksum.h>
+#include <net/iucv/af_iucv.h>
 
 #include "qeth_l3.h"
 
@@ -267,7 +268,7 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
 	}
 }
 
-static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
 {
 	unsigned long flags;
 	int rc = 0;
@@ -286,7 +287,7 @@ static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
 	return rc;
 }
 
-static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
 {
 	unsigned long flags;
 	int rc = 0;
@@ -305,7 +306,7 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
 }
 
 
-static struct qeth_ipaddr *qeth_l3_get_addr_buffer(
+struct qeth_ipaddr *qeth_l3_get_addr_buffer(
 		enum qeth_prot_versions prot)
 {
 	struct qeth_ipaddr *addr;
@@ -421,7 +422,7 @@ again:
 	list_splice(&fail_list, &card->ip_list);
 }
 
-static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
+void qeth_l3_set_ip_addr_list(struct qeth_card *card)
 {
 	struct list_head *tbd_list;
 	struct qeth_ipaddr *todo, *addr;
@@ -438,7 +439,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
 
 	spin_lock_irqsave(&card->ip_lock, flags);
 	tbd_list = card->ip_tbd_list;
-	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
+	card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
 	if (!card->ip_tbd_list) {
 		QETH_CARD_TEXT(card, 0, "silnomem");
 		card->ip_tbd_list = tbd_list;
@@ -1993,12 +1994,13 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
 	__u16 vlan_tag = 0;
 	int is_vlan;
 	unsigned int len;
+	__u16 magic;
 
 	*done = 0;
 	BUG_ON(!budget);
 	while (budget) {
 		skb = qeth_core_get_next_skb(card,
-			card->qdio.in_q->bufs[card->rx.b_index].buffer,
+			&card->qdio.in_q->bufs[card->rx.b_index],
 			&card->rx.b_element, &card->rx.e_offset, &hdr);
 		if (!skb) {
 			*done = 1;
@@ -2007,12 +2009,26 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
 		skb->dev = card->dev;
 		switch (hdr->hdr.l3.id) {
 		case QETH_HEADER_TYPE_LAYER3:
-			is_vlan = qeth_l3_rebuild_skb(card, skb, hdr,
+			magic = *(__u16 *)skb->data;
+			if ((card->info.type == QETH_CARD_TYPE_IQD) &&
+			    (magic == ETH_P_AF_IUCV)) {
+				skb->protocol = ETH_P_AF_IUCV;
+				skb->pkt_type = PACKET_HOST;
+				skb->mac_header = NET_SKB_PAD;
+				skb->dev = card->dev;
+				len = skb->len;
+				card->dev->header_ops->create(skb, card->dev, 0,
+					card->dev->dev_addr, "FAKELL",
+					card->dev->addr_len);
+				netif_receive_skb(skb);
+			} else {
+				is_vlan = qeth_l3_rebuild_skb(card, skb, hdr,
 						      &vlan_tag);
 			len = skb->len;
 			if (is_vlan && !card->options.sniffer)
 				__vlan_hwaccel_put_tag(skb, vlan_tag);
 			napi_gro_receive(&card->napi, skb);
+			}
 			break;
 		case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
 			skb->pkt_type = PACKET_HOST;
@@ -2784,6 +2800,30 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
 	return cast_type;
 }
 
+static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
+		struct qeth_hdr *hdr, struct sk_buff *skb)
+{
+	char daddr[16];
+	struct af_iucv_trans_hdr *iucv_hdr;
+
+	skb_pull(skb, 14);
+	card->dev->header_ops->create(skb, card->dev, 0,
+				      card->dev->dev_addr, card->dev->dev_addr,
+				      card->dev->addr_len);
+	skb_pull(skb, 14);
+	iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
+	memset(hdr, 0, sizeof(struct qeth_hdr));
+	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+	hdr->hdr.l3.ext_flags = 0;
+	hdr->hdr.l3.length = skb->len;
+	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+	memset(daddr, 0, sizeof(daddr));
+	daddr[0] = 0xfe;
+	daddr[1] = 0x80;
+	memcpy(&daddr[8], iucv_hdr->destUserID, 8);
+	memcpy(hdr->hdr.l3.dest_addr, daddr, 16);
+}
+
 static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 		struct sk_buff *skb, int ipv, int cast_type)
 {
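
qeth_l3_fill_af_iucv_hdr() above addresses the peer as fe80::/64 plus
the 8-byte IUCV user ID as the interface identifier, the same layout
the hsuid sysfs code at the end of this patch installs as a local
address. A runnable user-space rendering of that construction (the
"TESTUSER" ID is a placeholder):

	#include <stdio.h>
	#include <string.h>

	static void build_linklocal(unsigned char addr[16],
				    const unsigned char userid[8])
	{
		memset(addr, 0, 16);
		addr[0] = 0xfe;
		addr[1] = 0x80;
		memcpy(&addr[8], userid, 8);	/* user ID = bits 64..127 */
	}

	int main(void)
	{
		unsigned char a[16];

		build_linklocal(a, (const unsigned char *)"TESTUSER");
		for (int i = 0; i < 16; i += 2)
			printf("%02x%02x%c", a[i], a[i + 1],
			       i < 14 ? ':' : '\n');
		return 0;
	}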
@@ -2936,8 +2976,11 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int data_offset = -1;
 	int nr_frags;
 
-	if (((card->info.type == QETH_CARD_TYPE_IQD) && (!ipv)) ||
-			card->options.sniffer)
+	if (((card->info.type == QETH_CARD_TYPE_IQD) &&
+	     (((card->options.cq != QETH_CQ_ENABLED) && !ipv) ||
+	      ((card->options.cq == QETH_CQ_ENABLED) &&
+	       (skb->protocol != ETH_P_AF_IUCV)))) ||
+	    card->options.sniffer)
 			goto tx_drop;
 
 	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
@@ -2959,7 +3002,10 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
 	    (skb_shinfo(skb)->nr_frags == 0)) {
 		new_skb = skb;
-		data_offset = ETH_HLEN;
+		if (new_skb->protocol == ETH_P_AF_IUCV)
+			data_offset = 0;
+		else
+			data_offset = ETH_HLEN;
 		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
 		if (!hdr)
 			goto tx_drop;
@@ -2993,7 +3039,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			tag = (u16 *)(new_skb->data + 12);
 			*tag = __constant_htons(ETH_P_8021Q);
 			*(tag + 1) = htons(vlan_tx_tag_get(new_skb));
-			new_skb->vlan_tci = 0;
 		}
 	}
 
@@ -3025,9 +3070,13 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			qeth_l3_fill_header(card, hdr, new_skb, ipv,
 					    cast_type);
 		} else {
-			qeth_l3_fill_header(card, hdr, new_skb, ipv,
-					    cast_type);
-			hdr->hdr.l3.length = new_skb->len - data_offset;
+			if (new_skb->protocol == ETH_P_AF_IUCV)
+				qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
+			else {
+				qeth_l3_fill_header(card, hdr, new_skb, ipv,
+						    cast_type);
+				hdr->hdr.l3.length = new_skb->len - data_offset;
+			}
 		}
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -3226,7 +3275,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
 	.ndo_get_stats		= qeth_get_stats,
 	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_multicast_list	= qeth_l3_set_multicast_list,
+	.ndo_set_rx_mode	= qeth_l3_set_multicast_list,
 	.ndo_do_ioctl		= qeth_l3_do_ioctl,
 	.ndo_change_mtu		= qeth_change_mtu,
 	.ndo_fix_features	= qeth_l3_fix_features,
@@ -3242,7 +3291,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
 	.ndo_get_stats		= qeth_get_stats,
 	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_multicast_list	= qeth_l3_set_multicast_list,
+	.ndo_set_rx_mode	= qeth_l3_set_multicast_list,
 	.ndo_do_ioctl		= qeth_l3_do_ioctl,
 	.ndo_change_mtu		= qeth_change_mtu,
 	.ndo_fix_features	= qeth_l3_fix_features,
@@ -3290,6 +3339,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 		card->dev->flags |= IFF_NOARP;
 		card->dev->netdev_ops = &qeth_l3_netdev_ops;
 		qeth_l3_iqd_read_initial_mac(card);
+		if (card->options.hsuid[0])
+			memcpy(card->dev->perm_addr, card->options.hsuid, 9);
 	} else
 		return -ENODEV;
 
@@ -3660,7 +3711,6 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
 	struct qeth_ipaddr *addr;
 	struct qeth_card *card;
 
-
 	card = qeth_l3_get_card_from_dev(dev);
 	if (!card)
 		return NOTIFY_DONE;
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index cd99210296e2..0ea2fbfe0e99 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -9,7 +9,7 @@
  */
 
 #include <linux/slab.h>
-
+#include <asm/ebcdic.h>
 #include "qeth_l3.h"
 
 #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
@@ -308,6 +308,8 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
 
 	if (card->info.type != QETH_CARD_TYPE_IQD)
 		return -EPERM;
+	if (card->options.cq == QETH_CQ_ENABLED)
+		return -EPERM;
 
 	mutex_lock(&card->conf_mutex);
 	if ((card->state != CARD_STATE_DOWN) &&
@@ -347,6 +349,111 @@ out:
 static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
 		qeth_l3_dev_sniffer_store);
 
+
+static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	char tmp_hsuid[9];
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->info.type != QETH_CARD_TYPE_IQD)
+		return -EPERM;
+
+	if (card->state == CARD_STATE_DOWN)
+		return -EPERM;
+
+	memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
+	EBCASC(tmp_hsuid, 8);
+	return sprintf(buf, "%s\n", tmp_hsuid);
+}
+
+static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	struct qeth_ipaddr *addr;
+	char *tmp;
+	int i;
+
+	if (!card)
+		return -EINVAL;
+
+	if (card->info.type != QETH_CARD_TYPE_IQD)
+		return -EPERM;
+	if (card->state != CARD_STATE_DOWN &&
+	    card->state != CARD_STATE_RECOVER)
+		return -EPERM;
+	if (card->options.sniffer)
+		return -EPERM;
+	if (card->options.cq == QETH_CQ_NOTAVAILABLE)
+		return -EPERM;
+
+	tmp = strsep((char **)&buf, "\n");
+	if (strlen(tmp) > 8)
+		return -EINVAL;
+
+	if (card->options.hsuid[0]) {
+		/* delete old ip address */
+		addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+		if (addr != NULL) {
+			addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
+			addr->u.a6.addr.s6_addr32[1] = 0x00000000;
+			for (i = 8; i < 16; i++)
+				addr->u.a6.addr.s6_addr[i] =
+					card->options.hsuid[i - 8];
+			addr->u.a6.pfxlen = 0;
+			addr->type = QETH_IP_TYPE_NORMAL;
+		} else
+			return -ENOMEM;
+		if (!qeth_l3_delete_ip(card, addr))
+			kfree(addr);
+		qeth_l3_set_ip_addr_list(card);
+	}
+
+	if (strlen(tmp) == 0) {
+		/* delete ip address only */
+		card->options.hsuid[0] = '\0';
+		if (card->dev)
+			memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+		qeth_configure_cq(card, QETH_CQ_DISABLED);
+		return count;
+	}
+
+	if (qeth_configure_cq(card, QETH_CQ_ENABLED))
+		return -EPERM;
+
+	for (i = 0; i < 8; i++)
+		card->options.hsuid[i] = ' ';
+	card->options.hsuid[8] = '\0';
+	strncpy(card->options.hsuid, tmp, strlen(tmp));
+	ASCEBC(card->options.hsuid, 8);
+	if (card->dev)
+		memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+
+	addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+	if (addr != NULL) {
+		addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
+		addr->u.a6.addr.s6_addr32[1] = 0x00000000;
+		for (i = 8; i < 16; i++)
+			addr->u.a6.addr.s6_addr[i] = card->options.hsuid[i - 8];
+		addr->u.a6.pfxlen = 0;
+		addr->type = QETH_IP_TYPE_NORMAL;
+	} else
+		return -ENOMEM;
+	if (!qeth_l3_add_ip(card, addr))
+		kfree(addr);
+	qeth_l3_set_ip_addr_list(card);
+
+	return count;
+}
+
+static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
+		qeth_l3_dev_hsuid_store);
+
+
 static struct attribute *qeth_l3_device_attrs[] = {
 	&dev_attr_route4.attr,
 	&dev_attr_route6.attr,
@@ -354,6 +461,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
 	&dev_attr_broadcast_mode.attr,
 	&dev_attr_canonical_macaddr.attr,
 	&dev_attr_sniffer.attr,
+	&dev_attr_hsuid.attr,
 	NULL,
 };
 
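Note: the new hsuid attribute stores the HiperSockets user id as eight blank-padded EBCDIC bytes (hence the asm/ebcdic.h include and the ASCEBC()/EBCASC() calls), and both the store path above and the setup path in qeth_l3_main.c derive an IPv6 link-local address from it: fe80::/64 with the raw id bytes as the interface identifier. A self-contained user-space sketch of just that byte placement (the EBCDIC conversion is elided; names and the sample id are hypothetical):

    #include <stdio.h>
    #include <string.h>

    /* Build fe80::<8 id bytes>, mirroring the s6_addr fill above. */
    static void build_hsuid_linklocal(const char id[8], unsigned char a6[16])
    {
            memset(a6, 0, 16);
            a6[0] = 0xfe;          /* fe80::/64 link-local prefix */
            a6[1] = 0x80;
            memcpy(a6 + 8, id, 8); /* user id = interface identifier */
    }

    int main(void)
    {
            char id[8];
            unsigned char a6[16];
            int i;

            memset(id, ' ', sizeof(id)); /* blank-padded, as in the store */
            memcpy(id, "MYHOST", 6);     /* hypothetical user id */
            build_hsuid_linklocal(id, a6);
            for (i = 0; i < 16; i += 2)
                    printf("%02x%02x%s", a6[i], a6[i + 1], i < 14 ? ":" : "\n");
            return 0;
    }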