path: root/drivers/net/ehea
author		Jan-Bernd Themann <ossthema@de.ibm.com>	2007-02-28 12:34:10 -0500
committer	Jeff Garzik <jeff@garzik.org>	2007-04-28 11:00:56 -0400
commit		18604c54854549ee0ad65e27ca9cb91c96af784c (patch)
tree		5392b07aaf9b1b1c9a4f7f8879e28cf4d53acb03 /drivers/net/ehea
parent		1acf2318dd136edfbfa30f1f33b43f69f2e2ec6c (diff)
ehea: NAPI multi queue TX/RX path for SMP
This patch provides functionality that allows parallel RX processing on
multiple RX queues by using dummy netdevices.

Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
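In outline, every port resource (receive/send queue pair) gets its own dummy struct net_device whose ->poll callback processes both the receive WQEs and the send completions of that queue, so the queues can be scheduled for NAPI polling independently on SMP. Below is a minimal sketch of that pattern, assembled from the ehea_init_port_res() and ehea_recv_irq_handler() hunks in this patch (pre-napi_struct NAPI API); the helper name ehea_setup_dummy_poll_dev() is invented here for illustration only, the patch open-codes this in ehea_init_port_res().

/* Sketch only: per-queue dummy netdevice for old-style (pre-napi_struct) NAPI.
 * The helper is hypothetical; the patch does this inline in ehea_init_port_res(). */
static int ehea_setup_dummy_poll_dev(struct ehea_port *port,
				     struct ehea_port_res *pr)
{
	pr->d_netdev = alloc_netdev(0, "", ether_setup);
	if (!pr->d_netdev)
		return -ENOMEM;

	pr->d_netdev->priv = pr;	/* ehea_poll() recovers pr from dev->priv */
	pr->d_netdev->weight = 64;	/* per-queue NAPI weight */
	pr->d_netdev->poll = ehea_poll;	/* handles RX WQEs and send completions */
	set_bit(__LINK_STATE_START, &pr->d_netdev->state);
	strcpy(pr->d_netdev->name, port->netdev->name);

	return 0;
}

/* The per-queue interrupt then schedules NAPI on the dummy device,
 * not on the real port netdevice: */
static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	netif_rx_schedule(pr->d_netdev);

	return IRQ_HANDLED;
}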
Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--	drivers/net/ehea/ehea.h		11
-rw-r--r--	drivers/net/ehea/ehea_main.c	280
-rw-r--r--	drivers/net/ehea/ehea_qmr.h	7
3 files changed, 169 insertions, 129 deletions
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index e595d6b38e7c..d593513f82d0 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0048"
+#define DRV_VERSION	"EHEA_0052"
 
 #define EHEA_MSG_DEFAULT	(NETIF_MSG_LINK | NETIF_MSG_TIMER \
 	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -78,8 +78,6 @@
 #define EHEA_RQ2_PKT_SIZE	1522
 #define EHEA_L_PKT_SIZE		256	/* low latency */
 
-#define EHEA_POLL_MAX_RWQE	1000
-
 /* Send completion signaling */
 #define EHEA_SIG_IV_LONG	1
 
@@ -357,8 +355,8 @@ struct ehea_port_res {
 	struct ehea_qp *qp;
 	struct ehea_cq *send_cq;
 	struct ehea_cq *recv_cq;
-	struct ehea_eq *send_eq;
-	struct ehea_eq *recv_eq;
+	struct ehea_eq *eq;
+	struct net_device *d_netdev;
 	spinlock_t send_lock;
 	struct ehea_q_skb_arr rq1_skba;
 	struct ehea_q_skb_arr rq2_skba;
@@ -372,7 +370,6 @@ struct ehea_port_res {
 	int swqe_count;
 	u32 swqe_id_counter;
 	u64 tx_packets;
-	struct tasklet_struct send_comp_task;
 	spinlock_t recv_lock;
 	struct port_state p_state;
 	u64 rx_packets;
@@ -416,7 +413,9 @@ struct ehea_port {
 	char int_aff_name[EHEA_IRQ_NAME_SIZE];
 	int allmulti;		/* Indicates IFF_ALLMULTI state */
 	int promisc;		/* Indicates IFF_PROMISC state */
+	int num_tx_qps;
 	int num_add_tx_qps;
+	int num_mcs;
 	int resets;
 	u64 mac_addr;
 	u32 logical_port_id;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 3527b391214d..8d65eb772b28 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -51,13 +51,18 @@ static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
 static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
 static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
 static int sq_entries = EHEA_DEF_ENTRIES_SQ;
+static int use_mcs = 0;
+static int num_tx_qps = EHEA_NUM_TX_QP;
 
 module_param(msg_level, int, 0);
 module_param(rq1_entries, int, 0);
 module_param(rq2_entries, int, 0);
 module_param(rq3_entries, int, 0);
 module_param(sq_entries, int, 0);
+module_param(use_mcs, int, 0);
+module_param(num_tx_qps, int, 0);
 
+MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
 MODULE_PARM_DESC(msg_level, "msg_level");
 MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
@@ -71,6 +76,7 @@ MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
 MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
+MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 ");
 
 void ehea_dump(void *adr, int len, char *msg) {
 	int x;
@@ -197,7 +203,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
 		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
 		if (!skb) {
 			ehea_error("%s: no mem for skb/%d wqes filled",
-				   dev->name, i);
+				   pr->port->netdev->name, i);
 			q_skba->os_skbs = fill_wqes - i;
 			ret = -ENOMEM;
 			break;
@@ -345,10 +351,11 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 	return 0;
 }
 
-static int ehea_poll(struct net_device *dev, int *budget)
+static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
+					struct ehea_port_res *pr,
+					int *budget)
 {
-	struct ehea_port *port = netdev_priv(dev);
-	struct ehea_port_res *pr = &port->port_res[0];
+	struct ehea_port *port = pr->port;
 	struct ehea_qp *qp = pr->qp;
 	struct ehea_cqe *cqe;
 	struct sk_buff *skb;
@@ -359,14 +366,12 @@ static int ehea_poll(struct net_device *dev, int *budget)
 	int skb_arr_rq2_len = pr->rq2_skba.len;
 	int skb_arr_rq3_len = pr->rq3_skba.len;
 	int processed, processed_rq1, processed_rq2, processed_rq3;
-	int wqe_index, last_wqe_index, rq, intreq, my_quota, port_reset;
+	int wqe_index, last_wqe_index, rq, my_quota, port_reset;
 
 	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
 	last_wqe_index = 0;
 	my_quota = min(*budget, dev->quota);
-	my_quota = min(my_quota, EHEA_POLL_MAX_RWQE);
 
-	/* rq0 is low latency RQ */
 	cqe = ehea_poll_rq1(qp, &wqe_index);
 	while ((my_quota > 0) && cqe) {
 		ehea_inc_rq1(qp);
@@ -386,7 +391,8 @@ static int ehea_poll(struct net_device *dev, int *budget)
 			if (unlikely(!skb)) {
 				if (netif_msg_rx_err(port))
 					ehea_error("LL rq1: skb=NULL");
-				skb = netdev_alloc_skb(dev,
+
+				skb = netdev_alloc_skb(port->netdev,
						       EHEA_L_PKT_SIZE);
 				if (!skb)
 					break;
@@ -402,7 +408,7 @@ static int ehea_poll(struct net_device *dev, int *budget)
 					ehea_error("rq2: skb=NULL");
 					break;
 				}
-				ehea_fill_skb(dev, skb, cqe);
+				ehea_fill_skb(port->netdev, skb, cqe);
 				processed_rq2++;
 			} else {  /* RQ3 */
 				skb = get_skb_by_index(skb_arr_rq3,
@@ -412,7 +418,7 @@ static int ehea_poll(struct net_device *dev, int *budget)
 					ehea_error("rq3: skb=NULL");
 					break;
 				}
-				ehea_fill_skb(dev, skb, cqe);
+				ehea_fill_skb(port->netdev, skb, cqe);
 				processed_rq3++;
 			}
 
@@ -421,8 +427,7 @@ static int ehea_poll(struct net_device *dev, int *budget)
					       cqe->vlan_tag);
 			else
				netif_receive_skb(skb);
-
-		} else { /* Error occured */
+		} else {
 			pr->p_state.poll_receive_errors++;
 			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
@@ -433,32 +438,18 @@ static int ehea_poll(struct net_device *dev, int *budget)
 		cqe = ehea_poll_rq1(qp, &wqe_index);
 	}
 
-	dev->quota -= processed;
-	*budget -= processed;
-
-	pr->p_state.ehea_poll += 1;
 	pr->rx_packets += processed;
+	*budget -= processed;
 
 	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
 	ehea_refill_rq2(pr, processed_rq2);
 	ehea_refill_rq3(pr, processed_rq3);
 
-	intreq = ((pr->p_state.ehea_poll & 0xF) == 0xF);
-
-	if (!cqe || intreq) {
-		netif_rx_complete(dev);
-		ehea_reset_cq_ep(pr->recv_cq);
-		ehea_reset_cq_n1(pr->recv_cq);
-		cqe = hw_qeit_get_valid(&qp->hw_rqueue1);
-		if (!cqe || intreq)
-			return 0;
-		if (!netif_rx_reschedule(dev, my_quota))
-			return 0;
-	}
-	return 1;
+	cqe = ehea_poll_rq1(qp, &wqe_index);
+	return cqe;
 }
 
-void free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr)
+static void ehea_free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr)
 {
 	struct sk_buff *skb;
 	int index, max_index_mask, i;
@@ -479,26 +470,19 @@ void free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr)
 	}
 }
 
-#define MAX_SENDCOMP_QUOTA 400
-void ehea_send_irq_tasklet(unsigned long data)
+static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 {
-	struct ehea_port_res *pr = (struct ehea_port_res*)data;
 	struct ehea_cq *send_cq = pr->send_cq;
 	struct ehea_cqe *cqe;
-	int quota = MAX_SENDCOMP_QUOTA;
+	int quota = my_quota;
 	int cqe_counter = 0;
 	int swqe_av = 0;
 	unsigned long flags;
 
-	do {
-		cqe = ehea_poll_cq(send_cq);
-		if (!cqe) {
-			ehea_reset_cq_ep(send_cq);
-			ehea_reset_cq_n1(send_cq);
-			cqe = ehea_poll_cq(send_cq);
-			if (!cqe)
-				break;
-		}
+	cqe = ehea_poll_cq(send_cq);
+	while(cqe && (quota > 0)) {
+		ehea_inc_cq(send_cq);
+
 		cqe_counter++;
 		rmb();
 		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
@@ -515,16 +499,19 @@ void ehea_send_irq_tasklet(unsigned long data)
 
 		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE))
-			free_sent_skbs(cqe, pr);
+			ehea_free_sent_skbs(cqe, pr);
 
 		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
 		quota--;
-	} while (quota > 0);
+
+		cqe = ehea_poll_cq(send_cq);
+	};
 
 	ehea_update_feca(send_cq, cqe_counter);
 	atomic_add(swqe_av, &pr->swqe_avail);
 
 	spin_lock_irqsave(&pr->netif_queue, flags);
+
 	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
 		netif_wake_queue(pr->port->netdev);
@@ -532,22 +519,55 @@ void ehea_send_irq_tasklet(unsigned long data)
 	}
 	spin_unlock_irqrestore(&pr->netif_queue, flags);
 
-	if (unlikely(cqe))
-		tasklet_hi_schedule(&pr->send_comp_task);
+	return cqe;
 }
 
-static irqreturn_t ehea_send_irq_handler(int irq, void *param)
+#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
+
+static int ehea_poll(struct net_device *dev, int *budget)
 {
-	struct ehea_port_res *pr = param;
-	tasklet_hi_schedule(&pr->send_comp_task);
-	return IRQ_HANDLED;
+	struct ehea_port_res *pr = dev->priv;
+	struct ehea_cqe *cqe;
+	struct ehea_cqe *cqe_skb = NULL;
+	int force_irq, wqe_index;
+
+	cqe = ehea_poll_rq1(pr->qp, &wqe_index);
+	cqe_skb = ehea_poll_cq(pr->send_cq);
+
+	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
+
+	if ((!cqe && !cqe_skb) || force_irq) {
+		pr->poll_counter = 0;
+		netif_rx_complete(dev);
+		ehea_reset_cq_ep(pr->recv_cq);
+		ehea_reset_cq_ep(pr->send_cq);
+		ehea_reset_cq_n1(pr->recv_cq);
+		ehea_reset_cq_n1(pr->send_cq);
+		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
+		cqe_skb = ehea_poll_cq(pr->send_cq);
+
+		if (!cqe && !cqe_skb)
+			return 0;
+
+		if (!netif_rx_reschedule(dev, dev->quota))
+			return 0;
+	}
+
+	cqe = ehea_proc_rwqes(dev, pr, budget);
+	cqe_skb = ehea_proc_cqes(pr, 300);
+
+	if (cqe || cqe_skb)
+		pr->poll_counter++;
+
+	return 1;
 }
 
 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
 {
 	struct ehea_port_res *pr = param;
-	struct ehea_port *port = pr->port;
-	netif_rx_schedule(port->netdev);
+
+	netif_rx_schedule(pr->d_netdev);
+
 	return IRQ_HANDLED;
 }
 
@@ -650,19 +670,25 @@ int ehea_sense_port_attr(struct ehea_port *port)
 	}
 
 	port->autoneg = 1;
+	port->num_mcs = cb0->num_default_qps;
 
 	/* Number of default QPs */
-	port->num_def_qps = cb0->num_default_qps;
+	if (use_mcs)
+		port->num_def_qps = cb0->num_default_qps;
+	else
+		port->num_def_qps = 1;
 
 	if (!port->num_def_qps) {
 		ret = -EINVAL;
 		goto out_free;
 	}
 
-	if (port->num_def_qps >= EHEA_NUM_TX_QP)
+	port->num_tx_qps = num_tx_qps;
+
+	if (port->num_def_qps >= port->num_tx_qps)
 		port->num_add_tx_qps = 0;
 	else
-		port->num_add_tx_qps = EHEA_NUM_TX_QP - port->num_def_qps;
+		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;
 
 	ret = 0;
 out_free:
@@ -882,23 +908,6 @@ static int ehea_reg_interrupts(struct net_device *dev)
 	struct ehea_port_res *pr;
 	int i, ret;
 
-	for (i = 0; i < port->num_def_qps; i++) {
-		pr = &port->port_res[i];
-		snprintf(pr->int_recv_name, EHEA_IRQ_NAME_SIZE - 1
-			 , "%s-recv%d", dev->name, i);
-		ret = ibmebus_request_irq(NULL, pr->recv_eq->attr.ist1,
-					  ehea_recv_irq_handler,
-					  IRQF_DISABLED, pr->int_recv_name, pr);
-		if (ret) {
-			ehea_error("failed registering irq for ehea_recv_int:"
-				   "port_res_nr:%d, ist=%X", i,
-				   pr->recv_eq->attr.ist1);
-			goto out_free_seq;
-		}
-		if (netif_msg_ifup(port))
-			ehea_info("irq_handle 0x%X for funct ehea_recv_int %d "
-				  "registered", pr->recv_eq->attr.ist1, i);
-	}
 
 	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);
@@ -916,41 +925,41 @@ static int ehea_reg_interrupts(struct net_device *dev)
 		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);
 
+
 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
 		pr = &port->port_res[i];
 		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
-			 "%s-send%d", dev->name, i);
-		ret = ibmebus_request_irq(NULL, pr->send_eq->attr.ist1,
-					  ehea_send_irq_handler,
+			 "%s-queue%d", dev->name, i);
+		ret = ibmebus_request_irq(NULL, pr->eq->attr.ist1,
+					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
 		if (ret) {
-			ehea_error("failed registering irq for ehea_send "
+			ehea_error("failed registering irq for ehea_queue "
				   "port_res_nr:%d, ist=%X", i,
-				   pr->send_eq->attr.ist1);
+				   pr->eq->attr.ist1);
 			goto out_free_req;
 		}
 		if (netif_msg_ifup(port))
-			ehea_info("irq_handle 0x%X for function ehea_send_int "
-				  "%d registered", pr->send_eq->attr.ist1, i);
+			ehea_info("irq_handle 0x%X for function ehea_queue_int "
+				  "%d registered", pr->eq->attr.ist1, i);
 	}
 out:
 	return ret;
 
+
 out_free_req:
 	while (--i >= 0) {
-		u32 ist = port->port_res[i].send_eq->attr.ist1;
+		u32 ist = port->port_res[i].eq->attr.ist1;
 		ibmebus_free_irq(NULL, ist, &port->port_res[i]);
 	}
+
 out_free_qpeq:
 	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
 	i = port->num_def_qps;
-out_free_seq:
-	while (--i >= 0) {
-		u32 ist = port->port_res[i].recv_eq->attr.ist1;
-		ibmebus_free_irq(NULL, ist, &port->port_res[i]);
-	}
+
 	goto out;
+
 }
 
 static void ehea_free_interrupts(struct net_device *dev)
@@ -960,21 +969,13 @@ static void ehea_free_interrupts(struct net_device *dev)
 	int i;
 
 	/* send */
+
 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
 		pr = &port->port_res[i];
-		ibmebus_free_irq(NULL, pr->send_eq->attr.ist1, pr);
+		ibmebus_free_irq(NULL, pr->eq->attr.ist1, pr);
 		if (netif_msg_intr(port))
 			ehea_info("free send irq for res %d with handle 0x%X",
-				  i, pr->send_eq->attr.ist1);
-	}
-
-	/* receive */
-	for (i = 0; i < port->num_def_qps; i++) {
-		pr = &port->port_res[i];
-		ibmebus_free_irq(NULL, pr->recv_eq->attr.ist1, pr);
-		if (netif_msg_intr(port))
-			ehea_info("free recv irq for res %d with handle 0x%X",
-				  i, pr->recv_eq->attr.ist1);
+				  i, pr->eq->attr.ist1);
 	}
 
 	/* associated events */
@@ -1003,9 +1004,14 @@ static int ehea_configure_port(struct ehea_port *port)
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);
 
-	for (i = 0; i < port->num_def_qps; i++)
-		cb0->default_qpn_arr[i] = port->port_res[0].qp->init_attr.qp_nr;
-
+	for (i = 0; i < port->num_mcs; i++)
+		if (use_mcs)
+			cb0->default_qpn_arr[i] =
+				port->port_res[i].qp->init_attr.qp_nr;
+		else
+			cb0->default_qpn_arr[i] =
+				port->port_res[0].qp->init_attr.qp_nr;
+
 	if (netif_msg_ifup(port))
 		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");
 
@@ -1108,20 +1114,14 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 	spin_lock_init(&pr->xmit_lock);
 	spin_lock_init(&pr->netif_queue);
 
-	pr->recv_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
-	if (!pr->recv_eq) {
-		ehea_error("create_eq failed (recv_eq)");
-		goto out_free;
-	}
-
-	pr->send_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
-	if (!pr->send_eq) {
-		ehea_error("create_eq failed (send_eq)");
+	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
+	if (!pr->eq) {
+		ehea_error("create_eq failed (eq)");
 		goto out_free;
 	}
 
 	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
-				     pr->recv_eq->fw_handle,
+				     pr->eq->fw_handle,
				     port->logical_port_id);
 	if (!pr->recv_cq) {
 		ehea_error("create_cq failed (cq_recv)");
@@ -1129,7 +1129,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 	}
 
 	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
-				     pr->send_eq->fw_handle,
+				     pr->eq->fw_handle,
				     port->logical_port_id);
 	if (!pr->send_cq) {
 		ehea_error("create_cq failed (cq_send)");
@@ -1194,11 +1194,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 		ret = -EIO;
 		goto out_free;
 	}
-	tasklet_init(&pr->send_comp_task, ehea_send_irq_tasklet,
-		     (unsigned long)pr);
+
 	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
 
 	kfree(init_attr);
+
+	pr->d_netdev = alloc_netdev(0, "", ether_setup);
+	if (!pr->d_netdev)
+		goto out_free;
+	pr->d_netdev->priv = pr;
+	pr->d_netdev->weight = 64;
+	pr->d_netdev->poll = ehea_poll;
+	set_bit(__LINK_STATE_START, &pr->d_netdev->state);
+	strcpy(pr->d_netdev->name, port->netdev->name);
+
 	ret = 0;
 	goto out;
 
@@ -1211,8 +1220,7 @@ out_free:
 	ehea_destroy_qp(pr->qp);
 	ehea_destroy_cq(pr->send_cq);
 	ehea_destroy_cq(pr->recv_cq);
-	ehea_destroy_eq(pr->send_eq);
-	ehea_destroy_eq(pr->recv_eq);
+	ehea_destroy_eq(pr->eq);
 out:
 	return ret;
 }
@@ -1221,13 +1229,14 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
 {
 	int ret, i;
 
+	free_netdev(pr->d_netdev);
+
 	ret = ehea_destroy_qp(pr->qp);
 
 	if (!ret) {
 		ehea_destroy_cq(pr->send_cq);
 		ehea_destroy_cq(pr->recv_cq);
-		ehea_destroy_eq(pr->send_eq);
-		ehea_destroy_eq(pr->recv_eq);
+		ehea_destroy_eq(pr->eq);
 
 		for (i = 0; i < pr->rq1_skba.len; i++)
 			if (pr->rq1_skba.arr[i])
@@ -1792,6 +1801,22 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 	dev_kfree_skb(skb);
 }
 
+static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
+{
+	struct tcphdr *tcp;
+	u32 tmp;
+
+	if ((skb->protocol == htons(ETH_P_IP)) &&
+	    (skb->nh.iph->protocol == IPPROTO_TCP)) {
+		tcp = (struct tcphdr*)(skb->nh.raw + (skb->nh.iph->ihl * 4));
+		tmp = (tcp->source + (tcp->dest << 16)) % 31;
+		tmp += skb->nh.iph->daddr % 31;
+		return tmp % num_qps;
+	}
+	else
+		return 0;
+}
+
 static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
@@ -1799,9 +1824,18 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 	u32 lkey;
 	int swqe_index;
-	struct ehea_port_res *pr = &port->port_res[0];
+	struct ehea_port_res *pr;
+
+	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
+
 
-	spin_lock(&pr->xmit_lock);
+	if (!spin_trylock(&pr->xmit_lock))
+		return NETDEV_TX_BUSY;
+
+	if (pr->queue_stopped) {
+		spin_unlock(&pr->xmit_lock);
+		return NETDEV_TX_BUSY;
+	}
 
 	swqe = ehea_get_swqe(pr->qp, &swqe_index);
 	memset(swqe, 0, SWQE_HEADER_SIZE);
@@ -2060,7 +2094,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
 	}
 
 	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
-	pr_cfg.max_entries_scq = sq_entries;
+	pr_cfg.max_entries_scq = sq_entries * 2;
 	pr_cfg.max_entries_sq = sq_entries;
 	pr_cfg.max_entries_rq1 = rq1_entries;
 	pr_cfg.max_entries_rq2 = rq2_entries;
@@ -2208,8 +2242,10 @@ static int ehea_down(struct net_device *dev)
 	ehea_drop_multicast_list(dev);
 	ehea_free_interrupts(dev);
 
-	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
-		tasklet_kill(&port->port_res[i].send_comp_task);
+	for (i = 0; i < port->num_def_qps; i++)
+		while (test_bit(__LINK_STATE_RX_SCHED,
+				&port->port_res[i].d_netdev->state))
+			msleep(1);
 
 	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
 	ret = ehea_clean_all_portres(port);
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 1ff60983504d..99d6b70a087f 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -320,6 +320,11 @@ static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
 	return hw_qeit_get_valid(queue);
 }
 
+static inline void ehea_inc_cq(struct ehea_cq *cq)
+{
+	hw_qeit_inc(&cq->hw_queue);
+}
+
 static inline void ehea_inc_rq1(struct ehea_qp *qp)
 {
 	hw_qeit_inc(&qp->hw_rqueue1);
@@ -327,7 +332,7 @@ static inline void ehea_inc_rq1(struct ehea_qp *qp)
 
 static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
 {
-	return hw_qeit_get_inc_valid(&my_cq->hw_queue);
+	return hw_qeit_get_valid(&my_cq->hw_queue);
 }
 
 #define EHEA_CQ_REGISTER_ORIG	0