about summary refs log tree commit diff stats
path: root/drivers/net/ehea/ehea_main.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ehea/ehea_main.c')
-rw-r--r--    drivers/net/ehea/ehea_main.c    940
1 file changed, 601 insertions(+), 339 deletions(-)
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 58364a0ff378..c7a5614e66c0 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -51,13 +51,18 @@ static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
51static int rq2_entries = EHEA_DEF_ENTRIES_RQ2; 51static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
52static int rq3_entries = EHEA_DEF_ENTRIES_RQ3; 52static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
53static int sq_entries = EHEA_DEF_ENTRIES_SQ; 53static int sq_entries = EHEA_DEF_ENTRIES_SQ;
54static int use_mcs = 0;
55static int num_tx_qps = EHEA_NUM_TX_QP;
54 56
55module_param(msg_level, int, 0); 57module_param(msg_level, int, 0);
56module_param(rq1_entries, int, 0); 58module_param(rq1_entries, int, 0);
57module_param(rq2_entries, int, 0); 59module_param(rq2_entries, int, 0);
58module_param(rq3_entries, int, 0); 60module_param(rq3_entries, int, 0);
59module_param(sq_entries, int, 0); 61module_param(sq_entries, int, 0);
62module_param(use_mcs, int, 0);
63module_param(num_tx_qps, int, 0);
60 64
65MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
61MODULE_PARM_DESC(msg_level, "msg_level"); 66MODULE_PARM_DESC(msg_level, "msg_level");
62MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 " 67MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
63 "[2^x - 1], x = [6..14]. Default = " 68 "[2^x - 1], x = [6..14]. Default = "
@@ -71,6 +76,29 @@ MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
71MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue " 76MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
72 "[2^x - 1], x = [6..14]. Default = " 77 "[2^x - 1], x = [6..14]. Default = "
73 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")"); 78 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
79MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 ");
80
81static int port_name_cnt = 0;
82
83static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
84 const struct of_device_id *id);
85
86static int __devexit ehea_remove(struct ibmebus_dev *dev);
87
88static struct of_device_id ehea_device_table[] = {
89 {
90 .name = "lhea",
91 .compatible = "IBM,lhea",
92 },
93 {},
94};
95
96static struct ibmebus_driver ehea_driver = {
97 .name = "ehea",
98 .id_table = ehea_device_table,
99 .probe = ehea_probe_adapter,
100 .remove = ehea_remove,
101};
74 102
75void ehea_dump(void *adr, int len, char *msg) { 103void ehea_dump(void *adr, int len, char *msg) {
76 int x; 104 int x;
@@ -197,7 +225,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
197 struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); 225 struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
198 if (!skb) { 226 if (!skb) {
199 ehea_error("%s: no mem for skb/%d wqes filled", 227 ehea_error("%s: no mem for skb/%d wqes filled",
200 dev->name, i); 228 pr->port->netdev->name, i);
201 q_skba->os_skbs = fill_wqes - i; 229 q_skba->os_skbs = fill_wqes - i;
202 ret = -ENOMEM; 230 ret = -ENOMEM;
203 break; 231 break;
@@ -321,6 +349,13 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
321{ 349{
322 struct sk_buff *skb; 350 struct sk_buff *skb;
323 351
352 if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
353 pr->p_stats.err_tcp_cksum++;
354 if (cqe->status & EHEA_CQE_STAT_ERR_IP)
355 pr->p_stats.err_ip_cksum++;
356 if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
357 pr->p_stats.err_frame_crc++;
358
324 if (netif_msg_rx_err(pr->port)) { 359 if (netif_msg_rx_err(pr->port)) {
325 ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr); 360 ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
326 ehea_dump(cqe, sizeof(*cqe), "CQE"); 361 ehea_dump(cqe, sizeof(*cqe), "CQE");
@@ -345,10 +380,11 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
345 return 0; 380 return 0;
346} 381}
347 382
348static int ehea_poll(struct net_device *dev, int *budget) 383static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
384 struct ehea_port_res *pr,
385 int *budget)
349{ 386{
350 struct ehea_port *port = netdev_priv(dev); 387 struct ehea_port *port = pr->port;
351 struct ehea_port_res *pr = &port->port_res[0];
352 struct ehea_qp *qp = pr->qp; 388 struct ehea_qp *qp = pr->qp;
353 struct ehea_cqe *cqe; 389 struct ehea_cqe *cqe;
354 struct sk_buff *skb; 390 struct sk_buff *skb;
@@ -359,14 +395,12 @@ static int ehea_poll(struct net_device *dev, int *budget)
359 int skb_arr_rq2_len = pr->rq2_skba.len; 395 int skb_arr_rq2_len = pr->rq2_skba.len;
360 int skb_arr_rq3_len = pr->rq3_skba.len; 396 int skb_arr_rq3_len = pr->rq3_skba.len;
361 int processed, processed_rq1, processed_rq2, processed_rq3; 397 int processed, processed_rq1, processed_rq2, processed_rq3;
362 int wqe_index, last_wqe_index, rq, intreq, my_quota, port_reset; 398 int wqe_index, last_wqe_index, rq, my_quota, port_reset;
363 399
364 processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; 400 processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
365 last_wqe_index = 0; 401 last_wqe_index = 0;
366 my_quota = min(*budget, dev->quota); 402 my_quota = min(*budget, dev->quota);
367 my_quota = min(my_quota, EHEA_POLL_MAX_RWQE);
368 403
369 /* rq0 is low latency RQ */
370 cqe = ehea_poll_rq1(qp, &wqe_index); 404 cqe = ehea_poll_rq1(qp, &wqe_index);
371 while ((my_quota > 0) && cqe) { 405 while ((my_quota > 0) && cqe) {
372 ehea_inc_rq1(qp); 406 ehea_inc_rq1(qp);
@@ -386,7 +420,8 @@ static int ehea_poll(struct net_device *dev, int *budget)
386 if (unlikely(!skb)) { 420 if (unlikely(!skb)) {
387 if (netif_msg_rx_err(port)) 421 if (netif_msg_rx_err(port))
388 ehea_error("LL rq1: skb=NULL"); 422 ehea_error("LL rq1: skb=NULL");
389 skb = netdev_alloc_skb(dev, 423
424 skb = netdev_alloc_skb(port->netdev,
390 EHEA_L_PKT_SIZE); 425 EHEA_L_PKT_SIZE);
391 if (!skb) 426 if (!skb)
392 break; 427 break;
@@ -402,7 +437,7 @@ static int ehea_poll(struct net_device *dev, int *budget)
402 ehea_error("rq2: skb=NULL"); 437 ehea_error("rq2: skb=NULL");
403 break; 438 break;
404 } 439 }
405 ehea_fill_skb(dev, skb, cqe); 440 ehea_fill_skb(port->netdev, skb, cqe);
406 processed_rq2++; 441 processed_rq2++;
407 } else { /* RQ3 */ 442 } else { /* RQ3 */
408 skb = get_skb_by_index(skb_arr_rq3, 443 skb = get_skb_by_index(skb_arr_rq3,
@@ -412,7 +447,7 @@ static int ehea_poll(struct net_device *dev, int *budget)
412 ehea_error("rq3: skb=NULL"); 447 ehea_error("rq3: skb=NULL");
413 break; 448 break;
414 } 449 }
415 ehea_fill_skb(dev, skb, cqe); 450 ehea_fill_skb(port->netdev, skb, cqe);
416 processed_rq3++; 451 processed_rq3++;
417 } 452 }
418 453
@@ -421,9 +456,8 @@ static int ehea_poll(struct net_device *dev, int *budget)
421 cqe->vlan_tag); 456 cqe->vlan_tag);
422 else 457 else
423 netif_receive_skb(skb); 458 netif_receive_skb(skb);
424 459 } else {
425 } else { /* Error occured */ 460 pr->p_stats.poll_receive_errors++;
426 pr->p_state.poll_receive_errors++;
427 port_reset = ehea_treat_poll_error(pr, rq, cqe, 461 port_reset = ehea_treat_poll_error(pr, rq, cqe,
428 &processed_rq2, 462 &processed_rq2,
429 &processed_rq3); 463 &processed_rq3);
@@ -433,72 +467,32 @@ static int ehea_poll(struct net_device *dev, int *budget)
433 cqe = ehea_poll_rq1(qp, &wqe_index); 467 cqe = ehea_poll_rq1(qp, &wqe_index);
434 } 468 }
435 469
436 dev->quota -= processed;
437 *budget -= processed;
438
439 pr->p_state.ehea_poll += 1;
440 pr->rx_packets += processed; 470 pr->rx_packets += processed;
471 *budget -= processed;
441 472
442 ehea_refill_rq1(pr, last_wqe_index, processed_rq1); 473 ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
443 ehea_refill_rq2(pr, processed_rq2); 474 ehea_refill_rq2(pr, processed_rq2);
444 ehea_refill_rq3(pr, processed_rq3); 475 ehea_refill_rq3(pr, processed_rq3);
445 476
446 intreq = ((pr->p_state.ehea_poll & 0xF) == 0xF); 477 cqe = ehea_poll_rq1(qp, &wqe_index);
447 478 return cqe;
448 if (!cqe || intreq) {
449 netif_rx_complete(dev);
450 ehea_reset_cq_ep(pr->recv_cq);
451 ehea_reset_cq_n1(pr->recv_cq);
452 cqe = hw_qeit_get_valid(&qp->hw_rqueue1);
453 if (!cqe || intreq)
454 return 0;
455 if (!netif_rx_reschedule(dev, my_quota))
456 return 0;
457 }
458 return 1;
459} 479}
460 480
461void free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr) 481static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
462{ 482{
463 struct sk_buff *skb; 483 struct sk_buff *skb;
464 int index, max_index_mask, i;
465
466 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
467 max_index_mask = pr->sq_skba.len - 1;
468 for (i = 0; i < EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); i++) {
469 skb = pr->sq_skba.arr[index];
470 if (likely(skb)) {
471 dev_kfree_skb(skb);
472 pr->sq_skba.arr[index] = NULL;
473 } else {
474 ehea_error("skb=NULL, wr_id=%lX, loop=%d, index=%d",
475 cqe->wr_id, i, index);
476 }
477 index--;
478 index &= max_index_mask;
479 }
480}
481
482#define MAX_SENDCOMP_QUOTA 400
483void ehea_send_irq_tasklet(unsigned long data)
484{
485 struct ehea_port_res *pr = (struct ehea_port_res*)data;
486 struct ehea_cq *send_cq = pr->send_cq; 484 struct ehea_cq *send_cq = pr->send_cq;
487 struct ehea_cqe *cqe; 485 struct ehea_cqe *cqe;
488 int quota = MAX_SENDCOMP_QUOTA; 486 int quota = my_quota;
489 int cqe_counter = 0; 487 int cqe_counter = 0;
490 int swqe_av = 0; 488 int swqe_av = 0;
489 int index;
491 unsigned long flags; 490 unsigned long flags;
492 491
493 do { 492 cqe = ehea_poll_cq(send_cq);
494 cqe = ehea_poll_cq(send_cq); 493 while(cqe && (quota > 0)) {
495 if (!cqe) { 494 ehea_inc_cq(send_cq);
496 ehea_reset_cq_ep(send_cq); 495
497 ehea_reset_cq_n1(send_cq);
498 cqe = ehea_poll_cq(send_cq);
499 if (!cqe)
500 break;
501 }
502 cqe_counter++; 496 cqe_counter++;
503 rmb(); 497 rmb();
504 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { 498 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
@@ -514,17 +508,25 @@ void ehea_send_irq_tasklet(unsigned long data)
514 ehea_dump(cqe, sizeof(*cqe), "CQE"); 508 ehea_dump(cqe, sizeof(*cqe), "CQE");
515 509
516 if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id) 510 if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
517 == EHEA_SWQE2_TYPE)) 511 == EHEA_SWQE2_TYPE)) {
518 free_sent_skbs(cqe, pr); 512
513 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
514 skb = pr->sq_skba.arr[index];
515 dev_kfree_skb(skb);
516 pr->sq_skba.arr[index] = NULL;
517 }
519 518
520 swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); 519 swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
521 quota--; 520 quota--;
522 } while (quota > 0); 521
522 cqe = ehea_poll_cq(send_cq);
523 };
523 524
524 ehea_update_feca(send_cq, cqe_counter); 525 ehea_update_feca(send_cq, cqe_counter);
525 atomic_add(swqe_av, &pr->swqe_avail); 526 atomic_add(swqe_av, &pr->swqe_avail);
526 527
527 spin_lock_irqsave(&pr->netif_queue, flags); 528 spin_lock_irqsave(&pr->netif_queue, flags);
529
528 if (pr->queue_stopped && (atomic_read(&pr->swqe_avail) 530 if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
529 >= pr->swqe_refill_th)) { 531 >= pr->swqe_refill_th)) {
530 netif_wake_queue(pr->port->netdev); 532 netif_wake_queue(pr->port->netdev);
@@ -532,22 +534,55 @@ void ehea_send_irq_tasklet(unsigned long data)
532 } 534 }
533 spin_unlock_irqrestore(&pr->netif_queue, flags); 535 spin_unlock_irqrestore(&pr->netif_queue, flags);
534 536
535 if (unlikely(cqe)) 537 return cqe;
536 tasklet_hi_schedule(&pr->send_comp_task);
537} 538}
538 539
539static irqreturn_t ehea_send_irq_handler(int irq, void *param) 540#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
541
542static int ehea_poll(struct net_device *dev, int *budget)
540{ 543{
541 struct ehea_port_res *pr = param; 544 struct ehea_port_res *pr = dev->priv;
542 tasklet_hi_schedule(&pr->send_comp_task); 545 struct ehea_cqe *cqe;
543 return IRQ_HANDLED; 546 struct ehea_cqe *cqe_skb = NULL;
547 int force_irq, wqe_index;
548
549 cqe = ehea_poll_rq1(pr->qp, &wqe_index);
550 cqe_skb = ehea_poll_cq(pr->send_cq);
551
552 force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
553
554 if ((!cqe && !cqe_skb) || force_irq) {
555 pr->poll_counter = 0;
556 netif_rx_complete(dev);
557 ehea_reset_cq_ep(pr->recv_cq);
558 ehea_reset_cq_ep(pr->send_cq);
559 ehea_reset_cq_n1(pr->recv_cq);
560 ehea_reset_cq_n1(pr->send_cq);
561 cqe = ehea_poll_rq1(pr->qp, &wqe_index);
562 cqe_skb = ehea_poll_cq(pr->send_cq);
563
564 if (!cqe && !cqe_skb)
565 return 0;
566
567 if (!netif_rx_reschedule(dev, dev->quota))
568 return 0;
569 }
570
571 cqe = ehea_proc_rwqes(dev, pr, budget);
572 cqe_skb = ehea_proc_cqes(pr, 300);
573
574 if (cqe || cqe_skb)
575 pr->poll_counter++;
576
577 return 1;
544} 578}
545 579
546static irqreturn_t ehea_recv_irq_handler(int irq, void *param) 580static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
547{ 581{
548 struct ehea_port_res *pr = param; 582 struct ehea_port_res *pr = param;
549 struct ehea_port *port = pr->port; 583
550 netif_rx_schedule(port->netdev); 584 netif_rx_schedule(pr->d_netdev);
585
551 return IRQ_HANDLED; 586 return IRQ_HANDLED;
552} 587}
553 588
@@ -580,7 +615,7 @@ static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
580{ 615{
581 int i; 616 int i;
582 617
583 for (i = 0; i < adapter->num_ports; i++) 618 for (i = 0; i < EHEA_MAX_PORTS; i++)
584 if (adapter->port[i]) 619 if (adapter->port[i])
585 if (adapter->port[i]->logical_port_id == logical_port) 620 if (adapter->port[i]->logical_port_id == logical_port)
586 return adapter->port[i]; 621 return adapter->port[i];
@@ -650,19 +685,25 @@ int ehea_sense_port_attr(struct ehea_port *port)
650 } 685 }
651 686
652 port->autoneg = 1; 687 port->autoneg = 1;
688 port->num_mcs = cb0->num_default_qps;
653 689
654 /* Number of default QPs */ 690 /* Number of default QPs */
655 port->num_def_qps = cb0->num_default_qps; 691 if (use_mcs)
692 port->num_def_qps = cb0->num_default_qps;
693 else
694 port->num_def_qps = 1;
656 695
657 if (!port->num_def_qps) { 696 if (!port->num_def_qps) {
658 ret = -EINVAL; 697 ret = -EINVAL;
659 goto out_free; 698 goto out_free;
660 } 699 }
661 700
662 if (port->num_def_qps >= EHEA_NUM_TX_QP) 701 port->num_tx_qps = num_tx_qps;
702
703 if (port->num_def_qps >= port->num_tx_qps)
663 port->num_add_tx_qps = 0; 704 port->num_add_tx_qps = 0;
664 else 705 else
665 port->num_add_tx_qps = EHEA_NUM_TX_QP - port->num_def_qps; 706 port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;
666 707
667 ret = 0; 708 ret = 0;
668out_free: 709out_free:
@@ -882,23 +923,6 @@ static int ehea_reg_interrupts(struct net_device *dev)
882 struct ehea_port_res *pr; 923 struct ehea_port_res *pr;
883 int i, ret; 924 int i, ret;
884 925
885 for (i = 0; i < port->num_def_qps; i++) {
886 pr = &port->port_res[i];
887 snprintf(pr->int_recv_name, EHEA_IRQ_NAME_SIZE - 1
888 , "%s-recv%d", dev->name, i);
889 ret = ibmebus_request_irq(NULL, pr->recv_eq->attr.ist1,
890 ehea_recv_irq_handler,
891 IRQF_DISABLED, pr->int_recv_name, pr);
892 if (ret) {
893 ehea_error("failed registering irq for ehea_recv_int:"
894 "port_res_nr:%d, ist=%X", i,
895 pr->recv_eq->attr.ist1);
896 goto out_free_seq;
897 }
898 if (netif_msg_ifup(port))
899 ehea_info("irq_handle 0x%X for funct ehea_recv_int %d "
900 "registered", pr->recv_eq->attr.ist1, i);
901 }
902 926
903 snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff", 927 snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
904 dev->name); 928 dev->name);
@@ -916,41 +940,41 @@ static int ehea_reg_interrupts(struct net_device *dev)
916 ehea_info("irq_handle 0x%X for function qp_aff_irq_handler " 940 ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
917 "registered", port->qp_eq->attr.ist1); 941 "registered", port->qp_eq->attr.ist1);
918 942
943
919 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 944 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
920 pr = &port->port_res[i]; 945 pr = &port->port_res[i];
921 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, 946 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
922 "%s-send%d", dev->name, i); 947 "%s-queue%d", dev->name, i);
923 ret = ibmebus_request_irq(NULL, pr->send_eq->attr.ist1, 948 ret = ibmebus_request_irq(NULL, pr->eq->attr.ist1,
924 ehea_send_irq_handler, 949 ehea_recv_irq_handler,
925 IRQF_DISABLED, pr->int_send_name, 950 IRQF_DISABLED, pr->int_send_name,
926 pr); 951 pr);
927 if (ret) { 952 if (ret) {
928 ehea_error("failed registering irq for ehea_send " 953 ehea_error("failed registering irq for ehea_queue "
929 "port_res_nr:%d, ist=%X", i, 954 "port_res_nr:%d, ist=%X", i,
930 pr->send_eq->attr.ist1); 955 pr->eq->attr.ist1);
931 goto out_free_req; 956 goto out_free_req;
932 } 957 }
933 if (netif_msg_ifup(port)) 958 if (netif_msg_ifup(port))
934 ehea_info("irq_handle 0x%X for function ehea_send_int " 959 ehea_info("irq_handle 0x%X for function ehea_queue_int "
935 "%d registered", pr->send_eq->attr.ist1, i); 960 "%d registered", pr->eq->attr.ist1, i);
936 } 961 }
937out: 962out:
938 return ret; 963 return ret;
939 964
965
940out_free_req: 966out_free_req:
941 while (--i >= 0) { 967 while (--i >= 0) {
942 u32 ist = port->port_res[i].send_eq->attr.ist1; 968 u32 ist = port->port_res[i].eq->attr.ist1;
943 ibmebus_free_irq(NULL, ist, &port->port_res[i]); 969 ibmebus_free_irq(NULL, ist, &port->port_res[i]);
944 } 970 }
971
945out_free_qpeq: 972out_free_qpeq:
946 ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port); 973 ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
947 i = port->num_def_qps; 974 i = port->num_def_qps;
948out_free_seq: 975
949 while (--i >= 0) {
950 u32 ist = port->port_res[i].recv_eq->attr.ist1;
951 ibmebus_free_irq(NULL, ist, &port->port_res[i]);
952 }
953 goto out; 976 goto out;
977
954} 978}
955 979
956static void ehea_free_interrupts(struct net_device *dev) 980static void ehea_free_interrupts(struct net_device *dev)
@@ -960,21 +984,13 @@ static void ehea_free_interrupts(struct net_device *dev)
960 int i; 984 int i;
961 985
962 /* send */ 986 /* send */
987
963 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 988 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
964 pr = &port->port_res[i]; 989 pr = &port->port_res[i];
965 ibmebus_free_irq(NULL, pr->send_eq->attr.ist1, pr); 990 ibmebus_free_irq(NULL, pr->eq->attr.ist1, pr);
966 if (netif_msg_intr(port)) 991 if (netif_msg_intr(port))
967 ehea_info("free send irq for res %d with handle 0x%X", 992 ehea_info("free send irq for res %d with handle 0x%X",
968 i, pr->send_eq->attr.ist1); 993 i, pr->eq->attr.ist1);
969 }
970
971 /* receive */
972 for (i = 0; i < port->num_def_qps; i++) {
973 pr = &port->port_res[i];
974 ibmebus_free_irq(NULL, pr->recv_eq->attr.ist1, pr);
975 if (netif_msg_intr(port))
976 ehea_info("free recv irq for res %d with handle 0x%X",
977 i, pr->recv_eq->attr.ist1);
978 } 994 }
979 995
980 /* associated events */ 996 /* associated events */
@@ -1003,8 +1019,13 @@ static int ehea_configure_port(struct ehea_port *port)
1003 PXLY_RC_VLAN_FILTER) 1019 PXLY_RC_VLAN_FILTER)
1004 | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1); 1020 | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);
1005 1021
1006 for (i = 0; i < port->num_def_qps; i++) 1022 for (i = 0; i < port->num_mcs; i++)
1007 cb0->default_qpn_arr[i] = port->port_res[0].qp->init_attr.qp_nr; 1023 if (use_mcs)
1024 cb0->default_qpn_arr[i] =
1025 port->port_res[i].qp->init_attr.qp_nr;
1026 else
1027 cb0->default_qpn_arr[i] =
1028 port->port_res[0].qp->init_attr.qp_nr;
1008 1029
1009 if (netif_msg_ifup(port)) 1030 if (netif_msg_ifup(port))
1010 ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port"); 1031 ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");
@@ -1027,52 +1048,35 @@ out:
1027 return ret; 1048 return ret;
1028} 1049}
1029 1050
1030static int ehea_gen_smrs(struct ehea_port_res *pr) 1051int ehea_gen_smrs(struct ehea_port_res *pr)
1031{ 1052{
1032 u64 hret; 1053 int ret;
1033 struct ehea_adapter *adapter = pr->port->adapter; 1054 struct ehea_adapter *adapter = pr->port->adapter;
1034 1055
1035 hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle, 1056 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
1036 adapter->mr.vaddr, EHEA_MR_ACC_CTRL, 1057 if (ret)
1037 adapter->pd, &pr->send_mr);
1038 if (hret != H_SUCCESS)
1039 goto out; 1058 goto out;
1040 1059
1041 hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle, 1060 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
1042 adapter->mr.vaddr, EHEA_MR_ACC_CTRL, 1061 if (ret)
1043 adapter->pd, &pr->recv_mr); 1062 goto out_free;
1044 if (hret != H_SUCCESS)
1045 goto out_freeres;
1046 1063
1047 return 0; 1064 return 0;
1048 1065
1049out_freeres: 1066out_free:
1050 hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle); 1067 ehea_rem_mr(&pr->send_mr);
1051 if (hret != H_SUCCESS)
1052 ehea_error("failed freeing SMR");
1053out: 1068out:
1069 ehea_error("Generating SMRS failed\n");
1054 return -EIO; 1070 return -EIO;
1055} 1071}
1056 1072
1057static int ehea_rem_smrs(struct ehea_port_res *pr) 1073int ehea_rem_smrs(struct ehea_port_res *pr)
1058{ 1074{
1059 struct ehea_adapter *adapter = pr->port->adapter; 1075 if ((ehea_rem_mr(&pr->send_mr))
1060 int ret = 0; 1076 || (ehea_rem_mr(&pr->recv_mr)))
1061 u64 hret; 1077 return -EIO;
1062 1078 else
1063 hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle); 1079 return 0;
1064 if (hret != H_SUCCESS) {
1065 ret = -EIO;
1066 ehea_error("failed freeing send SMR for pr=%p", pr);
1067 }
1068
1069 hret = ehea_h_free_resource(adapter->handle, pr->recv_mr.handle);
1070 if (hret != H_SUCCESS) {
1071 ret = -EIO;
1072 ehea_error("failed freeing recv SMR for pr=%p", pr);
1073 }
1074
1075 return ret;
1076} 1080}
1077 1081
1078static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries) 1082static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
@@ -1103,25 +1107,17 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1103 memset(pr, 0, sizeof(struct ehea_port_res)); 1107 memset(pr, 0, sizeof(struct ehea_port_res));
1104 1108
1105 pr->port = port; 1109 pr->port = port;
1106 spin_lock_init(&pr->send_lock);
1107 spin_lock_init(&pr->recv_lock);
1108 spin_lock_init(&pr->xmit_lock); 1110 spin_lock_init(&pr->xmit_lock);
1109 spin_lock_init(&pr->netif_queue); 1111 spin_lock_init(&pr->netif_queue);
1110 1112
1111 pr->recv_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); 1113 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1112 if (!pr->recv_eq) { 1114 if (!pr->eq) {
1113 ehea_error("create_eq failed (recv_eq)"); 1115 ehea_error("create_eq failed (eq)");
1114 goto out_free;
1115 }
1116
1117 pr->send_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1118 if (!pr->send_eq) {
1119 ehea_error("create_eq failed (send_eq)");
1120 goto out_free; 1116 goto out_free;
1121 } 1117 }
1122 1118
1123 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, 1119 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
1124 pr->recv_eq->fw_handle, 1120 pr->eq->fw_handle,
1125 port->logical_port_id); 1121 port->logical_port_id);
1126 if (!pr->recv_cq) { 1122 if (!pr->recv_cq) {
1127 ehea_error("create_cq failed (cq_recv)"); 1123 ehea_error("create_cq failed (cq_recv)");
@@ -1129,7 +1125,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1129 } 1125 }
1130 1126
1131 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, 1127 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
1132 pr->send_eq->fw_handle, 1128 pr->eq->fw_handle,
1133 port->logical_port_id); 1129 port->logical_port_id);
1134 if (!pr->send_cq) { 1130 if (!pr->send_cq) {
1135 ehea_error("create_cq failed (cq_send)"); 1131 ehea_error("create_cq failed (cq_send)");
@@ -1194,11 +1190,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1194 ret = -EIO; 1190 ret = -EIO;
1195 goto out_free; 1191 goto out_free;
1196 } 1192 }
1197 tasklet_init(&pr->send_comp_task, ehea_send_irq_tasklet, 1193
1198 (unsigned long)pr);
1199 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); 1194 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
1200 1195
1201 kfree(init_attr); 1196 kfree(init_attr);
1197
1198 pr->d_netdev = alloc_netdev(0, "", ether_setup);
1199 if (!pr->d_netdev)
1200 goto out_free;
1201 pr->d_netdev->priv = pr;
1202 pr->d_netdev->weight = 64;
1203 pr->d_netdev->poll = ehea_poll;
1204 set_bit(__LINK_STATE_START, &pr->d_netdev->state);
1205 strcpy(pr->d_netdev->name, port->netdev->name);
1206
1202 ret = 0; 1207 ret = 0;
1203 goto out; 1208 goto out;
1204 1209
@@ -1211,8 +1216,7 @@ out_free:
1211 ehea_destroy_qp(pr->qp); 1216 ehea_destroy_qp(pr->qp);
1212 ehea_destroy_cq(pr->send_cq); 1217 ehea_destroy_cq(pr->send_cq);
1213 ehea_destroy_cq(pr->recv_cq); 1218 ehea_destroy_cq(pr->recv_cq);
1214 ehea_destroy_eq(pr->send_eq); 1219 ehea_destroy_eq(pr->eq);
1215 ehea_destroy_eq(pr->recv_eq);
1216out: 1220out:
1217 return ret; 1221 return ret;
1218} 1222}
@@ -1221,13 +1225,14 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1221{ 1225{
1222 int ret, i; 1226 int ret, i;
1223 1227
1228 free_netdev(pr->d_netdev);
1229
1224 ret = ehea_destroy_qp(pr->qp); 1230 ret = ehea_destroy_qp(pr->qp);
1225 1231
1226 if (!ret) { 1232 if (!ret) {
1227 ehea_destroy_cq(pr->send_cq); 1233 ehea_destroy_cq(pr->send_cq);
1228 ehea_destroy_cq(pr->recv_cq); 1234 ehea_destroy_cq(pr->recv_cq);
1229 ehea_destroy_eq(pr->send_eq); 1235 ehea_destroy_eq(pr->eq);
1230 ehea_destroy_eq(pr->recv_eq);
1231 1236
1232 for (i = 0; i < pr->rq1_skba.len; i++) 1237 for (i = 0; i < pr->rq1_skba.len; i++)
1233 if (pr->rq1_skba.arr[i]) 1238 if (pr->rq1_skba.arr[i])
@@ -1792,6 +1797,22 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
1792 dev_kfree_skb(skb); 1797 dev_kfree_skb(skb);
1793} 1798}
1794 1799
1800static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
1801{
1802 struct tcphdr *tcp;
1803 u32 tmp;
1804
1805 if ((skb->protocol == htons(ETH_P_IP)) &&
1806 (skb->nh.iph->protocol == IPPROTO_TCP)) {
1807 tcp = (struct tcphdr*)(skb->nh.raw + (skb->nh.iph->ihl * 4));
1808 tmp = (tcp->source + (tcp->dest << 16)) % 31;
1809 tmp += skb->nh.iph->daddr % 31;
1810 return tmp % num_qps;
1811 }
1812 else
1813 return 0;
1814}
1815
1795static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) 1816static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
1796{ 1817{
1797 struct ehea_port *port = netdev_priv(dev); 1818 struct ehea_port *port = netdev_priv(dev);
@@ -1799,9 +1820,17 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
1799 unsigned long flags; 1820 unsigned long flags;
1800 u32 lkey; 1821 u32 lkey;
1801 int swqe_index; 1822 int swqe_index;
1802 struct ehea_port_res *pr = &port->port_res[0]; 1823 struct ehea_port_res *pr;
1824
1825 pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
1803 1826
1804 spin_lock(&pr->xmit_lock); 1827 if (!spin_trylock(&pr->xmit_lock))
1828 return NETDEV_TX_BUSY;
1829
1830 if (pr->queue_stopped) {
1831 spin_unlock(&pr->xmit_lock);
1832 return NETDEV_TX_BUSY;
1833 }
1805 1834
1806 swqe = ehea_get_swqe(pr->qp, &swqe_index); 1835 swqe = ehea_get_swqe(pr->qp, &swqe_index);
1807 memset(swqe, 0, SWQE_HEADER_SIZE); 1836 memset(swqe, 0, SWQE_HEADER_SIZE);
@@ -1824,6 +1853,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
1824 swqe->wr_id = 1853 swqe->wr_id =
1825 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE) 1854 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
1826 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) 1855 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
1856 | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
1827 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); 1857 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
1828 pr->sq_skba.arr[pr->sq_skba.index] = skb; 1858 pr->sq_skba.arr[pr->sq_skba.index] = skb;
1829 1859
@@ -1832,14 +1862,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
1832 1862
1833 lkey = pr->send_mr.lkey; 1863 lkey = pr->send_mr.lkey;
1834 ehea_xmit2(skb, dev, swqe, lkey); 1864 ehea_xmit2(skb, dev, swqe, lkey);
1835 1865 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
1836 if (pr->swqe_count >= (EHEA_SIG_IV_LONG - 1)) {
1837 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
1838 EHEA_SIG_IV_LONG);
1839 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
1840 pr->swqe_count = 0;
1841 } else
1842 pr->swqe_count += 1;
1843 } 1866 }
1844 pr->swqe_id_counter += 1; 1867 pr->swqe_id_counter += 1;
1845 1868
@@ -1859,6 +1882,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
1859 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { 1882 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
1860 spin_lock_irqsave(&pr->netif_queue, flags); 1883 spin_lock_irqsave(&pr->netif_queue, flags);
1861 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { 1884 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
1885 pr->p_stats.queue_stopped++;
1862 netif_stop_queue(dev); 1886 netif_stop_queue(dev);
1863 pr->queue_stopped = 1; 1887 pr->queue_stopped = 1;
1864 } 1888 }
@@ -2060,7 +2084,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
2060 } 2084 }
2061 2085
2062 pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries; 2086 pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2063 pr_cfg.max_entries_scq = sq_entries; 2087 pr_cfg.max_entries_scq = sq_entries * 2;
2064 pr_cfg.max_entries_sq = sq_entries; 2088 pr_cfg.max_entries_sq = sq_entries;
2065 pr_cfg.max_entries_rq1 = rq1_entries; 2089 pr_cfg.max_entries_rq1 = rq1_entries;
2066 pr_cfg.max_entries_rq2 = rq2_entries; 2090 pr_cfg.max_entries_rq2 = rq2_entries;
@@ -2109,6 +2133,28 @@ static int ehea_clean_all_portres(struct ehea_port *port)
2109 return ret; 2133 return ret;
2110} 2134}
2111 2135
2136static void ehea_remove_adapter_mr (struct ehea_adapter *adapter)
2137{
2138 int i;
2139
2140 for (i=0; i < EHEA_MAX_PORTS; i++)
2141 if (adapter->port[i])
2142 return;
2143
2144 ehea_rem_mr(&adapter->mr);
2145}
2146
2147static int ehea_add_adapter_mr (struct ehea_adapter *adapter)
2148{
2149 int i;
2150
2151 for (i=0; i < EHEA_MAX_PORTS; i++)
2152 if (adapter->port[i])
2153 return 0;
2154
2155 return ehea_reg_kernel_mr(adapter, &adapter->mr);
2156}
2157
2112static int ehea_up(struct net_device *dev) 2158static int ehea_up(struct net_device *dev)
2113{ 2159{
2114 int ret, i; 2160 int ret, i;
@@ -2208,8 +2254,10 @@ static int ehea_down(struct net_device *dev)
2208 ehea_drop_multicast_list(dev); 2254 ehea_drop_multicast_list(dev);
2209 ehea_free_interrupts(dev); 2255 ehea_free_interrupts(dev);
2210 2256
2211 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) 2257 for (i = 0; i < port->num_def_qps; i++)
2212 tasklet_kill(&port->port_res[i].send_comp_task); 2258 while (test_bit(__LINK_STATE_RX_SCHED,
2259 &port->port_res[i].d_netdev->state))
2260 msleep(1);
2213 2261
2214 ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 2262 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2215 ret = ehea_clean_all_portres(port); 2263 ret = ehea_clean_all_portres(port);
@@ -2276,8 +2324,6 @@ static void ehea_tx_watchdog(struct net_device *dev)
2276int ehea_sense_adapter_attr(struct ehea_adapter *adapter) 2324int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2277{ 2325{
2278 struct hcp_query_ehea *cb; 2326 struct hcp_query_ehea *cb;
2279 struct device_node *lhea_dn = NULL;
2280 struct device_node *eth_dn = NULL;
2281 u64 hret; 2327 u64 hret;
2282 int ret; 2328 int ret;
2283 2329
@@ -2294,18 +2340,6 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2294 goto out_herr; 2340 goto out_herr;
2295 } 2341 }
2296 2342
2297 /* Determine the number of available logical ports
2298 * by counting the child nodes of the lhea OFDT entry
2299 */
2300 adapter->num_ports = 0;
2301 lhea_dn = of_find_node_by_name(lhea_dn, "lhea");
2302 do {
2303 eth_dn = of_get_next_child(lhea_dn, eth_dn);
2304 if (eth_dn)
2305 adapter->num_ports++;
2306 } while ( eth_dn );
2307 of_node_put(lhea_dn);
2308
2309 adapter->max_mc_mac = cb->max_mc_mac - 1; 2343 adapter->max_mc_mac = cb->max_mc_mac - 1;
2310 ret = 0; 2344 ret = 0;
2311 2345
@@ -2315,79 +2349,188 @@ out:
2315 return ret; 2349 return ret;
2316} 2350}
2317 2351
2318static int ehea_setup_single_port(struct ehea_port *port, 2352int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2319 struct device_node *dn)
2320{ 2353{
2321 int ret;
2322 u64 hret;
2323 struct net_device *dev = port->netdev;
2324 struct ehea_adapter *adapter = port->adapter;
2325 struct hcp_ehea_port_cb4 *cb4; 2354 struct hcp_ehea_port_cb4 *cb4;
2326 u32 *dn_log_port_id; 2355 u64 hret;
2327 int jumbo = 0; 2356 int ret = 0;
2328
2329 sema_init(&port->port_lock, 1);
2330 port->state = EHEA_PORT_DOWN;
2331 port->sig_comp_iv = sq_entries / 10;
2332
2333 if (!dn) {
2334 ehea_error("bad device node: dn=%p", dn);
2335 ret = -EINVAL;
2336 goto out;
2337 }
2338
2339 port->of_dev_node = dn;
2340
2341 /* Determine logical port id */
2342 dn_log_port_id = (u32*)get_property(dn, "ibm,hea-port-no", NULL);
2343
2344 if (!dn_log_port_id) {
2345 ehea_error("bad device node: dn_log_port_id=%p",
2346 dn_log_port_id);
2347 ret = -EINVAL;
2348 goto out;
2349 }
2350 port->logical_port_id = *dn_log_port_id;
2351
2352 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
2353 if (!port->mc_list) {
2354 ret = -ENOMEM;
2355 goto out;
2356 }
2357
2358 INIT_LIST_HEAD(&port->mc_list->list);
2359 2357
2360 ret = ehea_sense_port_attr(port); 2358 *jumbo = 0;
2361 if (ret)
2362 goto out;
2363 2359
2364 /* Enable Jumbo frames */ 2360 /* (Try to) enable *jumbo frames */
2365 cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2361 cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2366 if (!cb4) { 2362 if (!cb4) {
2367 ehea_error("no mem for cb4"); 2363 ehea_error("no mem for cb4");
2364 ret = -ENOMEM;
2365 goto out;
2368 } else { 2366 } else {
2369 hret = ehea_h_query_ehea_port(adapter->handle, 2367 hret = ehea_h_query_ehea_port(port->adapter->handle,
2370 port->logical_port_id, 2368 port->logical_port_id,
2371 H_PORT_CB4, 2369 H_PORT_CB4,
2372 H_PORT_CB4_JUMBO, cb4); 2370 H_PORT_CB4_JUMBO, cb4);
2373
2374 if (hret == H_SUCCESS) { 2371 if (hret == H_SUCCESS) {
2375 if (cb4->jumbo_frame) 2372 if (cb4->jumbo_frame)
2376 jumbo = 1; 2373 *jumbo = 1;
2377 else { 2374 else {
2378 cb4->jumbo_frame = 1; 2375 cb4->jumbo_frame = 1;
2379 hret = ehea_h_modify_ehea_port(adapter->handle, 2376 hret = ehea_h_modify_ehea_port(port->adapter->
2377 handle,
2380 port-> 2378 port->
2381 logical_port_id, 2379 logical_port_id,
2382 H_PORT_CB4, 2380 H_PORT_CB4,
2383 H_PORT_CB4_JUMBO, 2381 H_PORT_CB4_JUMBO,
2384 cb4); 2382 cb4);
2385 if (hret == H_SUCCESS) 2383 if (hret == H_SUCCESS)
2386 jumbo = 1; 2384 *jumbo = 1;
2387 } 2385 }
2388 } 2386 } else
2387 ret = -EINVAL;
2388
2389 kfree(cb4); 2389 kfree(cb4);
2390 } 2390 }
2391out:
2392 return ret;
2393}
2394
2395static ssize_t ehea_show_port_id(struct device *dev,
2396 struct device_attribute *attr, char *buf)
2397{
2398 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2399 return sprintf(buf, "0x%X", port->logical_port_id);
2400}
2401
2402static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
2403 NULL);
2404
2405static void __devinit logical_port_release(struct device *dev)
2406{
2407 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2408 of_node_put(port->ofdev.node);
2409}
2410
2411static int ehea_driver_sysfs_add(struct device *dev,
2412 struct device_driver *driver)
2413{
2414 int ret;
2415
2416 ret = sysfs_create_link(&driver->kobj, &dev->kobj,
2417 kobject_name(&dev->kobj));
2418 if (ret == 0) {
2419 ret = sysfs_create_link(&dev->kobj, &driver->kobj,
2420 "driver");
2421 if (ret)
2422 sysfs_remove_link(&driver->kobj,
2423 kobject_name(&dev->kobj));
2424 }
2425 return ret;
2426}
2427
2428static void ehea_driver_sysfs_remove(struct device *dev,
2429 struct device_driver *driver)
2430{
2431 struct device_driver *drv = driver;
2432
2433 if (drv) {
2434 sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
2435 sysfs_remove_link(&dev->kobj, "driver");
2436 }
2437}
2438
2439static struct device *ehea_register_port(struct ehea_port *port,
2440 struct device_node *dn)
2441{
2442 int ret;
2443
2444 port->ofdev.node = of_node_get(dn);
2445 port->ofdev.dev.parent = &port->adapter->ebus_dev->ofdev.dev;
2446 port->ofdev.dev.bus = &ibmebus_bus_type;
2447
2448 sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++);
2449 port->ofdev.dev.release = logical_port_release;
2450
2451 ret = of_device_register(&port->ofdev);
2452 if (ret) {
2453 ehea_error("failed to register device. ret=%d", ret);
2454 goto out;
2455 }
2456
2457 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2458 if (ret) {
2459 ehea_error("failed to register attributes, ret=%d", ret);
2460 goto out_unreg_of_dev;
2461 }
2462
2463 ret = ehea_driver_sysfs_add(&port->ofdev.dev, &ehea_driver.driver);
2464 if (ret) {
2465 ehea_error("failed to register sysfs driver link");
2466 goto out_rem_dev_file;
2467 }
2468
2469 return &port->ofdev.dev;
2470
2471out_rem_dev_file:
2472 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2473out_unreg_of_dev:
2474 of_device_unregister(&port->ofdev);
2475out:
2476 return NULL;
2477}
2478
2479static void ehea_unregister_port(struct ehea_port *port)
2480{
2481 ehea_driver_sysfs_remove(&port->ofdev.dev, &ehea_driver.driver);
2482 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2483 of_device_unregister(&port->ofdev);
2484}
2485
2486struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2487 u32 logical_port_id,
2488 struct device_node *dn)
2489{
2490 int ret;
2491 struct net_device *dev;
2492 struct ehea_port *port;
2493 struct device *port_dev;
2494 int jumbo;
2495
2496 /* allocate memory for the port structures */
2497 dev = alloc_etherdev(sizeof(struct ehea_port));
2498
2499 if (!dev) {
2500 ehea_error("no mem for net_device");
2501 ret = -ENOMEM;
2502 goto out_err;
2503 }
2504
2505 port = netdev_priv(dev);
2506
2507 sema_init(&port->port_lock, 1);
2508 port->state = EHEA_PORT_DOWN;
2509 port->sig_comp_iv = sq_entries / 10;
2510
2511 port->adapter = adapter;
2512 port->netdev = dev;
2513 port->logical_port_id = logical_port_id;
2514
2515 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
2516
2517 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
2518 if (!port->mc_list) {
2519 ret = -ENOMEM;
2520 goto out_free_ethdev;
2521 }
2522
2523 INIT_LIST_HEAD(&port->mc_list->list);
2524
2525 ret = ehea_sense_port_attr(port);
2526 if (ret)
2527 goto out_free_mc_list;
2528
2529 port_dev = ehea_register_port(port, dn);
2530 if (!port_dev)
2531 goto out_free_mc_list;
2532
2533 SET_NETDEV_DEV(dev, port_dev);
2391 2534
2392 /* initialize net_device structure */ 2535 /* initialize net_device structure */
2393 SET_MODULE_OWNER(dev); 2536 SET_MODULE_OWNER(dev);
@@ -2420,84 +2563,225 @@ static int ehea_setup_single_port(struct ehea_port *port,
2420 ret = register_netdev(dev); 2563 ret = register_netdev(dev);
2421 if (ret) { 2564 if (ret) {
2422 ehea_error("register_netdev failed. ret=%d", ret); 2565 ehea_error("register_netdev failed. ret=%d", ret);
2423 goto out_free; 2566 goto out_unreg_port;
2424 } 2567 }
2425 2568
2569 ret = ehea_get_jumboframe_status(port, &jumbo);
2570 if (ret)
2571 ehea_error("failed determining jumbo frame status for %s",
2572 port->netdev->name);
2573
2426 ehea_info("%s: Jumbo frames are %sabled", dev->name, 2574 ehea_info("%s: Jumbo frames are %sabled", dev->name,
2427 jumbo == 1 ? "en" : "dis"); 2575 jumbo == 1 ? "en" : "dis");
2428 2576
2429 port->netdev = dev; 2577 return port;
2430 ret = 0;
2431 goto out;
2432 2578
2433out_free: 2579out_unreg_port:
2580 ehea_unregister_port(port);
2581
2582out_free_mc_list:
2434 kfree(port->mc_list); 2583 kfree(port->mc_list);
2435out: 2584
2436 return ret; 2585out_free_ethdev:
2586 free_netdev(dev);
2587
2588out_err:
2589 ehea_error("setting up logical port with id=%d failed, ret=%d",
2590 logical_port_id, ret);
2591 return NULL;
2592}
2593
2594static void ehea_shutdown_single_port(struct ehea_port *port)
2595{
2596 unregister_netdev(port->netdev);
2597 ehea_unregister_port(port);
2598 kfree(port->mc_list);
2599 free_netdev(port->netdev);
2437} 2600}
2438 2601
2439static int ehea_setup_ports(struct ehea_adapter *adapter) 2602static int ehea_setup_ports(struct ehea_adapter *adapter)
2440{ 2603{
2441 int ret; 2604 struct device_node *lhea_dn;
2442 int port_setup_ok = 0; 2605 struct device_node *eth_dn = NULL;
2606
2607 u32 *dn_log_port_id;
2608 int i = 0;
2609
2610 lhea_dn = adapter->ebus_dev->ofdev.node;
2611 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
2612
2613 dn_log_port_id = (u32*)get_property(eth_dn, "ibm,hea-port-no",
2614 NULL);
2615 if (!dn_log_port_id) {
2616 ehea_error("bad device node: eth_dn name=%s",
2617 eth_dn->full_name);
2618 continue;
2619 }
2620
2621 if (ehea_add_adapter_mr(adapter)) {
2622 ehea_error("creating MR failed");
2623 of_node_put(eth_dn);
2624 return -EIO;
2625 }
2626
2627 adapter->port[i] = ehea_setup_single_port(adapter,
2628 *dn_log_port_id,
2629 eth_dn);
2630 if (adapter->port[i])
2631 ehea_info("%s -> logical port id #%d",
2632 adapter->port[i]->netdev->name,
2633 *dn_log_port_id);
2634 else
2635 ehea_remove_adapter_mr(adapter);
2636
2637 i++;
2638 };
2639
2640 return 0;
2641}
2642
2643static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
2644 u32 logical_port_id)
2645{
2646 struct device_node *lhea_dn;
2647 struct device_node *eth_dn = NULL;
2648 u32 *dn_log_port_id;
2649
2650 lhea_dn = adapter->ebus_dev->ofdev.node;
2651 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
2652
2653 dn_log_port_id = (u32*)get_property(eth_dn, "ibm,hea-port-no",
2654 NULL);
2655 if (dn_log_port_id)
2656 if (*dn_log_port_id == logical_port_id)
2657 return eth_dn;
2658 };
2659
2660 return NULL;
2661}
2662
2663static ssize_t ehea_probe_port(struct device *dev,
2664 struct device_attribute *attr,
2665 const char *buf, size_t count)
2666{
2667 struct ehea_adapter *adapter = dev->driver_data;
2443 struct ehea_port *port; 2668 struct ehea_port *port;
2444 struct device_node *dn = NULL; 2669 struct device_node *eth_dn = NULL;
2445 struct net_device *dev;
2446 int i; 2670 int i;
2447 2671
2448 /* get port properties for all ports */ 2672 u32 logical_port_id;
2449 for (i = 0; i < adapter->num_ports; i++) {
2450 2673
2451 if (adapter->port[i]) 2674 sscanf(buf, "%X", &logical_port_id);
2452 continue; /* port already up and running */
2453 2675
2454 /* allocate memory for the port structures */ 2676 port = ehea_get_port(adapter, logical_port_id);
2455 dev = alloc_etherdev(sizeof(struct ehea_port));
2456 2677
2457 if (!dev) { 2678 if (port) {
2458 ehea_error("no mem for net_device"); 2679 ehea_info("adding port with logical port id=%d failed. port "
2459 break; 2680 "already configured as %s.", logical_port_id,
2460 } 2681 port->netdev->name);
2682 return -EINVAL;
2683 }
2461 2684
2462 port = netdev_priv(dev); 2685 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
2463 port->adapter = adapter;
2464 port->netdev = dev;
2465 adapter->port[i] = port;
2466 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
2467 2686
2468 dn = of_find_node_by_name(dn, "ethernet"); 2687 if (!eth_dn) {
2469 ret = ehea_setup_single_port(port, dn); 2688 ehea_info("no logical port with id %d found", logical_port_id);
2470 if (ret) { 2689 return -EINVAL;
2471 /* Free mem for this port struct. The others will be
2472 processed on rollback */
2473 free_netdev(dev);
2474 adapter->port[i] = NULL;
2475 ehea_error("eHEA port %d setup failed, ret=%d", i, ret);
2476 }
2477 } 2690 }
2478 2691
2479 of_node_put(dn); 2692 if (ehea_add_adapter_mr(adapter)) {
2693 ehea_error("creating MR failed");
2694 return -EIO;
2695 }
2480 2696
2481 /* Check for succesfully set up ports */ 2697 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
2482 for (i = 0; i < adapter->num_ports; i++)
2483 if (adapter->port[i])
2484 port_setup_ok++;
2485 2698
2486 if (port_setup_ok) 2699 of_node_put(eth_dn);
2487 ret = 0; /* At least some ports are setup correctly */ 2700
2488 else 2701 if (port) {
2489 ret = -EINVAL; 2702 for (i=0; i < EHEA_MAX_PORTS; i++)
2703 if (!adapter->port[i]) {
2704 adapter->port[i] = port;
2705 break;
2706 }
2707
2708 ehea_info("added %s (logical port id=%d)", port->netdev->name,
2709 logical_port_id);
2710 } else {
2711 ehea_remove_adapter_mr(adapter);
2712 return -EIO;
2713 }
2714
2715 return (ssize_t) count;
2716}
2717
2718static ssize_t ehea_remove_port(struct device *dev,
2719 struct device_attribute *attr,
2720 const char *buf, size_t count)
2721{
2722 struct ehea_adapter *adapter = dev->driver_data;
2723 struct ehea_port *port;
2724 int i;
2725 u32 logical_port_id;
2726
2727 sscanf(buf, "%X", &logical_port_id);
2728
2729 port = ehea_get_port(adapter, logical_port_id);
2730
2731 if (port) {
2732 ehea_info("removed %s (logical port id=%d)", port->netdev->name,
2733 logical_port_id);
2734
2735 ehea_shutdown_single_port(port);
2736
2737 for (i=0; i < EHEA_MAX_PORTS; i++)
2738 if (adapter->port[i] == port) {
2739 adapter->port[i] = NULL;
2740 break;
2741 }
2742 } else {
2743 ehea_error("removing port with logical port id=%d failed. port "
2744 "not configured.", logical_port_id);
2745 return -EINVAL;
2746 }
2747
2748 ehea_remove_adapter_mr(adapter);
2749
2750 return (ssize_t) count;
2751}
2752
2753static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
2754static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
2490 2755
2756int ehea_create_device_sysfs(struct ibmebus_dev *dev)
2757{
2758 int ret = device_create_file(&dev->ofdev.dev, &dev_attr_probe_port);
2759 if (ret)
2760 goto out;
2761
2762 ret = device_create_file(&dev->ofdev.dev, &dev_attr_remove_port);
2763out:
2491 return ret; 2764 return ret;
2492} 2765}
2493 2766
2494static int __devinit ehea_probe(struct ibmebus_dev *dev, 2767void ehea_remove_device_sysfs(struct ibmebus_dev *dev)
2495 const struct of_device_id *id) 2768{
2769 device_remove_file(&dev->ofdev.dev, &dev_attr_probe_port);
2770 device_remove_file(&dev->ofdev.dev, &dev_attr_remove_port);
2771}
2772
2773static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
2774 const struct of_device_id *id)
2496{ 2775{
2497 struct ehea_adapter *adapter; 2776 struct ehea_adapter *adapter;
2498 u64 *adapter_handle; 2777 u64 *adapter_handle;
2499 int ret; 2778 int ret;
2500 2779
2780 if (!dev || !dev->ofdev.node) {
2781 ehea_error("Invalid ibmebus device probed");
2782 return -EINVAL;
2783 }
2784
2501 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 2785 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2502 if (!adapter) { 2786 if (!adapter) {
2503 ret = -ENOMEM; 2787 ret = -ENOMEM;
@@ -2505,6 +2789,8 @@ static int __devinit ehea_probe(struct ibmebus_dev *dev,
2505 goto out; 2789 goto out;
2506 } 2790 }
2507 2791
2792 adapter->ebus_dev = dev;
2793
2508 adapter_handle = (u64*)get_property(dev->ofdev.node, "ibm,hea-handle", 2794 adapter_handle = (u64*)get_property(dev->ofdev.node, "ibm,hea-handle",
2509 NULL); 2795 NULL);
2510 if (adapter_handle) 2796 if (adapter_handle)
@@ -2521,26 +2807,21 @@ static int __devinit ehea_probe(struct ibmebus_dev *dev,
2521 2807
2522 dev->ofdev.dev.driver_data = adapter; 2808 dev->ofdev.dev.driver_data = adapter;
2523 2809
2524 ret = ehea_reg_mr_adapter(adapter);
2525 if (ret) {
2526 dev_err(&dev->ofdev.dev, "reg_mr_adapter failed\n");
2527 goto out_free_ad;
2528 }
2529 2810
2530 /* initialize adapter and ports */ 2811 /* initialize adapter and ports */
2531 /* get adapter properties */ 2812 /* get adapter properties */
2532 ret = ehea_sense_adapter_attr(adapter); 2813 ret = ehea_sense_adapter_attr(adapter);
2533 if (ret) { 2814 if (ret) {
2534 dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret); 2815 dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret);
2535 goto out_free_res; 2816 goto out_free_ad;
2536 } 2817 }
2537 dev_info(&dev->ofdev.dev, "%d eHEA ports found\n", adapter->num_ports);
2538 2818
2539 adapter->neq = ehea_create_eq(adapter, 2819 adapter->neq = ehea_create_eq(adapter,
2540 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1); 2820 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
2541 if (!adapter->neq) { 2821 if (!adapter->neq) {
2822 ret = -EIO;
2542 dev_err(&dev->ofdev.dev, "NEQ creation failed"); 2823 dev_err(&dev->ofdev.dev, "NEQ creation failed");
2543 goto out_free_res; 2824 goto out_free_ad;
2544 } 2825 }
2545 2826
2546 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet, 2827 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
@@ -2555,18 +2836,27 @@ static int __devinit ehea_probe(struct ibmebus_dev *dev,
2555 } 2836 }
2556 2837
2557 adapter->ehea_wq = create_workqueue("ehea_wq"); 2838 adapter->ehea_wq = create_workqueue("ehea_wq");
2558 if (!adapter->ehea_wq) 2839 if (!adapter->ehea_wq) {
2840 ret = -EIO;
2559 goto out_free_irq; 2841 goto out_free_irq;
2842 }
2843
2844 ret = ehea_create_device_sysfs(dev);
2845 if (ret)
2846 goto out_kill_wq;
2560 2847
2561 ret = ehea_setup_ports(adapter); 2848 ret = ehea_setup_ports(adapter);
2562 if (ret) { 2849 if (ret) {
2563 dev_err(&dev->ofdev.dev, "setup_ports failed"); 2850 dev_err(&dev->ofdev.dev, "setup_ports failed");
2564 goto out_kill_wq; 2851 goto out_rem_dev_sysfs;
2565 } 2852 }
2566 2853
2567 ret = 0; 2854 ret = 0;
2568 goto out; 2855 goto out;
2569 2856
2857out_rem_dev_sysfs:
2858 ehea_remove_device_sysfs(dev);
2859
2570out_kill_wq: 2860out_kill_wq:
2571 destroy_workqueue(adapter->ehea_wq); 2861 destroy_workqueue(adapter->ehea_wq);
2572 2862
@@ -2576,45 +2866,32 @@ out_free_irq:
2576out_kill_eq: 2866out_kill_eq:
2577 ehea_destroy_eq(adapter->neq); 2867 ehea_destroy_eq(adapter->neq);
2578 2868
2579out_free_res:
2580 ehea_h_free_resource(adapter->handle, adapter->mr.handle);
2581
2582out_free_ad: 2869out_free_ad:
2583 kfree(adapter); 2870 kfree(adapter);
2584out: 2871out:
2585 return ret; 2872 return ret;
2586} 2873}
2587 2874
2588static void ehea_shutdown_single_port(struct ehea_port *port)
2589{
2590 unregister_netdev(port->netdev);
2591 kfree(port->mc_list);
2592 free_netdev(port->netdev);
2593}
2594
2595static int __devexit ehea_remove(struct ibmebus_dev *dev) 2875static int __devexit ehea_remove(struct ibmebus_dev *dev)
2596{ 2876{
2597 struct ehea_adapter *adapter = dev->ofdev.dev.driver_data; 2877 struct ehea_adapter *adapter = dev->ofdev.dev.driver_data;
2598 u64 hret;
2599 int i; 2878 int i;
2600 2879
2601 for (i = 0; i < adapter->num_ports; i++) 2880 for (i = 0; i < EHEA_MAX_PORTS; i++)
2602 if (adapter->port[i]) { 2881 if (adapter->port[i]) {
2603 ehea_shutdown_single_port(adapter->port[i]); 2882 ehea_shutdown_single_port(adapter->port[i]);
2604 adapter->port[i] = NULL; 2883 adapter->port[i] = NULL;
2605 } 2884 }
2885
2886 ehea_remove_device_sysfs(dev);
2887
2606 destroy_workqueue(adapter->ehea_wq); 2888 destroy_workqueue(adapter->ehea_wq);
2607 2889
2608 ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); 2890 ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
2609 tasklet_kill(&adapter->neq_tasklet); 2891 tasklet_kill(&adapter->neq_tasklet);
2610 2892
2611 ehea_destroy_eq(adapter->neq); 2893 ehea_destroy_eq(adapter->neq);
2612 2894 ehea_remove_adapter_mr(adapter);
2613 hret = ehea_h_free_resource(adapter->handle, adapter->mr.handle);
2614 if (hret) {
2615 dev_err(&dev->ofdev.dev, "free_resource_mr failed");
2616 return -EIO;
2617 }
2618 kfree(adapter); 2895 kfree(adapter);
2619 return 0; 2896 return 0;
2620} 2897}
@@ -2647,21 +2924,6 @@ static int check_module_parm(void)
2647 return ret; 2924 return ret;
2648} 2925}
2649 2926
2650static struct of_device_id ehea_device_table[] = {
2651 {
2652 .name = "lhea",
2653 .compatible = "IBM,lhea",
2654 },
2655 {},
2656};
2657
2658static struct ibmebus_driver ehea_driver = {
2659 .name = "ehea",
2660 .id_table = ehea_device_table,
2661 .probe = ehea_probe,
2662 .remove = ehea_remove,
2663};
2664
2665int __init ehea_module_init(void) 2927int __init ehea_module_init(void)
2666{ 2928{
2667 int ret; 2929 int ret;