Diffstat (limited to 'drivers/net/benet/be_main.c')
 drivers/net/benet/be_main.c | 344
 1 file changed, 212 insertions(+), 132 deletions(-)
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 66bb56874d9b..308eb09ca56b 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -60,26 +60,6 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
 	return 0;
 }
 
-static inline void *queue_head_node(struct be_queue_info *q)
-{
-	return q->dma_mem.va + q->head * q->entry_size;
-}
-
-static inline void *queue_tail_node(struct be_queue_info *q)
-{
-	return q->dma_mem.va + q->tail * q->entry_size;
-}
-
-static inline void queue_head_inc(struct be_queue_info *q)
-{
-	index_inc(&q->head, q->len);
-}
-
-static inline void queue_tail_inc(struct be_queue_info *q)
-{
-	index_inc(&q->tail, q->len);
-}
-
 static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
 {
 	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
@@ -127,7 +107,7 @@ static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
 	iowrite32(val, ctrl->db + DB_EQ_OFFSET);
 }
 
-static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
+void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
 		bool arm, u16 num_popped)
 {
 	u32 val = 0;
@@ -234,28 +214,24 @@ static void netdev_stats_update(struct be_adapter *adapter)
 	dev_stats->tx_window_errors = 0;
 }
 
-static void be_link_status_update(struct be_adapter *adapter)
+void be_link_status_update(void *ctxt, bool link_up)
 {
-	struct be_link_info *prev = &adapter->link;
-	struct be_link_info now = { 0 };
+	struct be_adapter *adapter = ctxt;
 	struct net_device *netdev = adapter->netdev;
 
-	be_cmd_link_status_query(&adapter->ctrl, &now);
-
 	/* If link came up or went down */
-	if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
-		prev->speed == PHY_LINK_SPEED_ZERO)) {
-		if (now.speed == PHY_LINK_SPEED_ZERO) {
-			netif_stop_queue(netdev);
-			netif_carrier_off(netdev);
-			printk(KERN_INFO "%s: Link down\n", netdev->name);
-		} else {
+	if (adapter->link_up != link_up) {
+		if (link_up) {
 			netif_start_queue(netdev);
 			netif_carrier_on(netdev);
 			printk(KERN_INFO "%s: Link up\n", netdev->name);
+		} else {
+			netif_stop_queue(netdev);
+			netif_carrier_off(netdev);
+			printk(KERN_INFO "%s: Link down\n", netdev->name);
 		}
+		adapter->link_up = link_up;
 	}
-	*prev = now;
 }
 
 /* Update the EQ delay n BE based on the RX frags consumed / sec */
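This rewrites link handling from pull to push: rather than the worker thread polling be_cmd_link_status_query(), the firmware posts an async event on the MCC channel and the driver dispatches it to be_link_status_update(), which is registered as ctrl->async_cb in be_ctrl_init() further down. A minimal compilable sketch of that register-and-dispatch pattern (all names here are hypothetical, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical mirror of ctrl->async_cb / ctrl->adapter_ctxt. */
    struct ctrl_info {
            void (*async_cb)(void *ctxt, bool link_up);
            void *adapter_ctxt;
    };

    struct adapter {
            bool link_up;
    };

    /* Acts only on a state change, like be_link_status_update(). */
    static void link_status_update(void *ctxt, bool link_up)
    {
            struct adapter *adapter = ctxt;

            if (adapter->link_up != link_up) {
                    printf("Link %s\n", link_up ? "up" : "down");
                    adapter->link_up = link_up;
            }
    }

    int main(void)
    {
            struct adapter adapter = { .link_up = false };
            struct ctrl_info ctrl = {
                    .async_cb = link_status_update,
                    .adapter_ctxt = &adapter,
            };

            /* An async MCC completion would trigger this dispatch. */
            ctrl.async_cb(ctrl.adapter_ctxt, true);
            ctrl.async_cb(ctrl.adapter_ctxt, true); /* duplicate event: no-op */
            return 0;
    }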
@@ -569,47 +545,32 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
 	be_vid_config(netdev);
 }
 
-static void be_set_multicast_filter(struct net_device *netdev)
+static void be_set_multicast_list(struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
-	struct dev_mc_list *mc_ptr;
-	u8 mac_addr[32][ETH_ALEN];
-	int i = 0;
+	struct be_ctrl_info *ctrl = &adapter->ctrl;
 
-	if (netdev->flags & IFF_ALLMULTI) {
-		/* set BE in Multicast promiscuous */
-		be_cmd_mcast_mac_set(&adapter->ctrl,
-			adapter->if_handle, NULL, 0, true);
-		return;
+	if (netdev->flags & IFF_PROMISC) {
+		be_cmd_promiscuous_config(ctrl, adapter->port_num, 1);
+		adapter->promiscuous = true;
+		goto done;
 	}
 
-	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
-		memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
-		if (++i >= 32) {
-			be_cmd_mcast_mac_set(&adapter->ctrl,
-				adapter->if_handle, &mac_addr[0][0], i, false);
-			i = 0;
-		}
-
+	/* BE was previously in promiscous mode; disable it */
+	if (adapter->promiscuous) {
+		adapter->promiscuous = false;
+		be_cmd_promiscuous_config(ctrl, adapter->port_num, 0);
 	}
 
-	if (i) {
-		/* reset the promiscuous mode also. */
-		be_cmd_mcast_mac_set(&adapter->ctrl,
-			adapter->if_handle, &mac_addr[0][0], i, false);
+	if (netdev->flags & IFF_ALLMULTI) {
+		be_cmd_multicast_set(ctrl, adapter->if_handle, NULL, 0);
+		goto done;
 	}
-}
-
-static void be_set_multicast_list(struct net_device *netdev)
-{
-	struct be_adapter *adapter = netdev_priv(netdev);
 
-	if (netdev->flags & IFF_PROMISC) {
-		be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
-	} else {
-		be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
-		be_set_multicast_filter(netdev);
-	}
+	be_cmd_multicast_set(ctrl, adapter->if_handle, netdev->mc_list,
+		netdev->mc_count);
+done:
+	return;
 }
 
 static void be_rx_rate_update(struct be_adapter *adapter)
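Two changes are folded together here: promiscuous state is now cached in adapter->promiscuous so the firmware command is issued only on a transition, and the whole netdev->mc_list is handed to be_cmd_multicast_set() in one call rather than copied out in batches of 32 addresses. A small sketch of the transition-only pattern (hypothetical names, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for be_cmd_promiscuous_config(): a pretend firmware command. */
    static void fw_set_promisc(bool en)
    {
            printf("fw: promiscuous %s\n", en ? "on" : "off");
    }

    struct adapter {
            bool promiscuous; /* cached state, like adapter->promiscuous */
    };

    /* Issue the (slow) firmware command only when the state actually changes. */
    static void set_rx_mode(struct adapter *a, bool want_promisc)
    {
            if (want_promisc && !a->promiscuous) {
                    fw_set_promisc(true);
                    a->promiscuous = true;
            } else if (!want_promisc && a->promiscuous) {
                    a->promiscuous = false;
                    fw_set_promisc(false);
            }
    }

    int main(void)
    {
            struct adapter a = { .promiscuous = false };

            set_rx_mode(&a, true);  /* transition: command issued */
            set_rx_mode(&a, true);  /* no transition: nothing sent */
            set_rx_mode(&a, false); /* transition back */
            return 0;
    }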
@@ -705,7 +666,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
 {
 	struct be_queue_info *rxq = &adapter->rx_obj.q;
 	struct be_rx_page_info *page_info;
-	u16 rxq_idx, i, num_rcvd;
+	u16 rxq_idx, i, num_rcvd, j;
 	u32 pktsize, hdr_len, curr_frag_len;
 	u8 *start;
 
@@ -748,22 +709,33 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
 
 	/* More frags present for this completion */
 	pktsize -= curr_frag_len; /* account for above copied frag */
-	for (i = 1; i < num_rcvd; i++) {
+	for (i = 1, j = 0; i < num_rcvd; i++) {
 		index_inc(&rxq_idx, rxq->len);
 		page_info = get_rx_page_info(adapter, rxq_idx);
 
 		curr_frag_len = min(pktsize, rx_frag_size);
 
-		skb_shinfo(skb)->frags[i].page = page_info->page;
-		skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
-		skb_shinfo(skb)->frags[i].size = curr_frag_len;
+		/* Coalesce all frags from the same physical page in one slot */
+		if (page_info->page_offset == 0) {
+			/* Fresh page */
+			j++;
+			skb_shinfo(skb)->frags[j].page = page_info->page;
+			skb_shinfo(skb)->frags[j].page_offset =
+							page_info->page_offset;
+			skb_shinfo(skb)->frags[j].size = 0;
+			skb_shinfo(skb)->nr_frags++;
+		} else {
+			put_page(page_info->page);
+		}
+
+		skb_shinfo(skb)->frags[j].size += curr_frag_len;
 		skb->len += curr_frag_len;
 		skb->data_len += curr_frag_len;
-		skb_shinfo(skb)->nr_frags++;
 		pktsize -= curr_frag_len;
 
 		memset(page_info, 0, sizeof(*page_info));
 	}
+	BUG_ON(j > MAX_SKB_FRAGS);
 
 done:
 	be_rx_stats_update(adapter, pktsize, num_rcvd);
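The new bookkeeping keys off page_info->page_offset: when rx_frag_size is half a page, two consecutive receive fragments share one physical page, and a non-zero offset means the fragment continues the page already installed in slot j, so the driver grows that slot and drops the duplicate page reference with put_page() instead of consuming another frags[] entry. A compilable userspace model of the same coalescing, with hypothetical types standing in for the skb machinery:

    #include <stdio.h>

    #define RX_FRAG_SIZE 2048 /* two frags per 4K page when rx_frag_size == 2048 */

    struct frag { int page; unsigned int off, size; };

    int main(void)
    {
            /* Received frags as (page id, offset in page): page 7 holds two. */
            struct { int page; unsigned int off; } rx[] = {
                    { 7, 0 }, { 7, 2048 }, { 8, 0 },
            };
            struct frag frags[8];
            int i, j = -1;

            for (i = 0; i < 3; i++) {
                    if (i == 0 || rx[i].off == 0) {
                            /* First frag or fresh page: open a new slot. */
                            j++;
                            frags[j].page = rx[i].page;
                            frags[j].off = rx[i].off;
                            frags[j].size = 0;
                    }
                    /* Same page as slot j: grow it (the driver also put_page()s). */
                    frags[j].size += RX_FRAG_SIZE;
            }

            /* Prints two slots for three frags: page 7 coalesced to size 4096. */
            for (i = 0; i <= j; i++)
                    printf("slot %d: page %d off %u size %u\n",
                           i, frags[i].page, frags[i].off, frags[i].size);
            return 0;
    }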
@@ -825,7 +797,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
 	struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
 	struct be_queue_info *rxq = &adapter->rx_obj.q;
 	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
-	u16 i, rxq_idx = 0, vid;
+	u16 i, rxq_idx = 0, vid, j;
 
 	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
 	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
@@ -833,20 +805,28 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
 	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
 
 	remaining = pkt_size;
-	for (i = 0; i < num_rcvd; i++) {
+	for (i = 0, j = -1; i < num_rcvd; i++) {
 		page_info = get_rx_page_info(adapter, rxq_idx);
 
 		curr_frag_len = min(remaining, rx_frag_size);
 
-		rx_frags[i].page = page_info->page;
-		rx_frags[i].page_offset = page_info->page_offset;
-		rx_frags[i].size = curr_frag_len;
-		remaining -= curr_frag_len;
+		/* Coalesce all frags from the same physical page in one slot */
+		if (i == 0 || page_info->page_offset == 0) {
+			/* First frag or Fresh page */
+			j++;
+			rx_frags[j].page = page_info->page;
+			rx_frags[j].page_offset = page_info->page_offset;
+			rx_frags[j].size = 0;
+		} else {
+			put_page(page_info->page);
+		}
+		rx_frags[j].size += curr_frag_len;
 
+		remaining -= curr_frag_len;
 		index_inc(&rxq_idx, rxq->len);
-
 		memset(page_info, 0, sizeof(*page_info));
 	}
+	BUG_ON(j > MAX_SKB_FRAGS);
 
 	if (likely(!vlanf)) {
 		lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
@@ -960,10 +940,8 @@ static void be_post_rx_frags(struct be_adapter *adapter)
 	return;
 }
 
-static struct be_eth_tx_compl *
-be_tx_compl_get(struct be_adapter *adapter)
+static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
 {
-	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
 	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
 
 	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
@@ -1051,6 +1029,59 @@ static void be_tx_q_clean(struct be_adapter *adapter)
 	}
 }
 
+static void be_mcc_queues_destroy(struct be_adapter *adapter)
+{
+	struct be_queue_info *q;
+	struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+	q = &ctrl->mcc_obj.q;
+	if (q->created)
+		be_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
+	be_queue_free(adapter, q);
+
+	q = &ctrl->mcc_obj.cq;
+	if (q->created)
+		be_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+	be_queue_free(adapter, q);
+}
+
+/* Must be called only after TX qs are created as MCC shares TX EQ */
+static int be_mcc_queues_create(struct be_adapter *adapter)
+{
+	struct be_queue_info *q, *cq;
+	struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+	/* Alloc MCC compl queue */
+	cq = &ctrl->mcc_obj.cq;
+	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
+			sizeof(struct be_mcc_cq_entry)))
+		goto err;
+
+	/* Ask BE to create MCC compl queue; share TX's eq */
+	if (be_cmd_cq_create(ctrl, cq, &adapter->tx_eq.q, false, true, 0))
+		goto mcc_cq_free;
+
+	/* Alloc MCC queue */
+	q = &ctrl->mcc_obj.q;
+	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
+		goto mcc_cq_destroy;
+
+	/* Ask BE to create MCC queue */
+	if (be_cmd_mccq_create(ctrl, q, cq))
+		goto mcc_q_free;
+
+	return 0;
+
+mcc_q_free:
+	be_queue_free(adapter, q);
+mcc_cq_destroy:
+	be_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
+mcc_cq_free:
+	be_queue_free(adapter, cq);
+err:
+	return -1;
+}
+
 static void be_tx_queues_destroy(struct be_adapter *adapter)
 {
 	struct be_queue_info *q;
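The creation path above follows the kernel's goto-unwind idiom: each failure label releases exactly what was set up before it, in reverse order, and the comment records the ordering constraint (the MCC CQ is hooked to adapter->tx_eq.q, so the TX queues must exist first). A stripped-down, compilable illustration of the same two-stage ladder, with hypothetical resources standing in for the queue allocations:

    #include <stdlib.h>

    /* Two-stage setup mirroring the cq-then-q ladder above (hypothetical). */
    static int mcc_queues_create(void **cq_out, void **q_out)
    {
            void *cq, *q;

            cq = malloc(64);        /* like be_queue_alloc(cq, ...) */
            if (!cq)
                    goto err;

            q = malloc(64);         /* like be_queue_alloc(q, ...) */
            if (!q)
                    goto cq_free;   /* undo only what already succeeded */

            *cq_out = cq;
            *q_out = q;
            return 0;

    cq_free:
            free(cq);               /* unwind in reverse creation order */
    err:
            return -1;
    }

    int main(void)
    {
            void *cq, *q;

            if (mcc_queues_create(&cq, &q))
                    return EXIT_FAILURE;
            free(q);                /* destroy order: q before cq */
            free(cq);
            return EXIT_SUCCESS;
    }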
@@ -1263,7 +1294,7 @@ static irqreturn_t be_msix_rx(int irq, void *dev)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t be_msix_tx(int irq, void *dev)
+static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
 {
 	struct be_adapter *adapter = dev;
 
@@ -1324,40 +1355,51 @@ int be_poll_rx(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-/* For TX we don't honour budget; consume everything */
-int be_poll_tx(struct napi_struct *napi, int budget)
+void be_process_tx(struct be_adapter *adapter)
 {
-	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
-	struct be_adapter *adapter =
-		container_of(tx_eq, struct be_adapter, tx_eq);
-	struct be_tx_obj *tx_obj = &adapter->tx_obj;
-	struct be_queue_info *tx_cq = &tx_obj->cq;
-	struct be_queue_info *txq = &tx_obj->q;
+	struct be_queue_info *txq = &adapter->tx_obj.q;
+	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
 	struct be_eth_tx_compl *txcp;
 	u32 num_cmpl = 0;
 	u16 end_idx;
 
-	while ((txcp = be_tx_compl_get(adapter))) {
+	while ((txcp = be_tx_compl_get(tx_cq))) {
 		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
 				wrb_index, txcp);
 		be_tx_compl_process(adapter, end_idx);
 		num_cmpl++;
 	}
 
-	/* As Tx wrbs have been freed up, wake up netdev queue if
-	 * it was stopped due to lack of tx wrbs.
-	 */
-	if (netif_queue_stopped(adapter->netdev) &&
+	if (num_cmpl) {
+		be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
+
+		/* As Tx wrbs have been freed up, wake up netdev queue if
+		 * it was stopped due to lack of tx wrbs.
+		 */
+		if (netif_queue_stopped(adapter->netdev) &&
 			atomic_read(&txq->used) < txq->len / 2) {
 			netif_wake_queue(adapter->netdev);
+		}
+
+		drvr_stats(adapter)->be_tx_events++;
+		drvr_stats(adapter)->be_tx_compl += num_cmpl;
 	}
+}
+
+/* As TX and MCC share the same EQ check for both TX and MCC completions.
+ * For TX/MCC we don't honour budget; consume everything
+ */
+static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
+{
+	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
+	struct be_adapter *adapter =
+		container_of(tx_eq, struct be_adapter, tx_eq);
 
 	napi_complete(napi);
 
-	be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
+	be_process_tx(adapter);
 
-	drvr_stats(adapter)->be_tx_events++;
-	drvr_stats(adapter)->be_tx_compl += num_cmpl;
+	be_process_mcc(&adapter->ctrl);
 
 	return 1;
 }
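Since the MCC queues hang off the TX event queue, one NAPI handler now drains both completion paths: be_poll_tx_mcc() runs be_process_tx() and then be_process_mcc() and always returns 1, as neither ring honours the NAPI budget. A toy model of that budgetless two-ring poll (all names hypothetical):

    #include <stdio.h>

    /* Hypothetical rings sharing one event queue, as TX and MCC do here. */
    static int tx_pending = 3, mcc_pending = 1;

    static int process_tx(void)
    {
            int n = tx_pending;
            tx_pending = 0;         /* consume everything; no budget */
            return n;
    }

    static int process_mcc(void)
    {
            int n = mcc_pending;
            mcc_pending = 0;
            return n;
    }

    /* One handler drains both rings, as be_poll_tx_mcc() does. */
    static int poll_tx_mcc(void)
    {
            int done = 0;

            done += process_tx();
            done += process_mcc();
            printf("drained %d completions\n", done);
            return 1;               /* poll always reports complete */
    }

    int main(void)
    {
            return poll_tx_mcc() == 1 ? 0 : 1;
    }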
@@ -1368,9 +1410,6 @@ static void be_worker(struct work_struct *work)
 		container_of(work, struct be_adapter, work.work);
 	int status;
 
-	/* Check link */
-	be_link_status_update(adapter);
-
 	/* Get Stats */
 	status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
 	if (!status)
@@ -1419,7 +1458,7 @@ static int be_msix_register(struct be_adapter *adapter)
 
 	sprintf(tx_eq->desc, "%s-tx", netdev->name);
 	vec = be_msix_vec_get(adapter, tx_eq->q.id);
-	status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter);
+	status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter);
 	if (status)
 		goto err;
 
@@ -1495,6 +1534,39 @@ static int be_open(struct net_device *netdev)
 	struct be_ctrl_info *ctrl = &adapter->ctrl;
 	struct be_eq_obj *rx_eq = &adapter->rx_eq;
 	struct be_eq_obj *tx_eq = &adapter->tx_eq;
+	bool link_up;
+	int status;
+
+	/* First time posting */
+	be_post_rx_frags(adapter);
+
+	napi_enable(&rx_eq->napi);
+	napi_enable(&tx_eq->napi);
+
+	be_irq_register(adapter);
+
+	be_intr_set(ctrl, true);
+
+	/* The evt queues are created in unarmed state; arm them */
+	be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
+	be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
+
+	/* Rx compl queue may be in unarmed state; rearm it */
+	be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
+
+	status = be_cmd_link_status_query(ctrl, &link_up);
+	if (status)
+		return status;
+	be_link_status_update(adapter, link_up);
+
+	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+	return 0;
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+	struct be_ctrl_info *ctrl = &adapter->ctrl;
+	struct net_device *netdev = adapter->netdev;
+
 	u32 if_flags;
 	int status;
 
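Everything that creates hardware state (the interface handle and the TX/RX/MCC queues) has moved out of be_open() into the new be_setup(), leaving be_open() to post buffers, enable NAPI and interrupts, arm the queues, and query link state. A sketch of the resulting lifecycle pairing, with hypothetical stand-ins for the driver entry points:

    #include <stdio.h>

    /* Hypothetical lifecycle mirroring the new probe/open/close/remove split. */
    static void setup(void)  { puts("setup: create queues, if_create"); }
    static void open_(void)  { puts("open: post bufs, enable napi, arm EQs/CQs"); }
    static void close_(void) { puts("close: quiesce, disable napi"); }
    static void clear(void)  { puts("clear: destroy queues, if_destroy"); }

    int main(void)
    {
            setup();        /* be_probe()  -> be_setup() */
            open_();        /* ndo_open    -> be_open()  */
            close_();       /* ndo_stop    -> be_close() */
            open_();        /* reopen works without recreating the queues */
            close_();
            clear();        /* be_remove() -> be_clear() */
            return 0;
    }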
@@ -1521,29 +1593,14 @@ static int be_open(struct net_device *netdev)
 	if (status != 0)
 		goto tx_qs_destroy;
 
-	/* First time posting */
-	be_post_rx_frags(adapter);
-
-	napi_enable(&rx_eq->napi);
-	napi_enable(&tx_eq->napi);
-
-	be_irq_register(adapter);
-
-	be_intr_set(ctrl, true);
-
-	/* The evt queues are created in the unarmed state; arm them */
-	be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
-	be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
-
-	/* The compl queues are created in the unarmed state; arm them */
-	be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
-	be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);
-
-	be_link_status_update(adapter);
+	status = be_mcc_queues_create(adapter);
+	if (status != 0)
+		goto rx_qs_destroy;
 
-	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
 	return 0;
 
+rx_qs_destroy:
+	be_rx_queues_destroy(adapter);
 tx_qs_destroy:
 	be_tx_queues_destroy(adapter);
 if_destroy:
@@ -1552,6 +1609,19 @@ do_none:
 	return status;
 }
 
+static int be_clear(struct be_adapter *adapter)
+{
+	struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+	be_rx_queues_destroy(adapter);
+	be_tx_queues_destroy(adapter);
+
+	be_cmd_if_destroy(ctrl, adapter->if_handle);
+
+	be_mcc_queues_destroy(adapter);
+	return 0;
+}
+
 static int be_close(struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
@@ -1564,7 +1634,7 @@ static int be_close(struct net_device *netdev)
 
 	netif_stop_queue(netdev);
 	netif_carrier_off(netdev);
-	adapter->link.speed = PHY_LINK_SPEED_ZERO;
+	adapter->link_up = false;
 
 	be_intr_set(ctrl, false);
 
@@ -1581,10 +1651,6 @@ static int be_close(struct net_device *netdev)
 	napi_disable(&rx_eq->napi);
 	napi_disable(&tx_eq->napi);
 
-	be_rx_queues_destroy(adapter);
-	be_tx_queues_destroy(adapter);
-
-	be_cmd_if_destroy(ctrl, adapter->if_handle);
 	return 0;
 }
 
@@ -1673,7 +1739,7 @@ static void be_netdev_init(struct net_device *netdev)
 
 	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
 		BE_NAPI_WEIGHT);
-	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx,
+	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
 		BE_NAPI_WEIGHT);
 
 	netif_carrier_off(netdev);
@@ -1755,7 +1821,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
 	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
 	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
-	spin_lock_init(&ctrl->cmd_lock);
+	spin_lock_init(&ctrl->mbox_lock);
+	spin_lock_init(&ctrl->mcc_lock);
+	spin_lock_init(&ctrl->mcc_cq_lock);
+
+	ctrl->async_cb = be_link_status_update;
+	ctrl->adapter_ctxt = adapter;
 
 	val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
 	ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
@@ -1793,6 +1864,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
 
 	unregister_netdev(adapter->netdev);
 
+	be_clear(adapter);
+
 	be_stats_cleanup(adapter);
 
 	be_ctrl_cleanup(adapter);
@@ -1890,13 +1963,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	be_netdev_init(netdev);
 	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
 
+	status = be_setup(adapter);
+	if (status)
+		goto stats_clean;
 	status = register_netdev(netdev);
 	if (status != 0)
-		goto stats_clean;
+		goto unsetup;
 
 	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
 	return 0;
 
+unsetup:
+	be_clear(adapter);
 stats_clean:
 	be_stats_cleanup(adapter);
 ctrl_clean:
@@ -1921,6 +1999,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
 	if (netif_running(netdev)) {
 		rtnl_lock();
 		be_close(netdev);
+		be_clear(adapter);
 		rtnl_unlock();
 	}
 
@@ -1947,6 +2026,7 @@ static int be_resume(struct pci_dev *pdev)
 
 	if (netif_running(netdev)) {
 		rtnl_lock();
+		be_setup(adapter);
 		be_open(netdev);
 		rtnl_unlock();
 	}