Diffstat (limited to 'drivers/net/benet/be_main.c')
-rw-r--r-- drivers/net/benet/be_main.c | 828
1 file changed, 593 insertions(+), 235 deletions(-)
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 876b357101fa..ec6ace802256 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2009 ServerEngines 2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -31,6 +31,7 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
31 31
32static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { 32static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
33 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 33 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
34 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
34 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 35 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
35 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 36 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
36 { 0 } 37 { 0 }
@@ -67,6 +68,9 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
67 u32 reg = ioread32(addr); 68 u32 reg = ioread32(addr);
68 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 69 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
69 70
71 if (adapter->eeh_err)
72 return;
73
70 if (!enabled && enable) 74 if (!enabled && enable)
71 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 75 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
72 else if (enabled && !enable) 76 else if (enabled && !enable)
@@ -98,6 +102,10 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
98{ 102{
99 u32 val = 0; 103 u32 val = 0;
100 val |= qid & DB_EQ_RING_ID_MASK; 104 val |= qid & DB_EQ_RING_ID_MASK;
105
106 if (adapter->eeh_err)
107 return;
108
101 if (arm) 109 if (arm)
102 val |= 1 << DB_EQ_REARM_SHIFT; 110 val |= 1 << DB_EQ_REARM_SHIFT;
103 if (clear_int) 111 if (clear_int)
@@ -111,6 +119,10 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
111{ 119{
112 u32 val = 0; 120 u32 val = 0;
113 val |= qid & DB_CQ_RING_ID_MASK; 121 val |= qid & DB_CQ_RING_ID_MASK;
122
123 if (adapter->eeh_err)
124 return;
125
114 if (arm) 126 if (arm)
115 val |= 1 << DB_CQ_REARM_SHIFT; 127 val |= 1 << DB_CQ_REARM_SHIFT;
116 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT; 128 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
@@ -123,6 +135,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
123 struct sockaddr *addr = p; 135 struct sockaddr *addr = p;
124 int status = 0; 136 int status = 0;
125 137
138 if (!is_valid_ether_addr(addr->sa_data))
139 return -EADDRNOTAVAIL;
140
126 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id); 141 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
127 if (status) 142 if (status)
128 return status; 143 return status;
@@ -141,16 +156,13 @@ void netdev_stats_update(struct be_adapter *adapter)
141 struct be_rxf_stats *rxf_stats = &hw_stats->rxf; 156 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
142 struct be_port_rxf_stats *port_stats = 157 struct be_port_rxf_stats *port_stats =
143 &rxf_stats->port[adapter->port_num]; 158 &rxf_stats->port[adapter->port_num];
144 struct net_device_stats *dev_stats = &adapter->stats.net_stats; 159 struct net_device_stats *dev_stats = &adapter->netdev->stats;
145 struct be_erx_stats *erx_stats = &hw_stats->erx; 160 struct be_erx_stats *erx_stats = &hw_stats->erx;
146 161
147 dev_stats->rx_packets = port_stats->rx_total_frames; 162 dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
148 dev_stats->tx_packets = port_stats->tx_unicastframes + 163 dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
149 port_stats->tx_multicastframes + port_stats->tx_broadcastframes; 164 dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
150 dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 | 165 dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
151 (u64) port_stats->rx_bytes_lsd;
152 dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
153 (u64) port_stats->tx_bytes_lsd;
154 166
155 /* bad pkts received */ 167 /* bad pkts received */
156 dev_stats->rx_errors = port_stats->rx_crc_errors + 168 dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -168,7 +180,8 @@ void netdev_stats_update(struct be_adapter *adapter)
168 port_stats->rx_udp_checksum_errs; 180 port_stats->rx_udp_checksum_errs;
169 181
170 /* no space in linux buffers: best possible approximation */ 182 /* no space in linux buffers: best possible approximation */
171 dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0]; 183 dev_stats->rx_dropped =
184 erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];
172 185
173 /* detailed rx errors */ 186 /* detailed rx errors */
174 dev_stats->rx_length_errors = port_stats->rx_in_range_errors + 187 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
@@ -214,6 +227,7 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
214 227
215 /* If link came up or went down */ 228 /* If link came up or went down */
216 if (adapter->link_up != link_up) { 229 if (adapter->link_up != link_up) {
230 adapter->link_speed = -1;
217 if (link_up) { 231 if (link_up) {
218 netif_start_queue(netdev); 232 netif_start_queue(netdev);
219 netif_carrier_on(netdev); 233 netif_carrier_on(netdev);
@@ -269,9 +283,7 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
269 283
270static struct net_device_stats *be_get_stats(struct net_device *dev) 284static struct net_device_stats *be_get_stats(struct net_device *dev)
271{ 285{
272 struct be_adapter *adapter = netdev_priv(dev); 286 return &dev->stats;
273
274 return &adapter->stats.net_stats;
275} 287}
276 288
277static u32 be_calc_rate(u64 bytes, unsigned long ticks) 289static u32 be_calc_rate(u64 bytes, unsigned long ticks)
@@ -307,12 +319,13 @@ static void be_tx_rate_update(struct be_adapter *adapter)
307} 319}
308 320
309static void be_tx_stats_update(struct be_adapter *adapter, 321static void be_tx_stats_update(struct be_adapter *adapter,
310 u32 wrb_cnt, u32 copied, bool stopped) 322 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
311{ 323{
312 struct be_drvr_stats *stats = drvr_stats(adapter); 324 struct be_drvr_stats *stats = drvr_stats(adapter);
313 stats->be_tx_reqs++; 325 stats->be_tx_reqs++;
314 stats->be_tx_wrbs += wrb_cnt; 326 stats->be_tx_wrbs += wrb_cnt;
315 stats->be_tx_bytes += copied; 327 stats->be_tx_bytes += copied;
328 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
316 if (stopped) 329 if (stopped)
317 stats->be_tx_stops++; 330 stats->be_tx_stops++;
318} 331}
@@ -389,15 +402,11 @@ static int make_tx_wrbs(struct be_adapter *adapter,
389 atomic_add(wrb_cnt, &txq->used); 402 atomic_add(wrb_cnt, &txq->used);
390 queue_head_inc(txq); 403 queue_head_inc(txq);
391 404
392 if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
393 dev_err(&pdev->dev, "TX DMA mapping failed\n");
394 return 0;
395 }
396
397 if (skb->len > skb->data_len) { 405 if (skb->len > skb->data_len) {
398 int len = skb->len - skb->data_len; 406 int len = skb->len - skb->data_len;
407 busaddr = pci_map_single(pdev, skb->data, len,
408 PCI_DMA_TODEVICE);
399 wrb = queue_head_node(txq); 409 wrb = queue_head_node(txq);
400 busaddr = skb_shinfo(skb)->dma_head;
401 wrb_fill(wrb, busaddr, len); 410 wrb_fill(wrb, busaddr, len);
402 be_dws_cpu_to_le(wrb, sizeof(*wrb)); 411 be_dws_cpu_to_le(wrb, sizeof(*wrb));
403 queue_head_inc(txq); 412 queue_head_inc(txq);
@@ -407,8 +416,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
407 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 416 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
408 struct skb_frag_struct *frag = 417 struct skb_frag_struct *frag =
409 &skb_shinfo(skb)->frags[i]; 418 &skb_shinfo(skb)->frags[i];
410 419 busaddr = pci_map_page(pdev, frag->page,
411 busaddr = skb_shinfo(skb)->dma_maps[i]; 420 frag->page_offset,
421 frag->size, PCI_DMA_TODEVICE);
412 wrb = queue_head_node(txq); 422 wrb = queue_head_node(txq);
413 wrb_fill(wrb, busaddr, frag->size); 423 wrb_fill(wrb, busaddr, frag->size);
414 be_dws_cpu_to_le(wrb, sizeof(*wrb)); 424 be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -460,7 +470,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
460 470
461 be_txq_notify(adapter, txq->id, wrb_cnt); 471 be_txq_notify(adapter, txq->id, wrb_cnt);
462 472
463 be_tx_stats_update(adapter, wrb_cnt, copied, stopped); 473 be_tx_stats_update(adapter, wrb_cnt, copied,
474 skb_shinfo(skb)->gso_segs, stopped);
464 } else { 475 } else {
465 txq->head = start; 476 txq->head = start;
466 dev_kfree_skb_any(skb); 477 dev_kfree_skb_any(skb);
@@ -472,10 +483,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
472{ 483{
473 struct be_adapter *adapter = netdev_priv(netdev); 484 struct be_adapter *adapter = netdev_priv(netdev);
474 if (new_mtu < BE_MIN_MTU || 485 if (new_mtu < BE_MIN_MTU ||
475 new_mtu > BE_MAX_JUMBO_FRAME_SIZE) { 486 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
487 (ETH_HLEN + ETH_FCS_LEN))) {
476 dev_info(&adapter->pdev->dev, 488 dev_info(&adapter->pdev->dev,
477 "MTU must be between %d and %d bytes\n", 489 "MTU must be between %d and %d bytes\n",
478 BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE); 490 BE_MIN_MTU,
491 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
479 return -EINVAL; 492 return -EINVAL;
480 } 493 }
481 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", 494 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
@@ -485,17 +498,16 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
485} 498}
486 499
487/* 500/*
488 * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured, 501 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
489 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured, 502 * If the user configures more, place BE in vlan promiscuous mode.
490 * set the BE in promiscuous VLAN mode.
491 */ 503 */
492static int be_vid_config(struct be_adapter *adapter) 504static int be_vid_config(struct be_adapter *adapter)
493{ 505{
494 u16 vtag[BE_NUM_VLANS_SUPPORTED]; 506 u16 vtag[BE_NUM_VLANS_SUPPORTED];
495 u16 ntags = 0, i; 507 u16 ntags = 0, i;
496 int status; 508 int status = 0;
497 509
498 if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) { 510 if (adapter->vlans_added <= adapter->max_vlans) {
499 /* Construct VLAN Table to give to HW */ 511 /* Construct VLAN Table to give to HW */
500 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 512 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
501 if (adapter->vlan_tag[i]) { 513 if (adapter->vlan_tag[i]) {
@@ -529,21 +541,21 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
529{ 541{
530 struct be_adapter *adapter = netdev_priv(netdev); 542 struct be_adapter *adapter = netdev_priv(netdev);
531 543
532 adapter->num_vlans++;
533 adapter->vlan_tag[vid] = 1; 544 adapter->vlan_tag[vid] = 1;
534 545 adapter->vlans_added++;
535 be_vid_config(adapter); 546 if (adapter->vlans_added <= (adapter->max_vlans + 1))
547 be_vid_config(adapter);
536} 548}
537 549
538static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) 550static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
539{ 551{
540 struct be_adapter *adapter = netdev_priv(netdev); 552 struct be_adapter *adapter = netdev_priv(netdev);
541 553
542 adapter->num_vlans--;
543 adapter->vlan_tag[vid] = 0; 554 adapter->vlan_tag[vid] = 0;
544
545 vlan_group_set_device(adapter->vlan_grp, vid, NULL); 555 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
546 be_vid_config(adapter); 556 adapter->vlans_added--;
557 if (adapter->vlans_added <= adapter->max_vlans)
558 be_vid_config(adapter);
547} 559}
548 560
549static void be_set_multicast_list(struct net_device *netdev) 561static void be_set_multicast_list(struct net_device *netdev)
@@ -562,13 +574,16 @@ static void be_set_multicast_list(struct net_device *netdev)
562 be_cmd_promiscuous_config(adapter, adapter->port_num, 0); 574 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
563 } 575 }
564 576
565 if (netdev->flags & IFF_ALLMULTI) { 577 /* Enable multicast promisc if num configured exceeds what we support */
566 be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0); 578 if (netdev->flags & IFF_ALLMULTI ||
579 netdev_mc_count(netdev) > BE_MAX_MC) {
580 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
581 &adapter->mc_cmd_mem);
567 goto done; 582 goto done;
568 } 583 }
569 584
570 be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list, 585 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
571 netdev->mc_count); 586 &adapter->mc_cmd_mem);
572done: 587done:
573 return; 588 return;
574} 589}
@@ -603,6 +618,7 @@ static void be_rx_stats_update(struct be_adapter *adapter,
603 stats->be_rx_compl++; 618 stats->be_rx_compl++;
604 stats->be_rx_frags += numfrags; 619 stats->be_rx_frags += numfrags;
605 stats->be_rx_bytes += pktsize; 620 stats->be_rx_bytes += pktsize;
621 stats->be_rx_pkts++;
606} 622}
607 623
608static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) 624static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -630,9 +646,11 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
630 rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx]; 646 rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
631 BUG_ON(!rx_page_info->page); 647 BUG_ON(!rx_page_info->page);
632 648
633 if (rx_page_info->last_page_user) 649 if (rx_page_info->last_page_user) {
634 pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus), 650 pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
635 adapter->big_page_size, PCI_DMA_FROMDEVICE); 651 adapter->big_page_size, PCI_DMA_FROMDEVICE);
652 rx_page_info->last_page_user = false;
653 }
636 654
637 atomic_dec(&rxq->used); 655 atomic_dec(&rxq->used);
638 return rx_page_info; 656 return rx_page_info;
@@ -662,17 +680,17 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
662 * indicated by rxcp. 680 * indicated by rxcp.
663 */ 681 */
664static void skb_fill_rx_data(struct be_adapter *adapter, 682static void skb_fill_rx_data(struct be_adapter *adapter,
665 struct sk_buff *skb, struct be_eth_rx_compl *rxcp) 683 struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
684 u16 num_rcvd)
666{ 685{
667 struct be_queue_info *rxq = &adapter->rx_obj.q; 686 struct be_queue_info *rxq = &adapter->rx_obj.q;
668 struct be_rx_page_info *page_info; 687 struct be_rx_page_info *page_info;
669 u16 rxq_idx, i, num_rcvd, j; 688 u16 rxq_idx, i, j;
670 u32 pktsize, hdr_len, curr_frag_len, size; 689 u32 pktsize, hdr_len, curr_frag_len, size;
671 u8 *start; 690 u8 *start;
672 691
673 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 692 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
674 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 693 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
675 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
676 694
677 page_info = get_rx_page_info(adapter, rxq_idx); 695 page_info = get_rx_page_info(adapter, rxq_idx);
678 696
@@ -700,7 +718,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
700 skb->data_len = curr_frag_len - hdr_len; 718 skb->data_len = curr_frag_len - hdr_len;
701 skb->tail += hdr_len; 719 skb->tail += hdr_len;
702 } 720 }
703 memset(page_info, 0, sizeof(*page_info)); 721 page_info->page = NULL;
704 722
705 if (pktsize <= rx_frag_size) { 723 if (pktsize <= rx_frag_size) {
706 BUG_ON(num_rcvd != 1); 724 BUG_ON(num_rcvd != 1);
@@ -733,7 +751,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
733 skb->len += curr_frag_len; 751 skb->len += curr_frag_len;
734 skb->data_len += curr_frag_len; 752 skb->data_len += curr_frag_len;
735 753
736 memset(page_info, 0, sizeof(*page_info)); 754 page_info->page = NULL;
737 } 755 }
738 BUG_ON(j > MAX_SKB_FRAGS); 756 BUG_ON(j > MAX_SKB_FRAGS);
739 757
@@ -748,27 +766,23 @@ static void be_rx_compl_process(struct be_adapter *adapter,
748{ 766{
749 struct sk_buff *skb; 767 struct sk_buff *skb;
750 u32 vlanf, vid; 768 u32 vlanf, vid;
769 u16 num_rcvd;
751 u8 vtm; 770 u8 vtm;
752 771
753 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 772 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
754 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); 773 /* Is it a flush compl that has no data */
755 774 if (unlikely(num_rcvd == 0))
756 /* vlanf could be wrongly set in some cards. 775 return;
757 * ignore if vtm is not set */
758 if ((adapter->cap == 0x400) && !vtm)
759 vlanf = 0;
760 776
761 skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); 777 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
762 if (!skb) { 778 if (unlikely(!skb)) {
763 if (net_ratelimit()) 779 if (net_ratelimit())
764 dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); 780 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
765 be_rx_compl_discard(adapter, rxcp); 781 be_rx_compl_discard(adapter, rxcp);
766 return; 782 return;
767 } 783 }
768 784
769 skb_reserve(skb, NET_IP_ALIGN); 785 skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);
770
771 skb_fill_rx_data(adapter, skb, rxcp);
772 786
773 if (do_pkt_csum(rxcp, adapter->rx_csum)) 787 if (do_pkt_csum(rxcp, adapter->rx_csum))
774 skb->ip_summed = CHECKSUM_NONE; 788 skb->ip_summed = CHECKSUM_NONE;
@@ -779,13 +793,21 @@ static void be_rx_compl_process(struct be_adapter *adapter,
779 skb->protocol = eth_type_trans(skb, adapter->netdev); 793 skb->protocol = eth_type_trans(skb, adapter->netdev);
780 skb->dev = adapter->netdev; 794 skb->dev = adapter->netdev;
781 795
782 if (vlanf) { 796 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
783 if (!adapter->vlan_grp || adapter->num_vlans == 0) { 797 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
798
799 /* vlanf could be wrongly set in some cards.
800 * ignore if vtm is not set */
801 if ((adapter->cap & 0x400) && !vtm)
802 vlanf = 0;
803
804 if (unlikely(vlanf)) {
805 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
784 kfree_skb(skb); 806 kfree_skb(skb);
785 return; 807 return;
786 } 808 }
787 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 809 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
788 vid = be16_to_cpu(vid); 810 vid = swab16(vid);
789 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); 811 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
790 } else { 812 } else {
791 netif_receive_skb(skb); 813 netif_receive_skb(skb);
@@ -807,6 +829,10 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
807 u8 vtm; 829 u8 vtm;
808 830
809 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 831 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
832 /* Is it a flush compl that has no data */
833 if (unlikely(num_rcvd == 0))
834 return;
835
810 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 836 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
811 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 837 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
812 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 838 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
@@ -814,7 +840,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
814 840
815 /* vlanf could be wrongly set in some cards. 841 /* vlanf could be wrongly set in some cards.
816 * ignore if vtm is not set */ 842 * ignore if vtm is not set */
817 if ((adapter->cap == 0x400) && !vtm) 843 if ((adapter->cap & 0x400) && !vtm)
818 vlanf = 0; 844 vlanf = 0;
819 845
820 skb = napi_get_frags(&eq_obj->napi); 846 skb = napi_get_frags(&eq_obj->napi);
@@ -858,9 +884,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
858 napi_gro_frags(&eq_obj->napi); 884 napi_gro_frags(&eq_obj->napi);
859 } else { 885 } else {
860 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 886 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
861 vid = be16_to_cpu(vid); 887 vid = swab16(vid);
862 888
863 if (!adapter->vlan_grp || adapter->num_vlans == 0) 889 if (!adapter->vlan_grp || adapter->vlans_added == 0)
864 return; 890 return;
865 891
866 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); 892 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
@@ -908,7 +934,7 @@ static inline struct page *be_alloc_pages(u32 size)
908static void be_post_rx_frags(struct be_adapter *adapter) 934static void be_post_rx_frags(struct be_adapter *adapter)
909{ 935{
910 struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; 936 struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
911 struct be_rx_page_info *page_info = NULL; 937 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
912 struct be_queue_info *rxq = &adapter->rx_obj.q; 938 struct be_queue_info *rxq = &adapter->rx_obj.q;
913 struct page *pagep = NULL; 939 struct page *pagep = NULL;
914 struct be_eth_rx_d *rxd; 940 struct be_eth_rx_d *rxd;
@@ -939,7 +965,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
939 rxd = queue_head_node(rxq); 965 rxd = queue_head_node(rxq);
940 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); 966 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
941 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); 967 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
942 queue_head_inc(rxq);
943 968
944 /* Any space left in the current big page for another frag? */ 969 /* Any space left in the current big page for another frag? */
945 if ((page_offset + rx_frag_size + rx_frag_size) > 970 if ((page_offset + rx_frag_size + rx_frag_size) >
@@ -947,10 +972,13 @@ static void be_post_rx_frags(struct be_adapter *adapter)
947 pagep = NULL; 972 pagep = NULL;
948 page_info->last_page_user = true; 973 page_info->last_page_user = true;
949 } 974 }
975
976 prev_page_info = page_info;
977 queue_head_inc(rxq);
950 page_info = &page_info_tbl[rxq->head]; 978 page_info = &page_info_tbl[rxq->head];
951 } 979 }
952 if (pagep) 980 if (pagep)
953 page_info->last_page_user = true; 981 prev_page_info->last_page_user = true;
954 982
955 if (posted) { 983 if (posted) {
956 atomic_add(posted, &rxq->used); 984 atomic_add(posted, &rxq->used);
@@ -981,23 +1009,41 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
981static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index) 1009static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
982{ 1010{
983 struct be_queue_info *txq = &adapter->tx_obj.q; 1011 struct be_queue_info *txq = &adapter->tx_obj.q;
1012 struct be_eth_wrb *wrb;
984 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; 1013 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
985 struct sk_buff *sent_skb; 1014 struct sk_buff *sent_skb;
1015 u64 busaddr;
986 u16 cur_index, num_wrbs = 0; 1016 u16 cur_index, num_wrbs = 0;
987 1017
988 cur_index = txq->tail; 1018 cur_index = txq->tail;
989 sent_skb = sent_skbs[cur_index]; 1019 sent_skb = sent_skbs[cur_index];
990 BUG_ON(!sent_skb); 1020 BUG_ON(!sent_skb);
991 sent_skbs[cur_index] = NULL; 1021 sent_skbs[cur_index] = NULL;
1022 wrb = queue_tail_node(txq);
1023 be_dws_le_to_cpu(wrb, sizeof(*wrb));
1024 busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
1025 if (busaddr != 0) {
1026 pci_unmap_single(adapter->pdev, busaddr,
1027 wrb->frag_len, PCI_DMA_TODEVICE);
1028 }
1029 num_wrbs++;
1030 queue_tail_inc(txq);
992 1031
993 do { 1032 while (cur_index != last_index) {
994 cur_index = txq->tail; 1033 cur_index = txq->tail;
1034 wrb = queue_tail_node(txq);
1035 be_dws_le_to_cpu(wrb, sizeof(*wrb));
1036 busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
1037 if (busaddr != 0) {
1038 pci_unmap_page(adapter->pdev, busaddr,
1039 wrb->frag_len, PCI_DMA_TODEVICE);
1040 }
995 num_wrbs++; 1041 num_wrbs++;
996 queue_tail_inc(txq); 1042 queue_tail_inc(txq);
997 } while (cur_index != last_index); 1043 }
998 1044
999 atomic_sub(num_wrbs, &txq->used); 1045 atomic_sub(num_wrbs, &txq->used);
1000 skb_dma_unmap(&adapter->pdev->dev, sent_skb, DMA_TO_DEVICE); 1046
1001 kfree_skb(sent_skb); 1047 kfree_skb(sent_skb);
1002} 1048}
1003 1049
@@ -1082,6 +1128,9 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
1082 struct be_queue_info *txq = &adapter->tx_obj.q; 1128 struct be_queue_info *txq = &adapter->tx_obj.q;
1083 struct be_eth_tx_compl *txcp; 1129 struct be_eth_tx_compl *txcp;
1084 u16 end_idx, cmpl = 0, timeo = 0; 1130 u16 end_idx, cmpl = 0, timeo = 0;
1131 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1132 struct sk_buff *sent_skb;
1133 bool dummy_wrb;
1085 1134
1086 /* Wait for a max of 200ms for all the tx-completions to arrive. */ 1135 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1087 do { 1136 do {
@@ -1105,6 +1154,15 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
1105 if (atomic_read(&txq->used)) 1154 if (atomic_read(&txq->used))
1106 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n", 1155 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1107 atomic_read(&txq->used)); 1156 atomic_read(&txq->used));
1157
1158 /* free posted tx for which compls will never arrive */
1159 while (atomic_read(&txq->used)) {
1160 sent_skb = sent_skbs[txq->tail];
1161 end_idx = txq->tail;
1162 index_adv(&end_idx,
1163 wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
1164 be_tx_compl_process(adapter, end_idx);
1165 }
1108} 1166}
1109 1167
1110static void be_mcc_queues_destroy(struct be_adapter *adapter) 1168static void be_mcc_queues_destroy(struct be_adapter *adapter)
@@ -1237,6 +1295,11 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
1237 q = &adapter->rx_obj.q; 1295 q = &adapter->rx_obj.q;
1238 if (q->created) { 1296 if (q->created) {
1239 be_cmd_q_destroy(adapter, q, QTYPE_RXQ); 1297 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1298
1299 /* After the rxq is invalidated, wait for a grace time
1300 * of 1ms for all dma to end and the flush compl to arrive
1301 */
1302 mdelay(1);
1240 be_rx_q_clean(adapter); 1303 be_rx_q_clean(adapter);
1241 } 1304 }
1242 be_queue_free(adapter, q); 1305 be_queue_free(adapter, q);
@@ -1319,7 +1382,7 @@ rx_eq_free:
1319/* There are 8 evt ids per func. Retruns the evt id's bit number */ 1382/* There are 8 evt ids per func. Retruns the evt id's bit number */
1320static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id) 1383static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1321{ 1384{
1322 return eq_id - 8 * be_pci_func(adapter); 1385 return eq_id % 8;
1323} 1386}
1324 1387
1325static irqreturn_t be_intx(int irq, void *dev) 1388static irqreturn_t be_intx(int irq, void *dev)
@@ -1328,7 +1391,7 @@ static irqreturn_t be_intx(int irq, void *dev)
1328 int isr; 1391 int isr;
1329 1392
1330 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + 1393 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1331 be_pci_func(adapter) * CEV_ISR_SIZE); 1394 (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
1332 if (!isr) 1395 if (!isr)
1333 return IRQ_NONE; 1396 return IRQ_NONE;
1334 1397
@@ -1377,6 +1440,7 @@ int be_poll_rx(struct napi_struct *napi, int budget)
1377 struct be_eth_rx_compl *rxcp; 1440 struct be_eth_rx_compl *rxcp;
1378 u32 work_done; 1441 u32 work_done;
1379 1442
1443 adapter->stats.drvr_stats.be_rx_polls++;
1380 for (work_done = 0; work_done < budget; work_done++) { 1444 for (work_done = 0; work_done < budget; work_done++) {
1381 rxcp = be_rx_compl_get(adapter); 1445 rxcp = be_rx_compl_get(adapter);
1382 if (!rxcp) 1446 if (!rxcp)
@@ -1405,23 +1469,38 @@ int be_poll_rx(struct napi_struct *napi, int budget)
1405 return work_done; 1469 return work_done;
1406} 1470}
1407 1471
1408void be_process_tx(struct be_adapter *adapter) 1472/* As TX and MCC share the same EQ check for both TX and MCC completions.
1473 * For TX/MCC we don't honour budget; consume everything
1474 */
1475static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1409{ 1476{
1477 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1478 struct be_adapter *adapter =
1479 container_of(tx_eq, struct be_adapter, tx_eq);
1410 struct be_queue_info *txq = &adapter->tx_obj.q; 1480 struct be_queue_info *txq = &adapter->tx_obj.q;
1411 struct be_queue_info *tx_cq = &adapter->tx_obj.cq; 1481 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1412 struct be_eth_tx_compl *txcp; 1482 struct be_eth_tx_compl *txcp;
1413 u32 num_cmpl = 0; 1483 int tx_compl = 0, mcc_compl, status = 0;
1414 u16 end_idx; 1484 u16 end_idx;
1415 1485
1416 while ((txcp = be_tx_compl_get(tx_cq))) { 1486 while ((txcp = be_tx_compl_get(tx_cq))) {
1417 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, 1487 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1418 wrb_index, txcp); 1488 wrb_index, txcp);
1419 be_tx_compl_process(adapter, end_idx); 1489 be_tx_compl_process(adapter, end_idx);
1420 num_cmpl++; 1490 tx_compl++;
1491 }
1492
1493 mcc_compl = be_process_mcc(adapter, &status);
1494
1495 napi_complete(napi);
1496
1497 if (mcc_compl) {
1498 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1499 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1421 } 1500 }
1422 1501
1423 if (num_cmpl) { 1502 if (tx_compl) {
1424 be_cq_notify(adapter, tx_cq->id, true, num_cmpl); 1503 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1425 1504
1426 /* As Tx wrbs have been freed up, wake up netdev queue if 1505 /* As Tx wrbs have been freed up, wake up netdev queue if
1427 * it was stopped due to lack of tx wrbs. 1506 * it was stopped due to lack of tx wrbs.
@@ -1432,24 +1511,8 @@ void be_process_tx(struct be_adapter *adapter)
1432 } 1511 }
1433 1512
1434 drvr_stats(adapter)->be_tx_events++; 1513 drvr_stats(adapter)->be_tx_events++;
1435 drvr_stats(adapter)->be_tx_compl += num_cmpl; 1514 drvr_stats(adapter)->be_tx_compl += tx_compl;
1436 } 1515 }
1437}
1438
1439/* As TX and MCC share the same EQ check for both TX and MCC completions.
1440 * For TX/MCC we don't honour budget; consume everything
1441 */
1442static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1443{
1444 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1445 struct be_adapter *adapter =
1446 container_of(tx_eq, struct be_adapter, tx_eq);
1447
1448 napi_complete(napi);
1449
1450 be_process_tx(adapter);
1451
1452 be_process_mcc(adapter);
1453 1516
1454 return 1; 1517 return 1;
1455} 1518}
@@ -1475,6 +1538,14 @@ static void be_worker(struct work_struct *work)
1475 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 1538 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1476} 1539}
1477 1540
1541static void be_msix_disable(struct be_adapter *adapter)
1542{
1543 if (adapter->msix_enabled) {
1544 pci_disable_msix(adapter->pdev);
1545 adapter->msix_enabled = false;
1546 }
1547}
1548
1478static void be_msix_enable(struct be_adapter *adapter) 1549static void be_msix_enable(struct be_adapter *adapter)
1479{ 1550{
1480 int i, status; 1551 int i, status;
@@ -1590,6 +1661,8 @@ static int be_open(struct net_device *netdev)
1590 struct be_eq_obj *tx_eq = &adapter->tx_eq; 1661 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1591 bool link_up; 1662 bool link_up;
1592 int status; 1663 int status;
1664 u8 mac_speed;
1665 u16 link_speed;
1593 1666
1594 /* First time posting */ 1667 /* First time posting */
1595 be_post_rx_frags(adapter); 1668 be_post_rx_frags(adapter);
@@ -1608,7 +1681,11 @@ static int be_open(struct net_device *netdev)
1608 /* Rx compl queue may be in unarmed state; rearm it */ 1681 /* Rx compl queue may be in unarmed state; rearm it */
1609 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0); 1682 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
1610 1683
1611 status = be_cmd_link_status_query(adapter, &link_up); 1684 /* Now that interrupts are on we can process async mcc */
1685 be_async_mcc_enable(adapter);
1686
1687 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
1688 &link_speed);
1612 if (status) 1689 if (status)
1613 goto ret_sts; 1690 goto ret_sts;
1614 be_link_status_update(adapter, link_up); 1691 be_link_status_update(adapter, link_up);
@@ -1627,6 +1704,44 @@ ret_sts:
1627 return status; 1704 return status;
1628} 1705}
1629 1706
1707static int be_setup_wol(struct be_adapter *adapter, bool enable)
1708{
1709 struct be_dma_mem cmd;
1710 int status = 0;
1711 u8 mac[ETH_ALEN];
1712
1713 memset(mac, 0, ETH_ALEN);
1714
1715 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
1716 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
1717 if (cmd.va == NULL)
1718 return -1;
1719 memset(cmd.va, 0, cmd.size);
1720
1721 if (enable) {
1722 status = pci_write_config_dword(adapter->pdev,
1723 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
1724 if (status) {
1725 dev_err(&adapter->pdev->dev,
1726 "Could not enable Wake-on-lan \n");
1727 pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
1728 cmd.dma);
1729 return status;
1730 }
1731 status = be_cmd_enable_magic_wol(adapter,
1732 adapter->netdev->dev_addr, &cmd);
1733 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
1734 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
1735 } else {
1736 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
1737 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
1738 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
1739 }
1740
1741 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
1742 return status;
1743}
1744
1630static int be_setup(struct be_adapter *adapter) 1745static int be_setup(struct be_adapter *adapter)
1631{ 1746{
1632 struct net_device *netdev = adapter->netdev; 1747 struct net_device *netdev = adapter->netdev;
@@ -1658,6 +1773,8 @@ static int be_setup(struct be_adapter *adapter)
1658 if (status != 0) 1773 if (status != 0)
1659 goto rx_qs_destroy; 1774 goto rx_qs_destroy;
1660 1775
1776 adapter->link_speed = -1;
1777
1661 return 0; 1778 return 0;
1662 1779
1663rx_qs_destroy: 1780rx_qs_destroy:
@@ -1678,6 +1795,8 @@ static int be_clear(struct be_adapter *adapter)
1678 1795
1679 be_cmd_if_destroy(adapter, adapter->if_handle); 1796 be_cmd_if_destroy(adapter, adapter->if_handle);
1680 1797
1798 /* tell fw we're done with firing cmds */
1799 be_cmd_fw_clean(adapter);
1681 return 0; 1800 return 0;
1682} 1801}
1683 1802
@@ -1690,6 +1809,8 @@ static int be_close(struct net_device *netdev)
1690 1809
1691 cancel_delayed_work_sync(&adapter->work); 1810 cancel_delayed_work_sync(&adapter->work);
1692 1811
1812 be_async_mcc_disable(adapter);
1813
1693 netif_stop_queue(netdev); 1814 netif_stop_queue(netdev);
1694 netif_carrier_off(netdev); 1815 netif_carrier_off(netdev);
1695 adapter->link_up = false; 1816 adapter->link_up = false;
@@ -1720,103 +1841,159 @@ static int be_close(struct net_device *netdev)
1720#define FW_FILE_HDR_SIGN "ServerEngines Corp. " 1841#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
1721char flash_cookie[2][16] = {"*** SE FLAS", 1842char flash_cookie[2][16] = {"*** SE FLAS",
1722 "H DIRECTORY *** "}; 1843 "H DIRECTORY *** "};
1723static int be_flash_image(struct be_adapter *adapter, 1844
1724 const struct firmware *fw, 1845static bool be_flash_redboot(struct be_adapter *adapter,
1725 struct be_dma_mem *flash_cmd, u32 flash_type) 1846 const u8 *p, u32 img_start, int image_size,
1847 int hdr_size)
1726{ 1848{
1849 u32 crc_offset;
1850 u8 flashed_crc[4];
1727 int status; 1851 int status;
1728 u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
1729 int num_bytes;
1730 const u8 *p = fw->data;
1731 struct be_cmd_write_flashrom *req = flash_cmd->va;
1732 1852
1733 switch (flash_type) { 1853 crc_offset = hdr_size + img_start + image_size - 4;
1734 case FLASHROM_TYPE_ISCSI_ACTIVE:
1735 image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
1736 image_size = FLASH_IMAGE_MAX_SIZE;
1737 break;
1738 case FLASHROM_TYPE_ISCSI_BACKUP:
1739 image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
1740 image_size = FLASH_IMAGE_MAX_SIZE;
1741 break;
1742 case FLASHROM_TYPE_FCOE_FW_ACTIVE:
1743 image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
1744 image_size = FLASH_IMAGE_MAX_SIZE;
1745 break;
1746 case FLASHROM_TYPE_FCOE_FW_BACKUP:
1747 image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
1748 image_size = FLASH_IMAGE_MAX_SIZE;
1749 break;
1750 case FLASHROM_TYPE_BIOS:
1751 image_offset = FLASH_iSCSI_BIOS_START;
1752 image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
1753 break;
1754 case FLASHROM_TYPE_FCOE_BIOS:
1755 image_offset = FLASH_FCoE_BIOS_START;
1756 image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
1757 break;
1758 case FLASHROM_TYPE_PXE_BIOS:
1759 image_offset = FLASH_PXE_BIOS_START;
1760 image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
1761 break;
1762 default:
1763 return 0;
1764 }
1765 1854
1766 p += sizeof(struct flash_file_hdr) + image_offset; 1855 p += crc_offset;
1767 if (p + image_size > fw->data + fw->size)
1768 return -1;
1769 1856
1770 total_bytes = image_size; 1857 status = be_cmd_get_flash_crc(adapter, flashed_crc,
1858 (image_size - 4));
1859 if (status) {
1860 dev_err(&adapter->pdev->dev,
1861 "could not get crc from flash, not flashing redboot\n");
1862 return false;
1863 }
1771 1864
1772 while (total_bytes) { 1865 /*update redboot only if crc does not match*/
1773 if (total_bytes > 32*1024) 1866 if (!memcmp(flashed_crc, p, 4))
1774 num_bytes = 32*1024; 1867 return false;
1775 else 1868 else
1776 num_bytes = total_bytes; 1869 return true;
1777 total_bytes -= num_bytes; 1870}
1778 1871
1779 if (!total_bytes) 1872static int be_flash_data(struct be_adapter *adapter,
1780 flash_op = FLASHROM_OPER_FLASH; 1873 const struct firmware *fw,
1781 else 1874 struct be_dma_mem *flash_cmd, int num_of_images)
1782 flash_op = FLASHROM_OPER_SAVE; 1875
1783 memcpy(req->params.data_buf, p, num_bytes); 1876{
1784 p += num_bytes; 1877 int status = 0, i, filehdr_size = 0;
1785 status = be_cmd_write_flashrom(adapter, flash_cmd, 1878 u32 total_bytes = 0, flash_op;
1786 flash_type, flash_op, num_bytes); 1879 int num_bytes;
1787 if (status) { 1880 const u8 *p = fw->data;
1788 dev_err(&adapter->pdev->dev, 1881 struct be_cmd_write_flashrom *req = flash_cmd->va;
1789 "cmd to write to flash rom failed. type/op %d/%d\n", 1882 struct flash_comp *pflashcomp;
1790 flash_type, flash_op); 1883 int num_comp;
1791 return -1; 1884
1885 struct flash_comp gen3_flash_types[9] = {
1886 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
1887 FLASH_IMAGE_MAX_SIZE_g3},
1888 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
1889 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
1890 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
1891 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
1892 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
1893 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
1894 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
1895 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
1896 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
1897 FLASH_IMAGE_MAX_SIZE_g3},
1898 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
1899 FLASH_IMAGE_MAX_SIZE_g3},
1900 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
1901 FLASH_IMAGE_MAX_SIZE_g3},
1902 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
1903 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
1904 };
1905 struct flash_comp gen2_flash_types[8] = {
1906 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
1907 FLASH_IMAGE_MAX_SIZE_g2},
1908 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
1909 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
1910 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
1911 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
1912 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
1913 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
1914 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
1915 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
1916 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
1917 FLASH_IMAGE_MAX_SIZE_g2},
1918 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
1919 FLASH_IMAGE_MAX_SIZE_g2},
1920 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
1921 FLASH_IMAGE_MAX_SIZE_g2}
1922 };
1923
1924 if (adapter->generation == BE_GEN3) {
1925 pflashcomp = gen3_flash_types;
1926 filehdr_size = sizeof(struct flash_file_hdr_g3);
1927 num_comp = 9;
1928 } else {
1929 pflashcomp = gen2_flash_types;
1930 filehdr_size = sizeof(struct flash_file_hdr_g2);
1931 num_comp = 8;
1932 }
1933 for (i = 0; i < num_comp; i++) {
1934 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
1935 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
1936 continue;
1937 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
1938 (!be_flash_redboot(adapter, fw->data,
1939 pflashcomp[i].offset, pflashcomp[i].size,
1940 filehdr_size)))
1941 continue;
1942 p = fw->data;
1943 p += filehdr_size + pflashcomp[i].offset
1944 + (num_of_images * sizeof(struct image_hdr));
1945 if (p + pflashcomp[i].size > fw->data + fw->size)
1946 return -1;
1947 total_bytes = pflashcomp[i].size;
1948 while (total_bytes) {
1949 if (total_bytes > 32*1024)
1950 num_bytes = 32*1024;
1951 else
1952 num_bytes = total_bytes;
1953 total_bytes -= num_bytes;
1954
1955 if (!total_bytes)
1956 flash_op = FLASHROM_OPER_FLASH;
1957 else
1958 flash_op = FLASHROM_OPER_SAVE;
1959 memcpy(req->params.data_buf, p, num_bytes);
1960 p += num_bytes;
1961 status = be_cmd_write_flashrom(adapter, flash_cmd,
1962 pflashcomp[i].optype, flash_op, num_bytes);
1963 if (status) {
1964 dev_err(&adapter->pdev->dev,
1965 "cmd to write to flash rom failed.\n");
1966 return -1;
1967 }
1968 yield();
1792 } 1969 }
1793 yield();
1794 } 1970 }
1795
1796 return 0; 1971 return 0;
1797} 1972}
1798 1973
1974static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
1975{
1976 if (fhdr == NULL)
1977 return 0;
1978 if (fhdr->build[0] == '3')
1979 return BE_GEN3;
1980 else if (fhdr->build[0] == '2')
1981 return BE_GEN2;
1982 else
1983 return 0;
1984}
1985
1799int be_load_fw(struct be_adapter *adapter, u8 *func) 1986int be_load_fw(struct be_adapter *adapter, u8 *func)
1800{ 1987{
1801 char fw_file[ETHTOOL_FLASH_MAX_FILENAME]; 1988 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
1802 const struct firmware *fw; 1989 const struct firmware *fw;
1803 struct flash_file_hdr *fhdr; 1990 struct flash_file_hdr_g2 *fhdr;
1804 struct flash_section_info *fsec = NULL; 1991 struct flash_file_hdr_g3 *fhdr3;
1992 struct image_hdr *img_hdr_ptr = NULL;
1805 struct be_dma_mem flash_cmd; 1993 struct be_dma_mem flash_cmd;
1806 int status; 1994 int status, i = 0, num_imgs = 0;
1807 const u8 *p; 1995 const u8 *p;
1808 bool entry_found = false;
1809 int flash_type;
1810 char fw_ver[FW_VER_LEN];
1811 char fw_cfg;
1812 1996
1813 status = be_cmd_get_fw_ver(adapter, fw_ver);
1814 if (status)
1815 return status;
1816
1817 fw_cfg = *(fw_ver + 2);
1818 if (fw_cfg == '0')
1819 fw_cfg = '1';
1820 strcpy(fw_file, func); 1997 strcpy(fw_file, func);
1821 1998
1822 status = request_firmware(&fw, fw_file, &adapter->pdev->dev); 1999 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
@@ -1824,34 +2001,9 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
1824 goto fw_exit; 2001 goto fw_exit;
1825 2002
1826 p = fw->data; 2003 p = fw->data;
1827 fhdr = (struct flash_file_hdr *) p; 2004 fhdr = (struct flash_file_hdr_g2 *) p;
1828 if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
1829 dev_err(&adapter->pdev->dev,
1830 "Firmware(%s) load error (signature did not match)\n",
1831 fw_file);
1832 status = -1;
1833 goto fw_exit;
1834 }
1835
1836 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file); 2005 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
1837 2006
1838 p += sizeof(struct flash_file_hdr);
1839 while (p < (fw->data + fw->size)) {
1840 fsec = (struct flash_section_info *)p;
1841 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
1842 entry_found = true;
1843 break;
1844 }
1845 p += 32;
1846 }
1847
1848 if (!entry_found) {
1849 status = -1;
1850 dev_err(&adapter->pdev->dev,
1851 "Flash cookie not found in firmware image\n");
1852 goto fw_exit;
1853 }
1854
1855 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024; 2007 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
1856 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size, 2008 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
1857 &flash_cmd.dma); 2009 &flash_cmd.dma);
@@ -1862,12 +2014,25 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
1862 goto fw_exit; 2014 goto fw_exit;
1863 } 2015 }
1864 2016
1865 for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE; 2017 if ((adapter->generation == BE_GEN3) &&
1866 flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) { 2018 (get_ufigen_type(fhdr) == BE_GEN3)) {
1867 status = be_flash_image(adapter, fw, &flash_cmd, 2019 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
1868 flash_type); 2020 num_imgs = le32_to_cpu(fhdr3->num_imgs);
1869 if (status) 2021 for (i = 0; i < num_imgs; i++) {
1870 break; 2022 img_hdr_ptr = (struct image_hdr *) (fw->data +
2023 (sizeof(struct flash_file_hdr_g3) +
2024 i * sizeof(struct image_hdr)));
2025 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2026 status = be_flash_data(adapter, fw, &flash_cmd,
2027 num_imgs);
2028 }
2029 } else if ((adapter->generation == BE_GEN2) &&
2030 (get_ufigen_type(fhdr) == BE_GEN2)) {
2031 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2032 } else {
2033 dev_err(&adapter->pdev->dev,
2034 "UFI and Interface are not compatible for flashing\n");
2035 status = -1;
1871 } 2036 }
1872 2037
1873 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va, 2038 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
@@ -1877,7 +2042,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
1877 goto fw_exit; 2042 goto fw_exit;
1878 } 2043 }
1879 2044
1880 dev_info(&adapter->pdev->dev, "Firmware flashed succesfully\n"); 2045 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
1881 2046
1882fw_exit: 2047fw_exit:
1883 release_firmware(fw); 2048 release_firmware(fw);
@@ -1906,6 +2071,8 @@ static void be_netdev_init(struct net_device *netdev)
1906 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | 2071 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
1907 NETIF_F_GRO; 2072 NETIF_F_GRO;
1908 2073
2074 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2075
1909 netdev->flags |= IFF_MULTICAST; 2076 netdev->flags |= IFF_MULTICAST;
1910 2077
1911 adapter->rx_csum = true; 2078 adapter->rx_csum = true;
@@ -1942,6 +2109,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
1942static int be_map_pci_bars(struct be_adapter *adapter) 2109static int be_map_pci_bars(struct be_adapter *adapter)
1943{ 2110{
1944 u8 __iomem *addr; 2111 u8 __iomem *addr;
2112 int pcicfg_reg;
1945 2113
1946 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), 2114 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
1947 pci_resource_len(adapter->pdev, 2)); 2115 pci_resource_len(adapter->pdev, 2));
@@ -1955,8 +2123,13 @@ static int be_map_pci_bars(struct be_adapter *adapter)
1955 goto pci_map_err; 2123 goto pci_map_err;
1956 adapter->db = addr; 2124 adapter->db = addr;
1957 2125
1958 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1), 2126 if (adapter->generation == BE_GEN2)
1959 pci_resource_len(adapter->pdev, 1)); 2127 pcicfg_reg = 1;
2128 else
2129 pcicfg_reg = 0;
2130
2131 addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
2132 pci_resource_len(adapter->pdev, pcicfg_reg));
1960 if (addr == NULL) 2133 if (addr == NULL)
1961 goto pci_map_err; 2134 goto pci_map_err;
1962 adapter->pcicfg = addr; 2135 adapter->pcicfg = addr;
@@ -1977,34 +2150,62 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
1977 if (mem->va) 2150 if (mem->va)
1978 pci_free_consistent(adapter->pdev, mem->size, 2151 pci_free_consistent(adapter->pdev, mem->size,
1979 mem->va, mem->dma); 2152 mem->va, mem->dma);
2153
2154 mem = &adapter->mc_cmd_mem;
2155 if (mem->va)
2156 pci_free_consistent(adapter->pdev, mem->size,
2157 mem->va, mem->dma);
1980} 2158}
1981 2159
1982static int be_ctrl_init(struct be_adapter *adapter) 2160static int be_ctrl_init(struct be_adapter *adapter)
1983{ 2161{
1984 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; 2162 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
1985 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; 2163 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2164 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
1986 int status; 2165 int status;
1987 2166
1988 status = be_map_pci_bars(adapter); 2167 status = be_map_pci_bars(adapter);
1989 if (status) 2168 if (status)
1990 return status; 2169 goto done;
1991 2170
1992 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 2171 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
1993 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev, 2172 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
1994 mbox_mem_alloc->size, &mbox_mem_alloc->dma); 2173 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
1995 if (!mbox_mem_alloc->va) { 2174 if (!mbox_mem_alloc->va) {
1996 be_unmap_pci_bars(adapter); 2175 status = -ENOMEM;
1997 return -1; 2176 goto unmap_pci_bars;
1998 } 2177 }
2178
1999 mbox_mem_align->size = sizeof(struct be_mcc_mailbox); 2179 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2000 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); 2180 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2001 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 2181 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2002 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 2182 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2183
2184 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2185 mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2186 &mc_cmd_mem->dma);
2187 if (mc_cmd_mem->va == NULL) {
2188 status = -ENOMEM;
2189 goto free_mbox;
2190 }
2191 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2192
2003 spin_lock_init(&adapter->mbox_lock); 2193 spin_lock_init(&adapter->mbox_lock);
2004 spin_lock_init(&adapter->mcc_lock); 2194 spin_lock_init(&adapter->mcc_lock);
2005 spin_lock_init(&adapter->mcc_cq_lock); 2195 spin_lock_init(&adapter->mcc_cq_lock);
2006 2196
2197 pci_save_state(adapter->pdev);
2007 return 0; 2198 return 0;
2199
2200free_mbox:
2201 pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2202 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2203
2204unmap_pci_bars:
2205 be_unmap_pci_bars(adapter);
2206
2207done:
2208 return status;
2008} 2209}
2009 2210
2010static void be_stats_cleanup(struct be_adapter *adapter) 2211static void be_stats_cleanup(struct be_adapter *adapter)
@@ -2026,12 +2227,14 @@ static int be_stats_init(struct be_adapter *adapter)
2026 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); 2227 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2027 if (cmd->va == NULL) 2228 if (cmd->va == NULL)
2028 return -1; 2229 return -1;
2230 memset(cmd->va, 0, cmd->size);
2029 return 0; 2231 return 0;
2030} 2232}
2031 2233
2032static void __devexit be_remove(struct pci_dev *pdev) 2234static void __devexit be_remove(struct pci_dev *pdev)
2033{ 2235{
2034 struct be_adapter *adapter = pci_get_drvdata(pdev); 2236 struct be_adapter *adapter = pci_get_drvdata(pdev);
2237
2035 if (!adapter) 2238 if (!adapter)
2036 return; 2239 return;
2037 2240
@@ -2043,10 +2246,7 @@ static void __devexit be_remove(struct pci_dev *pdev)
2043 2246
2044 be_ctrl_cleanup(adapter); 2247 be_ctrl_cleanup(adapter);
2045 2248
2046 if (adapter->msix_enabled) { 2249 be_msix_disable(adapter);
2047 pci_disable_msix(adapter->pdev);
2048 adapter->msix_enabled = false;
2049 }
2050 2250
2051 pci_set_drvdata(pdev, NULL); 2251 pci_set_drvdata(pdev, NULL);
2052 pci_release_regions(pdev); 2252 pci_release_regions(pdev);
@@ -2055,25 +2255,38 @@ static void __devexit be_remove(struct pci_dev *pdev)
2055 free_netdev(adapter->netdev); 2255 free_netdev(adapter->netdev);
2056} 2256}
2057 2257
2058static int be_hw_up(struct be_adapter *adapter) 2258static int be_get_config(struct be_adapter *adapter)
2059{ 2259{
2060 int status; 2260 int status;
2261 u8 mac[ETH_ALEN];
2061 2262
2062 status = be_cmd_POST(adapter); 2263 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2063 if (status) 2264 if (status)
2064 return status; 2265 return status;
2065 2266
2066 status = be_cmd_reset_function(adapter); 2267 status = be_cmd_query_fw_cfg(adapter,
2268 &adapter->port_num, &adapter->cap);
2067 if (status) 2269 if (status)
2068 return status; 2270 return status;
2069 2271
2070 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver); 2272 memset(mac, 0, ETH_ALEN);
2273 status = be_cmd_mac_addr_query(adapter, mac,
2274 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2071 if (status) 2275 if (status)
2072 return status; 2276 return status;
2073 2277
2074 status = be_cmd_query_fw_cfg(adapter, 2278 if (!is_valid_ether_addr(mac))
2075 &adapter->port_num, &adapter->cap); 2279 return -EADDRNOTAVAIL;
2076 return status; 2280
2281 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2282 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2283
2284 if (adapter->cap & 0x400)
2285 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2286 else
2287 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2288
2289 return 0;
2077} 2290}
2078 2291
2079static int __devinit be_probe(struct pci_dev *pdev, 2292static int __devinit be_probe(struct pci_dev *pdev,
@@ -2082,7 +2295,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
2082 int status = 0; 2295 int status = 0;
2083 struct be_adapter *adapter; 2296 struct be_adapter *adapter;
2084 struct net_device *netdev; 2297 struct net_device *netdev;
2085 u8 mac[ETH_ALEN];
2086 2298
2087 status = pci_enable_device(pdev); 2299 status = pci_enable_device(pdev);
2088 if (status) 2300 if (status)
@@ -2099,9 +2311,25 @@ static int __devinit be_probe(struct pci_dev *pdev,
2099 goto rel_reg; 2311 goto rel_reg;
2100 } 2312 }
2101 adapter = netdev_priv(netdev); 2313 adapter = netdev_priv(netdev);
2314
2315 switch (pdev->device) {
2316 case BE_DEVICE_ID1:
2317 case OC_DEVICE_ID1:
2318 adapter->generation = BE_GEN2;
2319 break;
2320 case BE_DEVICE_ID2:
2321 case OC_DEVICE_ID2:
2322 adapter->generation = BE_GEN3;
2323 break;
2324 default:
2325 adapter->generation = 0;
2326 }
2327
2102 adapter->pdev = pdev; 2328 adapter->pdev = pdev;
2103 pci_set_drvdata(pdev, adapter); 2329 pci_set_drvdata(pdev, adapter);
2104 adapter->netdev = netdev; 2330 adapter->netdev = netdev;
2331 be_netdev_init(netdev);
2332 SET_NETDEV_DEV(netdev, &pdev->dev);
2105 2333
2106 be_msix_enable(adapter); 2334 be_msix_enable(adapter);
2107 2335
@@ -2120,27 +2348,34 @@ static int __devinit be_probe(struct pci_dev *pdev,
2120 if (status) 2348 if (status)
2121 goto free_netdev; 2349 goto free_netdev;
2122 2350
2123 status = be_stats_init(adapter); 2351 /* sync up with fw's ready state */
2352 status = be_cmd_POST(adapter);
2124 if (status) 2353 if (status)
2125 goto ctrl_clean; 2354 goto ctrl_clean;
2126 2355
2127 status = be_hw_up(adapter); 2356 /* tell fw we're ready to fire cmds */
2357 status = be_cmd_fw_init(adapter);
2128 if (status) 2358 if (status)
2129 goto stats_clean; 2359 goto ctrl_clean;
2360
2361 status = be_cmd_reset_function(adapter);
2362 if (status)
2363 goto ctrl_clean;
2364
2365 status = be_stats_init(adapter);
2366 if (status)
2367 goto ctrl_clean;
2130 2368
2131 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, 2369 status = be_get_config(adapter);
2132 true /* permanent */, 0);
2133 if (status) 2370 if (status)
2134 goto stats_clean; 2371 goto stats_clean;
2135 memcpy(netdev->dev_addr, mac, ETH_ALEN);
2136 2372
2137 INIT_DELAYED_WORK(&adapter->work, be_worker); 2373 INIT_DELAYED_WORK(&adapter->work, be_worker);
2138 be_netdev_init(netdev);
2139 SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
2140 2374
2141 status = be_setup(adapter); 2375 status = be_setup(adapter);
2142 if (status) 2376 if (status)
2143 goto stats_clean; 2377 goto stats_clean;
2378
2144 status = register_netdev(netdev); 2379 status = register_netdev(netdev);
2145 if (status != 0) 2380 if (status != 0)
2146 goto unsetup; 2381 goto unsetup;
@@ -2155,7 +2390,9 @@ stats_clean:
2155ctrl_clean: 2390ctrl_clean:
2156 be_ctrl_cleanup(adapter); 2391 be_ctrl_cleanup(adapter);
2157free_netdev: 2392free_netdev:
2393 be_msix_disable(adapter);
2158 free_netdev(adapter->netdev); 2394 free_netdev(adapter->netdev);
2395 pci_set_drvdata(pdev, NULL);
2159rel_reg: 2396rel_reg:
2160 pci_release_regions(pdev); 2397 pci_release_regions(pdev);
2161disable_dev: 2398disable_dev:
@@ -2170,6 +2407,9 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
2170 struct be_adapter *adapter = pci_get_drvdata(pdev); 2407 struct be_adapter *adapter = pci_get_drvdata(pdev);
2171 struct net_device *netdev = adapter->netdev; 2408 struct net_device *netdev = adapter->netdev;
2172 2409
2410 if (adapter->wol)
2411 be_setup_wol(adapter, true);
2412
2173 netif_device_detach(netdev); 2413 netif_device_detach(netdev);
2174 if (netif_running(netdev)) { 2414 if (netif_running(netdev)) {
2175 rtnl_lock(); 2415 rtnl_lock();
@@ -2200,6 +2440,11 @@ static int be_resume(struct pci_dev *pdev)
2200 pci_set_power_state(pdev, 0); 2440 pci_set_power_state(pdev, 0);
2201 pci_restore_state(pdev); 2441 pci_restore_state(pdev);
2202 2442
2443 /* tell fw we're ready to fire cmds */
2444 status = be_cmd_fw_init(adapter);
2445 if (status)
2446 return status;
2447
2203 be_setup(adapter); 2448 be_setup(adapter);
2204 if (netif_running(netdev)) { 2449 if (netif_running(netdev)) {
2205 rtnl_lock(); 2450 rtnl_lock();
@@ -2207,22 +2452,135 @@ static int be_resume(struct pci_dev *pdev)
2207 rtnl_unlock(); 2452 rtnl_unlock();
2208 } 2453 }
2209 netif_device_attach(netdev); 2454 netif_device_attach(netdev);
2455
2456 if (adapter->wol)
2457 be_setup_wol(adapter, false);
2210 return 0; 2458 return 0;
2211} 2459}
2212 2460
2461/*
2462 * An FLR will stop BE from DMAing any data.
2463 */
2464static void be_shutdown(struct pci_dev *pdev)
2465{
2466 struct be_adapter *adapter = pci_get_drvdata(pdev);
2467 struct net_device *netdev = adapter->netdev;
2468
2469 netif_device_detach(netdev);
2470
2471 be_cmd_reset_function(adapter);
2472
2473 if (adapter->wol)
2474 be_setup_wol(adapter, true);
2475
2476 pci_disable_device(pdev);
2477
2478 return;
2479}
2480
2481static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
2482 pci_channel_state_t state)
2483{
2484 struct be_adapter *adapter = pci_get_drvdata(pdev);
2485 struct net_device *netdev = adapter->netdev;
2486
2487 dev_err(&adapter->pdev->dev, "EEH error detected\n");
2488
2489 adapter->eeh_err = true;
2490
2491 netif_device_detach(netdev);
2492
2493 if (netif_running(netdev)) {
2494 rtnl_lock();
2495 be_close(netdev);
2496 rtnl_unlock();
2497 }
2498 be_clear(adapter);
2499
2500 if (state == pci_channel_io_perm_failure)
2501 return PCI_ERS_RESULT_DISCONNECT;
2502
2503 pci_disable_device(pdev);
2504
2505 return PCI_ERS_RESULT_NEED_RESET;
2506}
2507
2508static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
2509{
2510 struct be_adapter *adapter = pci_get_drvdata(pdev);
2511 int status;
2512
2513 dev_info(&adapter->pdev->dev, "EEH reset\n");
2514 adapter->eeh_err = false;
2515
2516 status = pci_enable_device(pdev);
2517 if (status)
2518 return PCI_ERS_RESULT_DISCONNECT;
2519
2520 pci_set_master(pdev);
2521 pci_set_power_state(pdev, 0);
2522 pci_restore_state(pdev);
2523
2524 /* Check if card is ok and fw is ready */
2525 status = be_cmd_POST(adapter);
2526 if (status)
2527 return PCI_ERS_RESULT_DISCONNECT;
2528
2529 return PCI_ERS_RESULT_RECOVERED;
2530}
2531
2532static void be_eeh_resume(struct pci_dev *pdev)
2533{
2534 int status = 0;
2535 struct be_adapter *adapter = pci_get_drvdata(pdev);
2536 struct net_device *netdev = adapter->netdev;
2537
2538 dev_info(&adapter->pdev->dev, "EEH resume\n");
2539
2540 pci_save_state(pdev);
2541
2542 /* tell fw we're ready to fire cmds */
2543 status = be_cmd_fw_init(adapter);
2544 if (status)
2545 goto err;
2546
2547 status = be_setup(adapter);
2548 if (status)
2549 goto err;
2550
2551 if (netif_running(netdev)) {
2552 status = be_open(netdev);
2553 if (status)
2554 goto err;
2555 }
2556 netif_device_attach(netdev);
2557 return;
2558err:
2559 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
2560 return;
2561}
2562
2563static struct pci_error_handlers be_eeh_handlers = {
2564 .error_detected = be_eeh_err_detected,
2565 .slot_reset = be_eeh_reset,
2566 .resume = be_eeh_resume,
2567};
2568
2213static struct pci_driver be_driver = { 2569static struct pci_driver be_driver = {
2214 .name = DRV_NAME, 2570 .name = DRV_NAME,
2215 .id_table = be_dev_ids, 2571 .id_table = be_dev_ids,
2216 .probe = be_probe, 2572 .probe = be_probe,
2217 .remove = be_remove, 2573 .remove = be_remove,
2218 .suspend = be_suspend, 2574 .suspend = be_suspend,
2219 .resume = be_resume 2575 .resume = be_resume,
2576 .shutdown = be_shutdown,
2577 .err_handler = &be_eeh_handlers
2220}; 2578};
2221 2579
2222static int __init be_init_module(void) 2580static int __init be_init_module(void)
2223{ 2581{
2224 if (rx_frag_size != 8192 && rx_frag_size != 4096 2582 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
2225 && rx_frag_size != 2048) { 2583 rx_frag_size != 2048) {
2226 printk(KERN_WARNING DRV_NAME 2584 printk(KERN_WARNING DRV_NAME
2227 " : Module param rx_frag_size must be 2048/4096/8192." 2585 " : Module param rx_frag_size must be 2048/4096/8192."
2228 " Using 2048\n"); 2586 " Using 2048\n");