Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--  drivers/net/cxgb3/adapter.h        5
-rw-r--r--  drivers/net/cxgb3/ael1002.c        2
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c    60
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c  1
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.h  5
-rw-r--r--  drivers/net/cxgb3/l2t.c            1
-rw-r--r--  drivers/net/cxgb3/regs.h          16
-rw-r--r--  drivers/net/cxgb3/sge.c           35
-rw-r--r--  drivers/net/cxgb3/t3_hw.c          5
-rw-r--r--  drivers/net/cxgb3/xgmac.c          8
10 files changed, 115 insertions, 23 deletions
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 3e8618b4efbc..4cd7f420766a 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -264,6 +264,10 @@ struct adapter {
 	struct work_struct fatal_error_handler_task;
 	struct work_struct link_fault_handler_task;
 
+	struct work_struct db_full_task;
+	struct work_struct db_empty_task;
+	struct work_struct db_drop_task;
+
 	struct dentry *debugfs_root;
 
 	struct mutex mdio_lock;
@@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
 		unsigned char *data);
 irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
+extern struct workqueue_struct *cxgb3_wq;
 
 int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
 
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
index 5248f9e0b2f4..35cd36729155 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -934,7 +934,7 @@ static struct cphy_ops xaui_direct_ops = {
 int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
 			    int phy_addr, const struct mdio_ops *mdio_ops)
 {
-	cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops,
+	cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
 		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
 		  "10GBASE-CX4");
 	return 0;
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index cecdec1551db..e3f1b8566495 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -45,6 +45,8 @@
 #include <linux/firmware.h>
 #include <linux/log2.h>
 #include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
 #include <asm/uaccess.h>
 
 #include "common.h"
@@ -140,7 +142,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
  * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
  * for our work to complete. Get our own work queue to solve this.
  */
-static struct workqueue_struct *cxgb3_wq;
+struct workqueue_struct *cxgb3_wq;
 
 /**
  *	link_report - show link status and link speed/duplex
@@ -437,7 +439,7 @@ static void free_irq_resources(struct adapter *adapter)
 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
 			      unsigned long n)
 {
-	int attempts = 5;
+	int attempts = 10;
 
 	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
 		if (!--attempts)
@@ -586,6 +588,19 @@ static void setup_rss(struct adapter *adap)
 		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
 }
 
+static void ring_dbs(struct adapter *adap)
+{
+	int i, j;
+
+	for (i = 0; i < SGE_QSETS; i++) {
+		struct sge_qset *qs = &adap->sge.qs[i];
+
+		if (qs->adap)
+			for (j = 0; j < SGE_TXQ_PER_SET; j++)
+				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
+	}
+}
+
 static void init_napi(struct adapter *adap)
 {
 	int i;
@@ -2751,6 +2766,42 @@ static void t3_adap_check_task(struct work_struct *work)
 	spin_unlock_irq(&adapter->work_lock);
 }
 
+static void db_full_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       db_full_task);
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
+}
+
+static void db_empty_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       db_empty_task);
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
+}
+
+static void db_drop_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       db_drop_task);
+	unsigned long delay = 1000;
+	unsigned short r;
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
+
+	/*
+	 * Sleep a while before ringing the driver qset dbs.
+	 * The delay is between 1000-2023 usecs.
+	 */
+	get_random_bytes(&r, 2);
+	delay += r & 1023;
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(usecs_to_jiffies(delay));
+	ring_dbs(adapter);
+}
+
 /*
  * Processes external (PHY) interrupts in process context.
  */
@@ -3219,6 +3270,11 @@ static int __devinit init_one(struct pci_dev *pdev,
 	INIT_LIST_HEAD(&adapter->adapter_list);
 	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
 	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
+
+	INIT_WORK(&adapter->db_full_task, db_full_task);
+	INIT_WORK(&adapter->db_empty_task, db_empty_task);
+	INIT_WORK(&adapter->db_drop_task, db_drop_task);
+
 	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
 
 	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 9498361119d6..c6485b39eb0e 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -31,6 +31,7 @@
  */
 
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <net/neighbour.h>
 #include <linux/notifier.h>
 #include <asm/atomic.h>
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 670aa62042da..929c298115ca 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -73,7 +73,10 @@ enum {
 	OFFLOAD_STATUS_UP,
 	OFFLOAD_STATUS_DOWN,
 	OFFLOAD_PORT_DOWN,
-	OFFLOAD_PORT_UP
+	OFFLOAD_PORT_UP,
+	OFFLOAD_DB_FULL,
+	OFFLOAD_DB_EMPTY,
+	OFFLOAD_DB_DROP
 };
 
 struct cxgb3_client {
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index ff1611f90e7a..2f3ee721c3e1 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -34,6 +34,7 @@
 #include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <linux/jhash.h>
+#include <linux/slab.h>
 #include <net/neighbour.h>
 #include "common.h"
 #include "t3cdev.h"
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 1b5327b5a965..cb42353c9fdd 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -254,6 +254,22 @@
 #define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
 #define F_LOPIODRBDROPERR    V_LOPIODRBDROPERR(1U)
 
+#define S_HIPRIORITYDBFULL    7
+#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
+#define F_HIPRIORITYDBFULL    V_HIPRIORITYDBFULL(1U)
+
+#define S_HIPRIORITYDBEMPTY    6
+#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
+#define F_HIPRIORITYDBEMPTY    V_HIPRIORITYDBEMPTY(1U)
+
+#define S_LOPRIORITYDBFULL    5
+#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
+#define F_LOPRIORITYDBFULL    V_LOPRIORITYDBFULL(1U)
+
+#define S_LOPRIORITYDBEMPTY    4
+#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
+#define F_LOPRIORITYDBEMPTY    V_LOPRIORITYDBEMPTY(1U)
+
 #define S_RSPQDISABLED    3
 #define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
 #define F_RSPQDISABLED    V_RSPQDISABLED(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 048205903741..5962b911b5bd 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -36,12 +36,14 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/dma-mapping.h>
+#include <linux/slab.h>
 #include <net/arp.h>
 #include "common.h"
 #include "regs.h"
 #include "sge_defs.h"
 #include "t3_cpl.h"
 #include "firmware_exports.h"
+#include "cxgb3_offload.h"
 
 #define USE_GTS 0
 
@@ -116,7 +118,7 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
 		struct sk_buff *skb;
 		struct fl_pg_chunk pg_chunk;
 	};
-	DECLARE_PCI_UNMAP_ADDR(dma_addr);
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
 };
 
 struct rsp_desc {		/* response queue descriptor */
@@ -196,17 +198,17 @@ static inline void refill_rspq(struct adapter *adapter,
 /**
  *	need_skb_unmap - does the platform need unmapping of sk_buffs?
  *
- *	Returns true if the platfrom needs sk_buff unmapping.  The compiler
+ *	Returns true if the platform needs sk_buff unmapping.  The compiler
  *	optimizes away unecessary code if this returns true.
  */
 static inline int need_skb_unmap(void)
 {
 	/*
-	 * This structure is used to tell if the platfrom needs buffer
+	 * This structure is used to tell if the platform needs buffer
 	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
 	 */
 	struct dummy {
-		DECLARE_PCI_UNMAP_ADDR(addr);
+		DEFINE_DMA_UNMAP_ADDR(addr);
 	};
 
 	return sizeof(struct dummy) != 0;
@@ -361,7 +363,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
 		put_page(d->pg_chunk.page);
 		d->pg_chunk.page = NULL;
 	} else {
-		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
+		pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
 				 q->buf_size, PCI_DMA_FROMDEVICE);
 		kfree_skb(d->skb);
 		d->skb = NULL;
@@ -417,7 +419,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
 	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
 		return -ENOMEM;
 
-	pci_unmap_addr_set(sd, dma_addr, mapping);
+	dma_unmap_addr_set(sd, dma_addr, mapping);
 
 	d->addr_lo = cpu_to_be32(mapping);
 	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
@@ -513,7 +515,7 @@ nomem: q->alloc_failed++;
 			break;
 		}
 		mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
-		pci_unmap_addr_set(sd, dma_addr, mapping);
+		dma_unmap_addr_set(sd, dma_addr, mapping);
 
 		add_one_rx_chunk(mapping, d, q->gen);
 		pci_dma_sync_single_for_device(adap->pdev, mapping,
@@ -789,11 +791,11 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
 	if (likely(skb != NULL)) {
 		__skb_put(skb, len);
 		pci_dma_sync_single_for_cpu(adap->pdev,
-					    pci_unmap_addr(sd, dma_addr), len,
+					    dma_unmap_addr(sd, dma_addr), len,
 					    PCI_DMA_FROMDEVICE);
 		memcpy(skb->data, sd->skb->data, len);
 		pci_dma_sync_single_for_device(adap->pdev,
-					       pci_unmap_addr(sd, dma_addr), len,
+					       dma_unmap_addr(sd, dma_addr), len,
 					       PCI_DMA_FROMDEVICE);
 	} else if (!drop_thres)
 		goto use_orig_buf;
@@ -808,7 +810,7 @@ recycle:
 		goto recycle;
 
 use_orig_buf:
-	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+	pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
 			 fl->buf_size, PCI_DMA_FROMDEVICE);
 	skb = sd->skb;
 	skb_put(skb, len);
@@ -841,7 +843,7 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
 	struct sk_buff *newskb, *skb;
 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 
-	dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
+	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
 
 	newskb = skb = q->pg_skb;
 	if (!skb && (len <= SGE_RX_COPY_THRES)) {
@@ -2095,7 +2097,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	fl->credits--;
 
 	pci_dma_sync_single_for_cpu(adap->pdev,
-				    pci_unmap_addr(sd, dma_addr),
+				    dma_unmap_addr(sd, dma_addr),
 				    fl->buf_size - SGE_PG_RSVD,
 				    PCI_DMA_FROMDEVICE);
 
@@ -2841,8 +2843,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
 	}
 
 	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
-		CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
-			 status & F_HIPIODRBDROPERR ? "high" : "lo");
+		queue_work(cxgb3_wq, &adapter->db_drop_task);
+
+	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
+		queue_work(cxgb3_wq, &adapter->db_full_task);
+
+	if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
+		queue_work(cxgb3_wq, &adapter->db_empty_task);
 
 	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
 	if (status & SGE_FATALERR)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 3ab9f51918aa..95a8ba0759f1 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1433,7 +1433,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
 		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
 		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
 		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
-		       F_HIRCQPARITYERROR)
+		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
+		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
+		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
+		       F_LOPIODRBDROPERR)
 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
 		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
 		       F_NFASRCHFAIL)
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index c142a2132e9f..3af19a550372 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -311,16 +311,16 @@ int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
 	if (dev->flags & IFF_ALLMULTI)
 		hash_lo = hash_hi = 0xffffffff;
 	else {
-		struct dev_mc_list *dmi;
+		struct netdev_hw_addr *ha;
 		int exact_addr_idx = mac->nucast;
 
 		hash_lo = hash_hi = 0;
-		netdev_for_each_mc_addr(dmi, dev)
+		netdev_for_each_mc_addr(ha, dev)
 			if (exact_addr_idx < EXACT_ADDR_FILTERS)
 				set_addr_filter(mac, exact_addr_idx++,
-						dmi->dmi_addr);
+						ha->addr);
 			else {
-				int hash = hash_hw_addr(dmi->dmi_addr);
+				int hash = hash_hw_addr(ha->addr);
 
 				if (hash < 32)
 					hash_lo |= (1 << hash);