-rw-r--r--  MAINTAINERS                          |   4
-rw-r--r--  drivers/net/Kconfig                  |   2
-rw-r--r--  drivers/net/chelsio/sge.c            |   1
-rw-r--r--  drivers/net/cxgb3/adapter.h          |  11
-rw-r--r--  drivers/net/cxgb3/cxgb3_ioctl.h      |  33
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c       |  69
-rw-r--r--  drivers/net/cxgb3/sge.c              | 390
-rw-r--r--  drivers/net/cxgb3/t3_hw.c            |   6
-rw-r--r--  drivers/net/cxgb3/version.h          |   2
-rw-r--r--  drivers/net/de600.c                  |   6
-rw-r--r--  drivers/net/forcedeth.c              |  20
-rw-r--r--  drivers/net/mv643xx_eth.c            |   6
-rw-r--r--  drivers/net/mv643xx_eth.h            |  11
-rw-r--r--  drivers/net/myri10ge/myri10ge.c      |  27
-rw-r--r--  drivers/net/natsemi.c                |   2
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c   |   9
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c |   5
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c |   6
-rw-r--r--  drivers/net/ni52.c                   |   2
-rwxr-xr-x  drivers/net/qla3xxx.c                | 492
-rwxr-xr-x  drivers/net/qla3xxx.h                |  11
-rw-r--r--  drivers/net/s2io-regs.h              |   1
-rw-r--r--  drivers/net/s2io.c                   | 299
-rw-r--r--  drivers/net/s2io.h                   |  10
-rw-r--r--  drivers/net/sgiseeq.c                |  11
-rw-r--r--  drivers/net/skfp/cfm.c               |   2
-rw-r--r--  drivers/net/skge.c                   |  47
-rw-r--r--  drivers/net/skge.h                   |   3
-rw-r--r--  drivers/net/spider_net.c             | 554
-rw-r--r--  drivers/net/spider_net.h             |  34
-rw-r--r--  drivers/net/sun3_82586.c             |   2
-rw-r--r--  drivers/net/sungem_phy.c             | 389
-rw-r--r--  drivers/net/sungem_phy.h             |  10
-rw-r--r--  drivers/net/tc35815.c                |  10
-rw-r--r--  drivers/net/ucc_geth.c               |   4
-rw-r--r--  drivers/net/wan/cosa.c               |   1
-rw-r--r--  drivers/net/wireless/wl3501_cs.c     |   1
37 files changed, 1679 insertions, 814 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 06d2087be6bf..35e0a648422d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3177,8 +3177,8 @@ L: linux-kernel@vger.kernel.org
 S: Supported
 
 SPIDERNET NETWORK DRIVER for CELL
-P: Jim Lewis
-M: jim@jklewis.com
+P: Linas Vepstas
+M: linas@austin.ibm.com
 L: netdev@vger.kernel.org
 S: Supported
 
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 38ac6796fc48..5ff0922e628c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2245,7 +2245,7 @@ config BNX2
 
 config SPIDER_NET
 	tristate "Spider Gigabit Ethernet driver"
-	depends on PCI && PPC_IBM_CELL_BLADE
+	depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
 	select FW_LOADER
 	help
 	  This driver supports the Gigabit Ethernet chips present on the
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 89a682702fa9..326d4a665123 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1696,6 +1696,7 @@ irqreturn_t t1_interrupt(int irq, void *cookie)
 {
 	int work_done;
 	struct adapter *adapter = cookie;
+	struct respQ *Q = &adapter->sge->respQ;
 
 	spin_lock(&adapter->async_lock);
 
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 5c97a64451ce..80c3d8f268a7 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -74,6 +74,11 @@ enum { /* adapter flags */
 struct rx_desc;
 struct rx_sw_desc;
 
+struct sge_fl_page {
+	struct skb_frag_struct frag;
+	unsigned char *va;
+};
+
 struct sge_fl {			/* SGE per free-buffer list state */
 	unsigned int buf_size;	/* size of each Rx buffer */
 	unsigned int credits;	/* # of available Rx buffers */
@@ -81,11 +86,13 @@ struct sge_fl { /* SGE per free-buffer list state */
 	unsigned int cidx;	/* consumer index */
 	unsigned int pidx;	/* producer index */
 	unsigned int gen;	/* free list generation */
+	unsigned int cntxt_id;	/* SGE context id for the free list */
+	struct sge_fl_page page;
 	struct rx_desc *desc;	/* address of HW Rx descriptor ring */
 	struct rx_sw_desc *sdesc;	/* address of SW Rx descriptor ring */
 	dma_addr_t phys_addr;	/* physical address of HW ring start */
-	unsigned int cntxt_id;	/* SGE context id for the free list */
 	unsigned long empty;	/* # of times queue ran out of buffers */
+	unsigned long alloc_failed;	/* # of times buffer allocation failed */
 };
 
 /*
@@ -121,6 +128,8 @@ struct sge_rspq { /* state for an SGE response queue */
 	unsigned long empty;	/* # of times queue ran out of credits */
 	unsigned long nomem;	/* # of responses deferred due to no mem */
 	unsigned long unhandled_irqs;	/* # of spurious intrs */
+	unsigned long starved;
+	unsigned long restarted;
 };
 
 struct tx_desc;
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
index a94281861a66..0a82fcddf2d8 100644
--- a/drivers/net/cxgb3/cxgb3_ioctl.h
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -36,28 +36,17 @@
  * Ioctl commands specific to this driver.
  */
 enum {
-	CHELSIO_SETREG = 1024,
-	CHELSIO_GETREG,
-	CHELSIO_SETTPI,
-	CHELSIO_GETTPI,
-	CHELSIO_GETMTUTAB,
-	CHELSIO_SETMTUTAB,
-	CHELSIO_GETMTU,
-	CHELSIO_SET_PM,
-	CHELSIO_GET_PM,
-	CHELSIO_GET_TCAM,
-	CHELSIO_SET_TCAM,
-	CHELSIO_GET_TCB,
-	CHELSIO_GET_MEM,
-	CHELSIO_LOAD_FW,
-	CHELSIO_GET_PROTO,
-	CHELSIO_SET_PROTO,
-	CHELSIO_SET_TRACE_FILTER,
-	CHELSIO_SET_QSET_PARAMS,
-	CHELSIO_GET_QSET_PARAMS,
-	CHELSIO_SET_QSET_NUM,
-	CHELSIO_GET_QSET_NUM,
-	CHELSIO_SET_PKTSCHED,
+	CHELSIO_GETMTUTAB = 1029,
+	CHELSIO_SETMTUTAB = 1030,
+	CHELSIO_SET_PM = 1032,
+	CHELSIO_GET_PM = 1033,
+	CHELSIO_GET_MEM = 1038,
+	CHELSIO_LOAD_FW = 1041,
+	CHELSIO_SET_TRACE_FILTER = 1044,
+	CHELSIO_SET_QSET_PARAMS = 1045,
+	CHELSIO_GET_QSET_PARAMS = 1046,
+	CHELSIO_SET_QSET_NUM = 1047,
+	CHELSIO_GET_QSET_NUM = 1048,
 };
 
 struct ch_reg {
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 43583ed655ab..7ff834e45d6b 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -434,27 +434,25 @@ static int setup_sge_qsets(struct adapter *adap)
 
 static ssize_t attr_show(struct device *d, struct device_attribute *attr,
 			 char *buf,
-			 ssize_t(*format) (struct adapter *, char *))
+			 ssize_t(*format) (struct net_device *, char *))
 {
 	ssize_t len;
-	struct adapter *adap = to_net_dev(d)->priv;
 
 	/* Synchronize with ioctls that may shut down the device */
 	rtnl_lock();
-	len = (*format) (adap, buf);
+	len = (*format) (to_net_dev(d), buf);
 	rtnl_unlock();
 	return len;
 }
 
 static ssize_t attr_store(struct device *d, struct device_attribute *attr,
 			  const char *buf, size_t len,
-			  ssize_t(*set) (struct adapter *, unsigned int),
+			  ssize_t(*set) (struct net_device *, unsigned int),
 			  unsigned int min_val, unsigned int max_val)
 {
 	char *endp;
 	ssize_t ret;
 	unsigned int val;
-	struct adapter *adap = to_net_dev(d)->priv;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -464,7 +462,7 @@ static ssize_t attr_store(struct device *d, struct device_attribute *attr,
 		return -EINVAL;
 
 	rtnl_lock();
-	ret = (*set) (adap, val);
+	ret = (*set) (to_net_dev(d), val);
 	if (!ret)
 		ret = len;
 	rtnl_unlock();
@@ -472,8 +470,9 @@ static ssize_t attr_store(struct device *d, struct device_attribute *attr,
 }
 
 #define CXGB3_SHOW(name, val_expr) \
-static ssize_t format_##name(struct adapter *adap, char *buf) \
+static ssize_t format_##name(struct net_device *dev, char *buf) \
 { \
+	struct adapter *adap = dev->priv; \
 	return sprintf(buf, "%u\n", val_expr); \
 } \
 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
@@ -482,8 +481,10 @@ static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
 	return attr_show(d, attr, buf, format_##name); \
 }
 
-static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
+static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
 {
+	struct adapter *adap = dev->priv;
+
 	if (adap->flags & FULL_INIT_DONE)
 		return -EBUSY;
 	if (val && adap->params.rev == 0)
@@ -500,8 +501,10 @@ static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
 	return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
 }
 
-static ssize_t set_nservers(struct adapter *adap, unsigned int val)
+static ssize_t set_nservers(struct net_device *dev, unsigned int val)
 {
+	struct adapter *adap = dev->priv;
+
 	if (adap->flags & FULL_INIT_DONE)
 		return -EBUSY;
 	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
@@ -1549,32 +1552,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		return -EFAULT;
 
 	switch (cmd) {
-	case CHELSIO_SETREG:{
-		struct ch_reg edata;
-
-		if (!capable(CAP_NET_ADMIN))
-			return -EPERM;
-		if (copy_from_user(&edata, useraddr, sizeof(edata)))
-			return -EFAULT;
-		if ((edata.addr & 3) != 0
-		    || edata.addr >= adapter->mmio_len)
-			return -EINVAL;
-		writel(edata.val, adapter->regs + edata.addr);
-		break;
-	}
-	case CHELSIO_GETREG:{
-		struct ch_reg edata;
-
-		if (copy_from_user(&edata, useraddr, sizeof(edata)))
-			return -EFAULT;
-		if ((edata.addr & 3) != 0
-		    || edata.addr >= adapter->mmio_len)
-			return -EINVAL;
-		edata.val = readl(adapter->regs + edata.addr);
-		if (copy_to_user(useraddr, &edata, sizeof(edata)))
-			return -EFAULT;
-		break;
-	}
 	case CHELSIO_SET_QSET_PARAMS:{
 		int i;
 		struct qset_params *q;
@@ -1838,10 +1815,10 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EINVAL;
 
 		/*
 		 * Version scheme:
 		 * bits 0..9: chip version
 		 * bits 10..15: chip revision
 		 */
 		t.version = 3 | (adapter->params.rev << 10);
 		if (copy_to_user(useraddr, &t, sizeof(t)))
 			return -EFAULT;
@@ -1890,20 +1867,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 				   t.trace_rx);
 		break;
 	}
-	case CHELSIO_SET_PKTSCHED:{
-		struct ch_pktsched_params p;
-
-		if (!capable(CAP_NET_ADMIN))
-			return -EPERM;
-		if (!adapter->open_device_map)
-			return -EAGAIN;	/* uP and SGE must be running */
-		if (copy_from_user(&p, useraddr, sizeof(p)))
-			return -EFAULT;
-		send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
-				  p.binding);
-		break;
-
-	}
 	default:
 		return -EOPNOTSUPP;
 	}
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 3f2cf8a07c61..c23783432e51 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -45,9 +45,25 @@
 #define USE_GTS 0
 
 #define SGE_RX_SM_BUF_SIZE 1536
+
+/*
+ * If USE_RX_PAGE is defined, the small freelist is populated with (partial)
+ * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
+ * be a multiple of the host page size).
+ */
+#define USE_RX_PAGE
+#define RX_PAGE_SIZE 2048
+
+/*
+ * skb freelist packets are copied into a new skb (and the freelist one is
+ * reused) if their len is <=
+ */
 #define SGE_RX_COPY_THRES  256
 
-# define SGE_RX_DROP_THRES 16
+/*
+ * Minimum number of freelist entries before we start dropping TUNNEL frames.
+ */
+#define SGE_RX_DROP_THRES 16
 
 /*
  * Period of the Tx buffer reclaim timer.  This timer does not need to run
@@ -85,7 +101,10 @@ struct tx_sw_desc { /* SW state per Tx descriptor */
 };
 
 struct rx_sw_desc {		/* SW state per Rx descriptor */
-	struct sk_buff *skb;
+	union {
+		struct sk_buff *skb;
+		struct sge_fl_page page;
+	} t;
 	DECLARE_PCI_UNMAP_ADDR(dma_addr);
 };
 
@@ -105,6 +124,15 @@ struct unmap_info { /* packet unmapping info, overlays skb->cb */
 };
 
 /*
+ * Holds unmapping information for Tx packets that need deferred unmapping.
+ * This structure lives at skb->head and must be allocated by callers.
+ */
+struct deferred_unmap_info {
+	struct pci_dev *pdev;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+/*
  * Maps a number of flits to the number of Tx descriptors that can hold them.
  * The formula is
  *
@@ -252,10 +280,13 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned int cidx = q->cidx;
 
+	const int need_unmap = need_skb_unmap() &&
+			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+
 	d = &q->sdesc[cidx];
 	while (n--) {
 		if (d->skb) {	/* an SGL is present */
-			if (need_skb_unmap())
+			if (need_unmap)
 				unmap_skb(d->skb, q, cidx, pdev);
 			if (d->skb->priority == cidx)
 				kfree_skb(d->skb);
@@ -320,16 +351,27 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
 
 		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
 				 q->buf_size, PCI_DMA_FROMDEVICE);
-		kfree_skb(d->skb);
-		d->skb = NULL;
+
+		if (q->buf_size != RX_PAGE_SIZE) {
+			kfree_skb(d->t.skb);
+			d->t.skb = NULL;
+		} else {
+			if (d->t.page.frag.page)
+				put_page(d->t.page.frag.page);
+			d->t.page.frag.page = NULL;
+		}
 		if (++cidx == q->size)
 			cidx = 0;
 	}
+
+	if (q->page.frag.page)
+		put_page(q->page.frag.page);
+	q->page.frag.page = NULL;
 }
 
 /**
  *	add_one_rx_buf - add a packet buffer to a free-buffer list
- *	@skb: the buffer to add
+ *	@va: va of the buffer to add
  *	@len: the buffer length
  *	@d: the HW Rx descriptor to write
  *	@sd: the SW Rx descriptor to write
@@ -339,14 +381,13 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
  *	Add a buffer of the given length to the supplied HW and SW Rx
  *	descriptors.
  */
-static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
+static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
 				  struct rx_desc *d, struct rx_sw_desc *sd,
 				  unsigned int gen, struct pci_dev *pdev)
 {
 	dma_addr_t mapping;
 
-	sd->skb = skb;
-	mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
 	pci_unmap_addr_set(sd, dma_addr, mapping);
 
 	d->addr_lo = cpu_to_be32(mapping);
@@ -371,14 +412,47 @@ static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 {
 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 	struct rx_desc *d = &q->desc[q->pidx];
+	struct sge_fl_page *p = &q->page;
 
 	while (n--) {
-		struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+		unsigned char *va;
 
-		if (!skb)
-			break;
+		if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
+			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+
+			if (!skb) {
+				q->alloc_failed++;
+				break;
+			}
+			va = skb->data;
+			sd->t.skb = skb;
+		} else {
+			if (!p->frag.page) {
+				p->frag.page = alloc_pages(gfp, 0);
+				if (unlikely(!p->frag.page)) {
+					q->alloc_failed++;
+					break;
+				} else {
+					p->frag.size = RX_PAGE_SIZE;
+					p->frag.page_offset = 0;
+					p->va = page_address(p->frag.page);
+				}
+			}
+
+			memcpy(&sd->t, p, sizeof(*p));
+			va = p->va;
+
+			p->frag.page_offset += RX_PAGE_SIZE;
+			BUG_ON(p->frag.page_offset > PAGE_SIZE);
+			p->va += RX_PAGE_SIZE;
+			if (p->frag.page_offset == PAGE_SIZE)
+				p->frag.page = NULL;
+			else
+				get_page(p->frag.page);
+		}
+
+		add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
 
-		add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
 		d++;
 		sd++;
 		if (++q->pidx == q->size) {
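
Note on the refill_fl() hunk above -- the page-carving scheme is the core of
the USE_RX_PAGE change: one order-0 page is handed out in RX_PAGE_SIZE chunks,
and every chunk except the last takes an extra get_page() reference, so the
page is freed only when all outstanding chunks drop theirs. A minimal
standalone sketch of the same refcounting idea (illustrative only; struct
chunk_allocator and CHUNK_SIZE are hypothetical names, not part of the patch):

#define CHUNK_SIZE 2048			/* must divide PAGE_SIZE, like RX_PAGE_SIZE */

struct chunk_allocator {
	struct page *page;		/* partially carved page, if any */
	unsigned int offset;		/* next free chunk offset */
};

static void *carve_chunk(struct chunk_allocator *a, gfp_t gfp)
{
	void *va;

	if (!a->page) {
		a->page = alloc_pages(gfp, 0);
		if (!a->page)
			return NULL;
		a->offset = 0;
	}
	va = page_address(a->page) + a->offset;
	a->offset += CHUNK_SIZE;
	if (a->offset == PAGE_SIZE)
		a->page = NULL;		/* our reference moves to the caller */
	else
		get_page(a->page);	/* caller gets its own reference */
	return va;			/* consumer eventually put_page()s it */
}
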
@@ -413,7 +487,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
 	struct rx_desc *from = &q->desc[idx];
 	struct rx_desc *to = &q->desc[q->pidx];
 
-	q->sdesc[q->pidx] = q->sdesc[idx];
+	memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
 	to->addr_lo = from->addr_lo;	/* already big endian */
 	to->addr_hi = from->addr_hi;	/* likewise */
 	wmb();
@@ -446,7 +520,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
  * of the SW ring.
  */
 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
-			size_t sw_size, dma_addr_t *phys, void *metadata)
+			size_t sw_size, dma_addr_t * phys, void *metadata)
 {
 	size_t len = nelem * elem_size;
 	void *s = NULL;
@@ -576,61 +650,6 @@ static inline unsigned int flits_to_desc(unsigned int n)
 }
 
 /**
- *	get_packet - return the next ingress packet buffer from a free list
- *	@adap: the adapter that received the packet
- *	@fl: the SGE free list holding the packet
- *	@len: the packet length including any SGE padding
- *	@drop_thres: # of remaining buffers before we start dropping packets
- *
- *	Get the next packet from a free list and complete setup of the
- *	sk_buff.  If the packet is small we make a copy and recycle the
- *	original buffer, otherwise we use the original buffer itself.  If a
- *	positive drop threshold is supplied packets are dropped and their
- *	buffers recycled if (a) the number of remaining buffers is under the
- *	threshold and the packet is too big to copy, or (b) the packet should
- *	be copied but there is no memory for the copy.
- */
-static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
-				  unsigned int len, unsigned int drop_thres)
-{
-	struct sk_buff *skb = NULL;
-	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-
-	prefetch(sd->skb->data);
-
-	if (len <= SGE_RX_COPY_THRES) {
-		skb = alloc_skb(len, GFP_ATOMIC);
-		if (likely(skb != NULL)) {
-			__skb_put(skb, len);
-			pci_dma_sync_single_for_cpu(adap->pdev,
-						    pci_unmap_addr(sd,
-								   dma_addr),
-						    len, PCI_DMA_FROMDEVICE);
-			memcpy(skb->data, sd->skb->data, len);
-			pci_dma_sync_single_for_device(adap->pdev,
-						       pci_unmap_addr(sd,
-								      dma_addr),
-						       len, PCI_DMA_FROMDEVICE);
-		} else if (!drop_thres)
-			goto use_orig_buf;
-recycle:
-		recycle_rx_buf(adap, fl, fl->cidx);
-		return skb;
-	}
-
-	if (unlikely(fl->credits < drop_thres))
-		goto recycle;
-
-use_orig_buf:
-	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
-			 fl->buf_size, PCI_DMA_FROMDEVICE);
-	skb = sd->skb;
-	skb_put(skb, len);
-	__refill_fl(adap, fl);
-	return skb;
-}
-
-/**
  *	get_imm_packet - return the next ingress packet buffer from a response
  *	@resp: the response descriptor containing the packet data
  *
@@ -1227,6 +1246,50 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
 }
 
 /**
+ *	deferred_unmap_destructor - unmap a packet when it is freed
+ *	@skb: the packet
+ *
+ *	This is the packet destructor used for Tx packets that need to remain
+ *	mapped until they are freed rather than until their Tx descriptors are
+ *	freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+	int i;
+	const dma_addr_t *p;
+	const struct skb_shared_info *si;
+	const struct deferred_unmap_info *dui;
+	const struct unmap_info *ui = (struct unmap_info *)skb->cb;
+
+	dui = (struct deferred_unmap_info *)skb->head;
+	p = dui->addr;
+
+	if (ui->len)
+		pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
+
+	si = skb_shinfo(skb);
+	for (i = 0; i < si->nr_frags; i++)
+		pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
+			       PCI_DMA_TODEVICE);
+}
+
+static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
+				     const struct sg_ent *sgl, int sgl_flits)
+{
+	dma_addr_t *p;
+	struct deferred_unmap_info *dui;
+
+	dui = (struct deferred_unmap_info *)skb->head;
+	dui->pdev = pdev;
+	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
+		*p++ = be64_to_cpu(sgl->addr[0]);
+		*p++ = be64_to_cpu(sgl->addr[1]);
+	}
+	if (sgl_flits)
+		*p = be64_to_cpu(sgl->addr[0]);
+}
+
+/**
  *	write_ofld_wr - write an offload work request
  *	@adap: the adapter
  *	@skb: the packet to send
@@ -1262,8 +1325,11 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
 	sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
 			     adap->pdev);
-	if (need_skb_unmap())
+	if (need_skb_unmap()) {
+		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+		skb->destructor = deferred_unmap_destructor;
 		((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
+	}
 
 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
 			 gen, from->wr_hi, from->wr_lo);
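
Note on the two hunks above -- deferred_unmap_destructor() and
setup_deferred_unmapping() move DMA unmapping from Tx-descriptor reclaim time
to skb-free time, by stashing the bus addresses at skb->head and hooking
skb->destructor. A hedged sketch of the pattern in isolation (my_unmap_state
and the helper names are hypothetical; the patch's real version walks a full
SGL plus the frag list):

struct my_unmap_state {			/* lives in reserved room at skb->head */
	struct pci_dev *pdev;
	dma_addr_t addr;
	size_t len;
};

static void my_skb_destructor(struct sk_buff *skb)
{
	struct my_unmap_state *st = (struct my_unmap_state *)skb->head;

	pci_unmap_single(st->pdev, st->addr, st->len, PCI_DMA_TODEVICE);
}

static void arm_deferred_unmap(struct sk_buff *skb, struct pci_dev *pdev,
			       dma_addr_t addr, size_t len)
{
	struct my_unmap_state *st = (struct my_unmap_state *)skb->head;

	st->pdev = pdev;
	st->addr = addr;
	st->len = len;
	skb->destructor = my_skb_destructor;
}

The catch, as the new comment in sge.c says, is that the room at skb->head
must be allocated by the caller and stay untouched until the destructor runs.
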
@@ -1617,7 +1683,6 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
 	struct port_info *pi;
 
-	rq->eth_pkts++;
 	skb_pull(skb, sizeof(*p) + pad);
 	skb->dev = adap->port[p->iff];
 	skb->dev->last_rx = jiffies;
@@ -1645,6 +1710,85 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 	netif_rx(skb);
 }
 
+#define SKB_DATA_SIZE 128
+
+static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
+			  unsigned int len)
+{
+	skb->len = len;
+	if (len <= SKB_DATA_SIZE) {
+		memcpy(skb->data, p->va, len);
+		skb->tail += len;
+		put_page(p->frag.page);
+	} else {
+		memcpy(skb->data, p->va, SKB_DATA_SIZE);
+		skb_shinfo(skb)->frags[0].page = p->frag.page;
+		skb_shinfo(skb)->frags[0].page_offset =
+		    p->frag.page_offset + SKB_DATA_SIZE;
+		skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
+		skb_shinfo(skb)->nr_frags = 1;
+		skb->data_len = len - SKB_DATA_SIZE;
+		skb->tail += SKB_DATA_SIZE;
+		skb->truesize += skb->data_len;
+	}
+}
+
+/**
+ *	get_packet - return the next ingress packet buffer from a free list
+ *	@adap: the adapter that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@len: the packet length including any SGE padding
+ *	@drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *	Get the next packet from a free list and complete setup of the
+ *	sk_buff.  If the packet is small we make a copy and recycle the
+ *	original buffer, otherwise we use the original buffer itself.  If a
+ *	positive drop threshold is supplied packets are dropped and their
+ *	buffers recycled if (a) the number of remaining buffers is under the
+ *	threshold and the packet is too big to copy, or (b) the packet should
+ *	be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+				  unsigned int len, unsigned int drop_thres)
+{
+	struct sk_buff *skb = NULL;
+	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+	prefetch(sd->t.skb->data);
+
+	if (len <= SGE_RX_COPY_THRES) {
+		skb = alloc_skb(len, GFP_ATOMIC);
+		if (likely(skb != NULL)) {
+			struct rx_desc *d = &fl->desc[fl->cidx];
+			dma_addr_t mapping =
+			    (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
+					 be32_to_cpu(d->addr_lo));
+
+			__skb_put(skb, len);
+			pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
+						    PCI_DMA_FROMDEVICE);
+			memcpy(skb->data, sd->t.skb->data, len);
+			pci_dma_sync_single_for_device(adap->pdev, mapping, len,
+						       PCI_DMA_FROMDEVICE);
+		} else if (!drop_thres)
+			goto use_orig_buf;
+recycle:
+		recycle_rx_buf(adap, fl, fl->cidx);
+		return skb;
+	}
+
+	if (unlikely(fl->credits < drop_thres))
+		goto recycle;
+
+use_orig_buf:
+	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	skb = sd->t.skb;
+	skb_put(skb, len);
+	__refill_fl(adap, fl);
+	return skb;
+}
+
 /**
  *	handle_rsp_cntrl_info - handles control information in a response
  *	@qs: the queue set corresponding to the response
@@ -1767,7 +1911,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 		q->next_holdoff = q->holdoff_tmr;
 
 	while (likely(budget_left && is_new_response(r, q))) {
-		int eth, ethpad = 0;
+		int eth, ethpad = 2;
 		struct sk_buff *skb = NULL;
 		u32 len, flags = ntohl(r->flags);
 		u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
@@ -1794,18 +1938,56 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 				break;
 			}
 			q->imm_data++;
+			ethpad = 0;
 		} else if ((len = ntohl(r->len_cq)) != 0) {
-			struct sge_fl *fl;
+			struct sge_fl *fl =
+			    (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+
+			if (fl->buf_size == RX_PAGE_SIZE) {
+				struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+				struct sge_fl_page *p = &sd->t.page;
+
+				prefetch(p->va);
+				prefetch(p->va + L1_CACHE_BYTES);
+
+				__refill_fl(adap, fl);
+
+				pci_unmap_single(adap->pdev,
+						 pci_unmap_addr(sd, dma_addr),
+						 fl->buf_size,
+						 PCI_DMA_FROMDEVICE);
+
+				if (eth) {
+					if (unlikely(fl->credits <
+						     SGE_RX_DROP_THRES))
+						goto eth_recycle;
+
+					skb = alloc_skb(SKB_DATA_SIZE,
+							GFP_ATOMIC);
+					if (unlikely(!skb)) {
+eth_recycle:
+						q->rx_drops++;
+						recycle_rx_buf(adap, fl,
+							       fl->cidx);
+						goto eth_done;
+					}
+				} else {
+					skb = alloc_skb(SKB_DATA_SIZE,
+							GFP_ATOMIC);
+					if (unlikely(!skb))
+						goto no_mem;
+				}
+
+				skb_data_init(skb, p, G_RSPD_LEN(len));
+eth_done:
+				fl->credits--;
+				q->eth_pkts++;
+			} else {
+				fl->credits--;
+				skb = get_packet(adap, fl, G_RSPD_LEN(len),
+						 eth ? SGE_RX_DROP_THRES : 0);
+			}
 
-			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
-			fl->credits--;
-			skb = get_packet(adap, fl, G_RSPD_LEN(len),
-					 eth ? SGE_RX_DROP_THRES : 0);
-			if (!skb)
-				q->rx_drops++;
-			else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
-				__skb_pull(skb, 2);
-			ethpad = 2;
 			if (++fl->cidx == fl->size)
 				fl->cidx = 0;
 		} else
@@ -1829,18 +2011,23 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 			q->credits = 0;
 		}
 
-		if (likely(skb != NULL)) {
+		if (skb) {
+			/* Preserve the RSS info in csum & priority */
+			skb->csum = rss_hi;
+			skb->priority = rss_lo;
+
 			if (eth)
 				rx_eth(adap, q, skb, ethpad);
 			else {
-				/* Preserve the RSS info in csum & priority */
-				skb->csum = rss_hi;
-				skb->priority = rss_lo;
-				ngathered = rx_offload(&adap->tdev, q, skb,
-						       offload_skbs, ngathered);
+				if (unlikely(r->rss_hdr.opcode ==
+					     CPL_TRACE_PKT))
+					__skb_pull(skb, ethpad);
+
+				ngathered = rx_offload(&adap->tdev, q,
						       skb, offload_skbs,
+						       ngathered);
 			}
 		}
-
 		--budget_left;
 	}
 
@@ -2320,10 +2507,23 @@ static void sge_timer_cb(unsigned long data)
 		    &adap->sge.qs[0].rspq.lock;
 	if (spin_trylock_irq(lock)) {
 		if (!napi_is_scheduled(qs->netdev)) {
+			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
 			if (qs->fl[0].credits < qs->fl[0].size)
 				__refill_fl(adap, &qs->fl[0]);
 			if (qs->fl[1].credits < qs->fl[1].size)
 				__refill_fl(adap, &qs->fl[1]);
+
+			if (status & (1 << qs->rspq.cntxt_id)) {
+				qs->rspq.starved++;
+				if (qs->rspq.credits) {
+					refill_rspq(adap, &qs->rspq, 1);
+					qs->rspq.credits--;
+					qs->rspq.restarted++;
+					t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+						     1 << qs->rspq.cntxt_id);
+				}
+			}
 		}
 		spin_unlock_irq(lock);
 	}
@@ -2432,13 +2632,21 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 		flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
 
 	if (ntxq == 1) {
+#ifdef USE_RX_PAGE
+		q->fl[0].buf_size = RX_PAGE_SIZE;
+#else
 		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
 		    sizeof(struct cpl_rx_pkt);
+#endif
 		q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
 		    sizeof(struct cpl_rx_pkt);
 	} else {
+#ifdef USE_RX_PAGE
+		q->fl[0].buf_size = RX_PAGE_SIZE;
+#else
 		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
 		    sizeof(struct cpl_rx_data);
+#endif
 		q->fl[1].buf_size = (16 * 1024) -
 		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	}
@@ -2632,7 +2840,7 @@ void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
 	q->polling = adap->params.rev > 0;
 	q->coalesce_usecs = 5;
 	q->rspq_size = 1024;
-	q->fl_size = 4096;
+	q->fl_size = 1024;
 	q->jumbo_size = 512;
 	q->txq_size[TXQ_ETH] = 1024;
 	q->txq_size[TXQ_OFLD] = 1024;
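
Note on the cxgb3/sge.c changes as a whole -- with USE_RX_PAGE defined,
skb_data_init() splits each received chunk: at most SKB_DATA_SIZE (128) bytes
are copied into the skb's linear area and the remainder stays in the page,
attached as frags[0]. For a 1500-byte frame that means 128 bytes copied, 1372
left in place, and skb->data_len/truesize grow by 1372. A hedged sketch of the
same split for a generic driver (build_frag_skb and its parameters are
hypothetical, not part of the patch):

static struct sk_buff *build_frag_skb(struct page *page, unsigned int off,
				      void *va, unsigned int len,
				      unsigned int linear)
{
	struct sk_buff *skb = alloc_skb(linear, GFP_ATOMIC);

	if (!skb)
		return NULL;
	if (len <= linear) {
		memcpy(skb_put(skb, len), va, len);
		put_page(page);		/* fully copied, drop the page ref */
	} else {
		memcpy(skb_put(skb, linear), va, linear);
		skb_shinfo(skb)->frags[0].page = page;	/* ref moves to skb */
		skb_shinfo(skb)->frags[0].page_offset = off + linear;
		skb_shinfo(skb)->frags[0].size = len - linear;
		skb_shinfo(skb)->nr_frags = 1;
		skb->len += len - linear;
		skb->data_len = len - linear;
		skb->truesize += len - linear;
	}
	return skb;
}
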
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 365a7f5b1f94..eaa7a2e89a30 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -884,11 +884,13 @@ int t3_check_fw_version(struct adapter *adapter)
 	major = G_FW_VERSION_MAJOR(vers);
 	minor = G_FW_VERSION_MINOR(vers);
 
-	if (type == FW_VERSION_T3 && major == 3 && minor == 1)
+	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
+	    minor == FW_VERSION_MINOR)
 		return 0;
 
 	CH_ERR(adapter, "found wrong FW version(%u.%u), "
-	       "driver needs version 3.1\n", major, minor);
+	       "driver needs version %u.%u\n", major, minor,
+	       FW_VERSION_MAJOR, FW_VERSION_MINOR);
 	return -EINVAL;
 }
 
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 2b67dd523cc1..782a6cf158a5 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -36,4 +36,6 @@
 #define DRV_NAME "cxgb3"
 /* Driver version */
 #define DRV_VERSION "1.0"
+#define FW_VERSION_MAJOR 3
+#define FW_VERSION_MINOR 2
 #endif				/* __CHELSIO_VERSION_H */
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index 8396e411f1ce..e547ce14eefe 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -38,12 +38,6 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj
 /* Add more time here if your adapter won't work OK: */
 #define DE600_SLOW_DOWN	udelay(delay_time)
 
- /*
- * If you still have trouble reading/writing to the adapter,
- * modify the following "#define": (see <asm/io.h> for more info)
-#define REALLY_SLOW_IO
- */
-
 /* use 0 for production, 1 for verification, >2 for debug */
 #ifdef DE600_DEBUG
 #define PRINTK(x) if (de600_debug >= 2) printk x
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index a363148d0198..46e1697d9cfd 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -839,7 +839,7 @@ enum {
 	NV_MSIX_INT_DISABLED,
 	NV_MSIX_INT_ENABLED
 };
-static int msix = NV_MSIX_INT_ENABLED;
+static int msix = NV_MSIX_INT_DISABLED;
 
 /*
  * DMA 64bit
@@ -3104,13 +3104,17 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 	unsigned long flags;
+	int retcode;
 
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 		pkts = nv_rx_process(dev, limit);
-	else
+		retcode = nv_alloc_rx(dev);
+	} else {
 		pkts = nv_rx_process_optimized(dev, limit);
+		retcode = nv_alloc_rx_optimized(dev);
+	}
 
-	if (nv_alloc_rx(dev)) {
+	if (retcode) {
 		spin_lock_irqsave(&np->lock, flags);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -5370,19 +5374,19 @@ static struct pci_device_id pci_tbl[] = {
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP67 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index d98e53efa2ef..3e045a695dbc 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -147,13 +147,13 @@ static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
 	int unaligned;
 
 	while (mp->rx_desc_count < mp->rx_ring_size) {
-		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN);
+		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + dma_get_cache_alignment());
 		if (!skb)
 			break;
 		mp->rx_desc_count++;
-		unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1);
+		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
 		if (unaligned)
-			skb_reserve(skb, ETH_DMA_ALIGN - unaligned);
+			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
 		pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
 		pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
 		pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
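
Note on the hunk above -- replacing the compile-time ETH_DMA_ALIGN with
dma_get_cache_alignment() makes the receive-buffer alignment a runtime
property of the platform, so one binary behaves correctly on both
cache-coherent and non-coherent systems. The idiom in isolation, as a minimal
hedged sketch (alloc_aligned_rx_skb is a hypothetical name):

static struct sk_buff *alloc_aligned_rx_skb(unsigned int size)
{
	unsigned int align = dma_get_cache_alignment();
	struct sk_buff *skb = dev_alloc_skb(size + align);
	unsigned int off;

	if (!skb)
		return NULL;
	/* advance skb->data so the buffer does not share a cache line */
	off = (unsigned long)skb->data & (align - 1);
	if (off)
		skb_reserve(skb, align - off);
	return skb;
}
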
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 33c5fafdbbd3..7cb0a41c5cb4 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -42,17 +42,6 @@
 #define MAX_DESCS_PER_SKB	1
 #endif
 
-/*
- * The MV643XX HW requires 8-byte alignment.  However, when I/O
- * is non-cache-coherent, we need to ensure that the I/O buffers
- * we use don't share cache lines with other data.
- */
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_NOT_COHERENT_CACHE)
-#define ETH_DMA_ALIGN		L1_CACHE_BYTES
-#else
-#define ETH_DMA_ALIGN		8
-#endif
-
 #define ETH_VLAN_HLEN		4
 #define ETH_FCS_LEN		4
 #define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 030924fb1ab3..954842e85ab9 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -195,6 +195,10 @@ struct myri10ge_priv {
 	char *fw_name;
 	char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
 	char fw_version[128];
+	int fw_ver_major;
+	int fw_ver_minor;
+	int fw_ver_tiny;
+	int adopted_rx_filter_bug;
 	u8 mac_addr[6];		/* eeprom mac address */
 	unsigned long serial_number;
 	int vendor_specific_offset;
@@ -447,7 +451,6 @@ myri10ge_validate_firmware(struct myri10ge_priv *mgp,
 			       struct mcp_gen_header *hdr)
 {
 	struct device *dev = &mgp->pdev->dev;
-	int major, minor;
 
 	/* check firmware type */
 	if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
@@ -458,9 +461,11 @@ myri10ge_validate_firmware(struct myri10ge_priv *mgp,
 	/* save firmware version for ethtool */
 	strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
 
-	sscanf(mgp->fw_version, "%d.%d", &major, &minor);
+	sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
+	       &mgp->fw_ver_minor, &mgp->fw_ver_tiny);
 
-	if (!(major == MXGEFW_VERSION_MAJOR && minor == MXGEFW_VERSION_MINOR)) {
+	if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR
+	      && mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
 		dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
 		dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
 			MXGEFW_VERSION_MINOR);
@@ -561,6 +566,18 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
 	memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
 	status = myri10ge_validate_firmware(mgp, hdr);
 	kfree(hdr);
+
+	/* check to see if adopted firmware has bug where adopting
+	 * it will cause broadcasts to be filtered unless the NIC
+	 * is kept in ALLMULTI mode */
+	if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
+	    mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
+		mgp->adopted_rx_filter_bug = 1;
+		dev_warn(dev, "Adopting fw %d.%d.%d: "
+			 "working around rx filter bug\n",
+			 mgp->fw_ver_major, mgp->fw_ver_minor,
+			 mgp->fw_ver_tiny);
+	}
 	return status;
 }
 
@@ -794,6 +811,8 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
 	status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
 	myri10ge_change_promisc(mgp, 0, 0);
 	myri10ge_change_pause(mgp, mgp->pause);
+	if (mgp->adopted_rx_filter_bug)
+		(void)myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
 	return status;
 }
 
@@ -2239,7 +2258,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
 	myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
 
 	/* This firmware is known to not support multicast */
-	if (!mgp->fw_multicast_support)
+	if (!mgp->fw_multicast_support || mgp->adopted_rx_filter_bug)
 		return;
 
 	/* Disable multicast filtering */
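
Note on the myri10ge changes -- the driver now parses the running firmware's
three-part version once at validation time and latches a boolean for the
known 1.4.4..1.4.11 rx-filter bug, which then forces ALLMULTI and skips
multicast-filter updates. Version-gating a quirk in sketch form (the helper
name is hypothetical):

/* Illustrative sketch only: parse "major.minor.tiny" and flag a quirk
 * for an inclusive buggy range. */
static int fw_has_rx_filter_bug(const char *ver)
{
	int major = 0, minor = 0, tiny = 0;

	sscanf(ver, "%d.%d.%d", &major, &minor, &tiny);
	return major == 1 && minor == 4 && tiny >= 4 && tiny <= 11;
}
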
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index adf29dd66798..5c57433cb306 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -260,7 +260,7 @@ static const struct {
 
 static const struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
 	{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9,     0x000c,     0, 0, 0 },
-	{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
 	{ }	/* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 7195af3e8f3d..deec796f90da 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -242,10 +242,11 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
 	adapter->cmd_consumer = (uint32_t *) (((char *)addr) +
 					      sizeof(struct netxen_ring_ctx));
 
-	addr = pci_alloc_consistent(adapter->ahw.pdev,
+	addr = netxen_alloc(adapter->ahw.pdev,
 				    sizeof(struct cmd_desc_type0) *
 				    adapter->max_tx_desc_count,
-				    (dma_addr_t *) & hw->cmd_desc_phys_addr);
+				    (dma_addr_t *) & hw->cmd_desc_phys_addr,
+				    &adapter->ahw.cmd_desc_pdev);
 	printk("cmd_desc_phys_addr: 0x%llx\n", (u64) hw->cmd_desc_phys_addr);
 
 	if (addr == NULL) {
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 2f324366784d..2f965701a95b 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -499,7 +499,10 @@ static inline int do_rom_fast_write_words(struct netxen_adapter *adapter,
 		while(1) {
 			int data1;
 
-			do_rom_fast_read(adapter, addridx, &data1);
+			ret = do_rom_fast_read(adapter, addridx, &data1);
+			if (ret < 0)
+				return ret;
+
 			if (data1 == data)
 				break;
 
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 225ff55527c4..1bf3d494b92f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -525,6 +525,8 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
 	if (adapter == NULL)
 		return;
 
+	if (adapter->irq)
+		free_irq(adapter->irq, adapter);
 	netxen_nic_stop_all_ports(adapter);
 	/* leave the hw in the same state as reboot */
 	netxen_pinit_from_rom(adapter, 0);
@@ -672,8 +674,6 @@ static int netxen_nic_close(struct net_device *netdev)
 
 	if (!adapter->active_ports) {
 		netxen_nic_disable_int(adapter);
-		if (adapter->irq)
-			free_irq(adapter->irq, adapter);
 		cmd_buff = adapter->cmd_buf_arr;
 		for (i = 0; i < adapter->max_tx_desc_count; i++) {
 			buffrag = cmd_buff->frag_array;
@@ -1155,8 +1155,8 @@ static void __exit netxen_exit_module(void)
 	/*
 	 * Wait for some time to allow the dma to drain, if any.
 	 */
-	destroy_workqueue(netxen_workq);
 	pci_unregister_driver(&netxen_driver);
+	destroy_workqueue(netxen_workq);
 }
 
 module_exit(netxen_exit_module);
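
Note on the netxen changes -- both hunks are teardown-ordering fixes: the IRQ
is now released in remove() before the hardware is reset, and the workqueue is
destroyed only after pci_unregister_driver() has quiesced every device that
could still queue work to it. The general module-exit shape, as a hedged
sketch with hypothetical names:

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_driver);	/* no new work after this */
	destroy_workqueue(example_wq);		/* now safe to destroy */
}
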
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 196993a29b09..a6f4b24b0176 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -104,8 +104,6 @@ static int automatic_resume; /* experimental .. better should be zero */
 static int rfdadd;	/* rfdadd=1 may be better for 8K MEM cards */
 static int fifo=0x8;	/* don't change */
 
-/* #define REALLY_SLOW_IO */
-
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index a142cdfd947b..3a14d19b72bd 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -39,7 +39,7 @@
 
 #define DRV_NAME	"qla3xxx"
 #define DRV_STRING	"QLogic ISP3XXX Network Driver"
-#define DRV_VERSION	"v2.02.00-k36"
+#define DRV_VERSION	"v2.03.00-k3"
 #define PFX		DRV_NAME " "
 
 static const char ql3xxx_driver_name[] = DRV_NAME;
@@ -276,7 +276,8 @@ static void ql_enable_interrupts(struct ql3_adapter *qdev)
 static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 					    struct ql_rcv_buf_cb *lrg_buf_cb)
 {
-	u64 map;
+	dma_addr_t map;
+	int err;
 	lrg_buf_cb->next = NULL;
 
 	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
@@ -287,9 +288,10 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 	}
 
 	if (!lrg_buf_cb->skb) {
-		lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
+						   qdev->lrg_buffer_len);
 		if (unlikely(!lrg_buf_cb->skb)) {
-			printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
+			printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
 			       qdev->ndev->name);
 			qdev->lrg_buf_skb_check++;
 		} else {
@@ -303,6 +305,17 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 						     qdev->lrg_buffer_len -
 						     QL_HEADER_SPACE,
 						     PCI_DMA_FROMDEVICE);
+			err = pci_dma_mapping_error(map);
+			if(err) {
+				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+				       qdev->ndev->name, err);
+				dev_kfree_skb(lrg_buf_cb->skb);
+				lrg_buf_cb->skb = NULL;
+
+				qdev->lrg_buf_skb_check++;
+				return;
+			}
+
 			lrg_buf_cb->buf_phy_addr_low =
 			    cpu_to_le32(LS_64BITS(map));
 			lrg_buf_cb->buf_phy_addr_high =
@@ -1387,6 +1400,8 @@ static void ql_link_state_machine(struct ql3_adapter *qdev)
 		printk(KERN_INFO PFX
 		       "%s: Reset in progress, skip processing link "
 		       "state.\n", qdev->ndev->name);
+
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		return;
 	}
 
@@ -1518,8 +1533,10 @@ static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
 		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7))
+			 2) << 7)) {
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		return 0;
+	}
 	status = ql_is_auto_cfg(qdev);
 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -1533,8 +1550,10 @@ static u32 ql_get_speed(struct ql3_adapter *qdev)
 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
 		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7))
+			 2) << 7)) {
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		return 0;
+	}
 	status = ql_get_link_speed(qdev);
 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -1548,8 +1567,10 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
1548 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1567 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1549 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1568 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1550 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1569 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1551 2) << 7)) 1570 2) << 7)) {
1571 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1552 return 0; 1572 return 0;
1573 }
1553 status = ql_is_link_full_dup(qdev); 1574 status = ql_is_link_full_dup(qdev);
1554 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1575 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1555 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1576 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -1615,14 +1636,16 @@ static const struct ethtool_ops ql3xxx_ethtool_ops = {
1615static int ql_populate_free_queue(struct ql3_adapter *qdev) 1636static int ql_populate_free_queue(struct ql3_adapter *qdev)
1616{ 1637{
1617 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; 1638 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1618 u64 map; 1639 dma_addr_t map;
1640 int err;
1619 1641
1620 while (lrg_buf_cb) { 1642 while (lrg_buf_cb) {
1621 if (!lrg_buf_cb->skb) { 1643 if (!lrg_buf_cb->skb) {
1622 lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len); 1644 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
1645 qdev->lrg_buffer_len);
1623 if (unlikely(!lrg_buf_cb->skb)) { 1646 if (unlikely(!lrg_buf_cb->skb)) {
1624 printk(KERN_DEBUG PFX 1647 printk(KERN_DEBUG PFX
1625 "%s: Failed dev_alloc_skb().\n", 1648 "%s: Failed netdev_alloc_skb().\n",
1626 qdev->ndev->name); 1649 qdev->ndev->name);
1627 break; 1650 break;
1628 } else { 1651 } else {
@@ -1636,6 +1659,17 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1636 qdev->lrg_buffer_len - 1659 qdev->lrg_buffer_len -
1637 QL_HEADER_SPACE, 1660 QL_HEADER_SPACE,
1638 PCI_DMA_FROMDEVICE); 1661 PCI_DMA_FROMDEVICE);
1662
1663 err = pci_dma_mapping_error(map);
1664 if(err) {
1665 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
1666 qdev->ndev->name, err);
1667 dev_kfree_skb(lrg_buf_cb->skb);
1668 lrg_buf_cb->skb = NULL;
1669 break;
1670 }
1671
1672
1639 lrg_buf_cb->buf_phy_addr_low = 1673 lrg_buf_cb->buf_phy_addr_low =
1640 cpu_to_le32(LS_64BITS(map)); 1674 cpu_to_le32(LS_64BITS(map));
1641 lrg_buf_cb->buf_phy_addr_high = 1675 lrg_buf_cb->buf_phy_addr_high =
@@ -1690,11 +1724,11 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1690 1724
1691 qdev->lrg_buf_q_producer_index++; 1725 qdev->lrg_buf_q_producer_index++;
1692 1726
1693 if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES) 1727 if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
1694 qdev->lrg_buf_q_producer_index = 0; 1728 qdev->lrg_buf_q_producer_index = 0;
1695 1729
1696 if (qdev->lrg_buf_q_producer_index == 1730 if (qdev->lrg_buf_q_producer_index ==
1697 (NUM_LBUFQ_ENTRIES - 1)) { 1731 (qdev->num_lbufq_entries - 1)) {
1698 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; 1732 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1699 } 1733 }
1700 } 1734 }
@@ -1713,8 +1747,31 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1713{ 1747{
1714 struct ql_tx_buf_cb *tx_cb; 1748 struct ql_tx_buf_cb *tx_cb;
1715 int i; 1749 int i;
1750 int retval = 0;
1716 1751
1752 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
 1753 printk(KERN_WARNING "Frame short, but it was padded and sent.\n");
1754 }
1755
1717 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; 1756 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1757
1758 /* Check the transmit response flags for any errors */
1759 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1760 printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
1761
1762 qdev->stats.tx_errors++;
1763 retval = -EIO;
1764 goto frame_not_sent;
1765 }
1766
1767 if(tx_cb->seg_count == 0) {
 1768 printk(KERN_ERR "tx_cb->seg_count == 0 for transaction_id: %d\n", mac_rsp->transaction_id);
1769
1770 qdev->stats.tx_errors++;
1771 retval = -EIO;
1772 goto invalid_seg_count;
1773 }
1774
1718 pci_unmap_single(qdev->pdev, 1775 pci_unmap_single(qdev->pdev,
1719 pci_unmap_addr(&tx_cb->map[0], mapaddr), 1776 pci_unmap_addr(&tx_cb->map[0], mapaddr),
1720 pci_unmap_len(&tx_cb->map[0], maplen), 1777 pci_unmap_len(&tx_cb->map[0], maplen),
@@ -1731,11 +1788,32 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1731 } 1788 }
1732 qdev->stats.tx_packets++; 1789 qdev->stats.tx_packets++;
1733 qdev->stats.tx_bytes += tx_cb->skb->len; 1790 qdev->stats.tx_bytes += tx_cb->skb->len;
1791
1792frame_not_sent:
1734 dev_kfree_skb_irq(tx_cb->skb); 1793 dev_kfree_skb_irq(tx_cb->skb);
1735 tx_cb->skb = NULL; 1794 tx_cb->skb = NULL;
1795
1796invalid_seg_count:
1736 atomic_inc(&qdev->tx_count); 1797 atomic_inc(&qdev->tx_count);
1737} 1798}
1738 1799
 1800static void ql_get_sbuf(struct ql3_adapter *qdev)
1801{
1802 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1803 qdev->small_buf_index = 0;
1804 qdev->small_buf_release_cnt++;
1805}
1806
 1807static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
1808{
1809 struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
1810 lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
1811 qdev->lrg_buf_release_cnt++;
1812 if (++qdev->lrg_buf_index == qdev->num_large_buffers)
1813 qdev->lrg_buf_index = 0;
 1814 return lrg_buf_cb;
1815}
1816
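ql_get_sbuf() and ql_get_lbuf() factor out the index bookkeeping that each completion handler used to open-code. Both share the same wrap logic, a compare-and-reset rather than a modulo; an illustrative equivalent (not part of the patch):

	static u32 ring_advance(u32 index, u32 ring_size)
	{
		return (index + 1 == ring_size) ? 0 : index + 1;
	}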
1739/* 1817/*
1740 * The difference between 3022 and 3032 for inbound completions: 1818 * The difference between 3022 and 3032 for inbound completions:
1741 * 3022 uses two buffers per completion. The first buffer contains 1819 * 3022 uses two buffers per completion. The first buffer contains
@@ -1751,47 +1829,21 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1751static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, 1829static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1752 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) 1830 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
1753{ 1831{
1754 long int offset;
1755 u32 lrg_buf_phy_addr_low = 0;
1756 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; 1832 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1757 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; 1833 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1758 u32 *curr_ial_ptr;
1759 struct sk_buff *skb; 1834 struct sk_buff *skb;
1760 u16 length = le16_to_cpu(ib_mac_rsp_ptr->length); 1835 u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
1761 1836
1762 /* 1837 /*
1763 * Get the inbound address list (small buffer). 1838 * Get the inbound address list (small buffer).
1764 */ 1839 */
1765 offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE; 1840 ql_get_sbuf(qdev);
1766 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1767 qdev->small_buf_index = 0;
1768 1841
1769 curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset); 1842 if (qdev->device_id == QL3022_DEVICE_ID)
1770 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset; 1843 lrg_buf_cb1 = ql_get_lbuf(qdev);
1771 qdev->small_buf_release_cnt++;
1772
1773 if (qdev->device_id == QL3022_DEVICE_ID) {
1774 /* start of first buffer (3022 only) */
1775 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1776 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1777 qdev->lrg_buf_release_cnt++;
1778 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
1779 qdev->lrg_buf_index = 0;
1780 }
1781 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1782 curr_ial_ptr++;
1783 }
1784 1844
1785 /* start of second buffer */ 1845 /* start of second buffer */
1786 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1846 lrg_buf_cb2 = ql_get_lbuf(qdev);
1787 lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
1788
1789 /*
1790 * Second buffer gets sent up the stack.
1791 */
1792 qdev->lrg_buf_release_cnt++;
1793 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1794 qdev->lrg_buf_index = 0;
1795 skb = lrg_buf_cb2->skb; 1847 skb = lrg_buf_cb2->skb;
1796 1848
1797 qdev->stats.rx_packets++; 1849 qdev->stats.rx_packets++;
@@ -1819,11 +1871,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1819static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, 1871static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1820 struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) 1872 struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
1821{ 1873{
1822 long int offset;
1823 u32 lrg_buf_phy_addr_low = 0;
1824 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; 1874 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1825 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; 1875 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1826 u32 *curr_ial_ptr;
1827 struct sk_buff *skb1 = NULL, *skb2; 1876 struct sk_buff *skb1 = NULL, *skb2;
1828 struct net_device *ndev = qdev->ndev; 1877 struct net_device *ndev = qdev->ndev;
1829 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); 1878 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
@@ -1833,35 +1882,20 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1833 * Get the inbound address list (small buffer). 1882 * Get the inbound address list (small buffer).
1834 */ 1883 */
1835 1884
1836 offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE; 1885 ql_get_sbuf(qdev);
1837 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1838 qdev->small_buf_index = 0;
1839 curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
1840 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1841 qdev->small_buf_release_cnt++;
1842 1886
1843 if (qdev->device_id == QL3022_DEVICE_ID) { 1887 if (qdev->device_id == QL3022_DEVICE_ID) {
1844 /* start of first buffer on 3022 */ 1888 /* start of first buffer on 3022 */
1845 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1889 lrg_buf_cb1 = ql_get_lbuf(qdev);
1846 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1847 qdev->lrg_buf_release_cnt++;
1848 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1849 qdev->lrg_buf_index = 0;
1850 skb1 = lrg_buf_cb1->skb; 1890 skb1 = lrg_buf_cb1->skb;
1851 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1852 curr_ial_ptr++;
1853 size = ETH_HLEN; 1891 size = ETH_HLEN;
1854 if (*((u16 *) skb1->data) != 0xFFFF) 1892 if (*((u16 *) skb1->data) != 0xFFFF)
1855 size += VLAN_ETH_HLEN - ETH_HLEN; 1893 size += VLAN_ETH_HLEN - ETH_HLEN;
1856 } 1894 }
1857 1895
1858 /* start of second buffer */ 1896 /* start of second buffer */
1859 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1897 lrg_buf_cb2 = ql_get_lbuf(qdev);
1860 lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
1861 skb2 = lrg_buf_cb2->skb; 1898 skb2 = lrg_buf_cb2->skb;
1862 qdev->lrg_buf_release_cnt++;
1863 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1864 qdev->lrg_buf_index = 0;
1865 1899
1866 skb_put(skb2, length); /* Just the second buffer length here. */ 1900 skb_put(skb2, length); /* Just the second buffer length here. */
1867 pci_unmap_single(qdev->pdev, 1901 pci_unmap_single(qdev->pdev,
@@ -1914,10 +1948,13 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
1914 struct net_rsp_iocb *net_rsp; 1948 struct net_rsp_iocb *net_rsp;
1915 struct net_device *ndev = qdev->ndev; 1949 struct net_device *ndev = qdev->ndev;
1916 unsigned long hw_flags; 1950 unsigned long hw_flags;
1951 int work_done = 0;
1952
1953 u32 rsp_producer_index = le32_to_cpu(*(qdev->prsp_producer_index));
1917 1954
1918 /* While there are entries in the completion queue. */ 1955 /* While there are entries in the completion queue. */
1919 while ((cpu_to_le32(*(qdev->prsp_producer_index)) != 1956 while ((rsp_producer_index !=
1920 qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) { 1957 qdev->rsp_consumer_index) && (work_done < work_to_do)) {
1921 1958
1922 net_rsp = qdev->rsp_current; 1959 net_rsp = qdev->rsp_current;
1923 switch (net_rsp->opcode) { 1960 switch (net_rsp->opcode) {
@@ -1968,37 +2005,34 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
1968 } else { 2005 } else {
1969 qdev->rsp_current++; 2006 qdev->rsp_current++;
1970 } 2007 }
2008
2009 work_done = *tx_cleaned + *rx_cleaned;
1971 } 2010 }
1972 2011
1973 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 2012 if(work_done) {
2013 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1974 2014
1975 ql_update_lrg_bufq_prod_index(qdev); 2015 ql_update_lrg_bufq_prod_index(qdev);
1976 2016
1977 if (qdev->small_buf_release_cnt >= 16) { 2017 if (qdev->small_buf_release_cnt >= 16) {
1978 while (qdev->small_buf_release_cnt >= 16) { 2018 while (qdev->small_buf_release_cnt >= 16) {
1979 qdev->small_buf_q_producer_index++; 2019 qdev->small_buf_q_producer_index++;
1980 2020
1981 if (qdev->small_buf_q_producer_index == 2021 if (qdev->small_buf_q_producer_index ==
1982 NUM_SBUFQ_ENTRIES) 2022 NUM_SBUFQ_ENTRIES)
1983 qdev->small_buf_q_producer_index = 0; 2023 qdev->small_buf_q_producer_index = 0;
1984 qdev->small_buf_release_cnt -= 8; 2024 qdev->small_buf_release_cnt -= 8;
1985 } 2025 }
1986 2026
1987 ql_write_common_reg(qdev, 2027 wmb();
1988 &port_regs->CommonRegs. 2028 ql_write_common_reg(qdev,
1989 rxSmallQProducerIndex, 2029 &port_regs->CommonRegs.
1990 qdev->small_buf_q_producer_index); 2030 rxSmallQProducerIndex,
1991 } 2031 qdev->small_buf_q_producer_index);
1992 2032
1993 ql_write_common_reg(qdev, 2033 }
1994 &port_regs->CommonRegs.rspQConsumerIndex,
1995 qdev->rsp_consumer_index);
1996 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1997 2034
1998 if (unlikely(netif_queue_stopped(qdev->ndev))) { 2035 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1999 if (netif_queue_stopped(qdev->ndev) &&
2000 (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
2001 netif_wake_queue(qdev->ndev);
2002 } 2036 }
2003 2037
2004 return *tx_cleaned + *rx_cleaned; 2038 return *tx_cleaned + *rx_cleaned;
@@ -2009,6 +2043,8 @@ static int ql_poll(struct net_device *ndev, int *budget)
2009 struct ql3_adapter *qdev = netdev_priv(ndev); 2043 struct ql3_adapter *qdev = netdev_priv(ndev);
2010 int work_to_do = min(*budget, ndev->quota); 2044 int work_to_do = min(*budget, ndev->quota);
2011 int rx_cleaned = 0, tx_cleaned = 0; 2045 int rx_cleaned = 0, tx_cleaned = 0;
2046 unsigned long hw_flags;
2047 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2012 2048
2013 if (!netif_carrier_ok(ndev)) 2049 if (!netif_carrier_ok(ndev))
2014 goto quit_polling; 2050 goto quit_polling;
@@ -2017,9 +2053,17 @@ static int ql_poll(struct net_device *ndev, int *budget)
2017 *budget -= rx_cleaned; 2053 *budget -= rx_cleaned;
2018 ndev->quota -= rx_cleaned; 2054 ndev->quota -= rx_cleaned;
2019 2055
 2020 if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) { 2056 if(tx_cleaned + rx_cleaned != work_to_do ||
2057 !netif_running(ndev)) {
2021quit_polling: 2058quit_polling:
2022 netif_rx_complete(ndev); 2059 netif_rx_complete(ndev);
2060
2061 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2062 ql_write_common_reg(qdev,
2063 &port_regs->CommonRegs.rspQConsumerIndex,
2064 qdev->rsp_consumer_index);
2065 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2066
2023 ql_enable_interrupts(qdev); 2067 ql_enable_interrupts(qdev);
2024 return 0; 2068 return 0;
2025 } 2069 }
@@ -2073,10 +2117,9 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2073 spin_unlock(&qdev->adapter_lock); 2117 spin_unlock(&qdev->adapter_lock);
2074 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2118 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2075 ql_disable_interrupts(qdev); 2119 ql_disable_interrupts(qdev);
2076 if (likely(netif_rx_schedule_prep(ndev))) 2120 if (likely(netif_rx_schedule_prep(ndev))) {
2077 __netif_rx_schedule(ndev); 2121 __netif_rx_schedule(ndev);
2078 else 2122 }
2079 ql_enable_interrupts(qdev);
2080 } else { 2123 } else {
2081 return IRQ_NONE; 2124 return IRQ_NONE;
2082 } 2125 }
@@ -2093,8 +2136,12 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
 2093 * the next OAL if more frags are coming. 2136 * the next OAL if more frags are coming.
2094 * That is why the frags:segment count ratio is not linear. 2137 * That is why the frags:segment count ratio is not linear.
2095 */ 2138 */
2096static int ql_get_seg_count(unsigned short frags) 2139static int ql_get_seg_count(struct ql3_adapter *qdev,
2140 unsigned short frags)
2097{ 2141{
2142 if (qdev->device_id == QL3022_DEVICE_ID)
2143 return 1;
2144
2098 switch(frags) { 2145 switch(frags) {
2099 case 0: return 1; /* just the skb->data seg */ 2146 case 0: return 1; /* just the skb->data seg */
2100 case 1: return 2; /* skb->data + 1 frag */ 2147 case 1: return 2; /* skb->data + 1 frag */
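The non-linear frags-to-segments mapping follows from the send-path layout described further down: 3 address/length pairs (ALPs) in the IOCB, 5 per OAL, with the last entry of each consumed by a chain pointer whenever more entries follow. A back-of-the-envelope sketch of the count for a given number of data segments, assuming that layout (illustrative only; the driver keeps its explicit switch):

	static int alp_entries_needed(int data_segs)
	{
		int entries;

		if (data_segs <= 3)
			return data_segs;	/* fits in the IOCB alone */

		entries = 2 + 1;		/* 2 data ALPs + chain in IOCB */
		data_segs -= 2;
		while (data_segs > 5) {		/* full OAL: 4 data + chain */
			entries += 4 + 1;
			data_segs -= 4;
		}
		return entries + data_segs;	/* last OAL takes the rest */
	}

This reproduces the cases visible above (0 frags -> 1, 1 frag -> 2) and jumps by an extra entry wherever a continuation ALP is inserted, which is exactly what the seg == 2/7/12/17 checks in ql_send_map() below account for.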
@@ -2139,11 +2186,13 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
2139 2186
2140 if (ip) { 2187 if (ip) {
2141 if (ip->protocol == IPPROTO_TCP) { 2188 if (ip->protocol == IPPROTO_TCP) {
2142 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC; 2189 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2190 OB_3032MAC_IOCB_REQ_IC;
2143 mac_iocb_ptr->ip_hdr_off = offset; 2191 mac_iocb_ptr->ip_hdr_off = offset;
2144 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2192 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2145 } else if (ip->protocol == IPPROTO_UDP) { 2193 } else if (ip->protocol == IPPROTO_UDP) {
2146 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC; 2194 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2195 OB_3032MAC_IOCB_REQ_IC;
2147 mac_iocb_ptr->ip_hdr_off = offset; 2196 mac_iocb_ptr->ip_hdr_off = offset;
2148 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2197 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2149 } 2198 }
@@ -2151,53 +2200,42 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
2151} 2200}
2152 2201
2153/* 2202/*
2154 * The difference between 3022 and 3032 sends: 2203 * Map the buffers for this transmit. This will return
 2155 * 3022 only supports a simple single segment transmission. 2204 * NETDEV_TX_BUSY or NETDEV_TX_OK depending on whether mapping succeeded.
2156 * 3032 supports checksumming and scatter/gather lists (fragments).
2157 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2158 * in the IOCB plus a chain of outbound address lists (OAL) that
2159 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
 2160 * will be used to point to an OAL when more ALP entries are required.
2161 * The IOCB is always the top of the chain followed by one or more
2162 * OALs (when necessary).
2163 */ 2205 */
2164static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) 2206static int ql_send_map(struct ql3_adapter *qdev,
2207 struct ob_mac_iocb_req *mac_iocb_ptr,
2208 struct ql_tx_buf_cb *tx_cb,
2209 struct sk_buff *skb)
2165{ 2210{
2166 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2167 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2168 struct ql_tx_buf_cb *tx_cb;
2169 u32 tot_len = skb->len;
2170 struct oal *oal; 2211 struct oal *oal;
2171 struct oal_entry *oal_entry; 2212 struct oal_entry *oal_entry;
2172 int len; 2213 int len = skb->len;
2173 struct ob_mac_iocb_req *mac_iocb_ptr; 2214 dma_addr_t map;
2174 u64 map; 2215 int err;
2216 int completed_segs, i;
2175 int seg_cnt, seg = 0; 2217 int seg_cnt, seg = 0;
2176 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2218 int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2177 2219
2178 if (unlikely(atomic_read(&qdev->tx_count) < 2)) { 2220 seg_cnt = tx_cb->seg_count = ql_get_seg_count(qdev,
2179 if (!netif_queue_stopped(ndev)) 2221 (skb_shinfo(skb)->nr_frags));
2180 netif_stop_queue(ndev);
2181 return NETDEV_TX_BUSY;
2182 }
2183 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
2184 seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
2185 if(seg_cnt == -1) { 2222 if(seg_cnt == -1) {
2186 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); 2223 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
2187 return NETDEV_TX_OK; 2224 return NETDEV_TX_BUSY;
2188
2189 } 2225 }
2190 mac_iocb_ptr = tx_cb->queue_entry; 2226 /*
2191 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2227 * Map the skb buffer first.
2192 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2228 */
2193 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2194 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2195 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2196 tx_cb->skb = skb;
2197 if (skb->ip_summed == CHECKSUM_PARTIAL)
2198 ql_hw_csum_setup(skb, mac_iocb_ptr);
2199 len = skb_headlen(skb);
2200 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2229 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2230
2231 err = pci_dma_mapping_error(map);
2232 if(err) {
2233 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
2234 qdev->ndev->name, err);
2235
2236 return NETDEV_TX_BUSY;
2237 }
2238
2201 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2239 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2202 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2240 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2203 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2241 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
@@ -2206,15 +2244,14 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2206 pci_unmap_len_set(&tx_cb->map[seg], maplen, len); 2244 pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
2207 seg++; 2245 seg++;
2208 2246
2209 if (!skb_shinfo(skb)->nr_frags) { 2247 if (seg_cnt == 1) {
2210 /* Terminate the last segment. */ 2248 /* Terminate the last segment. */
2211 oal_entry->len = 2249 oal_entry->len =
2212 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY); 2250 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2213 } else { 2251 } else {
2214 int i;
2215 oal = tx_cb->oal; 2252 oal = tx_cb->oal;
2216 for (i=0; i<frag_cnt; i++,seg++) { 2253 for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
2217 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2254 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2218 oal_entry++; 2255 oal_entry++;
2219 if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */ 2256 if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */
2220 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */ 2257 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
@@ -2224,6 +2261,15 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2224 map = pci_map_single(qdev->pdev, oal, 2261 map = pci_map_single(qdev->pdev, oal,
2225 sizeof(struct oal), 2262 sizeof(struct oal),
2226 PCI_DMA_TODEVICE); 2263 PCI_DMA_TODEVICE);
2264
2265 err = pci_dma_mapping_error(map);
2266 if(err) {
2267
 2268 printk(KERN_ERR "%s: PCI mapping of outbound address list failed with error: %d\n",
2269 qdev->ndev->name, err);
2270 goto map_error;
2271 }
2272
2227 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2273 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2228 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2274 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2229 oal_entry->len = 2275 oal_entry->len =
@@ -2242,6 +2288,14 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2242 pci_map_page(qdev->pdev, frag->page, 2288 pci_map_page(qdev->pdev, frag->page,
2243 frag->page_offset, frag->size, 2289 frag->page_offset, frag->size,
2244 PCI_DMA_TODEVICE); 2290 PCI_DMA_TODEVICE);
2291
2292 err = pci_dma_mapping_error(map);
2293 if(err) {
2294 printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
2295 qdev->ndev->name, err);
2296 goto map_error;
2297 }
2298
2245 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2299 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2246 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2300 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2247 oal_entry->len = cpu_to_le32(frag->size); 2301 oal_entry->len = cpu_to_le32(frag->size);
@@ -2253,6 +2307,94 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2253 oal_entry->len = 2307 oal_entry->len =
2254 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY); 2308 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2255 } 2309 }
2310
2311 return NETDEV_TX_OK;
2312
2313map_error:
 2314 /* A PCI mapping failed, so back out: walk the OALs and pages
 2315 * that were already mapped and unmap each of them to clean up
 2316 * properly.
2317 */
2318
2319 seg = 1;
2320 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2321 oal = tx_cb->oal;
2322 for (i=0; i<completed_segs; i++,seg++) {
2323 oal_entry++;
2324
2325 if((seg == 2 && seg_cnt > 3) || /* Check for continuation */
2326 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
2327 (seg == 12 && seg_cnt > 13) || /* but necessary. */
2328 (seg == 17 && seg_cnt > 18)) {
2329 pci_unmap_single(qdev->pdev,
2330 pci_unmap_addr(&tx_cb->map[seg], mapaddr),
2331 pci_unmap_len(&tx_cb->map[seg], maplen),
2332 PCI_DMA_TODEVICE);
2333 oal++;
2334 seg++;
2335 }
2336
2337 pci_unmap_page(qdev->pdev,
2338 pci_unmap_addr(&tx_cb->map[seg], mapaddr),
2339 pci_unmap_len(&tx_cb->map[seg], maplen),
2340 PCI_DMA_TODEVICE);
2341 }
2342
2343 pci_unmap_single(qdev->pdev,
2344 pci_unmap_addr(&tx_cb->map[0], mapaddr),
 2345 pci_unmap_len(&tx_cb->map[0], maplen),
2346 PCI_DMA_TODEVICE);
2347
2348 return NETDEV_TX_BUSY;
2349
2350}
2351
2352/*
2353 * The difference between 3022 and 3032 sends:
2354 * 3022 only supports a simple single segment transmission.
2355 * 3032 supports checksumming and scatter/gather lists (fragments).
2356 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2357 * in the IOCB plus a chain of outbound address lists (OAL) that
2358 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
 2359 * will be used to point to an OAL when more ALP entries are required.
2360 * The IOCB is always the top of the chain followed by one or more
2361 * OALs (when necessary).
2362 */
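Drawn out, the chain this comment describes looks like the following for a send needing more than eight data segments (illustrative):

	IOCB:  [ALP][ALP][ALP -> OAL0]            3rd ALP chains when >3 entries
	OAL0:  [ALP][ALP][ALP][ALP][ALP -> OAL1]  5th ALP chains when more follow
	OAL1:  [ALP][ALP][ALP][ALP][ALP]          final OAL

Chain entries carry OAL_CONT_ENTRY and the final buffer carries OAL_LAST_ENTRY, per the flag definitions in qla3xxx.h below.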
2363static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2364{
2365 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2366 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2367 struct ql_tx_buf_cb *tx_cb;
2368 u32 tot_len = skb->len;
2369 struct ob_mac_iocb_req *mac_iocb_ptr;
2370
2371 if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2372 return NETDEV_TX_BUSY;
2373 }
2374
2375 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
2376 if((tx_cb->seg_count = ql_get_seg_count(qdev,
2377 (skb_shinfo(skb)->nr_frags))) == -1) {
2378 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
2379 return NETDEV_TX_OK;
2380 }
2381
2382 mac_iocb_ptr = tx_cb->queue_entry;
2383 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2384 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2385 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2386 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2387 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2388 tx_cb->skb = skb;
2389 if (qdev->device_id == QL3032_DEVICE_ID &&
2390 skb->ip_summed == CHECKSUM_PARTIAL)
2391 ql_hw_csum_setup(skb, mac_iocb_ptr);
2392
2393 if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
2394 printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
2395 return NETDEV_TX_BUSY;
2396 }
2397
2256 wmb(); 2398 wmb();
2257 qdev->req_producer_index++; 2399 qdev->req_producer_index++;
2258 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2400 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
@@ -2338,12 +2480,19 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2338{ 2480{
2339 /* Create Large Buffer Queue */ 2481 /* Create Large Buffer Queue */
2340 qdev->lrg_buf_q_size = 2482 qdev->lrg_buf_q_size =
2341 NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); 2483 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2342 if (qdev->lrg_buf_q_size < PAGE_SIZE) 2484 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2343 qdev->lrg_buf_q_alloc_size = PAGE_SIZE; 2485 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2344 else 2486 else
2345 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2487 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2346 2488
 2489 qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), GFP_KERNEL);
2490 if (qdev->lrg_buf == NULL) {
2491 printk(KERN_ERR PFX
2492 "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
2493 return -ENOMEM;
2494 }
2495
2347 qdev->lrg_buf_q_alloc_virt_addr = 2496 qdev->lrg_buf_q_alloc_virt_addr =
2348 pci_alloc_consistent(qdev->pdev, 2497 pci_alloc_consistent(qdev->pdev,
2349 qdev->lrg_buf_q_alloc_size, 2498 qdev->lrg_buf_q_alloc_size,
@@ -2393,7 +2542,7 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2393 "%s: Already done.\n", qdev->ndev->name); 2542 "%s: Already done.\n", qdev->ndev->name);
2394 return; 2543 return;
2395 } 2544 }
 2396 2545 kfree(qdev->lrg_buf);
2397 pci_free_consistent(qdev->pdev, 2546 pci_free_consistent(qdev->pdev,
2398 qdev->lrg_buf_q_alloc_size, 2547 qdev->lrg_buf_q_alloc_size,
2399 qdev->lrg_buf_q_alloc_virt_addr, 2548 qdev->lrg_buf_q_alloc_virt_addr,
@@ -2438,8 +2587,6 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2438 2587
2439 small_buf_q_entry = qdev->small_buf_q_virt_addr; 2588 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2440 2589
2441 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;
2442
2443 /* Initialize the small buffer queue. */ 2590 /* Initialize the small buffer queue. */
2444 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { 2591 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2445 small_buf_q_entry->addr_high = 2592 small_buf_q_entry->addr_high =
@@ -2476,7 +2623,7 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
2476 int i = 0; 2623 int i = 0;
2477 struct ql_rcv_buf_cb *lrg_buf_cb; 2624 struct ql_rcv_buf_cb *lrg_buf_cb;
2478 2625
2479 for (i = 0; i < NUM_LARGE_BUFFERS; i++) { 2626 for (i = 0; i < qdev->num_large_buffers; i++) {
2480 lrg_buf_cb = &qdev->lrg_buf[i]; 2627 lrg_buf_cb = &qdev->lrg_buf[i];
2481 if (lrg_buf_cb->skb) { 2628 if (lrg_buf_cb->skb) {
2482 dev_kfree_skb(lrg_buf_cb->skb); 2629 dev_kfree_skb(lrg_buf_cb->skb);
@@ -2497,7 +2644,7 @@ static void ql_init_large_buffers(struct ql3_adapter *qdev)
2497 struct ql_rcv_buf_cb *lrg_buf_cb; 2644 struct ql_rcv_buf_cb *lrg_buf_cb;
2498 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; 2645 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2499 2646
2500 for (i = 0; i < NUM_LARGE_BUFFERS; i++) { 2647 for (i = 0; i < qdev->num_large_buffers; i++) {
2501 lrg_buf_cb = &qdev->lrg_buf[i]; 2648 lrg_buf_cb = &qdev->lrg_buf[i];
2502 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; 2649 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2503 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; 2650 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
@@ -2512,10 +2659,12 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2512 int i; 2659 int i;
2513 struct ql_rcv_buf_cb *lrg_buf_cb; 2660 struct ql_rcv_buf_cb *lrg_buf_cb;
2514 struct sk_buff *skb; 2661 struct sk_buff *skb;
2515 u64 map; 2662 dma_addr_t map;
2663 int err;
2516 2664
2517 for (i = 0; i < NUM_LARGE_BUFFERS; i++) { 2665 for (i = 0; i < qdev->num_large_buffers; i++) {
2518 skb = dev_alloc_skb(qdev->lrg_buffer_len); 2666 skb = netdev_alloc_skb(qdev->ndev,
2667 qdev->lrg_buffer_len);
2519 if (unlikely(!skb)) { 2668 if (unlikely(!skb)) {
2520 /* Better luck next round */ 2669 /* Better luck next round */
2521 printk(KERN_ERR PFX 2670 printk(KERN_ERR PFX
@@ -2541,6 +2690,15 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2541 qdev->lrg_buffer_len - 2690 qdev->lrg_buffer_len -
2542 QL_HEADER_SPACE, 2691 QL_HEADER_SPACE,
2543 PCI_DMA_FROMDEVICE); 2692 PCI_DMA_FROMDEVICE);
2693
2694 err = pci_dma_mapping_error(map);
2695 if(err) {
2696 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
2697 qdev->ndev->name, err);
2698 ql_free_large_buffers(qdev);
2699 return -ENOMEM;
2700 }
2701
2544 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2702 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2545 pci_unmap_len_set(lrg_buf_cb, maplen, 2703 pci_unmap_len_set(lrg_buf_cb, maplen,
2546 qdev->lrg_buffer_len - 2704 qdev->lrg_buffer_len -
@@ -2592,9 +2750,15 @@ static int ql_create_send_free_list(struct ql3_adapter *qdev)
2592 2750
2593static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2751static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2594{ 2752{
2595 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) 2753 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2754 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2596 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2755 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2756 }
2597 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { 2757 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2758 /*
 2759 * Bigger buffers, so fewer of them.
2760 */
2761 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2598 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2762 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2599 } else { 2763 } else {
2600 printk(KERN_ERR PFX 2764 printk(KERN_ERR PFX
@@ -2602,6 +2766,7 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2602 qdev->ndev->name); 2766 qdev->ndev->name);
2603 return -ENOMEM; 2767 return -ENOMEM;
2604 } 2768 }
2769 qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2605 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2770 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2606 qdev->max_frame_size = 2771 qdev->max_frame_size =
2607 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2772 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
@@ -2834,7 +2999,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
2834 &hmem_regs->rxLargeQBaseAddrLow, 2999 &hmem_regs->rxLargeQBaseAddrLow,
2835 LS_64BITS(qdev->lrg_buf_q_phy_addr)); 3000 LS_64BITS(qdev->lrg_buf_q_phy_addr));
2836 3001
2837 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES); 3002 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
2838 3003
2839 ql_write_page1_reg(qdev, 3004 ql_write_page1_reg(qdev,
2840 &hmem_regs->rxLargeBufferLength, 3005 &hmem_regs->rxLargeBufferLength,
@@ -2856,7 +3021,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
2856 3021
2857 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; 3022 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
2858 qdev->small_buf_release_cnt = 8; 3023 qdev->small_buf_release_cnt = 8;
2859 qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1; 3024 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
2860 qdev->lrg_buf_release_cnt = 8; 3025 qdev->lrg_buf_release_cnt = 8;
2861 qdev->lrg_buf_next_free = 3026 qdev->lrg_buf_next_free =
2862 (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr; 3027 (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
@@ -3292,6 +3457,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
3292err_init: 3457err_init:
3293 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3458 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3294err_lock: 3459err_lock:
3460 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3295 free_irq(qdev->pdev->irq, ndev); 3461 free_irq(qdev->pdev->irq, ndev);
3296err_irq: 3462err_irq:
3297 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) { 3463 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
@@ -3343,27 +3509,6 @@ static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
3343 return &qdev->stats; 3509 return &qdev->stats;
3344} 3510}
3345 3511
3346static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
3347{
3348 struct ql3_adapter *qdev = netdev_priv(ndev);
3349 printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
3350 if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
3351 printk(KERN_ERR PFX
3352 "%s: mtu size of %d is not valid. Use exactly %d or "
3353 "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
3354 JUMBO_MTU_SIZE);
3355 return -EINVAL;
3356 }
3357
3358 if (!netif_running(ndev)) {
3359 ndev->mtu = new_mtu;
3360 return 0;
3361 }
3362
3363 ndev->mtu = new_mtu;
3364 return ql_cycle_adapter(qdev,QL_DO_RESET);
3365}
3366
3367static void ql3xxx_set_multicast_list(struct net_device *ndev) 3512static void ql3xxx_set_multicast_list(struct net_device *ndev)
3368{ 3513{
3369 /* 3514 /*
@@ -3609,8 +3754,12 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3609 } 3754 }
3610 3755
3611 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3756 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3612 if (!ndev) 3757 if (!ndev) {
 3758 printk(KERN_ERR PFX "%s: could not alloc etherdev\n",
3759 pci_name(pdev));
3760 err = -ENOMEM;
3613 goto err_out_free_regions; 3761 goto err_out_free_regions;
3762 }
3614 3763
3615 SET_MODULE_OWNER(ndev); 3764 SET_MODULE_OWNER(ndev);
3616 SET_NETDEV_DEV(ndev, &pdev->dev); 3765 SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -3639,6 +3788,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3639 if (!qdev->mem_map_registers) { 3788 if (!qdev->mem_map_registers) {
3640 printk(KERN_ERR PFX "%s: cannot map device registers\n", 3789 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3641 pci_name(pdev)); 3790 pci_name(pdev));
3791 err = -EIO;
3642 goto err_out_free_ndev; 3792 goto err_out_free_ndev;
3643 } 3793 }
3644 3794
@@ -3650,7 +3800,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3650 ndev->hard_start_xmit = ql3xxx_send; 3800 ndev->hard_start_xmit = ql3xxx_send;
3651 ndev->stop = ql3xxx_close; 3801 ndev->stop = ql3xxx_close;
3652 ndev->get_stats = ql3xxx_get_stats; 3802 ndev->get_stats = ql3xxx_get_stats;
3653 ndev->change_mtu = ql3xxx_change_mtu;
3654 ndev->set_multicast_list = ql3xxx_set_multicast_list; 3803 ndev->set_multicast_list = ql3xxx_set_multicast_list;
3655 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 3804 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3656 ndev->set_mac_address = ql3xxx_set_mac_address; 3805 ndev->set_mac_address = ql3xxx_set_mac_address;
@@ -3667,6 +3816,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3667 printk(KERN_ALERT PFX 3816 printk(KERN_ALERT PFX
3668 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n", 3817 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
3669 qdev->index); 3818 qdev->index);
3819 err = -EIO;
3670 goto err_out_iounmap; 3820 goto err_out_iounmap;
3671 } 3821 }
3672 3822
@@ -3674,9 +3824,11 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3674 3824
3675 /* Validate and set parameters */ 3825 /* Validate and set parameters */
3676 if (qdev->mac_index) { 3826 if (qdev->mac_index) {
 3827 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
3677 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress, 3828 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
3678 ETH_ALEN); 3829 ETH_ALEN);
3679 } else { 3830 } else {
 3831 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
3680 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress, 3832 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
3681 ETH_ALEN); 3833 ETH_ALEN);
3682 } 3834 }
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index b2d76ea68827..34cd6580fd07 100755
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -1014,13 +1014,15 @@ struct eeprom_data {
1014 1014
1015/* Transmit and Receive Buffers */ 1015/* Transmit and Receive Buffers */
1016#define NUM_LBUFQ_ENTRIES 128 1016#define NUM_LBUFQ_ENTRIES 128
1017#define JUMBO_NUM_LBUFQ_ENTRIES \
1018(NUM_LBUFQ_ENTRIES/(JUMBO_MTU_SIZE/NORMAL_MTU_SIZE))
1017#define NUM_SBUFQ_ENTRIES 64 1019#define NUM_SBUFQ_ENTRIES 64
1018#define QL_SMALL_BUFFER_SIZE 32 1020#define QL_SMALL_BUFFER_SIZE 32
1019#define QL_ADDR_ELE_PER_BUFQ_ENTRY \ 1021#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
1020(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element)) 1022(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
 1021 /* Each send has at least one control block. This is how many we keep. */ 1023 /* Each send has at least one control block. This is how many we keep. */
1022#define NUM_SMALL_BUFFERS NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY 1024#define NUM_SMALL_BUFFERS NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY
1023#define NUM_LARGE_BUFFERS NUM_LBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY 1025
1024#define QL_HEADER_SPACE 32 /* make header space at top of skb. */ 1026#define QL_HEADER_SPACE 32 /* make header space at top of skb. */
1025/* 1027/*
1026 * Large & Small Buffers for Receives 1028 * Large & Small Buffers for Receives
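Worked through with the values this driver uses elsewhere (assuming NORMAL_MTU_SIZE is 1500 and JUMBO_MTU_SIZE is 9000, as in qla3xxx.h of this era), the new define scales the large-buffer ring down by the MTU ratio:

	JUMBO_NUM_LBUFQ_ENTRIES = 128 / (9000 / 1500) = 128 / 6 = 21

and since num_large_buffers is num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY, the receive-buffer memory footprint at jumbo MTU stays close to the standard-MTU footprint instead of growing six-fold.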
@@ -1092,7 +1094,6 @@ struct oal_entry {
1092 u32 len; 1094 u32 len;
1093#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */ 1095#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */
1094#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */ 1096#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */
1095 u32 reserved;
1096}; 1097};
1097 1098
1098struct oal { 1099struct oal {
@@ -1193,7 +1194,7 @@ struct ql3_adapter {
1193 struct net_rsp_iocb *rsp_current; 1194 struct net_rsp_iocb *rsp_current;
1194 u16 rsp_consumer_index; 1195 u16 rsp_consumer_index;
1195 u16 reserved_06; 1196 u16 reserved_06;
1196 u32 *prsp_producer_index; 1197 volatile u32 *prsp_producer_index;
1197 u32 rsp_producer_index_phy_addr_high; 1198 u32 rsp_producer_index_phy_addr_high;
1198 u32 rsp_producer_index_phy_addr_low; 1199 u32 rsp_producer_index_phy_addr_low;
1199 1200
@@ -1207,9 +1208,11 @@ struct ql3_adapter {
1207 u32 lrg_buf_q_producer_index; 1208 u32 lrg_buf_q_producer_index;
1208 u32 lrg_buf_release_cnt; 1209 u32 lrg_buf_release_cnt;
1209 struct bufq_addr_element *lrg_buf_next_free; 1210 struct bufq_addr_element *lrg_buf_next_free;
1211 u32 num_large_buffers;
1212 u32 num_lbufq_entries;
1210 1213
1211 /* Large (Receive) Buffers */ 1214 /* Large (Receive) Buffers */
1212 struct ql_rcv_buf_cb lrg_buf[NUM_LARGE_BUFFERS]; 1215 struct ql_rcv_buf_cb *lrg_buf;
1213 struct ql_rcv_buf_cb *lrg_buf_free_head; 1216 struct ql_rcv_buf_cb *lrg_buf_free_head;
1214 struct ql_rcv_buf_cb *lrg_buf_free_tail; 1217 struct ql_rcv_buf_cb *lrg_buf_free_tail;
1215 u32 lrg_buf_free_count; 1218 u32 lrg_buf_free_count;
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 0e345cbc2bf9..33fb7f3b7041 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -430,6 +430,7 @@ struct XENA_dev_config {
430#define TX_PA_CFG_IGNORE_SNAP_OUI BIT(2) 430#define TX_PA_CFG_IGNORE_SNAP_OUI BIT(2)
431#define TX_PA_CFG_IGNORE_LLC_CTRL BIT(3) 431#define TX_PA_CFG_IGNORE_LLC_CTRL BIT(3)
432#define TX_PA_CFG_IGNORE_L2_ERR BIT(6) 432#define TX_PA_CFG_IGNORE_L2_ERR BIT(6)
433#define RX_PA_CFG_STRIP_VLAN_TAG BIT(15)
433 434
434/* Recent add, used only debug purposes. */ 435/* Recent add, used only debug purposes. */
435 u64 pcc_enable; 436 u64 pcc_enable;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index fd85648d98d1..44bb2395af84 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -42,6 +42,14 @@
42 * Possible values '1' for enable '0' for disable. Default is '0' 42 * Possible values '1' for enable '0' for disable. Default is '0'
 43 * lro_max_pkts: This parameter defines the maximum number of packets that can be 43 * lro_max_pkts: This parameter defines the maximum number of packets that can be
44 * aggregated as a single large packet 44 * aggregated as a single large packet
 45 * napi: This parameter is used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
 47 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
 51 * Default is '2', which means disable in promiscuous mode
52 * and enable in non-promiscuous mode.
45 ************************************************************************/ 53 ************************************************************************/
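For example, a hypothetical 'modprobe s2io vlan_tag_strip=0' load would clear RX_PA_CFG_STRIP_VLAN_TAG unconditionally at start-of-day, while the default of '2' lets s2io_set_multicast() toggle stripping as the interface enters and leaves promiscuous mode, as the hunks further down show.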
46 54
47#include <linux/module.h> 55#include <linux/module.h>
@@ -76,7 +84,7 @@
76#include "s2io.h" 84#include "s2io.h"
77#include "s2io-regs.h" 85#include "s2io-regs.h"
78 86
79#define DRV_VERSION "2.0.16.1" 87#define DRV_VERSION "2.0.17.1"
80 88
81/* S2io Driver name & version. */ 89/* S2io Driver name & version. */
82static char s2io_driver_name[] = "Neterion"; 90static char s2io_driver_name[] = "Neterion";
@@ -131,7 +139,7 @@ static char s2io_gstrings[][ETH_GSTRING_LEN] = {
131 "BIST Test\t(offline)" 139 "BIST Test\t(offline)"
132}; 140};
133 141
134static char ethtool_stats_keys[][ETH_GSTRING_LEN] = { 142static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
135 {"tmac_frms"}, 143 {"tmac_frms"},
136 {"tmac_data_octets"}, 144 {"tmac_data_octets"},
137 {"tmac_drop_frms"}, 145 {"tmac_drop_frms"},
@@ -225,7 +233,10 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
225 {"rxd_rd_cnt"}, 233 {"rxd_rd_cnt"},
226 {"rxd_wr_cnt"}, 234 {"rxd_wr_cnt"},
227 {"txf_rd_cnt"}, 235 {"txf_rd_cnt"},
228 {"rxf_wr_cnt"}, 236 {"rxf_wr_cnt"}
237};
238
239static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
229 {"rmac_ttl_1519_4095_frms"}, 240 {"rmac_ttl_1519_4095_frms"},
230 {"rmac_ttl_4096_8191_frms"}, 241 {"rmac_ttl_4096_8191_frms"},
231 {"rmac_ttl_8192_max_frms"}, 242 {"rmac_ttl_8192_max_frms"},
@@ -241,7 +252,10 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
241 {"rmac_red_discard"}, 252 {"rmac_red_discard"},
242 {"rmac_rts_discard"}, 253 {"rmac_rts_discard"},
243 {"rmac_ingm_full_discard"}, 254 {"rmac_ingm_full_discard"},
244 {"link_fault_cnt"}, 255 {"link_fault_cnt"}
256};
257
258static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
245 {"\n DRIVER STATISTICS"}, 259 {"\n DRIVER STATISTICS"},
246 {"single_bit_ecc_errs"}, 260 {"single_bit_ecc_errs"},
247 {"double_bit_ecc_errs"}, 261 {"double_bit_ecc_errs"},
@@ -269,8 +283,16 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
269 ("lro_avg_aggr_pkts"), 283 ("lro_avg_aggr_pkts"),
270}; 284};
271 285
272#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN 286#define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
273#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN 287#define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
288 ETH_GSTRING_LEN
289#define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
290
291#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
292#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
293
294#define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
295#define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
274 296
275#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN 297#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
276#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN 298#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
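Splitting the single key table into xena/enhanced/driver pieces lets the string and statistics dumps compose per device type. A minimal sketch of the composition (an assumed helper, not the driver's actual ethtool hook):

	static void s2io_sketch_get_strings(struct s2io_nic *sp, u8 *data)
	{
		/* Xena hardware stats are common to both chips */
		memcpy(data, ethtool_xena_stats_keys,
		       sizeof(ethtool_xena_stats_keys));
		data += sizeof(ethtool_xena_stats_keys);

		/* enhanced stats exist only on Hercules (XFRAME II) */
		if (sp->device_type == XFRAME_II_DEVICE) {
			memcpy(data, ethtool_enhanced_stats_keys,
			       sizeof(ethtool_enhanced_stats_keys));
			data += sizeof(ethtool_enhanced_stats_keys);
		}

		/* software/driver stats always come last */
		memcpy(data, ethtool_driver_stats_keys,
		       sizeof(ethtool_driver_stats_keys));
	}

matching the totals XFRAME_I_STAT_LEN and XFRAME_II_STAT_LEN encode and the dump order visible in s2io_get_ethtool_stats() below.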
@@ -293,6 +315,9 @@ static void s2io_vlan_rx_register(struct net_device *dev,
293 spin_unlock_irqrestore(&nic->tx_lock, flags); 315 spin_unlock_irqrestore(&nic->tx_lock, flags);
294} 316}
295 317
318/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
319int vlan_strip_flag;
320
296/* Unregister the vlan */ 321/* Unregister the vlan */
297static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) 322static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
298{ 323{
@@ -404,6 +429,7 @@ S2IO_PARM_INT(indicate_max_pkts, 0);
404 429
405S2IO_PARM_INT(napi, 1); 430S2IO_PARM_INT(napi, 1);
406S2IO_PARM_INT(ufo, 0); 431S2IO_PARM_INT(ufo, 0);
432S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
407 433
408static unsigned int tx_fifo_len[MAX_TX_FIFOS] = 434static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
409 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; 435 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
@@ -1371,6 +1397,16 @@ static int init_nic(struct s2io_nic *nic)
1371 &bar0->rts_frm_len_n[i]); 1397 &bar0->rts_frm_len_n[i]);
1372 } 1398 }
1373 } 1399 }
1400
1401 /* Disable differentiated services steering logic */
1402 for (i = 0; i < 64; i++) {
1403 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1404 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1405 dev->name);
1406 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1407 return FAILURE;
1408 }
1409 }
1374 1410
1375 /* Program statistics memory */ 1411 /* Program statistics memory */
1376 writeq(mac_control->stats_mem_phy, &bar0->stat_addr); 1412 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
@@ -1943,6 +1979,13 @@ static int start_nic(struct s2io_nic *nic)
1943 writeq(val64, &bar0->rx_pa_cfg); 1979 writeq(val64, &bar0->rx_pa_cfg);
1944 } 1980 }
1945 1981
1982 if (vlan_tag_strip == 0) {
1983 val64 = readq(&bar0->rx_pa_cfg);
1984 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
1985 writeq(val64, &bar0->rx_pa_cfg);
1986 vlan_strip_flag = 0;
1987 }
1988
1946 /* 1989 /*
1947 * Enabling MC-RLDRAM. After enabling the device, we timeout 1990 * Enabling MC-RLDRAM. After enabling the device, we timeout
1948 * for around 100ms, which is approximately the time required 1991 * for around 100ms, which is approximately the time required
@@ -3195,26 +3238,37 @@ static void alarm_intr_handler(struct s2io_nic *nic)
3195 * SUCCESS on success and FAILURE on failure. 3238 * SUCCESS on success and FAILURE on failure.
3196 */ 3239 */
3197 3240
3198static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit) 3241static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3242 int bit_state)
3199{ 3243{
3200 int ret = FAILURE, cnt = 0; 3244 int ret = FAILURE, cnt = 0, delay = 1;
3201 u64 val64; 3245 u64 val64;
3202 3246
3203 while (TRUE) { 3247 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3248 return FAILURE;
3249
3250 do {
3204 val64 = readq(addr); 3251 val64 = readq(addr);
3205 if (!(val64 & busy_bit)) { 3252 if (bit_state == S2IO_BIT_RESET) {
3206 ret = SUCCESS; 3253 if (!(val64 & busy_bit)) {
3207 break; 3254 ret = SUCCESS;
3255 break;
3256 }
3257 } else {
 3258 if (val64 & busy_bit) {
3259 ret = SUCCESS;
3260 break;
3261 }
3208 } 3262 }
3209 3263
3210 if(in_interrupt()) 3264 if(in_interrupt())
3211 mdelay(50); 3265 mdelay(delay);
3212 else 3266 else
3213 msleep(50); 3267 msleep(delay);
3214 3268
3215 if (cnt++ > 10) 3269 if (++cnt >= 10)
3216 break; 3270 delay = 50;
3217 } 3271 } while (cnt < 20);
3218 return ret; 3272 return ret;
3219} 3273}
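The rework also bounds the wait explicitly. Counting the sleeps above: ten polls at the initial 1 ms delay, then up to ten more at 50 ms once cnt reaches 10, for a worst case of

	10 * 1 ms + 10 * 50 ms = 510 ms

versus roughly 600 ms of fixed 50 ms polls in the old loop (twelve passes), with the short delays front-loaded so fast-completing commands return quickly.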
3220/* 3274/*
@@ -3340,6 +3394,9 @@ new_way:
3340 writeq(val64, &bar0->pcc_err_reg); 3394 writeq(val64, &bar0->pcc_err_reg);
3341 } 3395 }
3342 3396
 3397 /* restore the previously assigned MAC address */
3398 s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3399
3343 sp->device_enabled_once = FALSE; 3400 sp->device_enabled_once = FALSE;
3344} 3401}
3345 3402
@@ -4087,6 +4144,11 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4087 val64 &= ~GPIO_INT_MASK_LINK_UP; 4144 val64 &= ~GPIO_INT_MASK_LINK_UP;
4088 val64 |= GPIO_INT_MASK_LINK_DOWN; 4145 val64 |= GPIO_INT_MASK_LINK_DOWN;
4089 writeq(val64, &bar0->gpio_int_mask); 4146 writeq(val64, &bar0->gpio_int_mask);
4147
4148 /* turn off LED */
4149 val64 = readq(&bar0->adapter_control);
 4150 val64 &= ~ADAPTER_LED_ON;
4151 writeq(val64, &bar0->adapter_control);
4090 } 4152 }
4091 } 4153 }
4092 val64 = readq(&bar0->gpio_int_mask); 4154 val64 = readq(&bar0->gpio_int_mask);
@@ -4296,7 +4358,8 @@ static void s2io_set_multicast(struct net_device *dev)
4296 writeq(val64, &bar0->rmac_addr_cmd_mem); 4358 writeq(val64, &bar0->rmac_addr_cmd_mem);
4297 /* Wait till command completes */ 4359 /* Wait till command completes */
4298 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 4360 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4299 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING); 4361 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4362 S2IO_BIT_RESET);
4300 4363
4301 sp->m_cast_flg = 1; 4364 sp->m_cast_flg = 1;
4302 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET; 4365 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
@@ -4312,7 +4375,8 @@ static void s2io_set_multicast(struct net_device *dev)
4312 writeq(val64, &bar0->rmac_addr_cmd_mem); 4375 writeq(val64, &bar0->rmac_addr_cmd_mem);
4313 /* Wait till command completes */ 4376 /* Wait till command completes */
4314 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 4377 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4315 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING); 4378 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4379 S2IO_BIT_RESET);
4316 4380
4317 sp->m_cast_flg = 0; 4381 sp->m_cast_flg = 0;
4318 sp->all_multi_pos = 0; 4382 sp->all_multi_pos = 0;
@@ -4329,6 +4393,13 @@ static void s2io_set_multicast(struct net_device *dev)
4329 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 4393 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4330 writel((u32) (val64 >> 32), (add + 4)); 4394 writel((u32) (val64 >> 32), (add + 4));
4331 4395
4396 if (vlan_tag_strip != 1) {
4397 val64 = readq(&bar0->rx_pa_cfg);
4398 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4399 writeq(val64, &bar0->rx_pa_cfg);
4400 vlan_strip_flag = 0;
4401 }
4402
4332 val64 = readq(&bar0->mac_cfg); 4403 val64 = readq(&bar0->mac_cfg);
4333 sp->promisc_flg = 1; 4404 sp->promisc_flg = 1;
4334 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n", 4405 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
@@ -4344,6 +4415,13 @@ static void s2io_set_multicast(struct net_device *dev)
4344 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 4415 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4345 writel((u32) (val64 >> 32), (add + 4)); 4416 writel((u32) (val64 >> 32), (add + 4));
4346 4417
4418 if (vlan_tag_strip != 0) {
4419 val64 = readq(&bar0->rx_pa_cfg);
4420 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4421 writeq(val64, &bar0->rx_pa_cfg);
4422 vlan_strip_flag = 1;
4423 }
4424
4347 val64 = readq(&bar0->mac_cfg); 4425 val64 = readq(&bar0->mac_cfg);
4348 sp->promisc_flg = 0; 4426 sp->promisc_flg = 0;
4349 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", 4427 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
@@ -4378,7 +4456,8 @@ static void s2io_set_multicast(struct net_device *dev)
4378 4456
4379 /* Wait for command completes */ 4457 /* Wait for command completes */
4380 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 4458 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4381 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) { 4459 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4460 S2IO_BIT_RESET)) {
4382 DBG_PRINT(ERR_DBG, "%s: Adding ", 4461 DBG_PRINT(ERR_DBG, "%s: Adding ",
4383 dev->name); 4462 dev->name);
4384 DBG_PRINT(ERR_DBG, "Multicasts failed\n"); 4463 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
@@ -4409,7 +4488,8 @@ static void s2io_set_multicast(struct net_device *dev)
4409 4488
4410 /* Wait for command completes */ 4489 /* Wait for command completes */
4411 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 4490 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4412 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) { 4491 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4492 S2IO_BIT_RESET)) {
4413 DBG_PRINT(ERR_DBG, "%s: Adding ", 4493 DBG_PRINT(ERR_DBG, "%s: Adding ",
4414 dev->name); 4494 dev->name);
4415 DBG_PRINT(ERR_DBG, "Multicasts failed\n"); 4495 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
@@ -4435,6 +4515,7 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4435 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4515 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4436 register u64 val64, mac_addr = 0; 4516 register u64 val64, mac_addr = 0;
4437 int i; 4517 int i;
4518 u64 old_mac_addr = 0;
4438 4519
4439 /* 4520 /*
4440 * Set the new MAC address as the new unicast filter and reflect this 4521 * Set the new MAC address as the new unicast filter and reflect this
@@ -4444,6 +4525,22 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4444 for (i = 0; i < ETH_ALEN; i++) { 4525 for (i = 0; i < ETH_ALEN; i++) {
4445 mac_addr <<= 8; 4526 mac_addr <<= 8;
4446 mac_addr |= addr[i]; 4527 mac_addr |= addr[i];
4528 old_mac_addr <<= 8;
4529 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4530 }
4531
 4532 if (mac_addr == 0)
4533 return SUCCESS;
4534
4535 /* Update the internal structure with this new mac address */
4536 if(mac_addr != old_mac_addr) {
 4537 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4538 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4539 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4540 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4541 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4542 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4543 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4447 } 4544 }
4448 4545
4449 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr), 4546 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
@@ -4455,7 +4552,7 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4455 writeq(val64, &bar0->rmac_addr_cmd_mem); 4552 writeq(val64, &bar0->rmac_addr_cmd_mem);
4456 /* Wait till command completes */ 4553 /* Wait till command completes */
4457 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 4554 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4458 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) { 4555 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4459 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name); 4556 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4460 return FAILURE; 4557 return FAILURE;
4461 } 4558 }
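The set_mac_addr hunk packs the six address octets MSB-first into a u64 (addr[0] lands in bits 47..40) and, when the address actually changed, unpacks it back into def_mac_addr[0]. A self-contained round-trip sketch of that packing; note the sketch clears the array with ETH_ALEN bytes, since sizeof(ETH_ALEN) evaluates to sizeof(int) rather than 6:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

static uint64_t pack_mac(const uint8_t *addr)
{
	uint64_t mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mac = (mac << 8) | addr[i];   /* addr[0] ends up in bits 47..40 */
	return mac;
}

static void unpack_mac(uint64_t mac, uint8_t *addr)
{
	int i;

	memset(addr, 0, ETH_ALEN);            /* ETH_ALEN, not sizeof(ETH_ALEN) */
	for (i = ETH_ALEN - 1; i >= 0; i--, mac >>= 8)
		addr[i] = (uint8_t)mac;
}

int main(void)
{
	uint8_t in[ETH_ALEN] = { 0x00, 0x0c, 0x29, 0xaa, 0xbb, 0xcc };
	uint8_t out[ETH_ALEN];

	unpack_mac(pack_mac(in), out);
	printf("round trip %s\n", memcmp(in, out, ETH_ALEN) ? "failed" : "ok");
	return 0;
}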
@@ -4546,7 +4643,11 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4546 info->regdump_len = XENA_REG_SPACE; 4643 info->regdump_len = XENA_REG_SPACE;
4547 info->eedump_len = XENA_EEPROM_SPACE; 4644 info->eedump_len = XENA_EEPROM_SPACE;
4548 info->testinfo_len = S2IO_TEST_LEN; 4645 info->testinfo_len = S2IO_TEST_LEN;
4549 info->n_stats = S2IO_STAT_LEN; 4646
4647 if (sp->device_type == XFRAME_I_DEVICE)
4648 info->n_stats = XFRAME_I_STAT_LEN;
4649 else
4650 info->n_stats = XFRAME_II_STAT_LEN;
4550} 4651}
4551 4652
4552/** 4653/**
@@ -5568,22 +5669,30 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
5568 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt); 5669 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5569 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt); 5670 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5570 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt); 5671 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5571 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms); 5672
5572 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms); 5673 /* Enhanced statistics exist only for Hercules */
5573 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms); 5674 if(sp->device_type == XFRAME_II_DEVICE) {
5574 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms); 5675 tmp_stats[i++] =
5575 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms); 5676 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5576 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms); 5677 tmp_stats[i++] =
5577 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms); 5678 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5578 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms); 5679 tmp_stats[i++] =
5579 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard); 5680 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5580 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard); 5681 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5581 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard); 5682 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5582 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard); 5683 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5583 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard); 5684 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5584 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard); 5685 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5585 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard); 5686 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5586 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt); 5687 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5688 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5689 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5690 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5691 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5692 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5693 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5694 }
5695
5587 tmp_stats[i++] = 0; 5696 tmp_stats[i++] = 0;
5588 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs; 5697 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5589 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs; 5698 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
@@ -5663,18 +5772,42 @@ static int s2io_ethtool_self_test_count(struct net_device *dev)
5663static void s2io_ethtool_get_strings(struct net_device *dev, 5772static void s2io_ethtool_get_strings(struct net_device *dev,
5664 u32 stringset, u8 * data) 5773 u32 stringset, u8 * data)
5665{ 5774{
5775 int stat_size = 0;
5776 struct s2io_nic *sp = dev->priv;
5777
5666 switch (stringset) { 5778 switch (stringset) {
5667 case ETH_SS_TEST: 5779 case ETH_SS_TEST:
5668 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN); 5780 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5669 break; 5781 break;
5670 case ETH_SS_STATS: 5782 case ETH_SS_STATS:
5671 memcpy(data, &ethtool_stats_keys, 5783 stat_size = sizeof(ethtool_xena_stats_keys);
5672 sizeof(ethtool_stats_keys)); 5784 memcpy(data, &ethtool_xena_stats_keys,stat_size);
5785 if(sp->device_type == XFRAME_II_DEVICE) {
5786 memcpy(data + stat_size,
5787 &ethtool_enhanced_stats_keys,
5788 sizeof(ethtool_enhanced_stats_keys));
5789 stat_size += sizeof(ethtool_enhanced_stats_keys);
5790 }
5791
5792 memcpy(data + stat_size, &ethtool_driver_stats_keys,
5793 sizeof(ethtool_driver_stats_keys));
5673 } 5794 }
5674} 5795}
5675static int s2io_ethtool_get_stats_count(struct net_device *dev) 5796static int s2io_ethtool_get_stats_count(struct net_device *dev)
5676{ 5797{
5677 return (S2IO_STAT_LEN); 5798 struct s2io_nic *sp = dev->priv;
5799 int stat_count = 0;
5800 switch(sp->device_type) {
5801 case XFRAME_I_DEVICE:
5802 stat_count = XFRAME_I_STAT_LEN;
5803 break;
5804
5805 case XFRAME_II_DEVICE:
5806 stat_count = XFRAME_II_STAT_LEN;
5807 break;
5808 }
5809
5810 return stat_count;
5678} 5811}
5679 5812
5680static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) 5813static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
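ethtool calls get_strings() and get_stats_count() separately, so both callbacks must describe the same layout: the base Xframe keys, the Xframe II enhanced block when present, then the driver's software keys. A minimal sketch of keeping the two in sync; the key arrays and their contents are illustrative, not the driver's tables:

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32

static const char base_keys[][ETH_GSTRING_LEN]     = { "tx_frms", "rx_frms" };
static const char enhanced_keys[][ETH_GSTRING_LEN] = { "rmac_vlan_frms" };
static const char driver_keys[][ETH_GSTRING_LEN]   = { "single_ecc_errs" };

static int is_xframe2 = 1;   /* device type selects the enhanced block */

static int stats_count(void)
{
	int n = sizeof(base_keys) / ETH_GSTRING_LEN +
		sizeof(driver_keys) / ETH_GSTRING_LEN;

	if (is_xframe2)
		n += sizeof(enhanced_keys) / ETH_GSTRING_LEN;
	return n;
}

static void get_strings(char *data)
{
	int off = sizeof(base_keys);

	memcpy(data, base_keys, off);
	if (is_xframe2) {
		memcpy(data + off, enhanced_keys, sizeof(enhanced_keys));
		off += sizeof(enhanced_keys);
	}
	memcpy(data + off, driver_keys, sizeof(driver_keys));
}

int main(void)
{
	char table[sizeof(base_keys) + sizeof(enhanced_keys) + sizeof(driver_keys)];

	get_strings(table);
	printf("%d stats, first key \"%s\"\n", stats_count(), table);
	return 0;
}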
@@ -5909,7 +6042,7 @@ static void s2io_set_link(struct work_struct *work)
5909 clear_bit(0, &(nic->link_state)); 6042 clear_bit(0, &(nic->link_state));
5910 6043
5911out_unlock: 6044out_unlock:
5912 rtnl_lock(); 6045 rtnl_unlock();
5913} 6046}
5914 6047
5915static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, 6048static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
@@ -6066,10 +6199,13 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
6066 rx_blocks[j].rxds[k].virt_addr; 6199 rx_blocks[j].rxds[k].virt_addr;
6067 if(sp->rxd_mode >= RXD_MODE_3A) 6200 if(sp->rxd_mode >= RXD_MODE_3A)
6068 ba = &mac_control->rings[i].ba[j][k]; 6201 ba = &mac_control->rings[i].ba[j][k];
6069 set_rxd_buffer_pointer(sp, rxdp, ba, 6202 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6070 &skb,(u64 *)&temp0_64, 6203 &skb,(u64 *)&temp0_64,
6071 (u64 *)&temp1_64, 6204 (u64 *)&temp1_64,
6072 (u64 *)&temp2_64, size); 6205 (u64 *)&temp2_64,
6206 size) == ENOMEM) {
6207 return 0;
6208 }
6073 6209
6074 set_rxd_buffer_size(sp, rxdp, size); 6210 set_rxd_buffer_size(sp, rxdp, size);
6075 wmb(); 6211 wmb();
@@ -6112,7 +6248,7 @@ static int s2io_add_isr(struct s2io_nic * sp)
6112 } 6248 }
6113 } 6249 }
6114 if (sp->intr_type == MSI_X) { 6250 if (sp->intr_type == MSI_X) {
6115 int i; 6251 int i, msix_tx_cnt=0,msix_rx_cnt=0;
6116 6252
6117 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { 6253 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6118 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { 6254 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
@@ -6121,16 +6257,36 @@ static int s2io_add_isr(struct s2io_nic * sp)
6121 err = request_irq(sp->entries[i].vector, 6257 err = request_irq(sp->entries[i].vector,
6122 s2io_msix_fifo_handle, 0, sp->desc[i], 6258 s2io_msix_fifo_handle, 0, sp->desc[i],
6123 sp->s2io_entries[i].arg); 6259 sp->s2io_entries[i].arg);
6124 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i], 6260 /* If either data or addr is zero print it */
6125 (unsigned long long)sp->msix_info[i].addr); 6261 if(!(sp->msix_info[i].addr &&
6262 sp->msix_info[i].data)) {
6263 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6264 "Data:0x%lx\n",sp->desc[i],
6265 (unsigned long long)
6266 sp->msix_info[i].addr,
6267 (unsigned long)
6268 ntohl(sp->msix_info[i].data));
6269 } else {
6270 msix_tx_cnt++;
6271 }
6126 } else { 6272 } else {
6127 sprintf(sp->desc[i], "%s:MSI-X-%d-RX", 6273 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6128 dev->name, i); 6274 dev->name, i);
6129 err = request_irq(sp->entries[i].vector, 6275 err = request_irq(sp->entries[i].vector,
6130 s2io_msix_ring_handle, 0, sp->desc[i], 6276 s2io_msix_ring_handle, 0, sp->desc[i],
6131 sp->s2io_entries[i].arg); 6277 sp->s2io_entries[i].arg);
6132 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i], 6278 /* If either data or addr is zero print it */
6133 (unsigned long long)sp->msix_info[i].addr); 6279 if(!(sp->msix_info[i].addr &&
6280 sp->msix_info[i].data)) {
6281 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6282 "Data:0x%lx\n",sp->desc[i],
6283 (unsigned long long)
6284 sp->msix_info[i].addr,
6285 (unsigned long)
6286 ntohl(sp->msix_info[i].data));
6287 } else {
6288 msix_rx_cnt++;
6289 }
6134 } 6290 }
6135 if (err) { 6291 if (err) {
6136 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration " 6292 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
@@ -6140,6 +6296,8 @@ static int s2io_add_isr(struct s2io_nic * sp)
6140 } 6296 }
6141 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; 6297 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6142 } 6298 }
6299 printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
6300 printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
6143 } 6301 }
6144 if (sp->intr_type == INTA) { 6302 if (sp->intr_type == INTA) {
6145 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED, 6303 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
@@ -6567,7 +6725,8 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6567 6725
6568 if (!sp->lro) { 6726 if (!sp->lro) {
6569 skb->protocol = eth_type_trans(skb, dev); 6727 skb->protocol = eth_type_trans(skb, dev);
6570 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { 6728 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
6729 vlan_strip_flag)) {
6571 /* Queueing the vlan frame to the upper layer */ 6730 /* Queueing the vlan frame to the upper layer */
6572 if (napi) 6731 if (napi)
6573 vlan_hwaccel_receive_skb(skb, sp->vlgrp, 6732 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
@@ -6704,8 +6863,7 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6704 "Defaulting to INTA\n"); 6863 "Defaulting to INTA\n");
6705 *dev_intr_type = INTA; 6864 *dev_intr_type = INTA;
6706 } 6865 }
6707 if ( (rx_ring_num > 1) && (*dev_intr_type != INTA) ) 6866
6708 napi = 0;
6709 if (rx_ring_mode > 3) { 6867 if (rx_ring_mode > 3) {
6710 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n"); 6868 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6711 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n"); 6869 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
@@ -6715,6 +6873,37 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6715} 6873}
6716 6874
6717/** 6875/**
 6876 * rts_ds_steer - Receive traffic steering based on the IPv4 TOS
 6877 * or IPv6 Traffic Class field.
 6878 * @nic: device private variable
 6879 * Description: The function configures receive steering to the
 6880 * desired receive ring.
 6881 * Return Value: SUCCESS on success and
 6882 * FAILURE on failure (invalid codepoint or command timeout).
6883 */
6884static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
6885{
6886 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6887 register u64 val64 = 0;
6888
6889 if (ds_codepoint > 63)
6890 return FAILURE;
6891
6892 val64 = RTS_DS_MEM_DATA(ring);
6893 writeq(val64, &bar0->rts_ds_mem_data);
6894
6895 val64 = RTS_DS_MEM_CTRL_WE |
6896 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
6897 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
6898
6899 writeq(val64, &bar0->rts_ds_mem_ctrl);
6900
6901 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
6902 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
6903 S2IO_BIT_RESET);
6904}
6905
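rts_ds_steer() programs one slot of the 64-entry steering table per call: the ring number is staged in rts_ds_mem_data, the write is strobed with the DS codepoint as the table offset, and completion is polled with the new S2IO_BIT_RESET semantics. A hedged usage sketch; dscp_to_ring() is a hypothetical policy, and the driver call shown in the comment assumes a valid nic context:

#include <stdio.h>
#include <stdint.h>

static uint8_t dscp_to_ring(uint8_t dscp)
{
	return dscp >> 3;            /* example policy: 8 codepoints per ring */
}

int main(void)
{
	uint8_t tos  = 0xb8;         /* EF traffic as an IPv4 TOS byte */
	uint8_t dscp = tos >> 2;     /* drop the 2 ECN bits: DSCP 46 */

	printf("DSCP %u steered to ring %u\n", dscp, dscp_to_ring(dscp));

	/* In the driver, each mapping would be installed with
	 *     rts_ds_steer(nic, dscp, dscp_to_ring(dscp));
	 * checking the SUCCESS/FAILURE return of the strobed command. */
	return 0;
}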
6906/**
6718 * s2io_init_nic - Initialization of the adapter . 6907 * s2io_init_nic - Initialization of the adapter .
6719 * @pdev : structure containing the PCI related information of the device. 6908 * @pdev : structure containing the PCI related information of the device.
6720 * @pre: List of PCI devices supported by the driver listed in s2io_tbl. 6909 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
@@ -7008,13 +7197,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7008 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET); 7197 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7009 writeq(val64, &bar0->rmac_addr_cmd_mem); 7198 writeq(val64, &bar0->rmac_addr_cmd_mem);
7010 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 7199 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7011 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING); 7200 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7012 tmp64 = readq(&bar0->rmac_addr_data0_mem); 7201 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7013 mac_down = (u32) tmp64; 7202 mac_down = (u32) tmp64;
7014 mac_up = (u32) (tmp64 >> 32); 7203 mac_up = (u32) (tmp64 >> 32);
7015 7204
7016 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
7017
7018 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up); 7205 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7019 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8); 7206 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7020 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16); 7207 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 0de0c65f945a..803137ca4b6c 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -32,7 +32,8 @@
32#define FAILURE -1 32#define FAILURE -1
33#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL 33#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL
34#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100 34#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100
35 35#define S2IO_BIT_RESET 1
36#define S2IO_BIT_SET 2
36#define CHECKBIT(value, nbit) (value & (1 << nbit)) 37#define CHECKBIT(value, nbit) (value & (1 << nbit))
37 38
38/* Maximum time to flicker LED when asked to identify NIC using ethtool */ 39/* Maximum time to flicker LED when asked to identify NIC using ethtool */
@@ -296,6 +297,9 @@ struct stat_block {
296 struct xpakStat xpak_stat; 297 struct xpakStat xpak_stat;
297}; 298};
298 299
300/* Default value for 'vlan_strip_tag' configuration parameter */
301#define NO_STRIP_IN_PROMISC 2
302
299/* 303/*
300 * Structures representing different init time configuration 304 * Structures representing different init time configuration
301 * parameters of the NIC. 305 * parameters of the NIC.
@@ -1005,7 +1009,8 @@ static int s2io_set_swapper(struct s2io_nic * sp);
1005static void s2io_card_down(struct s2io_nic *nic); 1009static void s2io_card_down(struct s2io_nic *nic);
1006static int s2io_card_up(struct s2io_nic *nic); 1010static int s2io_card_up(struct s2io_nic *nic);
1007static int get_xena_rev_id(struct pci_dev *pdev); 1011static int get_xena_rev_id(struct pci_dev *pdev);
1008static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit); 1012static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
1013 int bit_state);
1009static int s2io_add_isr(struct s2io_nic * sp); 1014static int s2io_add_isr(struct s2io_nic * sp);
1010static void s2io_rem_isr(struct s2io_nic * sp); 1015static void s2io_rem_isr(struct s2io_nic * sp);
1011 1016
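The widened prototype adds a bit_state argument so callers can wait either for a busy bit to clear (S2IO_BIT_RESET, the usual strobe-completion case) or to become set (S2IO_BIT_SET). A minimal polling sketch under those assumed semantics; the real function reads the register with readq() and sleeps between polls:

#include <stdint.h>

#define S2IO_BIT_RESET 1
#define S2IO_BIT_SET   2
#define SUCCESS 0
#define FAILURE -1

static int wait_for_bit(volatile uint64_t *addr, uint64_t busy_bit,
			int bit_state, int max_polls)
{
	while (max_polls-- > 0) {
		uint64_t val = *addr;              /* stands in for readq(addr) */

		if (bit_state == S2IO_BIT_RESET) {
			if (!(val & busy_bit))
				return SUCCESS;    /* bit cleared: command done */
		} else {
			if (val & busy_bit)
				return SUCCESS;    /* bit set: condition reached */
		}
		/* the driver msleep()s here between polls */
	}
	return FAILURE;
}

int main(void)
{
	volatile uint64_t reg = 0;                 /* command already completed */

	return wait_for_bit(&reg, 1ULL << 15, S2IO_BIT_RESET, 5) == SUCCESS ? 0 : 1;
}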
@@ -1019,6 +1024,7 @@ static void queue_rx_frame(struct sk_buff *skb);
1019static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro); 1024static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
1020static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro, 1025static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
1021 struct sk_buff *skb, u32 tcp_len); 1026 struct sk_buff *skb, u32 tcp_len);
1027static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring);
1022 1028
1023#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size 1029#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
1024#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size 1030#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index a833e7f9757f..52ed522a234c 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -12,26 +12,15 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/ioport.h>
16#include <linux/socket.h>
17#include <linux/in.h>
18#include <linux/route.h>
19#include <linux/slab.h> 15#include <linux/slab.h>
20#include <linux/string.h> 16#include <linux/string.h>
21#include <linux/delay.h> 17#include <linux/delay.h>
22#include <linux/netdevice.h> 18#include <linux/netdevice.h>
23#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
24#include <linux/skbuff.h> 20#include <linux/skbuff.h>
25#include <linux/bitops.h>
26 21
27#include <asm/byteorder.h>
28#include <asm/io.h>
29#include <asm/system.h>
30#include <asm/page.h>
31#include <asm/pgtable.h>
32#include <asm/sgi/hpc3.h> 22#include <asm/sgi/hpc3.h>
33#include <asm/sgi/ip22.h> 23#include <asm/sgi/ip22.h>
34#include <asm/sgialib.h>
35 24
36#include "sgiseeq.h" 25#include "sgiseeq.h"
37 26
diff --git a/drivers/net/skfp/cfm.c b/drivers/net/skfp/cfm.c
index 4c8aaa762333..5310d39b5737 100644
--- a/drivers/net/skfp/cfm.c
+++ b/drivers/net/skfp/cfm.c
@@ -73,7 +73,7 @@ static const char * const cfm_events[] = {
73/* 73/*
74 * map from state to downstream port type 74 * map from state to downstream port type
75 */ 75 */
76static const u_char cf_to_ptype[] = { 76static const unsigned char cf_to_ptype[] = {
77 TNONE,TNONE,TNONE,TNONE,TNONE, 77 TNONE,TNONE,TNONE,TNONE,TNONE,
78 TNONE,TB,TB,TS, 78 TNONE,TB,TB,TS,
79 TA,TB,TS,TB 79 TA,TB,TS,TB
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index c3d2e0a2c4e6..eea75a401b0c 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -77,13 +77,13 @@ static const struct pci_device_id skge_id_table[] = {
77 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) }, 77 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
78 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) }, 78 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
79 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) }, 79 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
80 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), }, 80 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T) },
81 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */ 81 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */
82 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, 82 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
83 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ 83 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
84 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) }, 84 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
85 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) }, 85 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
86 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, }, 86 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 },
87 { 0 } 87 { 0 }
88}; 88};
89MODULE_DEVICE_TABLE(pci, skge_id_table); 89MODULE_DEVICE_TABLE(pci, skge_id_table);
@@ -2767,6 +2767,17 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu)
2767 return err; 2767 return err;
2768} 2768}
2769 2769
2770static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
2771
2772static void genesis_add_filter(u8 filter[8], const u8 *addr)
2773{
2774 u32 crc, bit;
2775
2776 crc = ether_crc_le(ETH_ALEN, addr);
2777 bit = ~crc & 0x3f;
2778 filter[bit/8] |= 1 << (bit%8);
2779}
2780
2770static void genesis_set_multicast(struct net_device *dev) 2781static void genesis_set_multicast(struct net_device *dev)
2771{ 2782{
2772 struct skge_port *skge = netdev_priv(dev); 2783 struct skge_port *skge = netdev_priv(dev);
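Both add_filter helpers hash a multicast address with an Ethernet CRC and set a single bit in the 64-bit hash filter: GENESIS uses the low 6 bits of the bit-inverted little-endian CRC, Yukon the low 6 bits of the big-endian CRC. The pause multicast address 01:80:c2:00:00:01 is now hashed in whenever the link partner may send pause frames, so flow-control frames pass the filter. A runnable sketch of the GENESIS variant, using the reflected CRC-32 that ether_crc_le() computes:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

static uint32_t ether_crc_le(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (len-- > 0) {
		uint8_t octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1) {
			if ((crc ^ octet) & 1)
				crc = (crc >> 1) ^ 0xedb88320;
			else
				crc >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	static const uint8_t pause_mc_addr[ETH_ALEN] =
		{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
	uint8_t filter[8] = { 0 };
	uint32_t bit = ~ether_crc_le(ETH_ALEN, pause_mc_addr) & 0x3f;

	filter[bit / 8] |= 1 << (bit % 8);         /* as in genesis_add_filter() */
	printf("pause frames hash to filter bit %u\n", bit);
	return 0;
}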
@@ -2788,24 +2799,33 @@ static void genesis_set_multicast(struct net_device *dev)
2788 memset(filter, 0xff, sizeof(filter)); 2799 memset(filter, 0xff, sizeof(filter));
2789 else { 2800 else {
2790 memset(filter, 0, sizeof(filter)); 2801 memset(filter, 0, sizeof(filter));
2791 for (i = 0; list && i < count; i++, list = list->next) { 2802
2792 u32 crc, bit; 2803 if (skge->flow_status == FLOW_STAT_REM_SEND
2793 crc = ether_crc_le(ETH_ALEN, list->dmi_addr); 2804 || skge->flow_status == FLOW_STAT_SYMMETRIC)
2794 bit = ~crc & 0x3f; 2805 genesis_add_filter(filter, pause_mc_addr);
2795 filter[bit/8] |= 1 << (bit%8); 2806
2796 } 2807 for (i = 0; list && i < count; i++, list = list->next)
2808 genesis_add_filter(filter, list->dmi_addr);
2797 } 2809 }
2798 2810
2799 xm_write32(hw, port, XM_MODE, mode); 2811 xm_write32(hw, port, XM_MODE, mode);
2800 xm_outhash(hw, port, XM_HSM, filter); 2812 xm_outhash(hw, port, XM_HSM, filter);
2801} 2813}
2802 2814
2815static void yukon_add_filter(u8 filter[8], const u8 *addr)
2816{
2817 u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
2818 filter[bit/8] |= 1 << (bit%8);
2819}
2820
2803static void yukon_set_multicast(struct net_device *dev) 2821static void yukon_set_multicast(struct net_device *dev)
2804{ 2822{
2805 struct skge_port *skge = netdev_priv(dev); 2823 struct skge_port *skge = netdev_priv(dev);
2806 struct skge_hw *hw = skge->hw; 2824 struct skge_hw *hw = skge->hw;
2807 int port = skge->port; 2825 int port = skge->port;
2808 struct dev_mc_list *list = dev->mc_list; 2826 struct dev_mc_list *list = dev->mc_list;
2827 int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND
2828 || skge->flow_status == FLOW_STAT_SYMMETRIC);
2809 u16 reg; 2829 u16 reg;
2810 u8 filter[8]; 2830 u8 filter[8];
2811 2831
@@ -2818,16 +2838,17 @@ static void yukon_set_multicast(struct net_device *dev)
2818 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 2838 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2819 else if (dev->flags & IFF_ALLMULTI) /* all multicast */ 2839 else if (dev->flags & IFF_ALLMULTI) /* all multicast */
2820 memset(filter, 0xff, sizeof(filter)); 2840 memset(filter, 0xff, sizeof(filter));
2821 else if (dev->mc_count == 0) /* no multicast */ 2841 else if (dev->mc_count == 0 && !rx_pause)/* no multicast */
2822 reg &= ~GM_RXCR_MCF_ENA; 2842 reg &= ~GM_RXCR_MCF_ENA;
2823 else { 2843 else {
2824 int i; 2844 int i;
2825 reg |= GM_RXCR_MCF_ENA; 2845 reg |= GM_RXCR_MCF_ENA;
2826 2846
2827 for (i = 0; list && i < dev->mc_count; i++, list = list->next) { 2847 if (rx_pause)
2828 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f; 2848 yukon_add_filter(filter, pause_mc_addr);
2829 filter[bit/8] |= 1 << (bit%8); 2849
2830 } 2850 for (i = 0; list && i < dev->mc_count; i++, list = list->next)
2851 yukon_add_filter(filter, list->dmi_addr);
2831 } 2852 }
2832 2853
2833 2854
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 17b1b479dff5..e9354dfa7e9a 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -1849,8 +1849,7 @@ enum {
1849 GMR_FS_JABBER, 1849 GMR_FS_JABBER,
1850/* Rx GMAC FIFO Flush Mask (default) */ 1850/* Rx GMAC FIFO Flush Mask (default) */
1851 RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR | 1851 RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR |
1852 GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE | 1852 GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER,
1853 GMR_FS_JABBER,
1854}; 1853};
1855 1854
1856/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ 1855/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 64ed8ff5b03a..3b91af89e4c7 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Network device driver for Cell Processor-Based Blade 2 * Network device driver for Cell Processor-Based Blade and Celleb platform
3 * 3 *
4 * (C) Copyright IBM Corp. 2005 4 * (C) Copyright IBM Corp. 2005
5 * (C) Copyright 2006 TOSHIBA CORPORATION
5 * 6 *
6 * Authors : Utz Bacher <utz.bacher@de.ibm.com> 7 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
7 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com> 8 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
@@ -166,6 +167,41 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
166} 167}
167 168
168/** 169/**
170 * spider_net_setup_aneg - initial auto-negotiation setup
171 * @card: device structure
172 **/
173static void
174spider_net_setup_aneg(struct spider_net_card *card)
175{
176 struct mii_phy *phy = &card->phy;
177 u32 advertise = 0;
178 u16 bmcr, bmsr, stat1000, estat;
179
180 bmcr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMCR);
181 bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
182 stat1000 = spider_net_read_phy(card->netdev, phy->mii_id, MII_STAT1000);
183 estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
184
185 if (bmsr & BMSR_10HALF)
186 advertise |= ADVERTISED_10baseT_Half;
187 if (bmsr & BMSR_10FULL)
188 advertise |= ADVERTISED_10baseT_Full;
189 if (bmsr & BMSR_100HALF)
190 advertise |= ADVERTISED_100baseT_Half;
191 if (bmsr & BMSR_100FULL)
192 advertise |= ADVERTISED_100baseT_Full;
193
194 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
195 advertise |= SUPPORTED_1000baseT_Full;
196 if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
197 advertise |= SUPPORTED_1000baseT_Half;
198
199 mii_phy_probe(phy, phy->mii_id);
200 phy->def->ops->setup_aneg(phy, advertise);
201
202}
203
204/**
169 * spider_net_rx_irq_off - switch off rx irq on this spider card 205 * spider_net_rx_irq_off - switch off rx irq on this spider card
170 * @card: device structure 206 * @card: device structure
171 * 207 *
@@ -263,9 +299,9 @@ spider_net_get_mac_address(struct net_device *netdev)
263 * returns the status as in the dmac_cmd_status field of the descriptor 299 * returns the status as in the dmac_cmd_status field of the descriptor
264 */ 300 */
265static inline int 301static inline int
266spider_net_get_descr_status(struct spider_net_descr *descr) 302spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
267{ 303{
268 return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK; 304 return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
269} 305}
270 306
271/** 307/**
@@ -283,12 +319,12 @@ spider_net_free_chain(struct spider_net_card *card,
283 descr = chain->ring; 319 descr = chain->ring;
284 do { 320 do {
285 descr->bus_addr = 0; 321 descr->bus_addr = 0;
286 descr->next_descr_addr = 0; 322 descr->hwdescr->next_descr_addr = 0;
287 descr = descr->next; 323 descr = descr->next;
288 } while (descr != chain->ring); 324 } while (descr != chain->ring);
289 325
290 dma_free_coherent(&card->pdev->dev, chain->num_desc, 326 dma_free_coherent(&card->pdev->dev, chain->num_desc,
291 chain->ring, chain->dma_addr); 327 chain->hwring, chain->dma_addr);
292} 328}
293 329
294/** 330/**
@@ -307,31 +343,34 @@ spider_net_init_chain(struct spider_net_card *card,
307{ 343{
308 int i; 344 int i;
309 struct spider_net_descr *descr; 345 struct spider_net_descr *descr;
346 struct spider_net_hw_descr *hwdescr;
310 dma_addr_t buf; 347 dma_addr_t buf;
311 size_t alloc_size; 348 size_t alloc_size;
312 349
313 alloc_size = chain->num_desc * sizeof (struct spider_net_descr); 350 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
314 351
315 chain->ring = dma_alloc_coherent(&card->pdev->dev, alloc_size, 352 chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
316 &chain->dma_addr, GFP_KERNEL); 353 &chain->dma_addr, GFP_KERNEL);
317 354
318 if (!chain->ring) 355 if (!chain->hwring)
319 return -ENOMEM; 356 return -ENOMEM;
320 357
321 descr = chain->ring; 358 memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));
322 memset(descr, 0, alloc_size);
323 359
324 /* Set up the hardware pointers in each descriptor */ 360 /* Set up the hardware pointers in each descriptor */
361 descr = chain->ring;
362 hwdescr = chain->hwring;
325 buf = chain->dma_addr; 363 buf = chain->dma_addr;
326 for (i=0; i < chain->num_desc; i++, descr++) { 364 for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
327 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 365 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
366 hwdescr->next_descr_addr = 0;
328 367
368 descr->hwdescr = hwdescr;
329 descr->bus_addr = buf; 369 descr->bus_addr = buf;
330 descr->next_descr_addr = 0;
331 descr->next = descr + 1; 370 descr->next = descr + 1;
332 descr->prev = descr - 1; 371 descr->prev = descr - 1;
333 372
334 buf += sizeof(struct spider_net_descr); 373 buf += sizeof(struct spider_net_hw_descr);
335 } 374 }
336 /* do actual circular list */ 375 /* do actual circular list */
337 (descr-1)->next = chain->ring; 376 (descr-1)->next = chain->ring;
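The chain rework splits each descriptor in two: a device-visible struct that lives in the DMA-coherent ring and carries only fields the chip reads, and a driver-side struct holding the skb, the circular next/prev links, and a pointer to its hardware twin. A simplified sketch of that pairing; field widths and the flat-array allocation are reduced for illustration:

#include <stdint.h>

struct hw_descr {                   /* device-visible, DMA-coherent memory */
	uint32_t buf_addr;
	uint32_t buf_size;
	uint32_t next_descr_addr;
	uint32_t dmac_cmd_status;
};

struct sw_descr {                   /* driver-only bookkeeping */
	struct hw_descr *hwdescr;
	struct sw_descr *next, *prev;
	void *skb;
	uint32_t bus_addr;
};

static void init_chain(struct sw_descr *ring, struct hw_descr *hwring,
		       int num, uint32_t dma_base)
{
	int i;

	for (i = 0; i < num; i++) {
		ring[i].hwdescr  = &hwring[i];   /* 1:1 software/hardware pairing */
		ring[i].bus_addr = dma_base + i * sizeof(struct hw_descr);
		ring[i].next = &ring[(i + 1) % num];          /* circular list */
		ring[i].prev = &ring[(i + num - 1) % num];
		hwring[i].next_descr_addr = 0;   /* chained when handed to the DMA */
	}
}

int main(void)
{
	static struct sw_descr ring[4];
	static struct hw_descr hwring[4];

	init_chain(ring, hwring, 4, 0x10000);
	return 0;
}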
@@ -357,10 +396,11 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
357 descr = card->rx_chain.head; 396 descr = card->rx_chain.head;
358 do { 397 do {
359 if (descr->skb) { 398 if (descr->skb) {
360 dev_kfree_skb(descr->skb); 399 pci_unmap_single(card->pdev, descr->hwdescr->buf_addr,
361 pci_unmap_single(card->pdev, descr->buf_addr,
362 SPIDER_NET_MAX_FRAME, 400 SPIDER_NET_MAX_FRAME,
363 PCI_DMA_BIDIRECTIONAL); 401 PCI_DMA_BIDIRECTIONAL);
402 dev_kfree_skb(descr->skb);
403 descr->skb = NULL;
364 } 404 }
365 descr = descr->next; 405 descr = descr->next;
366 } while (descr != card->rx_chain.head); 406 } while (descr != card->rx_chain.head);
@@ -380,6 +420,7 @@ static int
380spider_net_prepare_rx_descr(struct spider_net_card *card, 420spider_net_prepare_rx_descr(struct spider_net_card *card,
381 struct spider_net_descr *descr) 421 struct spider_net_descr *descr)
382{ 422{
423 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
383 dma_addr_t buf; 424 dma_addr_t buf;
384 int offset; 425 int offset;
385 int bufsize; 426 int bufsize;
@@ -398,11 +439,11 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
398 card->spider_stats.alloc_rx_skb_error++; 439 card->spider_stats.alloc_rx_skb_error++;
399 return -ENOMEM; 440 return -ENOMEM;
400 } 441 }
401 descr->buf_size = bufsize; 442 hwdescr->buf_size = bufsize;
402 descr->result_size = 0; 443 hwdescr->result_size = 0;
403 descr->valid_size = 0; 444 hwdescr->valid_size = 0;
404 descr->data_status = 0; 445 hwdescr->data_status = 0;
405 descr->data_error = 0; 446 hwdescr->data_error = 0;
406 447
407 offset = ((unsigned long)descr->skb->data) & 448 offset = ((unsigned long)descr->skb->data) &
408 (SPIDER_NET_RXBUF_ALIGN - 1); 449 (SPIDER_NET_RXBUF_ALIGN - 1);
@@ -411,21 +452,22 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
411 /* iommu-map the skb */ 452 /* iommu-map the skb */
412 buf = pci_map_single(card->pdev, descr->skb->data, 453 buf = pci_map_single(card->pdev, descr->skb->data,
413 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 454 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
414 descr->buf_addr = buf;
415 if (pci_dma_mapping_error(buf)) { 455 if (pci_dma_mapping_error(buf)) {
416 dev_kfree_skb_any(descr->skb); 456 dev_kfree_skb_any(descr->skb);
457 descr->skb = NULL;
417 if (netif_msg_rx_err(card) && net_ratelimit()) 458 if (netif_msg_rx_err(card) && net_ratelimit())
418 pr_err("Could not iommu-map rx buffer\n"); 459 pr_err("Could not iommu-map rx buffer\n");
419 card->spider_stats.rx_iommu_map_error++; 460 card->spider_stats.rx_iommu_map_error++;
420 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 461 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
421 } else { 462 } else {
422 descr->next_descr_addr = 0; 463 hwdescr->buf_addr = buf;
464 hwdescr->next_descr_addr = 0;
423 wmb(); 465 wmb();
424 descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | 466 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
425 SPIDER_NET_DMAC_NOINTR_COMPLETE; 467 SPIDER_NET_DMAC_NOINTR_COMPLETE;
426 468
427 wmb(); 469 wmb();
428 descr->prev->next_descr_addr = descr->bus_addr; 470 descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
429 } 471 }
430 472
431 return 0; 473 return 0;
@@ -481,7 +523,7 @@ spider_net_refill_rx_chain(struct spider_net_card *card)
481 if (!spin_trylock_irqsave(&chain->lock, flags)) 523 if (!spin_trylock_irqsave(&chain->lock, flags))
482 return; 524 return;
483 525
484 while (spider_net_get_descr_status(chain->head) == 526 while (spider_net_get_descr_status(chain->head->hwdescr) ==
485 SPIDER_NET_DESCR_NOT_IN_USE) { 527 SPIDER_NET_DESCR_NOT_IN_USE) {
486 if (spider_net_prepare_rx_descr(card, chain->head)) 528 if (spider_net_prepare_rx_descr(card, chain->head))
487 break; 529 break;
@@ -642,7 +684,9 @@ static int
642spider_net_prepare_tx_descr(struct spider_net_card *card, 684spider_net_prepare_tx_descr(struct spider_net_card *card,
643 struct sk_buff *skb) 685 struct sk_buff *skb)
644{ 686{
687 struct spider_net_descr_chain *chain = &card->tx_chain;
645 struct spider_net_descr *descr; 688 struct spider_net_descr *descr;
689 struct spider_net_hw_descr *hwdescr;
646 dma_addr_t buf; 690 dma_addr_t buf;
647 unsigned long flags; 691 unsigned long flags;
648 692
@@ -655,32 +699,39 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
655 return -ENOMEM; 699 return -ENOMEM;
656 } 700 }
657 701
658 spin_lock_irqsave(&card->tx_chain.lock, flags); 702 spin_lock_irqsave(&chain->lock, flags);
659 descr = card->tx_chain.head; 703 descr = card->tx_chain.head;
660 card->tx_chain.head = descr->next; 704 if (descr->next == chain->tail->prev) {
705 spin_unlock_irqrestore(&chain->lock, flags);
706 pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
707 return -ENOMEM;
708 }
709 hwdescr = descr->hwdescr;
710 chain->head = descr->next;
661 711
662 descr->buf_addr = buf;
663 descr->buf_size = skb->len;
664 descr->next_descr_addr = 0;
665 descr->skb = skb; 712 descr->skb = skb;
666 descr->data_status = 0; 713 hwdescr->buf_addr = buf;
714 hwdescr->buf_size = skb->len;
715 hwdescr->next_descr_addr = 0;
716 hwdescr->data_status = 0;
667 717
668 descr->dmac_cmd_status = 718 hwdescr->dmac_cmd_status =
669 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; 719 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
670 spin_unlock_irqrestore(&card->tx_chain.lock, flags); 720 spin_unlock_irqrestore(&chain->lock, flags);
671 721
672 if (skb->protocol == htons(ETH_P_IP)) 722 if (skb->protocol == htons(ETH_P_IP))
673 switch (skb->nh.iph->protocol) { 723 switch (skb->nh.iph->protocol) {
674 case IPPROTO_TCP: 724 case IPPROTO_TCP:
675 descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; 725 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
676 break; 726 break;
677 case IPPROTO_UDP: 727 case IPPROTO_UDP:
678 descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP; 728 hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
679 break; 729 break;
680 } 730 }
681 731
682 /* Chain the bus address, so that the DMA engine finds this descr. */ 732 /* Chain the bus address, so that the DMA engine finds this descr. */
683 descr->prev->next_descr_addr = descr->bus_addr; 733 wmb();
734 descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
684 735
685 card->netdev->trans_start = jiffies; /* set netdev watchdog timer */ 736 card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
686 return 0; 737 return 0;
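prepare_tx_descr() now detects a full ring itself: if advancing head would reach tail->prev, the descriptor is left untouched and the packet is bounced back to the stack, keeping two slots of slack between producer and consumer so the DMA engine never chases a half-written descriptor. A toy index-based version of that full test:

#include <stdio.h>

#define RING 8

int main(void)
{
	int head = 0, tail = 0, queued = 0, i;

	for (i = 0; i < 2 * RING; i++) {
		/* full when head->next would be tail->prev */
		if ((head + 1) % RING == (tail + RING - 1) % RING) {
			printf("ring full after %d enqueues\n", queued);
			break;
		}
		head = (head + 1) % RING;    /* enqueue one descriptor */
		queued++;
	}
	return 0;
}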
@@ -689,16 +740,17 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
689static int 740static int
690spider_net_set_low_watermark(struct spider_net_card *card) 741spider_net_set_low_watermark(struct spider_net_card *card)
691{ 742{
743 struct spider_net_descr *descr = card->tx_chain.tail;
744 struct spider_net_hw_descr *hwdescr;
692 unsigned long flags; 745 unsigned long flags;
693 int status; 746 int status;
694 int cnt=0; 747 int cnt=0;
695 int i; 748 int i;
696 struct spider_net_descr *descr = card->tx_chain.tail;
697 749
698 /* Measure the length of the queue. Measurement does not 750 /* Measure the length of the queue. Measurement does not
699 * need to be precise -- does not need a lock. */ 751 * need to be precise -- does not need a lock. */
700 while (descr != card->tx_chain.head) { 752 while (descr != card->tx_chain.head) {
701 status = descr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE; 753 status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
702 if (status == SPIDER_NET_DESCR_NOT_IN_USE) 754 if (status == SPIDER_NET_DESCR_NOT_IN_USE)
703 break; 755 break;
704 descr = descr->next; 756 descr = descr->next;
@@ -717,10 +769,12 @@ spider_net_set_low_watermark(struct spider_net_card *card)
717 769
718 /* Set the new watermark, clear the old watermark */ 770 /* Set the new watermark, clear the old watermark */
719 spin_lock_irqsave(&card->tx_chain.lock, flags); 771 spin_lock_irqsave(&card->tx_chain.lock, flags);
720 descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG; 772 descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
721 if (card->low_watermark && card->low_watermark != descr) 773 if (card->low_watermark && card->low_watermark != descr) {
722 card->low_watermark->dmac_cmd_status = 774 hwdescr = card->low_watermark->hwdescr;
723 card->low_watermark->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG; 775 hwdescr->dmac_cmd_status =
776 hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
777 }
724 card->low_watermark = descr; 778 card->low_watermark = descr;
725 spin_unlock_irqrestore(&card->tx_chain.lock, flags); 779 spin_unlock_irqrestore(&card->tx_chain.lock, flags);
726 return cnt; 780 return cnt;
@@ -743,16 +797,22 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
743{ 797{
744 struct spider_net_descr_chain *chain = &card->tx_chain; 798 struct spider_net_descr_chain *chain = &card->tx_chain;
745 struct spider_net_descr *descr; 799 struct spider_net_descr *descr;
800 struct spider_net_hw_descr *hwdescr;
746 struct sk_buff *skb; 801 struct sk_buff *skb;
747 u32 buf_addr; 802 u32 buf_addr;
748 unsigned long flags; 803 unsigned long flags;
749 int status; 804 int status;
750 805
751 while (chain->tail != chain->head) { 806 while (1) {
752 spin_lock_irqsave(&chain->lock, flags); 807 spin_lock_irqsave(&chain->lock, flags);
808 if (chain->tail == chain->head) {
809 spin_unlock_irqrestore(&chain->lock, flags);
810 return 0;
811 }
753 descr = chain->tail; 812 descr = chain->tail;
813 hwdescr = descr->hwdescr;
754 814
755 status = spider_net_get_descr_status(descr); 815 status = spider_net_get_descr_status(hwdescr);
756 switch (status) { 816 switch (status) {
757 case SPIDER_NET_DESCR_COMPLETE: 817 case SPIDER_NET_DESCR_COMPLETE:
758 card->netdev_stats.tx_packets++; 818 card->netdev_stats.tx_packets++;
@@ -788,9 +848,10 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
788 } 848 }
789 849
790 chain->tail = descr->next; 850 chain->tail = descr->next;
791 descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; 851 hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
792 skb = descr->skb; 852 skb = descr->skb;
793 buf_addr = descr->buf_addr; 853 descr->skb = NULL;
854 buf_addr = hwdescr->buf_addr;
794 spin_unlock_irqrestore(&chain->lock, flags); 855 spin_unlock_irqrestore(&chain->lock, flags);
795 856
796 /* unmap the skb */ 857 /* unmap the skb */
@@ -826,7 +887,7 @@ spider_net_kick_tx_dma(struct spider_net_card *card)
826 887
827 descr = card->tx_chain.tail; 888 descr = card->tx_chain.tail;
828 for (;;) { 889 for (;;) {
829 if (spider_net_get_descr_status(descr) == 890 if (spider_net_get_descr_status(descr->hwdescr) ==
830 SPIDER_NET_DESCR_CARDOWNED) { 891 SPIDER_NET_DESCR_CARDOWNED) {
831 spider_net_write_reg(card, SPIDER_NET_GDTDCHA, 892 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
832 descr->bus_addr); 893 descr->bus_addr);
@@ -855,13 +916,10 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
855{ 916{
856 int cnt; 917 int cnt;
857 struct spider_net_card *card = netdev_priv(netdev); 918 struct spider_net_card *card = netdev_priv(netdev);
858 struct spider_net_descr_chain *chain = &card->tx_chain;
859 919
860 spider_net_release_tx_chain(card, 0); 920 spider_net_release_tx_chain(card, 0);
861 921
862 if ((chain->head->next == chain->tail->prev) || 922 if (spider_net_prepare_tx_descr(card, skb) != 0) {
863 (spider_net_prepare_tx_descr(card, skb) != 0)) {
864
865 card->netdev_stats.tx_dropped++; 923 card->netdev_stats.tx_dropped++;
866 netif_stop_queue(netdev); 924 netif_stop_queue(netdev);
867 return NETDEV_TX_BUSY; 925 return NETDEV_TX_BUSY;
@@ -922,17 +980,18 @@ static void
922spider_net_pass_skb_up(struct spider_net_descr *descr, 980spider_net_pass_skb_up(struct spider_net_descr *descr,
923 struct spider_net_card *card) 981 struct spider_net_card *card)
924{ 982{
983 struct spider_net_hw_descr *hwdescr= descr->hwdescr;
925 struct sk_buff *skb; 984 struct sk_buff *skb;
926 struct net_device *netdev; 985 struct net_device *netdev;
927 u32 data_status, data_error; 986 u32 data_status, data_error;
928 987
929 data_status = descr->data_status; 988 data_status = hwdescr->data_status;
930 data_error = descr->data_error; 989 data_error = hwdescr->data_error;
931 netdev = card->netdev; 990 netdev = card->netdev;
932 991
933 skb = descr->skb; 992 skb = descr->skb;
934 skb->dev = netdev; 993 skb->dev = netdev;
935 skb_put(skb, descr->valid_size); 994 skb_put(skb, hwdescr->valid_size);
936 995
937 /* the card seems to add 2 bytes of junk in front 996 /* the card seems to add 2 bytes of junk in front
938 * of the ethernet frame */ 997 * of the ethernet frame */
@@ -994,23 +1053,25 @@ static void show_rx_chain(struct spider_net_card *card)
994#endif 1053#endif
995 1054
996/** 1055/**
997 * spider_net_decode_one_descr - processes an rx descriptor 1056 * spider_net_decode_one_descr - processes an RX descriptor
998 * @card: card structure 1057 * @card: card structure
999 * 1058 *
1000 * Returns 1 if a packet has been sent to the stack, otherwise 0 1059 * Returns 1 if a packet has been sent to the stack, otherwise 0.
1001 * 1060 *
1002 * Processes an rx descriptor by iommu-unmapping the data buffer and passing 1061 * Processes an RX descriptor by iommu-unmapping the data buffer
1003 * the packet up to the stack. This function is called in softirq 1062 * and passing the packet up to the stack. This function is called
1004 * context, e.g. either bottom half from interrupt or NAPI polling context 1063 * in softirq context, e.g. either bottom half from interrupt or
1064 * NAPI polling context.
1005 */ 1065 */
1006static int 1066static int
1007spider_net_decode_one_descr(struct spider_net_card *card) 1067spider_net_decode_one_descr(struct spider_net_card *card)
1008{ 1068{
1009 struct spider_net_descr_chain *chain = &card->rx_chain; 1069 struct spider_net_descr_chain *chain = &card->rx_chain;
1010 struct spider_net_descr *descr = chain->tail; 1070 struct spider_net_descr *descr = chain->tail;
1071 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
1011 int status; 1072 int status;
1012 1073
1013 status = spider_net_get_descr_status(descr); 1074 status = spider_net_get_descr_status(hwdescr);
1014 1075
1015 /* Nothing in the descriptor, or ring must be empty */ 1076 /* Nothing in the descriptor, or ring must be empty */
1016 if ((status == SPIDER_NET_DESCR_CARDOWNED) || 1077 if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
@@ -1021,7 +1082,7 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1021 chain->tail = descr->next; 1082 chain->tail = descr->next;
1022 1083
1023 /* unmap descriptor */ 1084 /* unmap descriptor */
1024 pci_unmap_single(card->pdev, descr->buf_addr, 1085 pci_unmap_single(card->pdev, hwdescr->buf_addr,
1025 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 1086 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1026 1087
1027 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) || 1088 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
@@ -1037,34 +1098,33 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1037 if ( (status != SPIDER_NET_DESCR_COMPLETE) && 1098 if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1038 (status != SPIDER_NET_DESCR_FRAME_END) ) { 1099 (status != SPIDER_NET_DESCR_FRAME_END) ) {
1039 if (netif_msg_rx_err(card)) 1100 if (netif_msg_rx_err(card))
1040 pr_err("%s: RX descriptor with unkown state %d\n", 1101 pr_err("%s: RX descriptor with unknown state %d\n",
1041 card->netdev->name, status); 1102 card->netdev->name, status);
1042 card->spider_stats.rx_desc_unk_state++; 1103 card->spider_stats.rx_desc_unk_state++;
1043 goto bad_desc; 1104 goto bad_desc;
1044 } 1105 }
1045 1106
1046 /* The cases we'll throw away the packet immediately */ 1107 /* The cases we'll throw away the packet immediately */
1047 if (descr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) { 1108 if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1048 if (netif_msg_rx_err(card)) 1109 if (netif_msg_rx_err(card))
1049 pr_err("%s: error in received descriptor found, " 1110 pr_err("%s: error in received descriptor found, "
1050 "data_status=x%08x, data_error=x%08x\n", 1111 "data_status=x%08x, data_error=x%08x\n",
1051 card->netdev->name, 1112 card->netdev->name,
1052 descr->data_status, descr->data_error); 1113 hwdescr->data_status, hwdescr->data_error);
1053 goto bad_desc; 1114 goto bad_desc;
1054 } 1115 }
1055 1116
1056 if (descr->dmac_cmd_status & 0xfefe) { 1117 if (hwdescr->dmac_cmd_status & 0xfefe) {
1057 pr_err("%s: bad status, cmd_status=x%08x\n", 1118 pr_err("%s: bad status, cmd_status=x%08x\n",
1058 card->netdev->name, 1119 card->netdev->name,
1059 descr->dmac_cmd_status); 1120 hwdescr->dmac_cmd_status);
1060 pr_err("buf_addr=x%08x\n", descr->buf_addr); 1121 pr_err("buf_addr=x%08x\n", hwdescr->buf_addr);
1061 pr_err("buf_size=x%08x\n", descr->buf_size); 1122 pr_err("buf_size=x%08x\n", hwdescr->buf_size);
1062 pr_err("next_descr_addr=x%08x\n", descr->next_descr_addr); 1123 pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
1063 pr_err("result_size=x%08x\n", descr->result_size); 1124 pr_err("result_size=x%08x\n", hwdescr->result_size);
1064 pr_err("valid_size=x%08x\n", descr->valid_size); 1125 pr_err("valid_size=x%08x\n", hwdescr->valid_size);
1065 pr_err("data_status=x%08x\n", descr->data_status); 1126 pr_err("data_status=x%08x\n", hwdescr->data_status);
1066 pr_err("data_error=x%08x\n", descr->data_error); 1127 pr_err("data_error=x%08x\n", hwdescr->data_error);
1067 pr_err("bus_addr=x%08x\n", descr->bus_addr);
1068 pr_err("which=%ld\n", descr - card->rx_chain.ring); 1128 pr_err("which=%ld\n", descr - card->rx_chain.ring);
1069 1129
1070 card->spider_stats.rx_desc_error++; 1130 card->spider_stats.rx_desc_error++;
@@ -1073,12 +1133,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1073 1133
1074 /* Ok, we've got a packet in descr */ 1134 /* Ok, we've got a packet in descr */
1075 spider_net_pass_skb_up(descr, card); 1135 spider_net_pass_skb_up(descr, card);
1076 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1136 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1077 return 1; 1137 return 1;
1078 1138
1079bad_desc: 1139bad_desc:
1080 dev_kfree_skb_irq(descr->skb); 1140 dev_kfree_skb_irq(descr->skb);
1081 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1141 descr->skb = NULL;
1142 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1082 return 0; 1143 return 0;
1083} 1144}
1084 1145
@@ -1248,6 +1309,33 @@ spider_net_set_mac(struct net_device *netdev, void *p)
1248} 1309}
1249 1310
1250/** 1311/**
1312 * spider_net_link_reset
1313 * @netdev: net device structure
1314 *
1315 * This is called when the PHY_LINK signal is asserted. For the blade this is
1316 * not connected so we should never get here.
1317 *
1318 */
1319static void
1320spider_net_link_reset(struct net_device *netdev)
1321{
1322
1323 struct spider_net_card *card = netdev_priv(netdev);
1324
1325 del_timer_sync(&card->aneg_timer);
1326
1327 /* clear interrupt, block further interrupts */
1328 spider_net_write_reg(card, SPIDER_NET_GMACST,
1329 spider_net_read_reg(card, SPIDER_NET_GMACST));
1330 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1331
1332 /* reset phy and setup aneg */
1333 spider_net_setup_aneg(card);
1334 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1335
1336}
1337
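The reset path acks the link interrupt with the write-to-clear idiom, reading GMACST and writing the value straight back, then zeroes GMACINTEN so no further link interrupts arrive until autonegotiation restarts. A toy model of that idiom; the clear-on-write behavior of the status register is simulated inside write_reg():

#include <stdio.h>
#include <stdint.h>

static uint32_t gmacst = 0x5;       /* pretend two status bits are pending */
static uint32_t gmacinten = 0x4;

static uint32_t read_reg(const uint32_t *reg) { return *reg; }

static void write_reg(uint32_t *reg, uint32_t val)
{
	if (reg == &gmacst)
		*reg &= ~val;       /* hardware clears the bits written back */
	else
		*reg = val;
}

int main(void)
{
	write_reg(&gmacst, read_reg(&gmacst));   /* ack: clears exactly the set bits */
	write_reg(&gmacinten, 0);                /* block further link interrupts */
	printf("status=0x%x inten=0x%x\n", gmacst, gmacinten);
	return 0;
}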
1338/**
1251 * spider_net_handle_error_irq - handles errors raised by an interrupt 1339 * spider_net_handle_error_irq - handles errors raised by an interrupt
1252 * @card: card structure 1340 * @card: card structure
1253 * @status_reg: interrupt status register 0 (GHIINT0STS) 1341 * @status_reg: interrupt status register 0 (GHIINT0STS)
@@ -1359,8 +1447,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1359 switch (i) 1447 switch (i)
1360 { 1448 {
1361 case SPIDER_NET_GTMFLLINT: 1449 case SPIDER_NET_GTMFLLINT:
1362 if (netif_msg_intr(card) && net_ratelimit()) 1450 /* TX RAM full may happen in normal operation.
1363 pr_err("Spider TX RAM full\n"); 1451 * Logging is not needed. */
1364 show_error = 0; 1452 show_error = 0;
1365 break; 1453 break;
1366 case SPIDER_NET_GRFDFLLINT: /* fallthrough */ 1454 case SPIDER_NET_GRFDFLLINT: /* fallthrough */
@@ -1500,6 +1588,9 @@ spider_net_interrupt(int irq, void *ptr)
1500 if (status_reg & SPIDER_NET_TXINT) 1588 if (status_reg & SPIDER_NET_TXINT)
1501 netif_rx_schedule(netdev); 1589 netif_rx_schedule(netdev);
1502 1590
1591 if (status_reg & SPIDER_NET_LINKINT)
1592 spider_net_link_reset(netdev);
1593
1503 if (status_reg & SPIDER_NET_ERRINT ) 1594 if (status_reg & SPIDER_NET_ERRINT )
1504 spider_net_handle_error_irq(card, status_reg); 1595 spider_net_handle_error_irq(card, status_reg);
1505 1596
@@ -1540,6 +1631,11 @@ spider_net_init_card(struct spider_net_card *card)
1540 1631
1541 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 1632 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1542 SPIDER_NET_CKRCTRL_RUN_VALUE); 1633 SPIDER_NET_CKRCTRL_RUN_VALUE);
1634
1635 /* trigger ETOMOD signal */
1636 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1637 spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
1638
1543} 1639}
1544 1640
1545/** 1641/**
@@ -1624,8 +1720,6 @@ spider_net_enable_card(struct spider_net_card *card)
1624 1720
1625 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, 1721 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1626 SPIDER_NET_LENLMT_VALUE); 1722 SPIDER_NET_LENLMT_VALUE);
1627 spider_net_write_reg(card, SPIDER_NET_GMACMODE,
1628 SPIDER_NET_MACMODE_VALUE);
1629 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, 1723 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1630 SPIDER_NET_OPMODE_VALUE); 1724 SPIDER_NET_OPMODE_VALUE);
1631 1725
@@ -1642,98 +1736,6 @@ spider_net_enable_card(struct spider_net_card *card)
1642} 1736}
1643 1737
1644/** 1738/**
1645 * spider_net_open - called upon ifconfig up
1646 * @netdev: interface device structure
1647 *
1648 * returns 0 on success, <0 on failure
1649 *
1650 * spider_net_open allocates all the descriptors and memory needed for
1651 * operation, sets up multicast list and enables interrupts
1652 */
1653int
1654spider_net_open(struct net_device *netdev)
1655{
1656 struct spider_net_card *card = netdev_priv(netdev);
1657 int result;
1658
1659 result = spider_net_init_chain(card, &card->tx_chain);
1660 if (result)
1661 goto alloc_tx_failed;
1662 card->low_watermark = NULL;
1663
1664 result = spider_net_init_chain(card, &card->rx_chain);
1665 if (result)
1666 goto alloc_rx_failed;
1667
1668 /* Allocate rx skbs */
1669 if (spider_net_alloc_rx_skbs(card))
1670 goto alloc_skbs_failed;
1671
1672 spider_net_set_multi(netdev);
1673
1674 /* further enhancement: setup hw vlan, if needed */
1675
1676 result = -EBUSY;
1677 if (request_irq(netdev->irq, spider_net_interrupt,
1678 IRQF_SHARED, netdev->name, netdev))
1679 goto register_int_failed;
1680
1681 spider_net_enable_card(card);
1682
1683 netif_start_queue(netdev);
1684 netif_carrier_on(netdev);
1685 netif_poll_enable(netdev);
1686
1687 return 0;
1688
1689register_int_failed:
1690 spider_net_free_rx_chain_contents(card);
1691alloc_skbs_failed:
1692 spider_net_free_chain(card, &card->rx_chain);
1693alloc_rx_failed:
1694 spider_net_free_chain(card, &card->tx_chain);
1695alloc_tx_failed:
1696 return result;
1697}
1698
1699/**
1700 * spider_net_setup_phy - setup PHY
1701 * @card: card structure
1702 *
1703 * returns 0 on success, <0 on failure
1704 *
1705 * spider_net_setup_phy is used as part of spider_net_probe. Sets
1706 * the PHY to 1000 Mbps
1707 **/
1708static int
1709spider_net_setup_phy(struct spider_net_card *card)
1710{
1711 struct mii_phy *phy = &card->phy;
1712
1713 spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
1714 SPIDER_NET_DMASEL_VALUE);
1715 spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
1716 SPIDER_NET_PHY_CTRL_VALUE);
1717 phy->mii_id = 1;
1718 phy->dev = card->netdev;
1719 phy->mdio_read = spider_net_read_phy;
1720 phy->mdio_write = spider_net_write_phy;
1721
1722 mii_phy_probe(phy, phy->mii_id);
1723
1724 if (phy->def->ops->setup_forced)
1725 phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL);
1726
1727 phy->def->ops->enable_fiber(phy);
1728
1729 phy->def->ops->read_link(phy);
1730 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
1731 phy->speed, phy->duplex==1 ? "Full" : "Half");
1732
1733 return 0;
1734}
1735
1736/**
1737 * spider_net_download_firmware - loads firmware into the adapter 1739 * spider_net_download_firmware - loads firmware into the adapter
1738 * @card: card structure 1740 * @card: card structure
1739 * @firmware_ptr: pointer to firmware data 1741 * @firmware_ptr: pointer to firmware data
@@ -1852,6 +1854,179 @@ out_err:
1852} 1854}
1853 1855
1854/** 1856/**
1857 * spider_net_open - called upon ifconfig up
1858 * @netdev: interface device structure
1859 *
1860 * returns 0 on success, <0 on failure
1861 *
1862 * spider_net_open allocates all the descriptors and memory needed for
1863 * operation, sets up multicast list and enables interrupts
1864 */
1865int
1866spider_net_open(struct net_device *netdev)
1867{
1868 struct spider_net_card *card = netdev_priv(netdev);
1869 int result;
1870
1871 result = spider_net_init_firmware(card);
1872 if (result)
1873 goto init_firmware_failed;
1874
1875 /* start probing with copper */
1876 spider_net_setup_aneg(card);
1877 if (card->phy.def->phy_id)
1878 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1879
1880 result = spider_net_init_chain(card, &card->tx_chain);
1881 if (result)
1882 goto alloc_tx_failed;
1883 card->low_watermark = NULL;
1884
1885 result = spider_net_init_chain(card, &card->rx_chain);
1886 if (result)
1887 goto alloc_rx_failed;
1888
1889 /* Allocate rx skbs */
1890 if (spider_net_alloc_rx_skbs(card))
1891 goto alloc_skbs_failed;
1892
1893 spider_net_set_multi(netdev);
1894
1895 /* further enhancement: setup hw vlan, if needed */
1896
1897 result = -EBUSY;
1898 if (request_irq(netdev->irq, spider_net_interrupt,
1899 IRQF_SHARED, netdev->name, netdev))
1900 goto register_int_failed;
1901
1902 spider_net_enable_card(card);
1903
1904 netif_start_queue(netdev);
1905 netif_carrier_on(netdev);
1906 netif_poll_enable(netdev);
1907
1908 return 0;
1909
1910register_int_failed:
1911 spider_net_free_rx_chain_contents(card);
1912alloc_skbs_failed:
1913 spider_net_free_chain(card, &card->rx_chain);
1914alloc_rx_failed:
1915 spider_net_free_chain(card, &card->tx_chain);
1916alloc_tx_failed:
1917 del_timer_sync(&card->aneg_timer);
1918init_firmware_failed:
1919 return result;
1920}
1921
1922/**
1923 * spider_net_link_phy
1924 * @data: used for pointer to card structure
1925 *
1926 */
1927static void spider_net_link_phy(unsigned long data)
1928{
1929 struct spider_net_card *card = (struct spider_net_card *)data;
1930 struct mii_phy *phy = &card->phy;
1931
1932 /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
1933 if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
1934
1935 pr_info("%s: link is down, trying to bring it up\n", card->netdev->name);
1936
1937 switch (card->medium) {
1938 case BCM54XX_COPPER:
1939 /* enable fiber with autonegotiation first */
1940 if (phy->def->ops->enable_fiber)
1941 phy->def->ops->enable_fiber(phy, 1);
1942 card->medium = BCM54XX_FIBER;
1943 break;
1944
1945 case BCM54XX_FIBER:
1946 /* fiber didn't come up, try to disable fiber autoneg */
1947 if (phy->def->ops->enable_fiber)
1948 phy->def->ops->enable_fiber(phy, 0);
1949 card->medium = BCM54XX_UNKNOWN;
1950 break;
1951
1952 case BCM54XX_UNKNOWN:
 1953 /* copper, and fiber with and without autoneg, all failed;
 1954 * retry from the beginning */
1955 spider_net_setup_aneg(card);
1956 card->medium = BCM54XX_COPPER;
1957 break;
1958 }
1959
1960 card->aneg_count = 0;
1961 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1962 return;
1963 }
1964
1965 /* link still not up, try again later */
1966 if (!(phy->def->ops->poll_link(phy))) {
1967 card->aneg_count++;
1968 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1969 return;
1970 }
1971
1972 /* link came up, get abilities */
1973 phy->def->ops->read_link(phy);
1974
1975 spider_net_write_reg(card, SPIDER_NET_GMACST,
1976 spider_net_read_reg(card, SPIDER_NET_GMACST));
1977 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
1978
1979 if (phy->speed == 1000)
1980 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
1981 else
1982 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
1983
1984 card->aneg_count = 0;
1985
1986 pr_debug("Found %s with %i Mbps, %s-duplex %sautoneg.\n",
1987 phy->def->name, phy->speed, phy->duplex==1 ? "Full" : "Half",
1988 phy->autoneg==1 ? "" : "no ");
1989
1990 return;
1991}
1992
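spider_net_link_phy() above is in essence a three-state retry machine: once the link has been down for more than SPIDER_NET_ANEG_TIMEOUT one-second polls, it advances the medium from copper to fiber with autoneg to fiber forced, then wraps back around via spider_net_setup_aneg(). A hedged sketch of just that transition logic, with the helper standing in for the enable_fiber()/setup_aneg() calls:

enum medium { MEDIUM_COPPER, MEDIUM_FIBER_ANEG, MEDIUM_FIBER_FORCED };

static enum medium next_medium(enum medium m)
{
	switch (m) {
	case MEDIUM_COPPER:
		return MEDIUM_FIBER_ANEG;	/* copper failed: fiber, autoneg on */
	case MEDIUM_FIBER_ANEG:
		return MEDIUM_FIBER_FORCED;	/* fiber autoneg failed: force it */
	default:
		return MEDIUM_COPPER;		/* all media failed: start over */
	}
}
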
1993/**
1994 * spider_net_setup_phy - setup PHY
1995 * @card: card structure
1996 *
1997 * returns 0 on success, <0 on failure
1998 *
1999 * spider_net_setup_phy is used as part of spider_net_probe.
2000 **/
2001static int
2002spider_net_setup_phy(struct spider_net_card *card)
2003{
2004 struct mii_phy *phy = &card->phy;
2005
2006 spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
2007 SPIDER_NET_DMASEL_VALUE);
2008 spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
2009 SPIDER_NET_PHY_CTRL_VALUE);
2010
2011 phy->dev = card->netdev;
2012 phy->mdio_read = spider_net_read_phy;
2013 phy->mdio_write = spider_net_write_phy;
2014
2015 for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
2016 unsigned short id;
2017 id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
2018 if (id != 0x0000 && id != 0xffff) {
2019 if (!mii_phy_probe(phy, phy->mii_id)) {
2020 pr_info("Found %s.\n", phy->def->name);
2021 break;
2022 }
2023 }
2024 }
2025
2026 return 0;
2027}
2028
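The probe loop above scans MII addresses 1 through 31 and treats a BMSR value of 0x0000 or 0xffff as "no PHY at this address"; all-zeros and all-ones are what reads typically return from a floating or tied-off MDIO bus. Reduced to a stand-alone sketch, with mdio_read() as a stand-in for spider_net_read_phy():

#include <stdint.h>

#define MII_BMSR	0x01	/* basic mode status register */

extern uint16_t mdio_read(int phy_addr, int reg);

/* Return the first MII address that answers sanely, or -1. */
int mdio_scan(void)
{
	int addr;

	for (addr = 1; addr <= 31; addr++) {
		uint16_t bmsr = mdio_read(addr, MII_BMSR);

		if (bmsr != 0x0000 && bmsr != 0xffff)
			return addr;
	}
	return -1;
}
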
2029/**
1855 * spider_net_workaround_rxramfull - work around firmware bug 2030 * spider_net_workaround_rxramfull - work around firmware bug
1856 * @card: card structure 2031 * @card: card structure
1857 * 2032 *
@@ -1900,14 +2075,15 @@ spider_net_stop(struct net_device *netdev)
1900 netif_carrier_off(netdev); 2075 netif_carrier_off(netdev);
1901 netif_stop_queue(netdev); 2076 netif_stop_queue(netdev);
1902 del_timer_sync(&card->tx_timer); 2077 del_timer_sync(&card->tx_timer);
2078 del_timer_sync(&card->aneg_timer);
1903 2079
1904 /* disable/mask all interrupts */ 2080 /* disable/mask all interrupts */
1905 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); 2081 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
1906 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); 2082 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
1907 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); 2083 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
2084 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1908 2085
1909 /* free_irq(netdev->irq, netdev);*/ 2086 free_irq(netdev->irq, netdev);
1910 free_irq(to_pci_dev(netdev->dev.parent)->irq, netdev);
1911 2087
1912 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 2088 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1913 SPIDER_NET_DMA_TX_FEND_VALUE); 2089 SPIDER_NET_DMA_TX_FEND_VALUE);
@@ -1919,8 +2095,6 @@ spider_net_stop(struct net_device *netdev)
1919 spider_net_release_tx_chain(card, 1); 2095 spider_net_release_tx_chain(card, 1);
1920 spider_net_free_rx_chain_contents(card); 2096 spider_net_free_rx_chain_contents(card);
1921 2097
1922 spider_net_free_rx_chain_contents(card);
1923
1924 spider_net_free_chain(card, &card->tx_chain); 2098 spider_net_free_chain(card, &card->tx_chain);
1925 spider_net_free_chain(card, &card->rx_chain); 2099 spider_net_free_chain(card, &card->rx_chain);
1926 2100
@@ -1952,8 +2126,6 @@ spider_net_tx_timeout_task(struct work_struct *work)
1952 2126
1953 if (spider_net_setup_phy(card)) 2127 if (spider_net_setup_phy(card))
1954 goto out; 2128 goto out;
1955 if (spider_net_init_firmware(card))
1956 goto out;
1957 2129
1958 spider_net_open(netdev); 2130 spider_net_open(netdev);
1959 spider_net_kick_tx_dma(card); 2131 spider_net_kick_tx_dma(card);
@@ -2046,10 +2218,12 @@ spider_net_setup_netdev(struct spider_net_card *card)
2046 card->tx_timer.data = (unsigned long) card; 2218 card->tx_timer.data = (unsigned long) card;
2047 netdev->irq = card->pdev->irq; 2219 netdev->irq = card->pdev->irq;
2048 2220
2049 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2221 card->aneg_count = 0;
2222 init_timer(&card->aneg_timer);
2223 card->aneg_timer.function = spider_net_link_phy;
2224 card->aneg_timer.data = (unsigned long) card;
2050 2225
2051 card->tx_chain.num_desc = tx_descriptors; 2226 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2052 card->rx_chain.num_desc = rx_descriptors;
2053 2227
2054 spider_net_setup_netdev_ops(netdev); 2228 spider_net_setup_netdev_ops(netdev);
2055 2229
@@ -2098,8 +2272,11 @@ spider_net_alloc_card(void)
2098{ 2272{
2099 struct net_device *netdev; 2273 struct net_device *netdev;
2100 struct spider_net_card *card; 2274 struct spider_net_card *card;
2275 size_t alloc_size;
2101 2276
2102 netdev = alloc_etherdev(sizeof(struct spider_net_card)); 2277 alloc_size = sizeof(struct spider_net_card) +
2278 (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);
2279 netdev = alloc_etherdev(alloc_size);
2103 if (!netdev) 2280 if (!netdev)
2104 return NULL; 2281 return NULL;
2105 2282
@@ -2110,6 +2287,11 @@ spider_net_alloc_card(void)
2110 init_waitqueue_head(&card->waitq); 2287 init_waitqueue_head(&card->waitq);
2111 atomic_set(&card->tx_timeout_task_counter, 0); 2288 atomic_set(&card->tx_timeout_task_counter, 0);
2112 2289
2290 card->rx_chain.num_desc = rx_descriptors;
2291 card->rx_chain.ring = card->darray;
2292 card->tx_chain.num_desc = tx_descriptors;
2293 card->tx_chain.ring = card->darray + rx_descriptors;
2294
2113 return card; 2295 return card;
2114} 2296}
2115 2297
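With this hunk one allocation serves for the card state plus both descriptor rings: alloc_etherdev() is sized for the struct plus (tx + rx) descriptors, and the rings are carved out of the trailing darray[] member added in the spider_net.h hunk below. A runnable user-space model of the layout, with malloc() standing in for alloc_etherdev() and C99 flexible-array syntax in place of the patch's zero-length darray[0]:

#include <stdlib.h>

struct descr { void *skb; unsigned int bus_addr; };

struct card {
	int rx_count, tx_count;
	struct descr *rx_ring, *tx_ring;
	struct descr darray[];		/* must stay last in the struct */
};

static struct card *card_alloc(int rx, int tx)
{
	struct card *c = malloc(sizeof(*c) + (rx + tx) * sizeof(struct descr));

	if (!c)
		return NULL;
	c->rx_count = rx;
	c->tx_count = tx;
	c->rx_ring = c->darray;		/* rx ring first ... */
	c->tx_ring = c->darray + rx;	/* ... tx ring right behind it */
	return c;
}
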
@@ -2220,10 +2402,6 @@ spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2220 if (err) 2402 if (err)
2221 goto out_undo_pci; 2403 goto out_undo_pci;
2222 2404
2223 err = spider_net_init_firmware(card);
2224 if (err)
2225 goto out_undo_pci;
2226
2227 err = spider_net_setup_netdev(card); 2405 err = spider_net_setup_netdev(card);
2228 if (err) 2406 if (err)
2229 goto out_undo_pci; 2407 goto out_undo_pci;
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 2fec5cf76926..4a1e0d28a502 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * Network device driver for Cell Processor-Based Blade 2 * Network device driver for Cell Processor-Based Blade and Celleb platform
3 * 3 *
4 * (C) Copyright IBM Corp. 2005 4 * (C) Copyright IBM Corp. 2005
5 * (C) Copyright 2006 TOSHIBA CORPORATION
5 * 6 *
6 * Authors : Utz Bacher <utz.bacher@de.ibm.com> 7 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
7 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com> 8 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
@@ -24,7 +25,7 @@
24#ifndef _SPIDER_NET_H 25#ifndef _SPIDER_NET_H
25#define _SPIDER_NET_H 26#define _SPIDER_NET_H
26 27
27#define VERSION "1.6 B" 28#define VERSION "2.0 A"
28 29
29#include "sungem_phy.h" 30#include "sungem_phy.h"
30 31
@@ -50,6 +51,8 @@ extern char spider_net_driver_name[];
50#define SPIDER_NET_TX_DESCRIPTORS_MAX 512 51#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
51 52
52#define SPIDER_NET_TX_TIMER (HZ/5) 53#define SPIDER_NET_TX_TIMER (HZ/5)
54#define SPIDER_NET_ANEG_TIMER (HZ)
55#define SPIDER_NET_ANEG_TIMEOUT 2
53 56
54#define SPIDER_NET_RX_CSUM_DEFAULT 1 57#define SPIDER_NET_RX_CSUM_DEFAULT 1
55 58
@@ -104,6 +107,7 @@ extern char spider_net_driver_name[];
104 107
105#define SPIDER_NET_GMACOPEMD 0x00000100 108#define SPIDER_NET_GMACOPEMD 0x00000100
106#define SPIDER_NET_GMACLENLMT 0x00000108 109#define SPIDER_NET_GMACLENLMT 0x00000108
110#define SPIDER_NET_GMACST 0x00000110
107#define SPIDER_NET_GMACINTEN 0x00000118 111#define SPIDER_NET_GMACINTEN 0x00000118
108#define SPIDER_NET_GMACPHYCTRL 0x00000120 112#define SPIDER_NET_GMACPHYCTRL 0x00000120
109 113
@@ -181,7 +185,8 @@ extern char spider_net_driver_name[];
181 185
182/* pause frames: automatic, no upper retransmission count */ 186/* pause frames: automatic, no upper retransmission count */
183/* outside loopback mode: ETOMOD signal doesn't matter, not connected */ 187/* outside loopback mode: ETOMOD signal doesn't matter, not connected */
188/* ETOMOD signal is brought to PHY reset. Bit 2 must be 1 on Celleb */
189#define SPIDER_NET_OPMODE_VALUE 0x00000067
185/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/ 190/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
186#define SPIDER_NET_LENLMT_VALUE 0x00000908 191#define SPIDER_NET_LENLMT_VALUE 0x00000908
187 192
@@ -333,9 +338,12 @@ enum spider_net_int2_status {
333/* We rely on flagged descriptor interrupts */ 338/* We rely on flagged descriptor interrupts */
334#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) ) 339#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
335 340
341#define SPIDER_NET_LINKINT ( 1 << SPIDER_NET_GMAC2INT )
342
336#define SPIDER_NET_ERRINT ( 0xffffffff & \ 343#define SPIDER_NET_ERRINT ( 0xffffffff & \
337 (~SPIDER_NET_TXINT) & \ 344 (~SPIDER_NET_TXINT) & \
338 (~SPIDER_NET_RXINT) ) 345 (~SPIDER_NET_RXINT) & \
346 (~SPIDER_NET_LINKINT) )
339 347
340#define SPIDER_NET_GPREXEC 0x80000000 348#define SPIDER_NET_GPREXEC 0x80000000
341#define SPIDER_NET_GPRDAT_MASK 0x0000ffff 349#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
@@ -356,8 +364,8 @@ enum spider_net_int2_status {
356#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000 364#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
357#define SPIDER_NET_DESCR_TXDESFLG 0x00800000 365#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
358 366
359struct spider_net_descr { 367/* Descriptor, as defined by the hardware */
360 /* as defined by the hardware */ 368struct spider_net_hw_descr {
361 u32 buf_addr; 369 u32 buf_addr;
362 u32 buf_size; 370 u32 buf_size;
363 u32 next_descr_addr; 371 u32 next_descr_addr;
@@ -366,13 +374,15 @@ struct spider_net_descr {
366 u32 valid_size; /* all zeroes for tx */ 374 u32 valid_size; /* all zeroes for tx */
367 u32 data_status; 375 u32 data_status;
368 u32 data_error; /* all zeroes for tx */ 376 u32 data_error; /* all zeroes for tx */
377} __attribute__((aligned(32)));
369 378
370 /* used in the driver */ 379struct spider_net_descr {
380 struct spider_net_hw_descr *hwdescr;
371 struct sk_buff *skb; 381 struct sk_buff *skb;
372 u32 bus_addr; 382 u32 bus_addr;
373 struct spider_net_descr *next; 383 struct spider_net_descr *next;
374 struct spider_net_descr *prev; 384 struct spider_net_descr *prev;
375} __attribute__((aligned(32))); 385};
376 386
377struct spider_net_descr_chain { 387struct spider_net_descr_chain {
378 spinlock_t lock; 388 spinlock_t lock;
@@ -380,6 +390,7 @@ struct spider_net_descr_chain {
380 struct spider_net_descr *tail; 390 struct spider_net_descr *tail;
381 struct spider_net_descr *ring; 391 struct spider_net_descr *ring;
382 int num_desc; 392 int num_desc;
393 struct spider_net_hw_descr *hwring;
383 dma_addr_t dma_addr; 394 dma_addr_t dma_addr;
384}; 395};
385 396
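The split above separates the 32-byte descriptor the chip DMAs (spider_net_hw_descr) from the driver's bookkeeping record, which now merely points at its hardware twin, and the chain gains a separate hwring for the hardware side. Presumably the chain-init code pairs the two rings index by index; a sketch of that wiring under this assumption, not the driver's actual code:

static void chain_link(struct spider_net_descr_chain *chain)
{
	int i, n = chain->num_desc;

	for (i = 0; i < n; i++) {
		chain->ring[i].hwdescr = &chain->hwring[i];
		chain->ring[i].next = &chain->ring[(i + 1) % n];
		chain->ring[i].prev = &chain->ring[(i + n - 1) % n];
	}
}
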
@@ -436,12 +447,16 @@ struct spider_net_card {
436 struct pci_dev *pdev; 447 struct pci_dev *pdev;
437 struct mii_phy phy; 448 struct mii_phy phy;
438 449
450 int medium;
451
439 void __iomem *regs; 452 void __iomem *regs;
440 453
441 struct spider_net_descr_chain tx_chain; 454 struct spider_net_descr_chain tx_chain;
442 struct spider_net_descr_chain rx_chain; 455 struct spider_net_descr_chain rx_chain;
443 struct spider_net_descr *low_watermark; 456 struct spider_net_descr *low_watermark;
444 457
458 int aneg_count;
459 struct timer_list aneg_timer;
445 struct timer_list tx_timer; 460 struct timer_list tx_timer;
446 struct work_struct tx_timeout_task; 461 struct work_struct tx_timeout_task;
447 atomic_t tx_timeout_task_counter; 462 atomic_t tx_timeout_task_counter;
@@ -452,6 +467,9 @@ struct spider_net_card {
452 struct net_device_stats netdev_stats; 467 struct net_device_stats netdev_stats;
453 struct spider_net_extra_stats spider_stats; 468 struct spider_net_extra_stats spider_stats;
454 struct spider_net_options options; 469 struct spider_net_options options;
470
471 /* Must be last item in struct */
472 struct spider_net_descr darray[0];
455}; 473};
456 474
457#define pr_err(fmt,arg...) \ 475#define pr_err(fmt,arg...) \
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index a3220a96524f..4757aa647c7a 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -28,8 +28,6 @@ static int automatic_resume = 0; /* experimental .. better should be zero */
28static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */ 28static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
29static int fifo=0x8; /* don't change */ 29static int fifo=0x8; /* don't change */
30 30
31/* #define REALLY_SLOW_IO */
32
33#include <linux/module.h> 31#include <linux/module.h>
34#include <linux/kernel.h> 32#include <linux/kernel.h>
35#include <linux/string.h> 33#include <linux/string.h>
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 701ba4f3b69d..56a110ca5e6f 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -310,6 +310,107 @@ static int bcm5411_init(struct mii_phy* phy)
310 return 0; 310 return 0;
311} 311}
312 312
313static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
314{
315 u16 ctl, adv;
316
317 phy->autoneg = 1;
318 phy->speed = SPEED_10;
319 phy->duplex = DUPLEX_HALF;
320 phy->pause = 0;
321 phy->advertising = advertise;
322
323 /* Setup standard advertise */
324 adv = phy_read(phy, MII_ADVERTISE);
325 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
326 if (advertise & ADVERTISED_10baseT_Half)
327 adv |= ADVERTISE_10HALF;
328 if (advertise & ADVERTISED_10baseT_Full)
329 adv |= ADVERTISE_10FULL;
330 if (advertise & ADVERTISED_100baseT_Half)
331 adv |= ADVERTISE_100HALF;
332 if (advertise & ADVERTISED_100baseT_Full)
333 adv |= ADVERTISE_100FULL;
334 phy_write(phy, MII_ADVERTISE, adv);
335
336 /* Start/Restart aneg */
337 ctl = phy_read(phy, MII_BMCR);
338 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
339 phy_write(phy, MII_BMCR, ctl);
340
341 return 0;
342}
343
344static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
345{
346 u16 ctl;
347
348 phy->autoneg = 0;
349 phy->speed = speed;
350 phy->duplex = fd;
351 phy->pause = 0;
352
353 ctl = phy_read(phy, MII_BMCR);
354 ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
355
356 /* First reset the PHY */
357 phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
358
359 /* Select speed & duplex */
360 switch(speed) {
361 case SPEED_10:
362 break;
363 case SPEED_100:
364 ctl |= BMCR_SPEED100;
365 break;
366 case SPEED_1000:
367 default:
368 return -EINVAL;
369 }
370 if (fd == DUPLEX_FULL)
371 ctl |= BMCR_FULLDPLX;
372 phy_write(phy, MII_BMCR, ctl);
373
374 return 0;
375}
376
377static int genmii_poll_link(struct mii_phy *phy)
378{
379 u16 status;
380
 381 (void)phy_read(phy, MII_BMSR); /* BMSR latches link-down; first read flushes it */
382 status = phy_read(phy, MII_BMSR);
383 if ((status & BMSR_LSTATUS) == 0)
384 return 0;
385 if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
386 return 0;
387 return 1;
388}
389
390static int genmii_read_link(struct mii_phy *phy)
391{
392 u16 lpa;
393
394 if (phy->autoneg) {
395 lpa = phy_read(phy, MII_LPA);
396
397 if (lpa & (LPA_10FULL | LPA_100FULL))
398 phy->duplex = DUPLEX_FULL;
399 else
400 phy->duplex = DUPLEX_HALF;
401 if (lpa & (LPA_100FULL | LPA_100HALF))
402 phy->speed = SPEED_100;
403 else
404 phy->speed = SPEED_10;
405 phy->pause = 0;
406 }
407 /* On non-aneg, we assume what we put in BMCR is the speed,
408 * though magic-aneg shouldn't prevent this case from occurring
409 */
410
411 return 0;
412}
413
313static int generic_suspend(struct mii_phy* phy) 414static int generic_suspend(struct mii_phy* phy)
314{ 415{
315 phy_write(phy, MII_BMCR, BMCR_PDOWN); 416 phy_write(phy, MII_BMCR, BMCR_PDOWN);
@@ -364,30 +465,6 @@ static int bcm5421_init(struct mii_phy* phy)
364 return 0; 465 return 0;
365} 466}
366 467
367static int bcm5421_enable_fiber(struct mii_phy* phy)
368{
369 /* enable fiber mode */
370 phy_write(phy, MII_NCONFIG, 0x9020);
371 /* LEDs active in both modes, autosense prio = fiber */
372 phy_write(phy, MII_NCONFIG, 0x945f);
373
374 /* switch off fibre autoneg */
375 phy_write(phy, MII_NCONFIG, 0xfc01);
376 phy_write(phy, 0x0b, 0x0004);
377
378 return 0;
379}
380
381static int bcm5461_enable_fiber(struct mii_phy* phy)
382{
383 phy_write(phy, MII_NCONFIG, 0xfc0c);
384 phy_write(phy, MII_BMCR, 0x4140);
385 phy_write(phy, MII_NCONFIG, 0xfc0b);
386 phy_write(phy, MII_BMCR, 0x0140);
387
388 return 0;
389}
390
391static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) 468static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
392{ 469{
393 u16 ctl, adv; 470 u16 ctl, adv;
@@ -515,6 +592,155 @@ static int marvell88e1111_init(struct mii_phy* phy)
515 return 0; 592 return 0;
516} 593}
517 594
595#define BCM5421_MODE_MASK (1 << 5)
596
597static int bcm5421_poll_link(struct mii_phy* phy)
598{
599 u32 phy_reg;
600 int mode;
601
602 /* find out in what mode we are */
603 phy_write(phy, MII_NCONFIG, 0x1000);
604 phy_reg = phy_read(phy, MII_NCONFIG);
605
606 mode = (phy_reg & BCM5421_MODE_MASK) >> 5;
607
 608 if (mode == BCM54XX_COPPER)
609 return genmii_poll_link(phy);
610
 611 /* try to find out whether we have a link */
612 phy_write(phy, MII_NCONFIG, 0x2000);
613 phy_reg = phy_read(phy, MII_NCONFIG);
614
615 if (phy_reg & 0x0020)
616 return 0;
617 else
618 return 1;
619}
620
621static int bcm5421_read_link(struct mii_phy* phy)
622{
623 u32 phy_reg;
624 int mode;
625
626 /* find out in what mode we are */
627 phy_write(phy, MII_NCONFIG, 0x1000);
628 phy_reg = phy_read(phy, MII_NCONFIG);
629
 630 mode = (phy_reg & BCM5421_MODE_MASK) >> 5;
 631
 632 if (mode == BCM54XX_COPPER)
633 return bcm54xx_read_link(phy);
634
635 phy->speed = SPEED_1000;
636
 637 /* find out whether we are running half or full duplex */
638 phy_write(phy, MII_NCONFIG, 0x2000);
639 phy_reg = phy_read(phy, MII_NCONFIG);
640
 641 if (phy_reg & 0x0080)
 642 phy->duplex = DUPLEX_HALF;
 643 else
 644 phy->duplex = DUPLEX_FULL;
645
646 return 0;
647}
648
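Both bcm5421 helpers above use the same access pattern: write a selector to MII_NCONFIG, then read the selected shadow value back through MII_NCONFIG (selector 0x1000 exposes the mode bits, 0x2000 the fiber status). A small helper would capture it; a sketch only, not part of the patch:

static u32 bcm54xx_shadow_read(struct mii_phy *phy, u16 selector)
{
	phy_write(phy, MII_NCONFIG, selector);
	return phy_read(phy, MII_NCONFIG);
}

/* e.g. in bcm5421_poll_link():
 *	mode = (bcm54xx_shadow_read(phy, 0x1000) & BCM5421_MODE_MASK) >> 5;
 */
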
649static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg)
650{
651 /* enable fiber mode */
652 phy_write(phy, MII_NCONFIG, 0x9020);
653 /* LEDs active in both modes, autosense prio = fiber */
654 phy_write(phy, MII_NCONFIG, 0x945f);
655
656 if (!autoneg) {
657 /* switch off fibre autoneg */
658 phy_write(phy, MII_NCONFIG, 0xfc01);
659 phy_write(phy, 0x0b, 0x0004);
660 }
661
662 phy->autoneg = autoneg;
663
664 return 0;
665}
666
667#define BCM5461_FIBER_LINK (1 << 2)
668#define BCM5461_MODE_MASK (3 << 1)
669
670static int bcm5461_poll_link(struct mii_phy* phy)
671{
672 u32 phy_reg;
673 int mode;
674
675 /* find out in what mode we are */
676 phy_write(phy, MII_NCONFIG, 0x7c00);
677 phy_reg = phy_read(phy, MII_NCONFIG);
678
 679 mode = (phy_reg & BCM5461_MODE_MASK) >> 1;
 680
 681 if (mode == BCM54XX_COPPER)
682 return genmii_poll_link(phy);
683
 684 /* find out whether we have a link */
685 phy_write(phy, MII_NCONFIG, 0x7000);
686 phy_reg = phy_read(phy, MII_NCONFIG);
687
688 if (phy_reg & BCM5461_FIBER_LINK)
689 return 1;
690 else
691 return 0;
692}
693
694#define BCM5461_FIBER_DUPLEX (1 << 3)
695
696static int bcm5461_read_link(struct mii_phy* phy)
697{
698 u32 phy_reg;
699 int mode;
700
701 /* find out in what mode we are */
702 phy_write(phy, MII_NCONFIG, 0x7c00);
703 phy_reg = phy_read(phy, MII_NCONFIG);
704
 705 mode = (phy_reg & BCM5461_MODE_MASK) >> 1;
 706
 707 if (mode == BCM54XX_COPPER) {
 708 return bcm54xx_read_link(phy);
 709 }
710
711 phy->speed = SPEED_1000;
712
 713 /* find out whether we are running half or full duplex */
714 phy_write(phy, MII_NCONFIG, 0x7000);
715 phy_reg = phy_read(phy, MII_NCONFIG);
716
 717 if (phy_reg & BCM5461_FIBER_DUPLEX)
 718 phy->duplex = DUPLEX_FULL;
 719 else
 720 phy->duplex = DUPLEX_HALF;
721
722 return 0;
723}
724
725static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg)
726{
727 /* select fiber mode, enable 1000 base-X registers */
728 phy_write(phy, MII_NCONFIG, 0xfc0b);
729
730 if (autoneg) {
 731 /* enable fiber with autonegotiation */
732 phy_write(phy, MII_ADVERTISE, 0x01e0);
733 phy_write(phy, MII_BMCR, 0x1140);
734 } else {
 735 /* enable fiber without autonegotiation */
736 phy_write(phy, MII_BMCR, 0x0140);
737 }
738
739 phy->autoneg = autoneg;
740
741 return 0;
742}
743
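The raw BMCR values written for fiber mode decompose into standard <linux/mii.h> bits, which makes the autoneg/forced split explicit. A reading of the constants above, not code from the patch:

#include <linux/mii.h>

/* 0x1140 == BMCR_ANENABLE | BMCR_FULLDPLX | BMCR_SPEED1000:
 * 1000 Mbit, full duplex, autonegotiation enabled */
#define FIBER_BMCR_ANEG		(BMCR_ANENABLE | BMCR_FULLDPLX | BMCR_SPEED1000)

/* 0x0140 == BMCR_FULLDPLX | BMCR_SPEED1000: same link, forced */
#define FIBER_BMCR_FORCED	(BMCR_FULLDPLX | BMCR_SPEED1000)
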
518static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) 744static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
519{ 745{
520 u16 ctl, adv; 746 u16 ctl, adv;
@@ -645,113 +871,6 @@ static int marvell_read_link(struct mii_phy *phy)
645 return 0; 871 return 0;
646} 872}
647 873
648static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
649{
650 u16 ctl, adv;
651
652 phy->autoneg = 1;
653 phy->speed = SPEED_10;
654 phy->duplex = DUPLEX_HALF;
655 phy->pause = 0;
656 phy->advertising = advertise;
657
658 /* Setup standard advertise */
659 adv = phy_read(phy, MII_ADVERTISE);
660 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
661 if (advertise & ADVERTISED_10baseT_Half)
662 adv |= ADVERTISE_10HALF;
663 if (advertise & ADVERTISED_10baseT_Full)
664 adv |= ADVERTISE_10FULL;
665 if (advertise & ADVERTISED_100baseT_Half)
666 adv |= ADVERTISE_100HALF;
667 if (advertise & ADVERTISED_100baseT_Full)
668 adv |= ADVERTISE_100FULL;
669 if (advertise & ADVERTISED_Pause)
670 adv |= ADVERTISE_PAUSE_CAP;
671 if (advertise & ADVERTISED_Asym_Pause)
672 adv |= ADVERTISE_PAUSE_ASYM;
673 phy_write(phy, MII_ADVERTISE, adv);
674
675 /* Start/Restart aneg */
676 ctl = phy_read(phy, MII_BMCR);
677 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
678 phy_write(phy, MII_BMCR, ctl);
679
680 return 0;
681}
682
683static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
684{
685 u16 ctl;
686
687 phy->autoneg = 0;
688 phy->speed = speed;
689 phy->duplex = fd;
690 phy->pause = 0;
691
692 ctl = phy_read(phy, MII_BMCR);
693 ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
694
695 /* First reset the PHY */
696 phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
697
698 /* Select speed & duplex */
699 switch(speed) {
700 case SPEED_10:
701 break;
702 case SPEED_100:
703 ctl |= BMCR_SPEED100;
704 break;
705 case SPEED_1000:
706 default:
707 return -EINVAL;
708 }
709 if (fd == DUPLEX_FULL)
710 ctl |= BMCR_FULLDPLX;
711 phy_write(phy, MII_BMCR, ctl);
712
713 return 0;
714}
715
716static int genmii_poll_link(struct mii_phy *phy)
717{
718 u16 status;
719
720 (void)phy_read(phy, MII_BMSR);
721 status = phy_read(phy, MII_BMSR);
722 if ((status & BMSR_LSTATUS) == 0)
723 return 0;
724 if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
725 return 0;
726 return 1;
727}
728
729static int genmii_read_link(struct mii_phy *phy)
730{
731 u16 lpa;
732
733 if (phy->autoneg) {
734 lpa = phy_read(phy, MII_LPA);
735
736 if (lpa & (LPA_10FULL | LPA_100FULL))
737 phy->duplex = DUPLEX_FULL;
738 else
739 phy->duplex = DUPLEX_HALF;
740 if (lpa & (LPA_100FULL | LPA_100HALF))
741 phy->speed = SPEED_100;
742 else
743 phy->speed = SPEED_10;
744 phy->pause = (phy->duplex == DUPLEX_FULL) &&
745 ((lpa & LPA_PAUSE) != 0);
746 }
747 /* On non-aneg, we assume what we put in BMCR is the speed,
748 * though magic-aneg shouldn't prevent this case from occurring
749 */
750
751 return 0;
752}
753
754
755#define MII_BASIC_FEATURES \ 874#define MII_BASIC_FEATURES \
756 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ 875 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
757 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ 876 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
@@ -885,8 +1004,8 @@ static struct mii_phy_ops bcm5421_phy_ops = {
885 .suspend = generic_suspend, 1004 .suspend = generic_suspend,
886 .setup_aneg = bcm54xx_setup_aneg, 1005 .setup_aneg = bcm54xx_setup_aneg,
887 .setup_forced = bcm54xx_setup_forced, 1006 .setup_forced = bcm54xx_setup_forced,
888 .poll_link = genmii_poll_link, 1007 .poll_link = bcm5421_poll_link,
889 .read_link = bcm54xx_read_link, 1008 .read_link = bcm5421_read_link,
890 .enable_fiber = bcm5421_enable_fiber, 1009 .enable_fiber = bcm5421_enable_fiber,
891}; 1010};
892 1011
@@ -923,8 +1042,8 @@ static struct mii_phy_ops bcm5461_phy_ops = {
923 .suspend = generic_suspend, 1042 .suspend = generic_suspend,
924 .setup_aneg = bcm54xx_setup_aneg, 1043 .setup_aneg = bcm54xx_setup_aneg,
925 .setup_forced = bcm54xx_setup_forced, 1044 .setup_forced = bcm54xx_setup_forced,
926 .poll_link = genmii_poll_link, 1045 .poll_link = bcm5461_poll_link,
927 .read_link = bcm54xx_read_link, 1046 .read_link = bcm5461_read_link,
928 .enable_fiber = bcm5461_enable_fiber, 1047 .enable_fiber = bcm5461_enable_fiber,
929}; 1048};
930 1049
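Each PHY variant publishes its own mii_phy_ops, so the two ops-table hunks above are all that is needed to reroute bcm5421/bcm5461 link handling. Callers reach the hooks through phy->def->ops and must NULL-check the optional ones, as the spidernet changes do for enable_fiber; a sketch of that guarded dispatch (wrapper name and errno choice are assumptions):

static int phy_enable_fiber(struct mii_phy *phy, int autoneg)
{
	if (phy->def->ops->enable_fiber)
		return phy->def->ops->enable_fiber(phy, autoneg);

	return -EOPNOTSUPP;
}
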
diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h
index 1d70ba6f9f10..af02f9479cbb 100644
--- a/drivers/net/sungem_phy.h
+++ b/drivers/net/sungem_phy.h
@@ -12,7 +12,7 @@ struct mii_phy_ops
12 int (*setup_forced)(struct mii_phy *phy, int speed, int fd); 12 int (*setup_forced)(struct mii_phy *phy, int speed, int fd);
13 int (*poll_link)(struct mii_phy *phy); 13 int (*poll_link)(struct mii_phy *phy);
14 int (*read_link)(struct mii_phy *phy); 14 int (*read_link)(struct mii_phy *phy);
15 int (*enable_fiber)(struct mii_phy *phy); 15 int (*enable_fiber)(struct mii_phy *phy, int autoneg);
16}; 16};
17 17
18/* Structure used to statically define an mii/gii based PHY */ 18/* Structure used to statically define an mii/gii based PHY */
@@ -26,6 +26,14 @@ struct mii_phy_def
26 const struct mii_phy_ops* ops; 26 const struct mii_phy_ops* ops;
27}; 27};
28 28
29enum {
30 BCM54XX_COPPER,
31 BCM54XX_FIBER,
32 BCM54XX_GBIC,
33 BCM54XX_SGMII,
34 BCM54XX_UNKNOWN,
35};
36
29/* An instance of a PHY, partially borrowed from mii_if_info */ 37/* An instance of a PHY, partially borrowed from mii_if_info */
30struct mii_phy 38struct mii_phy
31{ 39{
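One non-obvious contract in the new enum: bcm5421_poll_link() and friends compare the raw mode bits read from MII_NCONFIG directly against BCM54XX_COPPER, so copper has to stay the first enumerator. A compile-time check would pin that down; a sketch, not in the patch:

#define BUILD_CHECK(cond)	((void)sizeof(char[1 - 2 * !(cond)]))

static inline void bcm54xx_medium_check(void)
{
	/* the hardware mode field is matched against the enum verbatim */
	BUILD_CHECK(BCM54XX_COPPER == 0);
}
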
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 81ed82f0b520..eae2b63951f1 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -657,7 +657,7 @@ tc35815_init_queues(struct net_device *dev)
657 dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM); 657 dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM);
658#endif 658#endif
659 } else { 659 } else {
660 clear_page(lp->fd_buf); 660 memset(lp->fd_buf, 0, PAGE_SIZE * FD_PAGE_NUM);
661#ifdef __mips__ 661#ifdef __mips__
662 dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM); 662 dma_cache_wback_inv((unsigned long)lp->fd_buf, PAGE_SIZE * FD_PAGE_NUM);
663#endif 663#endif
@@ -1732,6 +1732,11 @@ static void __exit tc35815_cleanup_module(void)
1732{ 1732{
1733 struct net_device *next_dev; 1733 struct net_device *next_dev;
1734 1734
1735 /*
1736 * TODO: implement a tc35815_driver.remove hook, and
1737 * move this code into that function. Then, delete
1738 * all root_tc35815_dev list handling code.
1739 */
1735 while (root_tc35815_dev) { 1740 while (root_tc35815_dev) {
1736 struct net_device *dev = root_tc35815_dev; 1741 struct net_device *dev = root_tc35815_dev;
1737 next_dev = ((struct tc35815_local *)dev->priv)->next_module; 1742 next_dev = ((struct tc35815_local *)dev->priv)->next_module;
@@ -1740,6 +1745,9 @@ static void __exit tc35815_cleanup_module(void)
1740 free_netdev(dev); 1745 free_netdev(dev);
1741 root_tc35815_dev = next_dev; 1746 root_tc35815_dev = next_dev;
1742 } 1747 }
1748
1749 pci_unregister_driver(&tc35815_driver);
1743} 1750}
1751
1744module_init(tc35815_init_module); 1752module_init(tc35815_init_module);
1745module_exit(tc35815_cleanup_module); 1753module_exit(tc35815_cleanup_module);
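The TODO added above points at the standard fix: give tc35815_driver a .remove callback so the PCI core tracks the devices and the driver-private root_tc35815_dev list can go away. A hedged sketch of the shape such a hook would take; the function body is an assumption, not part of this patch:

static void __devexit tc35815_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	unregister_netdev(dev);
	free_netdev(dev);
	pci_set_drvdata(pdev, NULL);
}

/* and in the pci_driver definition:
 *	.remove = __devexit_p(tc35815_remove_one),
 */
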
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a2fc2bbcf97f..885e73d731c2 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -4199,9 +4199,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
4199 ugeth->ug_info = ug_info; 4199 ugeth->ug_info = ug_info;
4200 ugeth->dev = dev; 4200 ugeth->dev = dev;
4201 4201
4202 mac_addr = get_property(np, "mac-address", NULL); 4202 mac_addr = of_get_mac_address(np);
4203 if (mac_addr == NULL)
4204 mac_addr = get_property(np, "local-mac-address", NULL);
4205 if (mac_addr) 4203 if (mac_addr)
4206 memcpy(dev->dev_addr, mac_addr, 6); 4204 memcpy(dev->dev_addr, mac_addr, 6);
4207 4205
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index e91b5a84a20a..5b82e4fd0d73 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -94,7 +94,6 @@
94#include <linux/device.h> 94#include <linux/device.h>
95 95
96#undef COSA_SLOW_IO /* for testing purposes only */ 96#undef COSA_SLOW_IO /* for testing purposes only */
97#undef REALLY_SLOW_IO
98 97
99#include <asm/io.h> 98#include <asm/io.h>
100#include <asm/dma.h> 99#include <asm/dma.h>
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index c250f08c8dd5..ce9230b2f630 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -26,7 +26,6 @@
26 * Tested with Planet AP in 2.5.73-bk, 216 Kbytes/s in Infrastructure mode 26 * Tested with Planet AP in 2.5.73-bk, 216 Kbytes/s in Infrastructure mode
27 * with a SMP machine (dual pentium 100), using pktgen, 432 pps (pkt_size = 60) 27 * with a SMP machine (dual pentium 100), using pktgen, 432 pps (pkt_size = 60)
28 */ 28 */
29#undef REALLY_SLOW_IO /* most systems can safely undef this */
30 29
31#include <linux/delay.h> 30#include <linux/delay.h>
32#include <linux/types.h> 31#include <linux/types.h>