Diffstat:
 drivers/net/skge.c | 216 ++++++++++++++++++++++++------------------------
 1 file changed, 111 insertions(+), 105 deletions(-)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 0208258e7826..fd398da4993b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"1.0"
+#define DRV_VERSION		"1.1"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
@@ -105,41 +105,28 @@ static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
 static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
 
-/* Don't need to look at whole 16K.
- * last interesting register is descriptor poll timer.
- */
-#define SKGE_REGS_LEN	(29*128)
-
 static int skge_get_regs_len(struct net_device *dev)
 {
-	return SKGE_REGS_LEN;
+	return 0x4000;
 }
 
 /*
- * Returns copy of control register region
- * I/O region is divided into banks and certain regions are unreadable
+ * Returns copy of whole control register region
+ * Note: skip RAM address register because accessing it will
+ * cause bus hangs!
  */
 static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 			  void *p)
 {
 	const struct skge_port *skge = netdev_priv(dev);
-	unsigned long offs;
 	const void __iomem *io = skge->hw->regs;
-	static const unsigned long bankmap
-		= (1<<0) | (1<<2) | (1<<8) | (1<<9)
-		  | (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16)
-		  | (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23)
-		  | (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28);
 
 	regs->version = 1;
-	for (offs = 0; offs < regs->len; offs += 128) {
-		u32 len = min_t(u32, 128, regs->len - offs);
+	memset(p, 0, regs->len);
+	memcpy_fromio(p, io, B3_RAM_ADDR);
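+	/* Everything below the RAM address register is copied verbatim;
+	 * the RAM address/data window itself is left as the zeroes from
+	 * the memset above, and copying resumes at B3_RI_WTO_R1
+	 * (presumably the first safely readable register past it).
+	 */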
 
-		if (bankmap & (1<<(offs/128)))
-			memcpy_fromio(p + offs, io + offs, len);
-		else
-			memset(p + offs, 0, len);
-	}
+	memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+		      regs->len - B3_RI_WTO_R1);
 }
 
 /* Wake on Lan only supported on Yukon chps with rev 1 or above */
@@ -775,17 +762,6 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
 	return 0;
 }
 
-static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
-{
-	struct sk_buff *skb = dev_alloc_skb(size);
-
-	if (likely(skb)) {
-		skb->dev = dev;
-		skb_reserve(skb, NET_IP_ALIGN);
-	}
-	return skb;
-}
-
 /* Allocate and setup a new buffer for receiving */
 static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
 			  struct sk_buff *skb, unsigned int bufsize)
@@ -858,16 +834,17 @@ static int skge_rx_fill(struct skge_port *skge)
 {
 	struct skge_ring *ring = &skge->rx_ring;
 	struct skge_element *e;
-	unsigned int bufsize = skge->rx_buf_size;
 
 	e = ring->start;
 	do {
-		struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize);
+		struct sk_buff *skb;
 
+		skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
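+		/* the NET_IP_ALIGN bytes of slack are consumed by the
+		 * skb_reserve() below, so the 14-byte Ethernet header
+		 * leaves the IP header word-aligned */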
 		if (!skb)
 			return -ENOMEM;
 
-		skge_rx_setup(skge, e, skb, bufsize);
+		skb_reserve(skb, NET_IP_ALIGN);
+		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
 	} while ( (e = e->next) != ring->start);
 
 	ring->to_clean = ring->start;
@@ -1666,6 +1643,22 @@ static void yukon_reset(struct skge_hw *hw, int port)
 		     | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
 }
 
+/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
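+/* This appears to mirror the vendor sk98lin check: Rev. A0 of Yukon-Lite
+ * reports CHIP_ID_YUKON, but unlike a real Yukon the top byte of its
+ * Flash Address Register (B2_FAR) is writable, so probe that byte and
+ * restore the register afterwards.
+ */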
+static int is_yukon_lite_a0(struct skge_hw *hw)
+{
+	u32 reg;
+	int ret;
+
+	if (hw->chip_id != CHIP_ID_YUKON)
+		return 0;
+
+	reg = skge_read32(hw, B2_FAR);
+	skge_write8(hw, B2_FAR + 3, 0xff);
+	ret = (skge_read8(hw, B2_FAR + 3) != 0);
+	skge_write32(hw, B2_FAR, reg);
+	return ret;
+}
+
 static void yukon_mac_init(struct skge_hw *hw, int port)
 {
 	struct skge_port *skge = netdev_priv(hw->dev[port]);
@@ -1781,9 +1774,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
 	/* Configure Rx MAC FIFO */
 	skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
 	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
-	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
-	    hw->chip_rev >= CHIP_REV_YU_LITE_A3)
+
+	/* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
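+	/* (presumably the flush feature misbehaves on that revision) */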
+	if (is_yukon_lite_a0(hw))
 		reg &= ~GMF_RX_F_FL_ON;
+
 	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
 	skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
 	/*
@@ -2442,6 +2437,14 @@ static void yukon_set_multicast(struct net_device *dev)
 	gma_write16(hw, port, GM_RX_CTRL, reg);
 }
 
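+/* The MAC's receive status word carries the frame length in its upper
+ * bits; the shift differs between Genesis (XMAC) and Yukon (GMAC),
+ * hence the two *_FS_LEN_SHIFT constants.
+ */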
+static inline u16 phy_length(const struct skge_hw *hw, u32 status)
+{
+	if (hw->chip_id == CHIP_ID_GENESIS)
+		return status >> XMR_FS_LEN_SHIFT;
+	else
+		return status >> GMR_FS_LEN_SHIFT;
+}
+
 static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
 {
 	if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2451,80 +2454,99 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
 		(status & GMR_FS_RX_OK) == 0;
 }
 
-static void skge_rx_error(struct skge_port *skge, int slot,
-			  u32 control, u32 status)
-{
-	if (netif_msg_rx_err(skge))
-		printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
-		       skge->netdev->name, slot, control, status);
-
-	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
-		skge->net_stats.rx_length_errors++;
-	else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
-		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
-			skge->net_stats.rx_length_errors++;
-		if (status & XMR_FS_FRA_ERR)
-			skge->net_stats.rx_frame_errors++;
-		if (status & XMR_FS_FCS_ERR)
-			skge->net_stats.rx_crc_errors++;
-	} else {
-		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
-			skge->net_stats.rx_length_errors++;
-		if (status & GMR_FS_FRAGMENT)
-			skge->net_stats.rx_frame_errors++;
-		if (status & GMR_FS_CRC_ERR)
-			skge->net_stats.rx_crc_errors++;
-	}
-}
-
 
 /* Get receive buffer from descriptor.
  * Handles copy of small buffers and reallocation failures
  */
 static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
 					  struct skge_element *e,
-					  unsigned int len)
+					  u32 control, u32 status, u16 csum)
 {
-	struct sk_buff *nskb, *skb;
+	struct sk_buff *skb;
+	u16 len = control & BMU_BBC;
+
+	if (unlikely(netif_msg_rx_status(skge)))
+		printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
+		       skge->netdev->name, e - skge->rx_ring.start,
+		       status, len);
+
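+	/* Sanity checks: the frame must fit the posted buffer, occupy
+	 * exactly one descriptor (both start- and end-of-frame set),
+	 * pass the MAC status bits, and the MAC's idea of the length
+	 * must match the BMU byte count.  Failures are counted under
+	 * "error" below; either way the buffer is recycled.
+	 */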
+	if (len > skge->rx_buf_size)
+		goto error;
+
+	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
+		goto error;
+
+	if (bad_phy_status(skge->hw, status))
+		goto error;
+
+	if (phy_length(skge->hw, status) != len)
+		goto error;
 
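+	/* Copybreak: small frames are copied into a freshly allocated
+	 * skb so the large mapped receive buffer can be reposted as-is;
+	 * large frames are passed up whole and replaced by a new buffer.
+	 */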
 	if (len < RX_COPY_THRESHOLD) {
-		nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN);
-		if (unlikely(!nskb))
-			return NULL;
+		skb = dev_alloc_skb(len + 2);
+		if (!skb)
+			goto resubmit;
 
+		skb_reserve(skb, 2);
 		pci_dma_sync_single_for_cpu(skge->hw->pdev,
 					    pci_unmap_addr(e, mapaddr),
 					    len, PCI_DMA_FROMDEVICE);
-		memcpy(nskb->data, e->skb->data, len);
+		memcpy(skb->data, e->skb->data, len);
 		pci_dma_sync_single_for_device(skge->hw->pdev,
 					       pci_unmap_addr(e, mapaddr),
 					       len, PCI_DMA_FROMDEVICE);
-
-		if (skge->rx_csum) {
-			struct skge_rx_desc *rd = e->desc;
-			nskb->csum = le16_to_cpu(rd->csum2);
-			nskb->ip_summed = CHECKSUM_HW;
-		}
 		skge_rx_reuse(e, skge->rx_buf_size);
-		return nskb;
 	} else {
-		nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size);
-		if (unlikely(!nskb))
-			return NULL;
+		struct sk_buff *nskb;
+		nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
+		if (!nskb)
+			goto resubmit;
 
 		pci_unmap_single(skge->hw->pdev,
 				 pci_unmap_addr(e, mapaddr),
 				 pci_unmap_len(e, maplen),
 				 PCI_DMA_FROMDEVICE);
 		skb = e->skb;
-		if (skge->rx_csum) {
-			struct skge_rx_desc *rd = e->desc;
-			skb->csum = le16_to_cpu(rd->csum2);
-			skb->ip_summed = CHECKSUM_HW;
-		}
-
+		prefetch(skb->data);
 		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
-		return skb;
 	}
+
+	skb_put(skb, len);
+	skb->dev = skge->netdev;
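+	/* csum2 holds the hardware checksum over the received packet;
+	 * CHECKSUM_HW tells the stack it can use skb->csum instead of
+	 * recomputing it.
+	 */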
+	if (skge->rx_csum) {
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_HW;
+	}
+
+	skb->protocol = eth_type_trans(skb, skge->netdev);
+
+	return skb;
+error:
+
+	if (netif_msg_rx_err(skge))
+		printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
+		       skge->netdev->name, e - skge->rx_ring.start,
+		       control, status);
+
+	if (skge->hw->chip_id == CHIP_ID_GENESIS) {
+		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
+			skge->net_stats.rx_length_errors++;
+		if (status & XMR_FS_FRA_ERR)
+			skge->net_stats.rx_frame_errors++;
+		if (status & XMR_FS_FCS_ERR)
+			skge->net_stats.rx_crc_errors++;
+	} else {
+		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
+			skge->net_stats.rx_length_errors++;
+		if (status & GMR_FS_FRAGMENT)
+			skge->net_stats.rx_frame_errors++;
+		if (status & GMR_FS_CRC_ERR)
+			skge->net_stats.rx_crc_errors++;
+	}
+
+resubmit:
+	skge_rx_reuse(e, skge->rx_buf_size);
+	return NULL;
 }
 
 
@@ -2540,32 +2562,16 @@ static int skge_poll(struct net_device *dev, int *budget)
 	for (e = ring->to_clean; work_done < to_do; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
 		struct sk_buff *skb;
-		u32 control, len, status;
+		u32 control;
 
 		rmb();
 		control = rd->control;
 		if (control & BMU_OWN)
 			break;
 
-		len = control & BMU_BBC;
-		status = rd->status;
-
-		if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
-			     || bad_phy_status(hw, status))) {
-			skge_rx_error(skge, e - ring->start, control, status);
-			skge_rx_reuse(e, skge->rx_buf_size);
-			continue;
-		}
-
-		if (netif_msg_rx_status(skge))
-			printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
-			       dev->name, e - ring->start, rd->status, len);
-
-		skb = skge_rx_get(skge, e, len);
+		skb = skge_rx_get(skge, e, control, rd->status,
+				  le16_to_cpu(rd->csum2));
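+		/* skge_rx_get() returns NULL on any error or allocation
+		 * failure, recycling the receive buffer itself */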
 		if (likely(skb)) {
-			skb_put(skb, len);
-			skb->protocol = eth_type_trans(skb, dev);
-
 			dev->last_rx = jiffies;
 			netif_receive_skb(skb);
 