author    Jeff Garzik <jgarzik@pobox.com>    2005-09-22 15:43:14 -0400
committer Jeff Garzik <jgarzik@pobox.com>    2005-09-22 15:43:14 -0400
commit    da192bb50cc9a7fe7d95e048422d51777bad0bba (patch)
tree      560986cb68a747de281f82ca605c7125ccc65c1c /drivers/net/skge.c
parent    af9288a707b609cdb1069cfe5bde0d6567c12c31 (diff)
parent    3fd07d3bf0077dcc0f5a33d2eb1938ea050da8da (diff)
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r--    drivers/net/skge.c    194
1 file changed, 91 insertions, 103 deletions
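The receive-path rework in this diff folds the old skge_rx_error()/skge_rx_alloc() helpers into a single skge_rx_get() that validates the descriptor and applies a copy-break policy: frames below RX_COPY_THRESHOLD are copied into a freshly allocated skb so the original DMA buffer can stay in the ring, while larger frames are handed up directly and a replacement buffer takes their slot. As a rough, self-contained sketch of that policy only (rx_copybreak(), COPY_THRESHOLD, RX_BUF_SIZE and struct rx_buf are illustrative placeholders, not the driver's API):

/* Sketch of the copy-break decision used by the reworked skge_rx_get():
 * small frames are copied out so the ring buffer is reused immediately;
 * large frames are passed up as-is and a fresh buffer replaces them.
 */
#include <stdlib.h>
#include <string.h>

#define COPY_THRESHOLD 128          /* placeholder, cf. RX_COPY_THRESHOLD */
#define RX_BUF_SIZE    1536         /* placeholder ring buffer size */

struct rx_buf {
        unsigned char *data;
        size_t len;
};

/* Returns the buffer to hand to the stack, or NULL if allocation failed,
 * in which case the ring slot is simply reused (cf. "goto resubmit"). */
static struct rx_buf *rx_copybreak(struct rx_buf *slot, size_t len)
{
        struct rx_buf *out = malloc(sizeof(*out));

        if (!out)
                return NULL;

        if (len < COPY_THRESHOLD) {
                /* small frame: copy it out, keep the original buffer queued */
                out->data = malloc(len);
                if (!out->data) {
                        free(out);
                        return NULL;
                }
                memcpy(out->data, slot->data, len);
        } else {
                /* large frame: hand the ring buffer up, install a replacement */
                unsigned char *replacement = malloc(RX_BUF_SIZE);

                if (!replacement) {
                        free(out);
                        return NULL;
                }
                out->data = slot->data;
                slot->data = replacement;
        }
        out->len = len;
        return out;
}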
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 7ce734ec6ba8..189203c95330 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"1.0"
+#define DRV_VERSION		"1.1"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
@@ -105,41 +105,28 @@ static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
 static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
 
-/* Don't need to look at whole 16K.
- * last interesting register is descriptor poll timer.
- */
-#define SKGE_REGS_LEN	(29*128)
-
 static int skge_get_regs_len(struct net_device *dev)
 {
-	return SKGE_REGS_LEN;
+	return 0x4000;
 }
 
 /*
- * Returns copy of control register region
- * I/O region is divided into banks and certain regions are unreadable
+ * Returns copy of whole control register region
+ * Note: skip RAM address register because accessing it will
+ *	 cause bus hangs!
  */
 static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
 {
 	const struct skge_port *skge = netdev_priv(dev);
-	unsigned long offs;
 	const void __iomem *io = skge->hw->regs;
-	static const unsigned long bankmap
-		= (1<<0) | (1<<2) | (1<<8) | (1<<9)
-		| (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16)
-		| (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23)
-		| (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28);
 
 	regs->version = 1;
-	for (offs = 0; offs < regs->len; offs += 128) {
-		u32 len = min_t(u32, 128, regs->len - offs);
+	memset(p, 0, regs->len);
+	memcpy_fromio(p, io, B3_RAM_ADDR);
 
-		if (bankmap & (1<<(offs/128)))
-			memcpy_fromio(p + offs, io + offs, len);
-		else
-			memset(p + offs, 0, len);
-	}
+	memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+		      regs->len - B3_RI_WTO_R1);
 }
 
 /* Wake on Lan only supported on Yukon chps with rev 1 or above */
@@ -776,17 +763,6 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
 	return 0;
 }
 
-static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
-{
-	struct sk_buff *skb = dev_alloc_skb(size);
-
-	if (likely(skb)) {
-		skb->dev = dev;
-		skb_reserve(skb, NET_IP_ALIGN);
-	}
-	return skb;
-}
-
 /* Allocate and setup a new buffer for receiving */
 static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
			  struct sk_buff *skb, unsigned int bufsize)
@@ -859,16 +835,17 @@ static int skge_rx_fill(struct skge_port *skge)
 {
 	struct skge_ring *ring = &skge->rx_ring;
 	struct skge_element *e;
-	unsigned int bufsize = skge->rx_buf_size;
 
 	e = ring->start;
 	do {
-		struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize);
+		struct sk_buff *skb;
 
+		skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
 		if (!skb)
 			return -ENOMEM;
 
-		skge_rx_setup(skge, e, skb, bufsize);
+		skb_reserve(skb, NET_IP_ALIGN);
+		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
 	} while ( (e = e->next) != ring->start);
 
 	ring->to_clean = ring->start;
@@ -2443,6 +2420,14 @@ static void yukon_set_multicast(struct net_device *dev)
 	gma_write16(hw, port, GM_RX_CTRL, reg);
 }
 
+static inline u16 phy_length(const struct skge_hw *hw, u32 status)
+{
+	if (hw->chip_id == CHIP_ID_GENESIS)
+		return status >> XMR_FS_LEN_SHIFT;
+	else
+		return status >> GMR_FS_LEN_SHIFT;
+}
+
 static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
 {
 	if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2452,80 +2437,99 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
 			(status & GMR_FS_RX_OK) == 0;
 }
 
-static void skge_rx_error(struct skge_port *skge, int slot,
-			  u32 control, u32 status)
-{
-	if (netif_msg_rx_err(skge))
-		printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
-		       skge->netdev->name, slot, control, status);
-
-	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
-		skge->net_stats.rx_length_errors++;
-	else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
-		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
-			skge->net_stats.rx_length_errors++;
-		if (status & XMR_FS_FRA_ERR)
-			skge->net_stats.rx_frame_errors++;
-		if (status & XMR_FS_FCS_ERR)
-			skge->net_stats.rx_crc_errors++;
-	} else {
-		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
-			skge->net_stats.rx_length_errors++;
-		if (status & GMR_FS_FRAGMENT)
-			skge->net_stats.rx_frame_errors++;
-		if (status & GMR_FS_CRC_ERR)
-			skge->net_stats.rx_crc_errors++;
-	}
-}
 
 /* Get receive buffer from descriptor.
  * Handles copy of small buffers and reallocation failures
  */
 static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
 					  struct skge_element *e,
-					  unsigned int len)
+					  u32 control, u32 status, u16 csum)
 {
-	struct sk_buff *nskb, *skb;
+	struct sk_buff *skb;
+	u16 len = control & BMU_BBC;
+
+	if (unlikely(netif_msg_rx_status(skge)))
+		printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
+		       skge->netdev->name, e - skge->rx_ring.start,
+		       status, len);
+
+	if (len > skge->rx_buf_size)
+		goto error;
+
+	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
+		goto error;
+
+	if (bad_phy_status(skge->hw, status))
+		goto error;
+
+	if (phy_length(skge->hw, status) != len)
+		goto error;
 
 	if (len < RX_COPY_THRESHOLD) {
-		nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN);
-		if (unlikely(!nskb))
-			return NULL;
+		skb = dev_alloc_skb(len + 2);
+		if (!skb)
+			goto resubmit;
 
+		skb_reserve(skb, 2);
 		pci_dma_sync_single_for_cpu(skge->hw->pdev,
 					    pci_unmap_addr(e, mapaddr),
 					    len, PCI_DMA_FROMDEVICE);
-		memcpy(nskb->data, e->skb->data, len);
+		memcpy(skb->data, e->skb->data, len);
 		pci_dma_sync_single_for_device(skge->hw->pdev,
 					       pci_unmap_addr(e, mapaddr),
 					       len, PCI_DMA_FROMDEVICE);
-
-		if (skge->rx_csum) {
-			struct skge_rx_desc *rd = e->desc;
-			nskb->csum = le16_to_cpu(rd->csum2);
-			nskb->ip_summed = CHECKSUM_HW;
-		}
 		skge_rx_reuse(e, skge->rx_buf_size);
-		return nskb;
 	} else {
-		nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size);
-		if (unlikely(!nskb))
-			return NULL;
+		struct sk_buff *nskb;
+		nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
+		if (!nskb)
+			goto resubmit;
 
 		pci_unmap_single(skge->hw->pdev,
 				 pci_unmap_addr(e, mapaddr),
 				 pci_unmap_len(e, maplen),
 				 PCI_DMA_FROMDEVICE);
 		skb = e->skb;
-		if (skge->rx_csum) {
-			struct skge_rx_desc *rd = e->desc;
-			skb->csum = le16_to_cpu(rd->csum2);
-			skb->ip_summed = CHECKSUM_HW;
-		}
-
+		prefetch(skb->data);
 		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
-		return skb;
 	}
+
+	skb_put(skb, len);
+	skb->dev = skge->netdev;
+	if (skge->rx_csum) {
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_HW;
+	}
+
+	skb->protocol = eth_type_trans(skb, skge->netdev);
+
+	return skb;
+error:
+
+	if (netif_msg_rx_err(skge))
+		printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
+		       skge->netdev->name, e - skge->rx_ring.start,
+		       control, status);
+
+	if (skge->hw->chip_id == CHIP_ID_GENESIS) {
+		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
+			skge->net_stats.rx_length_errors++;
+		if (status & XMR_FS_FRA_ERR)
+			skge->net_stats.rx_frame_errors++;
+		if (status & XMR_FS_FCS_ERR)
+			skge->net_stats.rx_crc_errors++;
+	} else {
+		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
+			skge->net_stats.rx_length_errors++;
+		if (status & GMR_FS_FRAGMENT)
+			skge->net_stats.rx_frame_errors++;
+		if (status & GMR_FS_CRC_ERR)
+			skge->net_stats.rx_crc_errors++;
+	}
+
+resubmit:
+	skge_rx_reuse(e, skge->rx_buf_size);
+	return NULL;
 }
 
 
@@ -2541,32 +2545,16 @@ static int skge_poll(struct net_device *dev, int *budget)
 	for (e = ring->to_clean; work_done < to_do; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
 		struct sk_buff *skb;
-		u32 control, len, status;
+		u32 control;
 
 		rmb();
 		control = rd->control;
 		if (control & BMU_OWN)
 			break;
 
-		len = control & BMU_BBC;
-		status = rd->status;
-
-		if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
-			     || bad_phy_status(hw, status))) {
-			skge_rx_error(skge, e - ring->start, control, status);
-			skge_rx_reuse(e, skge->rx_buf_size);
-			continue;
-		}
-
-		if (netif_msg_rx_status(skge))
-			printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
-			       dev->name, e - ring->start, rd->status, len);
-
-		skb = skge_rx_get(skge, e, len);
+		skb = skge_rx_get(skge, e, control, rd->status,
+				  le16_to_cpu(rd->csum2));
 		if (likely(skb)) {
-			skb_put(skb, len);
-			skb->protocol = eth_type_trans(skb, dev);
-
 			dev->last_rx = jiffies;
 			netif_receive_skb(skb);
 