author	Stephen Hemminger <shemminger@osdl.org>	2005-09-19 18:37:16 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-09-21 22:32:50 -0400
commit	383181ac7e59542ff47e2b81f7e4c40aba39b30b (patch)
tree	a738880a7da8c987aa0c0b09af4966bbdfb951dd /drivers
parent	c3f8be961808313a502c67d59e2b7f930477faf3 (diff)
[PATCH] skge: check length from PHY
Clean up receive buffer allocation and management, add more error-handling
checks of the length reported by the PHY, and bump the driver version.

Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
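The substance of the patch is a sanity check: the frame length that the BMU
writes into the descriptor's control word must match the length the PHY
recorded in the receive status word, or the frame is dropped and counted as
an error. A minimal sketch of that cross-check, using the constants this
patch introduces (the helper name rx_length_ok is made up for illustration):

/* Illustrative only: cross-check the two hardware length fields.
 * 'control' is the BMU control word, 'status' the PHY receive status
 * word; the *_LEN_SHIFT constants are added by this patch.
 */
static int rx_length_ok(const struct skge_hw *hw, u32 control, u32 status)
{
	u16 len = control & BMU_BBC;		/* length per the BMU */
	u16 phy_len = (hw->chip_id == CHIP_ID_GENESIS)
		? status >> XMR_FS_LEN_SHIFT	/* Genesis/XMAC: bits 31..18 */
		: status >> GMR_FS_LEN_SHIFT;	/* Yukon/GMAC: bits 31..16 */

	return phy_len == len;			/* mismatch => drop the frame */
}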
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/skge.c	165
-rw-r--r--	drivers/net/skge.h	2
2 files changed, 85 insertions(+), 82 deletions(-)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 62e2ae0eb858..ae1996a3bc5c 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"1.0"
+#define DRV_VERSION		"1.1"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
@@ -762,17 +762,6 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
 	return 0;
 }
 
-static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
-{
-	struct sk_buff *skb = dev_alloc_skb(size);
-
-	if (likely(skb)) {
-		skb->dev = dev;
-		skb_reserve(skb, NET_IP_ALIGN);
-	}
-	return skb;
-}
-
 /* Allocate and setup a new buffer for receiving */
 static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
 			  struct sk_buff *skb, unsigned int bufsize)
@@ -845,16 +834,17 @@ static int skge_rx_fill(struct skge_port *skge)
 {
 	struct skge_ring *ring = &skge->rx_ring;
 	struct skge_element *e;
-	unsigned int bufsize = skge->rx_buf_size;
 
 	e = ring->start;
 	do {
-		struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize);
+		struct sk_buff *skb;
 
+		skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
 		if (!skb)
 			return -ENOMEM;
 
-		skge_rx_setup(skge, e, skb, bufsize);
+		skb_reserve(skb, NET_IP_ALIGN);
+		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
 	} while ( (e = e->next) != ring->start);
 
 	ring->to_clean = ring->start;
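With skge_rx_alloc() gone, the allocation is open-coded using the standard
NET_IP_ALIGN idiom: over-allocate by two bytes, then reserve them so the
14-byte Ethernet header leaves the IP header word-aligned. A generic sketch
of the idiom (the helper name alloc_aligned_rx_skb is illustrative):

/* Over-allocating and reserving NET_IP_ALIGN bytes (2 on most
 * architectures) offsets the 14-byte Ethernet header so the IP header
 * that follows it lands on a 4-byte boundary.
 */
static struct sk_buff *alloc_aligned_rx_skb(unsigned int bufsize)
{
	struct sk_buff *skb = dev_alloc_skb(bufsize + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}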
@@ -2429,6 +2419,14 @@ static void yukon_set_multicast(struct net_device *dev)
 	gma_write16(hw, port, GM_RX_CTRL, reg);
 }
 
+static inline u16 phy_length(const struct skge_hw *hw, u32 status)
+{
+	if (hw->chip_id == CHIP_ID_GENESIS)
+		return status >> XMR_FS_LEN_SHIFT;
+	else
+		return status >> GMR_FS_LEN_SHIFT;
+}
+
 static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
 {
 	if (hw->chip_id == CHIP_ID_GENESIS)
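phy_length() needs no mask because the length field occupies the uppermost
bits of the status word on both MAC families, so the right shift alone
isolates it. A standalone worked example under that assumption (user-space
C with a hypothetical status value; the shift constant is copied from the
patch):

#include <assert.h>
#include <stdint.h>

#define XMR_FS_LEN_SHIFT 18	/* Genesis: length in status bits 31..18 */

int main(void)
{
	/* Hypothetical Genesis status word carrying a 373-byte length. */
	uint32_t status = (uint32_t)373 << XMR_FS_LEN_SHIFT;	/* 0x05d40000 */

	/* Because the field sits in the top bits, a plain right shift
	 * recovers the length with no additional masking. */
	assert((uint16_t)(status >> XMR_FS_LEN_SHIFT) == 373);
	return 0;
}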
@@ -2438,80 +2436,99 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
 		(status & GMR_FS_RX_OK) == 0;
 }
 
-static void skge_rx_error(struct skge_port *skge, int slot,
-			  u32 control, u32 status)
-{
-	if (netif_msg_rx_err(skge))
-		printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
-		       skge->netdev->name, slot, control, status);
-
-	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
-		skge->net_stats.rx_length_errors++;
-	else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
-		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
-			skge->net_stats.rx_length_errors++;
-		if (status & XMR_FS_FRA_ERR)
-			skge->net_stats.rx_frame_errors++;
-		if (status & XMR_FS_FCS_ERR)
-			skge->net_stats.rx_crc_errors++;
-	} else {
-		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
-			skge->net_stats.rx_length_errors++;
-		if (status & GMR_FS_FRAGMENT)
-			skge->net_stats.rx_frame_errors++;
-		if (status & GMR_FS_CRC_ERR)
-			skge->net_stats.rx_crc_errors++;
-	}
-}
-
 /* Get receive buffer from descriptor.
  * Handles copy of small buffers and reallocation failures
  */
 static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
 					  struct skge_element *e,
-					  unsigned int len)
+					  u32 control, u32 status, u16 csum)
 {
-	struct sk_buff *nskb, *skb;
+	struct sk_buff *skb;
+	u16 len = control & BMU_BBC;
+
+	if (unlikely(netif_msg_rx_status(skge)))
+		printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
+		       skge->netdev->name, e - skge->rx_ring.start,
+		       status, len);
+
+	if (len > skge->rx_buf_size)
+		goto error;
+
+	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
+		goto error;
+
+	if (bad_phy_status(skge->hw, status))
+		goto error;
+
+	if (phy_length(skge->hw, status) != len)
+		goto error;
 
 	if (len < RX_COPY_THRESHOLD) {
-		nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN);
-		if (unlikely(!nskb))
-			return NULL;
+		skb = dev_alloc_skb(len + 2);
+		if (!skb)
+			goto resubmit;
 
+		skb_reserve(skb, 2);
 		pci_dma_sync_single_for_cpu(skge->hw->pdev,
 					    pci_unmap_addr(e, mapaddr),
 					    len, PCI_DMA_FROMDEVICE);
-		memcpy(nskb->data, e->skb->data, len);
+		memcpy(skb->data, e->skb->data, len);
 		pci_dma_sync_single_for_device(skge->hw->pdev,
 					       pci_unmap_addr(e, mapaddr),
 					       len, PCI_DMA_FROMDEVICE);
-
-		if (skge->rx_csum) {
-			struct skge_rx_desc *rd = e->desc;
-			nskb->csum = le16_to_cpu(rd->csum2);
-			nskb->ip_summed = CHECKSUM_HW;
-		}
 		skge_rx_reuse(e, skge->rx_buf_size);
-		return nskb;
 	} else {
-		nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size);
-		if (unlikely(!nskb))
-			return NULL;
+		struct sk_buff *nskb;
+		nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
+		if (!nskb)
+			goto resubmit;
 
 		pci_unmap_single(skge->hw->pdev,
 				 pci_unmap_addr(e, mapaddr),
 				 pci_unmap_len(e, maplen),
 				 PCI_DMA_FROMDEVICE);
 		skb = e->skb;
-		if (skge->rx_csum) {
-			struct skge_rx_desc *rd = e->desc;
-			skb->csum = le16_to_cpu(rd->csum2);
-			skb->ip_summed = CHECKSUM_HW;
-		}
-
+		prefetch(skb->data);
 		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
-		return skb;
 	}
+
+	skb_put(skb, len);
+	skb->dev = skge->netdev;
+	if (skge->rx_csum) {
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_HW;
+	}
+
+	skb->protocol = eth_type_trans(skb, skge->netdev);
+
+	return skb;
+error:
+
+	if (netif_msg_rx_err(skge))
+		printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
+		       skge->netdev->name, e - skge->rx_ring.start,
+		       control, status);
+
+	if (skge->hw->chip_id == CHIP_ID_GENESIS) {
+		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
+			skge->net_stats.rx_length_errors++;
+		if (status & XMR_FS_FRA_ERR)
+			skge->net_stats.rx_frame_errors++;
+		if (status & XMR_FS_FCS_ERR)
+			skge->net_stats.rx_crc_errors++;
+	} else {
+		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
+			skge->net_stats.rx_length_errors++;
+		if (status & GMR_FS_FRAGMENT)
+			skge->net_stats.rx_frame_errors++;
+		if (status & GMR_FS_CRC_ERR)
+			skge->net_stats.rx_crc_errors++;
+	}
+
+resubmit:
+	skge_rx_reuse(e, skge->rx_buf_size);
+	return NULL;
 }
 
 
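Two design points in the rewritten skge_rx_get() are worth noting. First, it
applies a copybreak policy: frames under RX_COPY_THRESHOLD are copied into a
right-sized skb so the full-size DMA buffer can stay in the ring, while
larger frames are handed up directly and a fresh buffer takes their slot.
Second, every failure path funnels through the resubmit: label, so the ring
slot is always refilled and the caller only has to test for NULL. A
condensed sketch of the copybreak decision (DMA sync and unmap calls elided;
names are the driver's own):

/* Condensed copybreak sketch; the real code also syncs/unmaps DMA. */
if (len < RX_COPY_THRESHOLD) {
	skb = dev_alloc_skb(len + 2);		/* small: copy out      */
	if (!skb)
		goto resubmit;
	skb_reserve(skb, 2);
	memcpy(skb->data, e->skb->data, len);
	skge_rx_reuse(e, skge->rx_buf_size);	/* keep the DMA buffer  */
} else {
	struct sk_buff *nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
	if (!nskb)
		goto resubmit;
	skb = e->skb;				/* large: hand buffer up */
	skge_rx_setup(skge, e, nskb, skge->rx_buf_size);  /* refill slot */
}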
@@ -2527,32 +2544,16 @@ static int skge_poll(struct net_device *dev, int *budget)
 	for (e = ring->to_clean; work_done < to_do; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
 		struct sk_buff *skb;
-		u32 control, len, status;
+		u32 control;
 
 		rmb();
 		control = rd->control;
 		if (control & BMU_OWN)
 			break;
 
-		len = control & BMU_BBC;
-		status = rd->status;
-
-		if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
-			     || bad_phy_status(hw, status))) {
-			skge_rx_error(skge, e - ring->start, control, status);
-			skge_rx_reuse(e, skge->rx_buf_size);
-			continue;
-		}
-
-		if (netif_msg_rx_status(skge))
-			printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
-			       dev->name, e - ring->start, rd->status, len);
-
-		skb = skge_rx_get(skge, e, len);
+		skb = skge_rx_get(skge, e, control, rd->status,
+				  le16_to_cpu(rd->csum2));
 		if (likely(skb)) {
-			skb_put(skb, len);
-			skb->protocol = eth_type_trans(skb, dev);
-
 			dev->last_rx = jiffies;
 			netif_receive_skb(skb);
 
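With all validation folded into skge_rx_get(), the poll loop is reduced to
the bare descriptor-ownership protocol: after a read barrier, a descriptor
whose control word still has BMU_OWN set belongs to the hardware and ends
the pass; anything else is either delivered or silently recycled by
skge_rx_get(). A skeleton of that consumer side (names from the driver;
quota accounting elided):

/* Ownership-bit poll skeleton: process completed descriptors until one
 * is still owned by the hardware. */
for (e = ring->to_clean; work_done < to_do; e = e->next) {
	struct skge_rx_desc *rd = e->desc;
	struct sk_buff *skb;
	u32 control;

	rmb();			/* read descriptor fields in order      */
	control = rd->control;
	if (control & BMU_OWN)	/* hardware still owns this slot        */
		break;

	skb = skge_rx_get(skge, e, control, rd->status,
			  le16_to_cpu(rd->csum2));
	if (skb)		/* NULL => dropped, slot already refilled */
		netif_receive_skb(skb);
}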
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index efbf98c675d2..72c175b87a5a 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -953,6 +953,7 @@ enum {
  */
 enum {
 	XMR_FS_LEN	= 0x3fff<<18,	/* Bit 31..18:	Rx Frame Length */
+	XMR_FS_LEN_SHIFT = 18,
 	XMR_FS_2L_VLAN	= 1<<17,	/* Bit 17:	tagged wh 2Lev VLAN ID*/
 	XMR_FS_1_VLAN	= 1<<16,	/* Bit 16:	tagged wh 1ev VLAN ID*/
 	XMR_FS_BC	= 1<<15,	/* Bit 15:	Broadcast Frame */
@@ -1868,6 +1869,7 @@ enum {
 /* Receive Frame Status Encoding */
 enum {
 	GMR_FS_LEN	= 0xffff<<16,	/* Bit 31..16:	Rx Frame Length */
+	GMR_FS_LEN_SHIFT = 16,
 	GMR_FS_VLAN	= 1<<13,	/* Bit 13:	VLAN Packet */
 	GMR_FS_JABBER	= 1<<12,	/* Bit 12:	Jabber Packet */
 	GMR_FS_UN_SIZE	= 1<<11,	/* Bit 11:	Undersize Packet */
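The new shift constants are, by construction, just the bit positions of the
length masks defined directly above them (XMR_FS_LEN = 0x3fff<<18,
GMR_FS_LEN = 0xffff<<16), so mask and shift stay consistent. A standalone
consistency check under that reading (user-space C; unsigned literals added
to avoid signed-shift overflow):

#include <assert.h>

#define XMR_FS_LEN	 (0x3fffu << 18)	/* Genesis length mask */
#define XMR_FS_LEN_SHIFT 18
#define GMR_FS_LEN	 (0xffffu << 16)	/* Yukon length mask   */
#define GMR_FS_LEN_SHIFT 16

int main(void)
{
	/* Shifting each mask down by its shift yields an all-ones field,
	 * confirming mask and shift describe the same bit range. */
	assert((XMR_FS_LEN >> XMR_FS_LEN_SHIFT) == 0x3fff);
	assert((GMR_FS_LEN >> GMR_FS_LEN_SHIFT) == 0xffff);
	return 0;
}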