Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/net/skge.c  265
 -rw-r--r--  drivers/net/skge.h    1
 2 files changed, 163 insertions, 103 deletions
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 9f24714260be..9d4ae78b6ad9 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -49,6 +49,8 @@
 #define DEFAULT_RX_RING_SIZE	512
 #define MAX_TX_RING_SIZE	1024
 #define MAX_RX_RING_SIZE	4096
+#define RX_COPY_THRESHOLD	128
+#define RX_BUF_SIZE		1536
 #define PHY_RETRIES		1000
 #define ETH_JUMBO_MTU		9000
 #define TX_WATCHDOG		(5 * HZ)
@@ -746,6 +748,7 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
 
 	for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
 		e->desc = d;
+		e->skb = NULL;
 		if (i == ring->count - 1) {
 			e->next = ring->start;
 			d->next_offset = base;
@@ -759,24 +762,23 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
 	return 0;
 }
 
-/* Setup buffer for receiving */
-static inline int skge_rx_alloc(struct skge_port *skge,
-				struct skge_element *e)
+static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
 {
-	unsigned long bufsize = skge->netdev->mtu + ETH_HLEN; /* VLAN? */
-	struct skge_rx_desc *rd = e->desc;
-	struct sk_buff *skb;
-	u64 map;
+	struct sk_buff *skb = dev_alloc_skb(size);
 
-	skb = dev_alloc_skb(bufsize + NET_IP_ALIGN);
-	if (unlikely(!skb)) {
-		printk(KERN_DEBUG PFX "%s: out of memory for receive\n",
-		       skge->netdev->name);
-		return -ENOMEM;
+	if (likely(skb)) {
+		skb->dev = dev;
+		skb_reserve(skb, NET_IP_ALIGN);
 	}
+	return skb;
+}
 
-	skb->dev = skge->netdev;
-	skb_reserve(skb, NET_IP_ALIGN);
+/* Allocate and setup a new buffer for receiving */
+static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+			  struct sk_buff *skb, unsigned int bufsize)
+{
+	struct skge_rx_desc *rd = e->desc;
+	u64 map;
 
 	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
 			     PCI_DMA_FROMDEVICE);
@@ -794,55 +796,69 @@ static inline int skge_rx_alloc(struct skge_port *skge,
 	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
 	pci_unmap_addr_set(e, mapaddr, map);
 	pci_unmap_len_set(e, maplen, bufsize);
-	return 0;
 }
 
-/* Free all unused buffers in receive ring, assumes receiver stopped */
+/* Resume receiving using existing skb,
+ * Note: DMA address is not changed by chip.
+ *	 MTU not changed while receiver active.
+ */
+static void skge_rx_reuse(struct skge_element *e, unsigned int size)
+{
+	struct skge_rx_desc *rd = e->desc;
+
+	rd->csum2 = 0;
+	rd->csum2_start = ETH_HLEN;
+
+	wmb();
+
+	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
+}
+
+
+/* Free all buffers in receive ring, assumes receiver stopped */
 static void skge_rx_clean(struct skge_port *skge)
 {
 	struct skge_hw *hw = skge->hw;
 	struct skge_ring *ring = &skge->rx_ring;
 	struct skge_element *e;
 
-	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
+	e = ring->start;
+	do {
 		struct skge_rx_desc *rd = e->desc;
 		rd->control = 0;
-
-		pci_unmap_single(hw->pdev,
-				 pci_unmap_addr(e, mapaddr),
-				 pci_unmap_len(e, maplen),
-				 PCI_DMA_FROMDEVICE);
-		dev_kfree_skb(e->skb);
-		e->skb = NULL;
-	}
-	ring->to_clean = e;
+		if (e->skb) {
+			pci_unmap_single(hw->pdev,
+					 pci_unmap_addr(e, mapaddr),
+					 pci_unmap_len(e, maplen),
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(e->skb);
+			e->skb = NULL;
+		}
+	} while ((e = e->next) != ring->start);
 }
 
+
 /* Allocate buffers for receive ring
- * For receive: to_use is refill location
- * to_clean is next received frame.
- *
- * if (to_use == to_clean)
- *	then ring all frames in ring need buffers
- * if (to_use->next == to_clean)
- *	then ring all frames in ring have buffers
+ * For receive: to_clean is next received frame.
  */
 static int skge_rx_fill(struct skge_port *skge)
 {
 	struct skge_ring *ring = &skge->rx_ring;
 	struct skge_element *e;
-	int ret = 0;
+	unsigned int bufsize = skge->rx_buf_size;
 
-	for (e = ring->to_use; e->next != ring->to_clean; e = e->next) {
-		if (skge_rx_alloc(skge, e)) {
-			ret = 1;
-			break;
-		}
+	e = ring->start;
+	do {
+		struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize);
 
-	}
-	ring->to_use = e;
+		if (!skb)
+			return -ENOMEM;
+
+		skge_rx_setup(skge, e, skb, bufsize);
+	} while ( (e = e->next) != ring->start);
 
-	return ret;
+	ring->to_clean = ring->start;
+	return 0;
 }
 
 static void skge_link_up(struct skge_port *skge)
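
The rework above splits the old all-in-one skge_rx_alloc() into three helpers: skge_rx_alloc() only allocates and aligns an skb, skge_rx_setup() maps it for DMA and hands the descriptor to the chip, and skge_rx_reuse() re-arms a descriptor whose buffer is being kept. skge_rx_clean() and skge_rx_fill() no longer track a to_use/to_clean pair; each makes exactly one pass around the circular ring. A minimal sketch of that one-pass walk, using simplified stand-in types rather than the driver's real structures:

/* One pass around a circular ring, as the new skge_rx_fill()/skge_rx_clean()
 * do: start at ring->start and stop once the walk wraps around.
 * (Stand-in types for illustration only.)
 */
struct elem {
	struct elem *next;	/* circular: the last element points back to the first */
	void *buf;
};

struct ring {
	struct elem *start;
};

static int ring_for_each(struct ring *ring, int (*fn)(struct elem *))
{
	struct elem *e = ring->start;

	do {
		int err = fn(e);	/* e.g. allocate-and-setup, or unmap-and-free */
		if (err)
			return err;	/* abort the fill on the first failure */
	} while ((e = e->next) != ring->start);

	return 0;
}
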
@@ -2048,6 +2064,12 @@ static int skge_up(struct net_device *dev)
 	if (netif_msg_ifup(skge))
 		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
 
+	if (dev->mtu > RX_BUF_SIZE)
+		skge->rx_buf_size = dev->mtu + ETH_HLEN + NET_IP_ALIGN;
+	else
+		skge->rx_buf_size = RX_BUF_SIZE;
+
+
 	rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
 	tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
 	skge->mem_size = tx_size + rx_size;
@@ -2060,7 +2082,8 @@ static int skge_up(struct net_device *dev)
 	if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
 		goto free_pci_mem;
 
-	if (skge_rx_fill(skge))
+	err = skge_rx_fill(skge);
+	if (err)
 		goto free_rx_ring;
 
 	if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
@@ -2284,6 +2307,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 
 static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
 {
+	/* This ring element can be skb or fragment */
 	if (e->skb) {
 		pci_unmap_single(hw->pdev,
 				 pci_unmap_addr(e, mapaddr),
@@ -2328,16 +2352,17 @@ static void skge_tx_timeout(struct net_device *dev)
 static int skge_change_mtu(struct net_device *dev, int new_mtu)
 {
 	int err = 0;
+	int running = netif_running(dev);
 
 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
 		return -EINVAL;
 
-	dev->mtu = new_mtu;
 
-	if (netif_running(dev)) {
+	if (running)
 		skge_down(dev);
+	dev->mtu = new_mtu;
+	if (running)
 		skge_up(dev);
-	}
 
 	return err;
 }
@@ -2436,28 +2461,76 @@ static void skge_rx_error(struct skge_port *skge, int slot,
 	printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
 	       skge->netdev->name, slot, control, status);
 
-	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
-	    || (control & BMU_BBC) > skge->netdev->mtu + VLAN_ETH_HLEN)
+	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
 		skge->net_stats.rx_length_errors++;
-	else {
-		if (skge->hw->chip_id == CHIP_ID_GENESIS) {
-			if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
-				skge->net_stats.rx_length_errors++;
-			if (status & XMR_FS_FRA_ERR)
-				skge->net_stats.rx_frame_errors++;
-			if (status & XMR_FS_FCS_ERR)
-				skge->net_stats.rx_crc_errors++;
-		} else {
-			if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
-				skge->net_stats.rx_length_errors++;
-			if (status & GMR_FS_FRAGMENT)
-				skge->net_stats.rx_frame_errors++;
-			if (status & GMR_FS_CRC_ERR)
-				skge->net_stats.rx_crc_errors++;
+	else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
+		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
+			skge->net_stats.rx_length_errors++;
+		if (status & XMR_FS_FRA_ERR)
+			skge->net_stats.rx_frame_errors++;
+		if (status & XMR_FS_FCS_ERR)
+			skge->net_stats.rx_crc_errors++;
+	} else {
+		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
+			skge->net_stats.rx_length_errors++;
+		if (status & GMR_FS_FRAGMENT)
+			skge->net_stats.rx_frame_errors++;
+		if (status & GMR_FS_CRC_ERR)
+			skge->net_stats.rx_crc_errors++;
+	}
+}
+
+/* Get receive buffer from descriptor.
+ * Handles copy of small buffers and reallocation failures
+ */
+static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
+					  struct skge_element *e,
+					  unsigned int len)
+{
+	struct sk_buff *nskb, *skb;
+
+	if (len < RX_COPY_THRESHOLD) {
+		nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN);
+		if (unlikely(!nskb))
+			return NULL;
+
+		pci_dma_sync_single_for_cpu(skge->hw->pdev,
+					    pci_unmap_addr(e, mapaddr),
+					    len, PCI_DMA_FROMDEVICE);
+		memcpy(nskb->data, e->skb->data, len);
+		pci_dma_sync_single_for_device(skge->hw->pdev,
+					       pci_unmap_addr(e, mapaddr),
+					       len, PCI_DMA_FROMDEVICE);
+
+		if (skge->rx_csum) {
+			struct skge_rx_desc *rd = e->desc;
+			nskb->csum = le16_to_cpu(rd->csum2);
+			nskb->ip_summed = CHECKSUM_HW;
+		}
+		skge_rx_reuse(e, skge->rx_buf_size);
+		return nskb;
+	} else {
+		nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size);
+		if (unlikely(!nskb))
+			return NULL;
+
+		pci_unmap_single(skge->hw->pdev,
+				 pci_unmap_addr(e, mapaddr),
+				 pci_unmap_len(e, maplen),
+				 PCI_DMA_FROMDEVICE);
+		skb = e->skb;
+		if (skge->rx_csum) {
+			struct skge_rx_desc *rd = e->desc;
+			skb->csum = le16_to_cpu(rd->csum2);
+			skb->ip_summed = CHECKSUM_HW;
 		}
+
+		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
+		return skb;
 	}
 }
 
+
 static int skge_poll(struct net_device *dev, int *budget)
 {
 	struct skge_port *skge = netdev_priv(dev);
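
The new skge_rx_get() is a copy-break receive path: a frame shorter than RX_COPY_THRESHOLD (128 bytes) is copied into a small, freshly allocated skb so the original full-sized buffer can be handed straight back to the hardware via skge_rx_reuse(), while a larger frame is passed up as-is and its slot is refilled with a new buffer via skge_rx_setup(). If either allocation fails, the frame is dropped and the old buffer reused, so the ring never runs empty. A sketch of that decision, with hypothetical helper names standing in for the driver's functions:

#include <string.h>	/* memcpy */

#define COPY_THRESHOLD	128	/* mirrors RX_COPY_THRESHOLD */

struct rx_slot {
	void *dma_buf;		/* full-sized buffer currently owned by the NIC */
};

/* Returns the buffer to pass up the stack, or NULL to drop the frame. */
static void *rx_harvest(struct rx_slot *slot, unsigned int len, unsigned int buf_size,
			void *(*alloc_buf)(unsigned int),
			void (*reuse)(struct rx_slot *),
			void (*setup)(struct rx_slot *, void *))
{
	if (len < COPY_THRESHOLD) {
		/* Small frame: copy it out, give the big buffer back to the NIC. */
		void *copy = alloc_buf(len);

		if (copy)
			memcpy(copy, slot->dma_buf, len);
		reuse(slot);
		return copy;
	} else {
		/* Large frame: hand it up only if a replacement buffer is available. */
		void *fresh = alloc_buf(buf_size);
		void *full = slot->dma_buf;

		if (!fresh) {
			reuse(slot);	/* drop the frame rather than empty the ring */
			return NULL;
		}
		setup(slot, fresh);
		return full;
	}
}

Copy-break trades a short memcpy on small frames for not having to unmap and reallocate a full-sized (possibly jumbo) buffer, which is the common case for interactive traffic.
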
@@ -2466,14 +2539,12 @@ static int skge_poll(struct net_device *dev, int *budget)
 	struct skge_element *e;
 	unsigned int to_do = min(dev->quota, *budget);
 	unsigned int work_done = 0;
-	int done;
 
 	pr_debug("skge_poll\n");
 
-	for (e = ring->to_clean; e != ring->to_use && work_done < to_do;
-	     e = e->next) {
+	for (e = ring->to_clean; work_done < to_do; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
-		struct sk_buff *skb = e->skb;
+		struct sk_buff *skb;
 		u32 control, len, status;
 
 		rmb();
@@ -2482,19 +2553,12 @@ static int skge_poll(struct net_device *dev, int *budget)
 			break;
 
 		len = control & BMU_BBC;
-		e->skb = NULL;
-
-		pci_unmap_single(hw->pdev,
-				 pci_unmap_addr(e, mapaddr),
-				 pci_unmap_len(e, maplen),
-				 PCI_DMA_FROMDEVICE);
-
 		status = rd->status;
-		if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
-		    || len > dev->mtu + VLAN_ETH_HLEN
-		    || bad_phy_status(hw, status)) {
+
+		if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
+			     || bad_phy_status(hw, status))) {
 			skge_rx_error(skge, e - ring->start, control, status);
-			dev_kfree_skb(skb);
+			skge_rx_reuse(e, skge->rx_buf_size);
 			continue;
 		}
 
@@ -2502,42 +2566,37 @@ static int skge_poll(struct net_device *dev, int *budget)
 			printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
 			       dev->name, e - ring->start, rd->status, len);
 
-		skb_put(skb, len);
-		skb->protocol = eth_type_trans(skb, dev);
+		skb = skge_rx_get(skge, e, len);
+		if (likely(skb)) {
+			skb_put(skb, len);
+			skb->protocol = eth_type_trans(skb, dev);
 
-		if (skge->rx_csum) {
-			skb->csum = le16_to_cpu(rd->csum2);
-			skb->ip_summed = CHECKSUM_HW;
-		}
-
-		dev->last_rx = jiffies;
-		netif_receive_skb(skb);
+			dev->last_rx = jiffies;
+			netif_receive_skb(skb);
 
-		++work_done;
+			++work_done;
+		} else
+			skge_rx_reuse(e, skge->rx_buf_size);
 	}
 	ring->to_clean = e;
 
-	*budget -= work_done;
-	dev->quota -= work_done;
-	done = work_done < to_do;
-
-	if (skge_rx_fill(skge))
-		done = 0;
-
 	/* restart receiver */
 	wmb();
 	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
 		    CSR_START | CSR_IRQ_CL_F);
 
-	if (done) {
-		local_irq_disable();
-		__netif_rx_complete(dev);
-		hw->intr_mask |= portirqmask[skge->port];
-		skge_write32(hw, B0_IMSK, hw->intr_mask);
-		local_irq_enable();
-	}
+	*budget -= work_done;
+	dev->quota -= work_done;
+
+	if (work_done >= to_do)
+		return 1; /* not done */
 
-	return !done;
+	local_irq_disable();
+	__netif_rx_complete(dev);
+	hw->intr_mask |= portirqmask[skge->port];
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	local_irq_enable();
+	return 0;
 }
 
 static inline void skge_tx_intr(struct net_device *dev)
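
The reworked skge_poll() above no longer unmaps every buffer or refills the ring in a second pass; skge_rx_get() and skge_rx_reuse() keep the ring full as frames are harvested, and the budget accounting moves after the receiver restart. Its return values follow the ->poll() contract of this kernel generation: return 1 while the quota was exhausted so the device is polled again, otherwise complete and re-enable the port interrupt and return 0. A generic skeleton of that contract, with hypothetical helpers (frame_available(), deliver_one_frame(), complete_and_unmask_irq()) rather than the skge code itself:

static int example_poll(struct net_device *dev, int *budget)
{
	unsigned int to_do = min(dev->quota, *budget);
	unsigned int work_done = 0;

	/* Consume at most to_do frames. */
	while (work_done < to_do && frame_available(dev)) {
		deliver_one_frame(dev);
		++work_done;
	}

	/* Charge the work against both counters. */
	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= to_do)
		return 1;	/* quota exhausted: poll again later */

	complete_and_unmask_irq(dev);	/* hypothetical: __netif_rx_complete + IRQ unmask */
	return 0;
}
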
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 37323cd29e7e..14d0cc01fb9a 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2503,6 +2503,7 @@ struct skge_port {
 	void		*mem;	/* PCI memory for rings */
 	dma_addr_t	dma;
 	unsigned long	mem_size;
+	unsigned int	rx_buf_size;
 
 	struct timer_list led_blink;
 };
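
The rx_buf_size field added above caches the receive buffer size chosen once in skge_up(): the fixed RX_BUF_SIZE (1536 bytes) covers a standard 1500-byte MTU plus the Ethernet header, while a jumbo MTU gets mtu + ETH_HLEN + NET_IP_ALIGN. A standalone worked example of that sizing, assuming the usual header values ETH_HLEN = 14 and NET_IP_ALIGN = 2:

#include <stdio.h>

#define ETH_HLEN	14
#define NET_IP_ALIGN	2
#define RX_BUF_SIZE	1536

static unsigned int rx_buf_size_for(unsigned int mtu)
{
	/* Same choice skge_up() makes from dev->mtu. */
	return mtu > RX_BUF_SIZE ? mtu + ETH_HLEN + NET_IP_ALIGN : RX_BUF_SIZE;
}

int main(void)
{
	printf("mtu 1500 -> %u bytes\n", rx_buf_size_for(1500));	/* 1536 */
	printf("mtu 9000 -> %u bytes\n", rx_buf_size_for(9000));	/* 9016 */
	return 0;
}
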