diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2010-10-11 07:17:47 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-10-16 14:13:18 -0400 |
commit | 6f0333b8fde44b8c04a53b2461504f0e8f1cebe6 (patch) | |
tree | 7a5f065c7f55e891760b6162723ec003b7fa922d /drivers | |
parent | 7662ff46b7b3678162ce125903115e4ab0607a2d (diff) |
r8169: use 50% less ram for RX ring
Using standard skb allocations in r8169 leads to order-3 allocations (if
PAGE_SIZE=4096): the NIC needs 16383 bytes, and skb overhead pushes the
total past 16384, so 32768 bytes end up being allocated per "skb".
Using kmalloc() permits us to reduce the memory requirements of one r8169
NIC by 4 Mbytes (256 frames * 16 Kbytes). This is fine since a hardware bug
requires us to copy incoming frames anyway, so we build a real skb while
performing that copy.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/r8169.c | 183 |
1 file changed, 64 insertions, 119 deletions
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index bc669a40ae96..1760533852a4 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -187,12 +187,7 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { | |||
187 | 187 | ||
188 | MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); | 188 | MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); |
189 | 189 | ||
190 | /* | 190 | static int rx_buf_sz = 16383; |
191 | * we set our copybreak very high so that we don't have | ||
192 | * to allocate 16k frames all the time (see note in | ||
193 | * rtl8169_open() | ||
194 | */ | ||
195 | static int rx_copybreak = 16383; | ||
196 | static int use_dac; | 191 | static int use_dac; |
197 | static struct { | 192 | static struct { |
198 | u32 msg_enable; | 193 | u32 msg_enable; |
@@ -484,10 +479,8 @@ struct rtl8169_private { | |||
484 | struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ | 479 | struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ |
485 | dma_addr_t TxPhyAddr; | 480 | dma_addr_t TxPhyAddr; |
486 | dma_addr_t RxPhyAddr; | 481 | dma_addr_t RxPhyAddr; |
487 | struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */ | 482 | void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ |
488 | struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ | 483 | struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ |
489 | unsigned align; | ||
490 | unsigned rx_buf_sz; | ||
491 | struct timer_list timer; | 484 | struct timer_list timer; |
492 | u16 cp_cmd; | 485 | u16 cp_cmd; |
493 | u16 intr_event; | 486 | u16 intr_event; |
@@ -515,8 +508,6 @@ struct rtl8169_private { | |||
515 | 508 | ||
516 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); | 509 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); |
517 | MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); | 510 | MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); |
518 | module_param(rx_copybreak, int, 0); | ||
519 | MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); | ||
520 | module_param(use_dac, int, 0); | 511 | module_param(use_dac, int, 0); |
521 | MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); | 512 | MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); |
522 | module_param_named(debug, debug.msg_enable, int, 0); | 513 | module_param_named(debug, debug.msg_enable, int, 0); |
@@ -3196,7 +3187,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3196 | dev->features |= NETIF_F_GRO; | 3187 | dev->features |= NETIF_F_GRO; |
3197 | 3188 | ||
3198 | tp->intr_mask = 0xffff; | 3189 | tp->intr_mask = 0xffff; |
3199 | tp->align = cfg->align; | ||
3200 | tp->hw_start = cfg->hw_start; | 3190 | tp->hw_start = cfg->hw_start; |
3201 | tp->intr_event = cfg->intr_event; | 3191 | tp->intr_event = cfg->intr_event; |
3202 | tp->napi_event = cfg->napi_event; | 3192 | tp->napi_event = cfg->napi_event; |
@@ -3266,18 +3256,6 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) | |||
3266 | pci_set_drvdata(pdev, NULL); | 3256 | pci_set_drvdata(pdev, NULL); |
3267 | } | 3257 | } |
3268 | 3258 | ||
3269 | static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, | ||
3270 | unsigned int mtu) | ||
3271 | { | ||
3272 | unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; | ||
3273 | |||
3274 | if (max_frame != 16383) | ||
3275 | printk(KERN_WARNING PFX "WARNING! Changing of MTU on this " | ||
3276 | "NIC may lead to frame reception errors!\n"); | ||
3277 | |||
3278 | tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; | ||
3279 | } | ||
3280 | |||
3281 | static int rtl8169_open(struct net_device *dev) | 3259 | static int rtl8169_open(struct net_device *dev) |
3282 | { | 3260 | { |
3283 | struct rtl8169_private *tp = netdev_priv(dev); | 3261 | struct rtl8169_private *tp = netdev_priv(dev); |
@@ -3287,18 +3265,6 @@ static int rtl8169_open(struct net_device *dev) | |||
3287 | pm_runtime_get_sync(&pdev->dev); | 3265 | pm_runtime_get_sync(&pdev->dev); |
3288 | 3266 | ||
3289 | /* | 3267 | /* |
3290 | * Note that we use a magic value here, its wierd I know | ||
3291 | * its done because, some subset of rtl8169 hardware suffers from | ||
3292 | * a problem in which frames received that are longer than | ||
3293 | * the size set in RxMaxSize register return garbage sizes | ||
3294 | * when received. To avoid this we need to turn off filtering, | ||
3295 | * which is done by setting a value of 16383 in the RxMaxSize register | ||
3296 | * and allocating 16k frames to handle the largest possible rx value | ||
3297 | * thats what the magic math below does. | ||
3298 | */ | ||
3299 | rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN); | ||
3300 | |||
3301 | /* | ||
3302 | * Rx and Tx desscriptors needs 256 bytes alignment. | 3268 | * Rx and Tx desscriptors needs 256 bytes alignment. |
3303 | * dma_alloc_coherent provides more. | 3269 | * dma_alloc_coherent provides more. |
3304 | */ | 3270 | */ |
@@ -3474,7 +3440,7 @@ static void rtl_hw_start_8169(struct net_device *dev) | |||
3474 | 3440 | ||
3475 | RTL_W8(EarlyTxThres, EarlyTxThld); | 3441 | RTL_W8(EarlyTxThres, EarlyTxThld); |
3476 | 3442 | ||
3477 | rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); | 3443 | rtl_set_rx_max_size(ioaddr, rx_buf_sz); |
3478 | 3444 | ||
3479 | if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || | 3445 | if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || |
3480 | (tp->mac_version == RTL_GIGA_MAC_VER_02) || | 3446 | (tp->mac_version == RTL_GIGA_MAC_VER_02) || |
@@ -3735,7 +3701,7 @@ static void rtl_hw_start_8168(struct net_device *dev) | |||
3735 | 3701 | ||
3736 | RTL_W8(EarlyTxThres, EarlyTxThld); | 3702 | RTL_W8(EarlyTxThres, EarlyTxThld); |
3737 | 3703 | ||
3738 | rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); | 3704 | rtl_set_rx_max_size(ioaddr, rx_buf_sz); |
3739 | 3705 | ||
3740 | tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; | 3706 | tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; |
3741 | 3707 | ||
@@ -3915,7 +3881,7 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3915 | 3881 | ||
3916 | RTL_W8(EarlyTxThres, EarlyTxThld); | 3882 | RTL_W8(EarlyTxThres, EarlyTxThld); |
3917 | 3883 | ||
3918 | rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); | 3884 | rtl_set_rx_max_size(ioaddr, rx_buf_sz); |
3919 | 3885 | ||
3920 | tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; | 3886 | tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; |
3921 | 3887 | ||
@@ -3956,8 +3922,6 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) | |||
3956 | 3922 | ||
3957 | rtl8169_down(dev); | 3923 | rtl8169_down(dev); |
3958 | 3924 | ||
3959 | rtl8169_set_rxbufsize(tp, dev->mtu); | ||
3960 | |||
3961 | ret = rtl8169_init_ring(dev); | 3925 | ret = rtl8169_init_ring(dev); |
3962 | if (ret < 0) | 3926 | if (ret < 0) |
3963 | goto out; | 3927 | goto out; |
@@ -3978,15 +3942,15 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) | |||
3978 | desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); | 3942 | desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); |
3979 | } | 3943 | } |
3980 | 3944 | ||
3981 | static void rtl8169_free_rx_skb(struct rtl8169_private *tp, | 3945 | static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, |
3982 | struct sk_buff **sk_buff, struct RxDesc *desc) | 3946 | void **data_buff, struct RxDesc *desc) |
3983 | { | 3947 | { |
3984 | struct pci_dev *pdev = tp->pci_dev; | 3948 | struct pci_dev *pdev = tp->pci_dev; |
3985 | 3949 | ||
3986 | dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz, | 3950 | dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), rx_buf_sz, |
3987 | PCI_DMA_FROMDEVICE); | 3951 | PCI_DMA_FROMDEVICE); |
3988 | dev_kfree_skb(*sk_buff); | 3952 | kfree(*data_buff); |
3989 | *sk_buff = NULL; | 3953 | *data_buff = NULL; |
3990 | rtl8169_make_unusable_by_asic(desc); | 3954 | rtl8169_make_unusable_by_asic(desc); |
3991 | } | 3955 | } |
3992 | 3956 | ||
@@ -4005,33 +3969,34 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, | |||
4005 | rtl8169_mark_to_asic(desc, rx_buf_sz); | 3969 | rtl8169_mark_to_asic(desc, rx_buf_sz); |
4006 | } | 3970 | } |
4007 | 3971 | ||
4008 | static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev, | 3972 | static inline void *rtl8169_align(void *data) |
3973 | { | ||
3974 | return (void *)ALIGN((long)data, 16); | ||
3975 | } | ||
3976 | |||
3977 | static struct sk_buff *rtl8169_alloc_rx_data(struct pci_dev *pdev, | ||
4009 | struct net_device *dev, | 3978 | struct net_device *dev, |
4010 | struct RxDesc *desc, int rx_buf_sz, | 3979 | struct RxDesc *desc) |
4011 | unsigned int align, gfp_t gfp) | ||
4012 | { | 3980 | { |
4013 | struct sk_buff *skb; | 3981 | void *data; |
4014 | dma_addr_t mapping; | 3982 | dma_addr_t mapping; |
4015 | unsigned int pad; | 3983 | int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1; |
4016 | 3984 | ||
4017 | pad = align ? align : NET_IP_ALIGN; | 3985 | data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); |
3986 | if (!data) | ||
3987 | return NULL; | ||
4018 | 3988 | ||
4019 | skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp); | 3989 | if (rtl8169_align(data) != data) { |
4020 | if (!skb) | 3990 | kfree(data); |
4021 | goto err_out; | 3991 | data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node); |
4022 | 3992 | if (!data) | |
4023 | skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad); | 3993 | return NULL; |
4024 | 3994 | } | |
4025 | mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz, | 3995 | mapping = dma_map_single(&pdev->dev, rtl8169_align(data), rx_buf_sz, |
4026 | PCI_DMA_FROMDEVICE); | 3996 | PCI_DMA_FROMDEVICE); |
4027 | 3997 | ||
4028 | rtl8169_map_to_asic(desc, mapping, rx_buf_sz); | 3998 | rtl8169_map_to_asic(desc, mapping, rx_buf_sz); |
4029 | out: | 3999 | return data; |
4030 | return skb; | ||
4031 | |||
4032 | err_out: | ||
4033 | rtl8169_make_unusable_by_asic(desc); | ||
4034 | goto out; | ||
4035 | } | 4000 | } |
4036 | 4001 | ||
4037 | static void rtl8169_rx_clear(struct rtl8169_private *tp) | 4002 | static void rtl8169_rx_clear(struct rtl8169_private *tp) |
@@ -4039,8 +4004,8 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp) | |||
4039 | unsigned int i; | 4004 | unsigned int i; |
4040 | 4005 | ||
4041 | for (i = 0; i < NUM_RX_DESC; i++) { | 4006 | for (i = 0; i < NUM_RX_DESC; i++) { |
4042 | if (tp->Rx_skbuff[i]) { | 4007 | if (tp->Rx_databuff[i]) { |
4043 | rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i, | 4008 | rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i, |
4044 | tp->RxDescArray + i); | 4009 | tp->RxDescArray + i); |
4045 | } | 4010 | } |
4046 | } | 4011 | } |
@@ -4052,21 +4017,21 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, | |||
4052 | u32 cur; | 4017 | u32 cur; |
4053 | 4018 | ||
4054 | for (cur = start; end - cur != 0; cur++) { | 4019 | for (cur = start; end - cur != 0; cur++) { |
4055 | struct sk_buff *skb; | 4020 | void *data; |
4056 | unsigned int i = cur % NUM_RX_DESC; | 4021 | unsigned int i = cur % NUM_RX_DESC; |
4057 | 4022 | ||
4058 | WARN_ON((s32)(end - cur) < 0); | 4023 | WARN_ON((s32)(end - cur) < 0); |
4059 | 4024 | ||
4060 | if (tp->Rx_skbuff[i]) | 4025 | if (tp->Rx_databuff[i]) |
4061 | continue; | 4026 | continue; |
4062 | 4027 | ||
4063 | skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev, | 4028 | data = rtl8169_alloc_rx_data(tp->pci_dev, dev, |
4064 | tp->RxDescArray + i, | 4029 | tp->RxDescArray + i); |
4065 | tp->rx_buf_sz, tp->align, gfp); | 4030 | if (!data) { |
4066 | if (!skb) | 4031 | rtl8169_make_unusable_by_asic(tp->RxDescArray + i); |
4067 | break; | 4032 | break; |
4068 | 4033 | } | |
4069 | tp->Rx_skbuff[i] = skb; | 4034 | tp->Rx_databuff[i] = data; |
4070 | } | 4035 | } |
4071 | return cur - start; | 4036 | return cur - start; |
4072 | } | 4037 | } |
@@ -4088,7 +4053,7 @@ static int rtl8169_init_ring(struct net_device *dev) | |||
4088 | rtl8169_init_ring_indexes(tp); | 4053 | rtl8169_init_ring_indexes(tp); |
4089 | 4054 | ||
4090 | memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); | 4055 | memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); |
4091 | memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); | 4056 | memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *)); |
4092 | 4057 | ||
4093 | if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC) | 4058 | if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC) |
4094 | goto err_out; | 4059 | goto err_out; |
@@ -4473,27 +4438,23 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) | |||
4473 | skb_checksum_none_assert(skb); | 4438 | skb_checksum_none_assert(skb); |
4474 | } | 4439 | } |
4475 | 4440 | ||
4476 | static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, | 4441 | static struct sk_buff *rtl8169_try_rx_copy(void *data, |
4477 | struct rtl8169_private *tp, int pkt_size, | 4442 | struct rtl8169_private *tp, |
4478 | dma_addr_t addr) | 4443 | int pkt_size, |
4444 | dma_addr_t addr) | ||
4479 | { | 4445 | { |
4480 | struct sk_buff *skb; | 4446 | struct sk_buff *skb; |
4481 | bool done = false; | ||
4482 | |||
4483 | if (pkt_size >= rx_copybreak) | ||
4484 | goto out; | ||
4485 | |||
4486 | skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); | ||
4487 | if (!skb) | ||
4488 | goto out; | ||
4489 | 4447 | ||
4448 | data = rtl8169_align(data); | ||
4490 | dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size, | 4449 | dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size, |
4491 | PCI_DMA_FROMDEVICE); | 4450 | PCI_DMA_FROMDEVICE); |
4492 | skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size); | 4451 | prefetch(data); |
4493 | *sk_buff = skb; | 4452 | skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); |
4494 | done = true; | 4453 | if (skb) |
4495 | out: | 4454 | memcpy(skb->data, data, pkt_size); |
4496 | return done; | 4455 | dma_sync_single_for_device(&tp->pci_dev->dev, addr, pkt_size, |
4456 | PCI_DMA_FROMDEVICE); | ||
4457 | return skb; | ||
4497 | } | 4458 | } |
4498 | 4459 | ||
4499 | /* | 4460 | /* |
@@ -4508,7 +4469,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, | |||
4508 | void __iomem *ioaddr, u32 budget) | 4469 | void __iomem *ioaddr, u32 budget) |
4509 | { | 4470 | { |
4510 | unsigned int cur_rx, rx_left; | 4471 | unsigned int cur_rx, rx_left; |
4511 | unsigned int delta, count; | 4472 | unsigned int count; |
4512 | int polling = (budget != ~(u32)0) ? 1 : 0; | 4473 | int polling = (budget != ~(u32)0) ? 1 : 0; |
4513 | 4474 | ||
4514 | cur_rx = tp->cur_rx; | 4475 | cur_rx = tp->cur_rx; |
@@ -4537,12 +4498,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev, | |||
4537 | rtl8169_schedule_work(dev, rtl8169_reset_task); | 4498 | rtl8169_schedule_work(dev, rtl8169_reset_task); |
4538 | dev->stats.rx_fifo_errors++; | 4499 | dev->stats.rx_fifo_errors++; |
4539 | } | 4500 | } |
4540 | rtl8169_mark_to_asic(desc, tp->rx_buf_sz); | 4501 | rtl8169_mark_to_asic(desc, rx_buf_sz); |
4541 | } else { | 4502 | } else { |
4542 | struct sk_buff *skb = tp->Rx_skbuff[entry]; | 4503 | struct sk_buff *skb; |
4543 | dma_addr_t addr = le64_to_cpu(desc->addr); | 4504 | dma_addr_t addr = le64_to_cpu(desc->addr); |
4544 | int pkt_size = (status & 0x00001FFF) - 4; | 4505 | int pkt_size = (status & 0x00001FFF) - 4; |
4545 | struct pci_dev *pdev = tp->pci_dev; | ||
4546 | 4506 | ||
4547 | /* | 4507 | /* |
4548 | * The driver does not support incoming fragmented | 4508 | * The driver does not support incoming fragmented |
@@ -4552,18 +4512,16 @@ static int rtl8169_rx_interrupt(struct net_device *dev, | |||
4552 | if (unlikely(rtl8169_fragmented_frame(status))) { | 4512 | if (unlikely(rtl8169_fragmented_frame(status))) { |
4553 | dev->stats.rx_dropped++; | 4513 | dev->stats.rx_dropped++; |
4554 | dev->stats.rx_length_errors++; | 4514 | dev->stats.rx_length_errors++; |
4555 | rtl8169_mark_to_asic(desc, tp->rx_buf_sz); | 4515 | rtl8169_mark_to_asic(desc, rx_buf_sz); |
4556 | continue; | 4516 | continue; |
4557 | } | 4517 | } |
4558 | 4518 | ||
4559 | if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) { | 4519 | skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], |
4560 | dma_sync_single_for_device(&pdev->dev, addr, | 4520 | tp, pkt_size, addr); |
4561 | pkt_size, PCI_DMA_FROMDEVICE); | 4521 | rtl8169_mark_to_asic(desc, rx_buf_sz); |
4562 | rtl8169_mark_to_asic(desc, tp->rx_buf_sz); | 4522 | if (!skb) { |
4563 | } else { | 4523 | dev->stats.rx_dropped++; |
4564 | dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz, | 4524 | continue; |
4565 | PCI_DMA_FROMDEVICE); | ||
4566 | tp->Rx_skbuff[entry] = NULL; | ||
4567 | } | 4525 | } |
4568 | 4526 | ||
4569 | rtl8169_rx_csum(skb, status); | 4527 | rtl8169_rx_csum(skb, status); |
@@ -4592,20 +4550,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, | |||
4592 | count = cur_rx - tp->cur_rx; | 4550 | count = cur_rx - tp->cur_rx; |
4593 | tp->cur_rx = cur_rx; | 4551 | tp->cur_rx = cur_rx; |
4594 | 4552 | ||
4595 | delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC); | 4553 | tp->dirty_rx += count; |
4596 | if (!delta && count) | ||
4597 | netif_info(tp, intr, dev, "no Rx buffer allocated\n"); | ||
4598 | tp->dirty_rx += delta; | ||
4599 | |||
4600 | /* | ||
4601 | * FIXME: until there is periodic timer to try and refill the ring, | ||
4602 | * a temporary shortage may definitely kill the Rx process. | ||
4603 | * - disable the asic to try and avoid an overflow and kick it again | ||
4604 | * after refill ? | ||
4605 | * - how do others driver handle this condition (Uh oh...). | ||
4606 | */ | ||
4607 | if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) | ||
4608 | netif_emerg(tp, intr, dev, "Rx buffers exhausted\n"); | ||
4609 | 4554 | ||
4610 | return count; | 4555 | return count; |
4611 | } | 4556 | } |