author     Alexander Duyck <alexander.h.duyck@intel.com>  2009-10-27 11:52:07 -0400
committer  David S. Miller <davem@davemloft.net>          2009-10-28 04:20:28 -0400
commit     4c844851d15cc08d995179ab5118172711be6eb0 (patch)
tree       1dff1bd32ce280080730ccedf75846a34a6082e0 /drivers/net
parent     80785298aa5b6f2005a34afb97457ae7a65af270 (diff)
igb: move rx_buffer_len into the ring structure
This patch moves the rx_buffer_len value into the ring structure. This allows
greater flexibility, such as supporting packet split on only some queues, or
enabling virtualization.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
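In outline, the patch trades an adapter-wide field for a per-ring one. A
minimal sketch of the two layouts (surrounding members elided; see the full
hunks below for the real context):

    /* before: one buffer length shared by every RX queue */
    struct igb_adapter {
            ...
            u32 rx_buffer_len;
            ...
    };

    /* after: each RX ring carries its own buffer length */
    struct igb_ring {
            ...
            /* RX */
            struct {
                    struct igb_rx_queue_stats rx_stats;
                    u32 rx_buffer_len;
            };
            ...
    };

Every consumer of the value (SRRCTL setup, buffer allocation, unmap, and
header-length clamping) is switched from adapter->rx_buffer_len to the
ring's own copy.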
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/igb/igb.h       |  3
-rw-r--r--  drivers/net/igb/igb_main.c  | 41
2 files changed, 23 insertions, 21 deletions
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index de268620dd92..00ff274b16db 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -198,7 +198,7 @@ struct igb_ring {
 		/* RX */
 		struct {
 			struct igb_rx_queue_stats rx_stats;
-			u64 rx_queue_drops;
+			u32 rx_buffer_len;
 		};
 	};
 };
@@ -218,7 +218,6 @@ struct igb_adapter {
 	struct vlan_group *vlgrp;
 	u16 mng_vlan_id;
 	u32 bd_number;
-	u32 rx_buffer_len;
 	u32 wol;
 	u32 en_mng_pt;
 	u16 link_speed;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ff16b7ac0d1e..04e860d4e080 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -443,6 +443,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		ring->count = adapter->rx_ring_count;
 		ring->queue_index = i;
 		ring->pdev = adapter->pdev;
+		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 	}
 
 	igb_cache_ring_register(adapter);
@@ -1863,7 +1864,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 
 	adapter->tx_ring_count = IGB_DEFAULT_TXD;
 	adapter->rx_ring_count = IGB_DEFAULT_RXD;
-	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
@@ -2358,8 +2358,8 @@ static void igb_configure_rx_ring(struct igb_adapter *adapter,
 	writel(0, ring->tail);
 
 	/* set descriptor configuration */
-	if (adapter->rx_buffer_len < IGB_RXBUFFER_1024) {
-		srrctl = ALIGN(adapter->rx_buffer_len, 64) <<
+	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
+		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
 			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
 		srrctl |= IGB_RXBUFFER_16384 >>
@@ -2370,7 +2370,7 @@ static void igb_configure_rx_ring(struct igb_adapter *adapter,
 #endif
 		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
 	} else {
-		srrctl = ALIGN(adapter->rx_buffer_len, 1024) >>
+		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
 			 E1000_SRRCTL_BSIZEPKT_SHIFT;
 		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 	}
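For a sense of what these lines program (a worked example, assuming the usual
igb shift definitions of 2 for E1000_SRRCTL_BSIZEHDRSIZE_SHIFT and 10 for
E1000_SRRCTL_BSIZEPKT_SHIFT): in the header-split branch a 128-byte buffer
yields ALIGN(128, 64) << 2 = 0x200, i.e. a 128-byte header buffer expressed in
64-byte units; in the one-buffer branch the default 1522-byte length yields
ALIGN(1522, 1024) >> 10 = 2048 >> 10 = 2, i.e. a 2 KB packet buffer expressed
in 1 KB units. Only the ALIGN() operand changes from adapter->rx_buffer_len to
ring->rx_buffer_len; the register arithmetic is untouched.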
@@ -2619,7 +2619,6 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
 **/
 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
-	struct igb_adapter *adapter = rx_ring->q_vector->adapter;
 	struct igb_buffer *buffer_info;
 	unsigned long size;
 	unsigned int i;
@@ -2632,7 +2631,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 		if (buffer_info->dma) {
 			pci_unmap_single(rx_ring->pdev,
 					 buffer_info->dma,
-					 adapter->rx_buffer_len,
+					 rx_ring->rx_buffer_len,
 					 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
 		}
@@ -3746,6 +3745,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+	u32 rx_buffer_len, i;
 
 	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3763,9 +3763,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 
 	/* igb_down has a dependency on max_frame_size */
 	adapter->max_frame_size = max_frame;
-	if (netif_running(netdev))
-		igb_down(adapter);
-
 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
 	 * larger slab size.
@@ -3773,16 +3770,22 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	 */
 
 	if (max_frame <= IGB_RXBUFFER_1024)
-		adapter->rx_buffer_len = IGB_RXBUFFER_1024;
+		rx_buffer_len = IGB_RXBUFFER_1024;
 	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
-		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 	else
-		adapter->rx_buffer_len = IGB_RXBUFFER_128;
+		rx_buffer_len = IGB_RXBUFFER_128;
+
+	if (netif_running(netdev))
+		igb_down(adapter);
 
 	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
 		 netdev->mtu, new_mtu);
 	netdev->mtu = new_mtu;
 
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
+
 	if (netif_running(netdev))
 		igb_up(adapter);
 	else
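To ground the sizing above (a worked example; MAXIMUM_ETHERNET_VLAN_SIZE is
1522 bytes and IGB_RXBUFFER_1024/IGB_RXBUFFER_128 are 1024/128 bytes in the
igb headers of this era): the default MTU of 1500 gives max_frame = 1500 +
ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518, which falls into the middle bucket, so
every ring gets a 1522-byte buffer; a jumbo MTU such as 9000 gives max_frame =
9018, so rx_buffer_len drops to 128 and the rings run header-split with
payload landing in pages. Note also that igb_down() now runs after the new
length is computed, and the per-ring propagation happens before igb_up()
reconfigures the rings.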
@@ -4828,7 +4831,7 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
 	dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
 }
 
-static inline u16 igb_get_hlen(struct igb_adapter *adapter,
+static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
 			       union e1000_adv_rx_desc *rx_desc)
 {
 	/* HW will not DMA in data larger than the given buffer, even if it
@@ -4837,8 +4840,8 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
 	 */
 	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
 		   E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
-	if (hlen > adapter->rx_buffer_len)
-		hlen = adapter->rx_buffer_len;
+	if (hlen > rx_ring->rx_buffer_len)
+		hlen = rx_ring->rx_buffer_len;
 	return hlen;
 }
 
@@ -4888,14 +4891,14 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 
 		if (buffer_info->dma) {
 			pci_unmap_single(pdev, buffer_info->dma,
-					 adapter->rx_buffer_len,
+					 rx_ring->rx_buffer_len,
 					 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
-			if (adapter->rx_buffer_len >= IGB_RXBUFFER_1024) {
+			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
 				skb_put(skb, length);
 				goto send_up;
 			}
-			skb_put(skb, igb_get_hlen(adapter, rx_desc));
+			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
 		}
 
 		if (length) {
@@ -5034,7 +5037,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
 
-	bufsz = adapter->rx_buffer_len;
+	bufsz = rx_ring->rx_buffer_len;
 
 	while (cleaned_count--) {
 		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
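What the per-ring field buys, per the commit message, is selective per-queue
policy. A hypothetical illustration, not part of this patch (the constants are
the existing IGB_RXBUFFER_* sizes from igb.h):

    /* hypothetical: header split on queue 0 only, one-buffer on queue 1 */
    adapter->rx_ring[0].rx_buffer_len = IGB_RXBUFFER_128;  /* < 1024 selects HDR_SPLIT_ALWAYS */
    adapter->rx_ring[1].rx_buffer_len = IGB_RXBUFFER_2048; /* >= 1024 selects ADV_ONEBUF */

Since igb_configure_rx_ring() derives SRRCTL from ring->rx_buffer_len, each
queue's descriptor type now follows its own setting.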