Diffstat (limited to 'drivers/net')
-rw-r--r--   drivers/net/ethernet/intel/igb/igb.h          |  4
-rw-r--r--   drivers/net/ethernet/intel/igb/igb_ethtool.c  |  6
-rw-r--r--   drivers/net/ethernet/intel/igb/igb_main.c     | 62
3 files changed, 36 insertions, 36 deletions
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 7036fd5aa34c..b1ca8ea385eb 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -374,10 +374,10 @@ extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
 extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
 extern void igb_setup_tctl(struct igb_adapter *);
 extern void igb_setup_rctl(struct igb_adapter *);
-extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
+extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
 extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
 					   struct igb_buffer *);
-extern void igb_alloc_rx_buffers_adv(struct igb_ring *, u16);
+extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
 extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
 extern bool igb_has_link(struct igb_adapter *adapter);
 extern void igb_set_ethtool_ops(struct net_device *);
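The two renamed prototypes above are the driver's externally visible descriptor-path entry points. As a purely illustrative sketch, not part of this patch, any leftover caller of the old "_adv" names could be bridged with thin inline shims around the renamed declarations:

/* Illustrative only, not part of this patch: shims that would let
 * stragglers still calling the old "_adv" names build against the
 * renamed declarations above. */
static inline netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
						  struct igb_ring *tx_ring)
{
	return igb_xmit_frame_ring(skb, tx_ring);
}

static inline void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, u16 count)
{
	igb_alloc_rx_buffers(rx_ring, count);
}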
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 04bc7a5ec0de..67eee0a137ad 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1382,7 +1382,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
 	igb_setup_rctl(adapter);
 	igb_configure_rx_ring(adapter, rx_ring);
 
-	igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
+	igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
 
 	return 0;
 
@@ -1622,7 +1622,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 	}
 
 	/* re-map buffers to ring, store next to clean values */
-	igb_alloc_rx_buffers_adv(rx_ring, count);
+	igb_alloc_rx_buffers(rx_ring, count);
 	rx_ring->next_to_clean = rx_ntc;
 	tx_ring->next_to_clean = tx_ntc;
 
@@ -1665,7 +1665,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
 	/* place 64 packets on the transmit queue*/
 	for (i = 0; i < 64; i++) {
 		skb_get(skb);
-		tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
+		tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
 		if (tx_ret_val == NETDEV_TX_OK)
 			good_cnt++;
 	}
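The last hunk above is the ethtool loopback self-test, which transmits a single skb 64 times. A minimal sketch of that call pattern under the new name, assuming skb and tx_ring are prepared as in igb_run_loopback_test(): skb_get() takes an extra reference on each pass, so the shared test buffer survives when transmit completion releases its reference.

/* Minimal sketch, assuming skb and tx_ring are set up as in
 * igb_run_loopback_test(). skb_get() bumps the refcount so the shared
 * test skb outlives each completion; successful sends are counted. */
for (i = 0; i < 64; i++) {
	skb_get(skb);			/* hold the skb across this send */
	if (igb_xmit_frame_ring(skb, tx_ring) == NETDEV_TX_OK)
		good_cnt++;
}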
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index dd85df0ed7f2..9a0cfd669f1b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -122,7 +122,7 @@ static void igb_set_rx_mode(struct net_device *);
 static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
+static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
 static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
 						 struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
@@ -138,7 +138,7 @@ static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
 static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int);
+static bool igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -1436,7 +1436,7 @@ static void igb_configure(struct igb_adapter *adapter)
 	 * next_to_use != next_to_clean */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = adapter->rx_ring[i];
-		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
+		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
 	}
 }
 
@@ -1784,7 +1784,7 @@ static int igb_set_features(struct net_device *netdev, u32 features)
 static const struct net_device_ops igb_netdev_ops = {
 	.ndo_open		= igb_open,
 	.ndo_stop		= igb_close,
-	.ndo_start_xmit		= igb_xmit_frame_adv,
+	.ndo_start_xmit		= igb_xmit_frame,
 	.ndo_get_stats64	= igb_get_stats64,
 	.ndo_set_rx_mode	= igb_set_rx_mode,
 	.ndo_set_mac_address	= igb_set_mac,
@@ -3955,8 +3955,8 @@ set_itr_now:
 #define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
 #define IGB_TX_FLAGS_VLAN_SHIFT	16
 
-static inline int igb_tso_adv(struct igb_ring *tx_ring,
-			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+static inline int igb_tso(struct igb_ring *tx_ring,
+			  struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
 	unsigned int i;
@@ -4035,8 +4035,8 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
 	return true;
 }
 
-static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
-				   struct sk_buff *skb, u32 tx_flags)
+static inline bool igb_tx_csum(struct igb_ring *tx_ring,
+			       struct sk_buff *skb, u32 tx_flags)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
 	struct device *dev = tx_ring->dev;
@@ -4120,8 +4120,8 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
 #define IGB_MAX_TXD_PWR	16
 #define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
 
-static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
-				 unsigned int first)
+static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
+			     unsigned int first)
 {
 	struct igb_buffer *buffer_info;
 	struct device *dev = tx_ring->dev;
@@ -4196,9 +4196,9 @@ dma_error:
 	return 0;
 }
 
-static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
-				    u32 tx_flags, int count, u32 paylen,
-				    u8 hdr_len)
+static inline void igb_tx_queue(struct igb_ring *tx_ring,
+				u32 tx_flags, int count, u32 paylen,
+				u8 hdr_len)
 {
 	union e1000_adv_tx_desc *tx_desc;
 	struct igb_buffer *buffer_info;
@@ -4296,8 +4296,8 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 	return __igb_maybe_stop_tx(tx_ring, size);
 }
 
-netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
-				    struct igb_ring *tx_ring)
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
+				struct igb_ring *tx_ring)
 {
 	int tso = 0, count;
 	u32 tx_flags = 0;
@@ -4329,7 +4329,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 
 	first = tx_ring->next_to_use;
 	if (skb_is_gso(skb)) {
-		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
+		tso = igb_tso(tx_ring, skb, tx_flags, &hdr_len);
 
 		if (tso < 0) {
 			dev_kfree_skb_any(skb);
@@ -4339,7 +4339,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 
 	if (tso)
 		tx_flags |= IGB_TX_FLAGS_TSO;
-	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
+	else if (igb_tx_csum(tx_ring, skb, tx_flags) &&
 		 (skb->ip_summed == CHECKSUM_PARTIAL))
 		tx_flags |= IGB_TX_FLAGS_CSUM;
 
@@ -4347,7 +4347,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	 * count reflects descriptors mapped, if 0 or less then mapping error
 	 * has occurred and we need to rewind the descriptor queue
 	 */
-	count = igb_tx_map_adv(tx_ring, skb, first);
+	count = igb_tx_map(tx_ring, skb, first);
 	if (!count) {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
@@ -4355,7 +4355,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
+	igb_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
 
 	/* Make sure there is space in the ring for the next send. */
 	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
@@ -4363,8 +4363,8 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
-				      struct net_device *netdev)
+static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
+				  struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct igb_ring *tx_ring;
@@ -4387,7 +4387,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
 	 * to a flow. Right now, performance is impacted slightly negatively
 	 * if using multiple tx queues. If the stack breaks away from a
 	 * single qdisc implementation, we can look at this again. */
-	return igb_xmit_frame_ring_adv(skb, tx_ring);
+	return igb_xmit_frame_ring(skb, tx_ring);
 }
 
 /**
@@ -5491,7 +5491,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
 		clean_complete = !!igb_clean_tx_irq(q_vector);
 
 	if (q_vector->rx_ring)
-		clean_complete &= igb_clean_rx_irq_adv(q_vector, budget);
+		clean_complete &= igb_clean_rx_irq(q_vector, budget);
 
 	/* If all work not completed, return budget and keep polling */
 	if (!clean_complete)
@@ -5670,8 +5670,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 	return count < tx_ring->count;
 }
 
-static inline void igb_rx_checksum_adv(struct igb_ring *ring,
-				       u32 status_err, struct sk_buff *skb)
+static inline void igb_rx_checksum(struct igb_ring *ring,
+				   u32 status_err, struct sk_buff *skb)
 {
 	skb_checksum_none_assert(skb);
 
@@ -5750,7 +5750,7 @@ static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
 	return hlen;
 }
 
-static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, int budget)
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 {
 	struct igb_ring *rx_ring = q_vector->rx_ring;
 	union e1000_adv_rx_desc *rx_desc;
@@ -5836,7 +5836,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, int budget)
 		total_bytes += skb->len;
 		total_packets++;
 
-		igb_rx_checksum_adv(rx_ring, staterr, skb);
+		igb_rx_checksum(rx_ring, staterr, skb);
 
 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
@@ -5855,7 +5855,7 @@ next_desc:
 		cleaned_count++;
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
-			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
+			igb_alloc_rx_buffers(rx_ring, cleaned_count);
 			cleaned_count = 0;
 		}
 
@@ -5873,7 +5873,7 @@ next_desc:
 	rx_ring->total_bytes += total_bytes;
 
 	if (cleaned_count)
-		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
+		igb_alloc_rx_buffers(rx_ring, cleaned_count);
 
 	return !!budget;
 }
@@ -5946,10 +5946,10 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 }
 
 /**
- * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
+ * igb_alloc_rx_buffers - Replace used receive buffers; packet split
  * @adapter: address of board private structure
  **/
-void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, u16 cleaned_count)
+void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 {
 	union e1000_adv_rx_desc *rx_desc;
 	struct igb_buffer *bi;
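Every igb_main.c call site touched above follows the same refill discipline for igb_alloc_rx_buffers(): replenish in batches during the clean loop and flush any remainder afterwards, keeping next_to_use != next_to_clean. A condensed sketch of that idiom; example_rx_refill is a hypothetical helper, not a function in the driver:

/* Hypothetical helper, not in the driver: the refill idiom from
 * igb_clean_rx_irq() above. Buffers go back to hardware in batches of
 * IGB_RX_BUFFER_WRITE because returning them one at a time is too slow;
 * the remainder is flushed once the poll loop is done. */
static void example_rx_refill(struct igb_ring *rx_ring, u16 *cleaned_count,
			      bool loop_done)
{
	if (*cleaned_count >= IGB_RX_BUFFER_WRITE ||
	    (loop_done && *cleaned_count)) {
		igb_alloc_rx_buffers(rx_ring, *cleaned_count);
		*cleaned_count = 0;
	}
}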