aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/intel/e1000e/netdev.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-02-10 23:01:30 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-10 23:01:30 -0500
commitc5ce28df0e7c01a1de23c36ebdefcd803f2b6cbb (patch)
tree9830baf38832769e1cf621708889111bbe3c93df /drivers/net/ethernet/intel/e1000e/netdev.c
parent29afc4e9a408f2304e09c6dd0dbcfbd2356d0faa (diff)
parent9399f0c51489ae8c16d6559b82a452fdc1895e91 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: 1) More iov_iter conversion work from Al Viro. [ The "crypto: switch af_alg_make_sg() to iov_iter" commit was wrong, and this pull actually adds an extra commit on top of the branch I'm pulling to fix that up, so that the pre-merge state is ok. - Linus ] 2) Various optimizations to the ipv4 forwarding information base trie lookup implementation. From Alexander Duyck. 3) Remove sock_iocb altogether, from Christoph Hellwig. 4) Allow congestion control algorithm selection via routing metrics. From Daniel Borkmann. 5) Make ipv4 uncached route list per-cpu, from Eric Dumazet. 6) Handle rfs hash collisions more gracefully, also from Eric Dumazet. 7) Add xmit_more support to r8169, e1000, and e1000e drivers. From Florian Westphal. 8) Transparent Ethernet Bridging support for GRO, from Jesse Gross. 9) Add BPF packet actions to packet scheduler, from Jiri Pirko. 10) Add support for unique flow IDs to openvswitch, from Joe Stringer. 11) New NetCP ethernet driver, from Muralidharan Karicheri and Wingman Kwok. 12) More sanely handle out-of-window dupacks, which can result in serious ACK storms. From Neal Cardwell. 13) Various rhashtable bug fixes and enhancements, from Herbert Xu, Patrick McHardy, and Thomas Graf. 14) Support xmit_more in be2net, from Sathya Perla. 15) Group Policy extensions for vxlan, from Thomas Graf. 16) Remove Checksum Offload support for vxlan, from Tom Herbert. 17) Like ipv4, support lockless transmit over ipv6 UDP sockets. From Vlad Yasevich. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1494+1 commits) crypto: fix af_alg_make_sg() conversion to iov_iter ipv4: Namespecify TCP PMTU mechanism i40e: Fix for stats init function call in Rx setup tcp: don't include Fast Open option in SYN-ACK on pure SYN-data openvswitch: Only set TUNNEL_VXLAN_OPT if VXLAN-GBP metadata is set ipv6: Make __ipv6_select_ident static ipv6: Fix fragment id assignment on LE arches. 
bridge: Fix inability to add non-vlan fdb entry net: Mellanox: Delete unnecessary checks before the function call "vunmap" cxgb4: Add support in cxgb4 to get expansion rom version via ethtool ethtool: rename reserved1 member in ethtool_drvinfo for expansion ROM version net: dsa: Remove redundant phy_attach() IB/mlx4: Reset flow support for IB kernel ULPs IB/mlx4: Always use the correct port for mirrored multicast attachments net/bonding: Fix potential bad memory access during bonding events tipc: remove tipc_snprintf tipc: nl compat add noop and remove legacy nl framework tipc: convert legacy nl stats show to nl compat tipc: convert legacy nl net id get to nl compat tipc: convert legacy nl net id set to nl compat ...
Diffstat (limited to 'drivers/net/ethernet/intel/e1000e/netdev.c')
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c41
1 files changed, 24 insertions, 17 deletions
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e14fd85f64eb..1e8c40fd5c3d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4189,7 +4189,7 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
4189 /* Setup hardware time stamping cyclecounter */ 4189 /* Setup hardware time stamping cyclecounter */
4190 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { 4190 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
4191 adapter->cc.read = e1000e_cyclecounter_read; 4191 adapter->cc.read = e1000e_cyclecounter_read;
4192 adapter->cc.mask = CLOCKSOURCE_MASK(64); 4192 adapter->cc.mask = CYCLECOUNTER_MASK(64);
4193 adapter->cc.mult = 1; 4193 adapter->cc.mult = 1;
4194 /* cc.shift set in e1000e_get_base_tininca() */ 4194 /* cc.shift set in e1000e_get_base_tininca() */
4195 4195
@@ -5444,16 +5444,6 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5444 wmb(); 5444 wmb();
5445 5445
5446 tx_ring->next_to_use = i; 5446 tx_ring->next_to_use = i;
5447
5448 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5449 e1000e_update_tdt_wa(tx_ring, i);
5450 else
5451 writel(i, tx_ring->tail);
5452
5453 /* we need this if more than one processor can write to our tail
5454 * at a time, it synchronizes IO on IA64/Altix systems
5455 */
5456 mmiowb();
5457} 5447}
5458 5448
5459#define MINIMUM_DHCP_PACKET_SIZE 282 5449#define MINIMUM_DHCP_PACKET_SIZE 282
@@ -5463,8 +5453,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5463 struct e1000_hw *hw = &adapter->hw; 5453 struct e1000_hw *hw = &adapter->hw;
5464 u16 length, offset; 5454 u16 length, offset;
5465 5455
5466 if (vlan_tx_tag_present(skb) && 5456 if (skb_vlan_tag_present(skb) &&
5467 !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 5457 !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
5468 (adapter->hw.mng_cookie.status & 5458 (adapter->hw.mng_cookie.status &
5469 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) 5459 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
5470 return 0; 5460 return 0;
@@ -5603,9 +5593,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5603 if (e1000_maybe_stop_tx(tx_ring, count + 2)) 5593 if (e1000_maybe_stop_tx(tx_ring, count + 2))
5604 return NETDEV_TX_BUSY; 5594 return NETDEV_TX_BUSY;
5605 5595
5606 if (vlan_tx_tag_present(skb)) { 5596 if (skb_vlan_tag_present(skb)) {
5607 tx_flags |= E1000_TX_FLAGS_VLAN; 5597 tx_flags |= E1000_TX_FLAGS_VLAN;
5608 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 5598 tx_flags |= (skb_vlan_tag_get(skb) <<
5599 E1000_TX_FLAGS_VLAN_SHIFT);
5609 } 5600 }
5610 5601
5611 first = tx_ring->next_to_use; 5602 first = tx_ring->next_to_use;
@@ -5635,8 +5626,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5635 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, 5626 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5636 nr_frags); 5627 nr_frags);
5637 if (count) { 5628 if (count) {
5638 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 5629 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5639 !adapter->tx_hwtstamp_skb)) { 5630 (adapter->flags & FLAG_HAS_HW_TIMESTAMP) &&
5631 !adapter->tx_hwtstamp_skb) {
5640 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 5632 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5641 tx_flags |= E1000_TX_FLAGS_HWTSTAMP; 5633 tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5642 adapter->tx_hwtstamp_skb = skb_get(skb); 5634 adapter->tx_hwtstamp_skb = skb_get(skb);
@@ -5653,6 +5645,21 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5653 (MAX_SKB_FRAGS * 5645 (MAX_SKB_FRAGS *
5654 DIV_ROUND_UP(PAGE_SIZE, 5646 DIV_ROUND_UP(PAGE_SIZE,
5655 adapter->tx_fifo_limit) + 2)); 5647 adapter->tx_fifo_limit) + 2));
5648
5649 if (!skb->xmit_more ||
5650 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
5651 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5652 e1000e_update_tdt_wa(tx_ring,
5653 tx_ring->next_to_use);
5654 else
5655 writel(tx_ring->next_to_use, tx_ring->tail);
5656
5657 /* we need this if more than one processor can write
5658 * to our tail at a time, it synchronizes IO on
5659 *IA64/Altix systems
5660 */
5661 mmiowb();
5662 }
5656 } else { 5663 } else {
5657 dev_kfree_skb_any(skb); 5664 dev_kfree_skb_any(skb);
5658 tx_ring->buffer_info[first].time_stamp = 0; 5665 tx_ring->buffer_info[first].time_stamp = 0;