Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	169
1 files changed, 124 insertions, 45 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 38ab4f3f819..a060610a42d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3024,6 +3024,36 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
 	}
 }
 
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+			    struct ixgbe_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+	if (hw->mac.type == ixgbe_mac_82598EB &&
+	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+		return;
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		udelay(10);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop) {
+		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+		      "the polling period\n", reg_idx);
+	}
+}
+
 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *ring)
 {
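
The new ixgbe_disable_rx_queue() is a bounded poll: clear RXDCTL.ENABLE, write it back, then re-read until the hardware reports the bit clear or the polling budget (10 polls x 10us, matching the "up to 100us" comment) runs out. Below is a minimal user-space sketch of that pattern; mock_read_rxdctl(), the bit value, and the poll count are illustrative stand-ins for IXGBE_READ_REG(), IXGBE_RXDCTL_ENABLE, and IXGBE_MAX_RX_DESC_POLL, not driver API.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define RXDCTL_ENABLE	(1u << 25)	/* stand-in for IXGBE_RXDCTL_ENABLE */
#define MAX_DESC_POLL	10		/* stand-in for IXGBE_MAX_RX_DESC_POLL */

static uint32_t regval = RXDCTL_ENABLE;

/* pretend the queue drains after a few reads */
static uint32_t mock_read_rxdctl(void)
{
	static int reads;

	if (++reads >= 3)
		regval &= ~RXDCTL_ENABLE;
	return regval;
}

int main(void)
{
	int wait_loop = MAX_DESC_POLL;
	uint32_t rxdctl;

	do {
		usleep(10);			/* udelay(10) analogue */
		rxdctl = mock_read_rxdctl();
	} while (--wait_loop && (rxdctl & RXDCTL_ENABLE));

	if (!wait_loop)
		fprintf(stderr, "enable bit did not clear in time\n");
	else
		printf("queue disabled after %d polls\n",
		       MAX_DESC_POLL - wait_loop);
	return 0;
}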
@@ -3034,9 +3064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 
 	/* disable queue to avoid issues while updating state */
 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
-			rxdctl & ~IXGBE_RXDCTL_ENABLE);
-	IXGBE_WRITE_FLUSH(hw);
+	ixgbe_disable_rx_queue(adapter, ring);
 
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -4064,7 +4092,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
-	IXGBE_WRITE_FLUSH(hw);
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		/* this call also flushes the previous write */
+		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
 	msleep(10);
 
 	netif_tx_stop_all_queues(netdev);
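
The "this call also flushes the previous write" comment leans on PCI posted-write semantics: a read from the device forces earlier posted writes to complete, and ixgbe_disable_rx_queue() begins with an RXDCTL read, so the standalone flush became redundant. For context (quoting the ixgbe headers of this era from memory), IXGBE_WRITE_FLUSH() is itself nothing more than a status-register read:

#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)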
@@ -4789,6 +4821,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 
 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+	if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
+			      IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+		e_err(probe,
+		      "Flow Director is not supported while multiple "
+		      "queues are disabled. Disabling Flow Director\n");
+	}
 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 	adapter->atr_sample_rate = 0;
@@ -5094,16 +5132,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
 		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
-		if (dev->features & NETIF_F_NTUPLE) {
-			/* Flow Director perfect filter enabled */
-			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-			adapter->atr_sample_rate = 0;
-			spin_lock_init(&adapter->fdir_perfect_lock);
-		} else {
-			/* Flow Director hash filters enabled */
-			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-			adapter->atr_sample_rate = 20;
-		}
+		/* n-tuple support exists, always init our spinlock */
+		spin_lock_init(&adapter->fdir_perfect_lock);
+		/* Flow Director hash filters enabled */
+		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->atr_sample_rate = 20;
 		adapter->ring_feature[RING_F_FDIR].indices =
 							 IXGBE_MAX_FDIR_INDICES;
 		adapter->fdir_pballoc = 0;
@@ -6474,38 +6507,92 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
 	writel(i, tx_ring->tail);
 }
 
-static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      u8 queue, u32 tx_flags, __be16 protocol)
+static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
+		      u32 tx_flags, __be16 protocol)
 {
-	struct ixgbe_atr_input atr_input;
-	struct iphdr *iph = ip_hdr(skb);
-	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	struct ixgbe_q_vector *q_vector = ring->q_vector;
+	union ixgbe_atr_hash_dword input = { .dword = 0 };
+	union ixgbe_atr_hash_dword common = { .dword = 0 };
+	union {
+		unsigned char *network;
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
 	struct tcphdr *th;
-	u16 vlan_id;
+	__be16 vlan_id;
 
-	/* Right now, we support IPv4 w/ TCP only */
-	if (protocol != htons(ETH_P_IP) ||
-	    iph->protocol != IPPROTO_TCP)
+	/* if ring doesn't have a interrupt vector, cannot perform ATR */
+	if (!q_vector)
 		return;
 
-	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+	/* do nothing if sampling is disabled */
+	if (!ring->atr_sample_rate)
+		return;
 
-	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
-		  IXGBE_TX_FLAGS_VLAN_SHIFT;
+	ring->atr_count++;
+
+	/* snag network header to get L4 type and address */
+	hdr.network = skb_network_header(skb);
+
+	/* Currently only IPv4/IPv6 with TCP is supported */
+	if ((protocol != __constant_htons(ETH_P_IPV6) ||
+	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+	    (protocol != __constant_htons(ETH_P_IP) ||
+	     hdr.ipv4->protocol != IPPROTO_TCP))
+		return;
 
 	th = tcp_hdr(skb);
 
-	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
-	ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
-	ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
-	ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
-	ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
-	/* src and dst are inverted, think how the receiver sees them */
-	ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
-	ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
+	/* skip this packet since the socket is closing */
+	if (th->fin)
+		return;
+
+	/* sample on all syn packets or once every atr sample count */
+	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
+		return;
+
+	/* reset sample count */
+	ring->atr_count = 0;
+
+	vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
+
+	/*
+	 * src and dst are inverted, think how the receiver sees them
+	 *
+	 * The input is broken into two sections, a non-compressed section
+	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
+	 * is XORed together and stored in the compressed dword.
+	 */
+	input.formatted.vlan_id = vlan_id;
+
+	/*
+	 * since src port and flex bytes occupy the same word XOR them together
+	 * and write the value to source port portion of compressed dword
+	 */
+	if (vlan_id)
+		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+	else
+		common.port.src ^= th->dest ^ protocol;
+	common.port.dst ^= th->source;
+
+	if (protocol == __constant_htons(ETH_P_IP)) {
+		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+	} else {
+		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+			     hdr.ipv6->saddr.s6_addr32[1] ^
+			     hdr.ipv6->saddr.s6_addr32[2] ^
+			     hdr.ipv6->saddr.s6_addr32[3] ^
+			     hdr.ipv6->daddr.s6_addr32[0] ^
+			     hdr.ipv6->daddr.s6_addr32[1] ^
+			     hdr.ipv6->daddr.s6_addr32[2] ^
+			     hdr.ipv6->daddr.s6_addr32[3];
+	}
 
 	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
-	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
+	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
+					      input, common, ring->queue_index);
 }
 
 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
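
The rewritten ixgbe_atr() replaces the old multi-call ixgbe_atr_set_*_82599() interface with two dwords: "input" carries the uncompressed vm_pool/vlan_id/flow_type fields, while "common" holds everything else XORed together. Here is a standalone worked example of that folding for an untagged IPv4/TCP flow; the port/ip locals are a simplified stand-in for the real union ixgbe_atr_hash_dword layout, for illustration only.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* sample flow: 192.168.0.1:12345 -> 192.168.0.2:80, no VLAN tag */
	uint32_t saddr = inet_addr("192.168.0.1");
	uint32_t daddr = inet_addr("192.168.0.2");
	uint16_t sport = htons(12345);
	uint16_t dport = htons(80);
	uint16_t protocol = htons(0x0800);	/* ETH_P_IP */

	struct {
		uint16_t src;	/* shares its word with the flex bytes */
		uint16_t dst;
	} port = { 0, 0 };
	uint32_t ip;

	/* src and dst are inverted: hash the flow as the receiver sees it */
	port.src = dport ^ protocol;	/* no vlan, so the ethertype is folded in */
	port.dst = sport;
	ip = saddr ^ daddr;

	printf("port dword: src=0x%04x dst=0x%04x\n", port.src, port.dst);
	printf("ip dword:   0x%08x\n", ip);
	return 0;
}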
@@ -6676,16 +6763,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
 	if (count) {
 		/* add the ATR filter if ATR is on */
-		if (tx_ring->atr_sample_rate) {
-			++tx_ring->atr_count;
-			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
-			    test_bit(__IXGBE_TX_FDIR_INIT_DONE,
-				     &tx_ring->state)) {
-				ixgbe_atr(adapter, skb, tx_ring->queue_index,
-					  tx_flags, protocol);
-				tx_ring->atr_count = 0;
-			}
-		}
+		if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+			ixgbe_atr(tx_ring, skb, tx_flags, protocol);
 		txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
 		txq->tx_bytes += skb->len;
 		txq->tx_packets++;
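
With this last hunk the transmit hot path only tests __IXGBE_TX_FDIR_INIT_DONE; the per-ring sampling policy now lives inside ixgbe_atr() itself: FINs are skipped, every SYN is sampled, and other segments are sampled once per atr_sample_rate packets. A plain C simulation of that policy follows; should_sample() is an illustrative helper, not driver code, and the count/FIN ordering is simplified relative to the driver.

#include <stdbool.h>
#include <stdio.h>

#define ATR_SAMPLE_RATE	20	/* matches the default set in ixgbe_sw_init() */

static unsigned int atr_count;

static bool should_sample(bool syn, bool fin)
{
	if (fin)		/* socket is closing, skip */
		return false;
	atr_count++;
	/* sample on all syn packets or once every atr sample count */
	if (!syn && atr_count < ATR_SAMPLE_RATE)
		return false;
	atr_count = 0;		/* reset after each sample */
	return true;
}

int main(void)
{
	int sampled = 0;
	int i;

	/* one SYN followed by 59 ordinary segments */
	sampled += should_sample(true, false);
	for (i = 0; i < 59; i++)
		sampled += should_sample(false, false);

	/* expect 3: the SYN plus the 20th and 40th follow-ups */
	printf("sampled %d of 60 packets\n", sampled);
	return 0;
}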