Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  110
1 file changed, 64 insertions(+), 46 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 79aa811c403c..286ecc0e6ab7 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -187,15 +187,14 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                              struct ixgbe_tx_buffer
                                              *tx_buffer_info)
 {
-        if (tx_buffer_info->dma) {
-                pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
-                               tx_buffer_info->length, PCI_DMA_TODEVICE);
-                tx_buffer_info->dma = 0;
-        }
+        tx_buffer_info->dma = 0;
         if (tx_buffer_info->skb) {
+                skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
+                              DMA_TO_DEVICE);
                 dev_kfree_skb_any(tx_buffer_info->skb);
                 tx_buffer_info->skb = NULL;
         }
+        tx_buffer_info->time_stamp = 0;
         /* tx_buffer_info must be completely set up in the transmit path */
 }
 
@@ -204,15 +203,11 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                        unsigned int eop)
 {
         struct ixgbe_hw *hw = &adapter->hw;
-        u32 head, tail;
 
         /* Detect a transmit hang in hardware, this serializes the
          * check with the clearing of time_stamp and movement of eop */
-        head = IXGBE_READ_REG(hw, tx_ring->head);
-        tail = IXGBE_READ_REG(hw, tx_ring->tail);
         adapter->detect_tx_hung = false;
-        if ((head != tail) &&
-            tx_ring->tx_buffer_info[eop].time_stamp &&
+        if (tx_ring->tx_buffer_info[eop].time_stamp &&
             time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
             !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
                 /* detected Tx unit hang */
@@ -227,7 +222,8 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                         " time_stamp <%lx>\n"
                         " jiffies <%lx>\n",
                         tx_ring->queue_index,
-                        head, tail,
+                        IXGBE_READ_REG(hw, tx_ring->head),
+                        IXGBE_READ_REG(hw, tx_ring->tail),
                         tx_ring->next_to_use, eop,
                         tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                 return true;
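Aside: the reworked hang check above no longer reads the head/tail registers on every poll; it keys off the eop descriptor's time_stamp (set in ixgbe_tx_map() and cleared once the buffer is unmapped) and only touches the registers when printing the hang message. A standalone sketch of that timestamp test, not taken from the driver (the struct and the numbers below are made up):

#include <stdbool.h>
#include <stdio.h>

struct desc_info {
        unsigned long time_stamp;       /* 0 once the descriptor is cleaned */
};

/* mirrors: time_stamp && time_after(jiffies, time_stamp + HZ) */
static bool desc_looks_stuck(const struct desc_info *d,
                             unsigned long now, unsigned long hz)
{
        return d->time_stamp && (long)(now - (d->time_stamp + hz)) > 0;
}

int main(void)
{
        struct desc_info eop = { .time_stamp = 1000 };

        printf("%d\n", desc_looks_stuck(&eop, 1050, 100)); /* 0: still fresh */
        printf("%d\n", desc_looks_stuck(&eop, 1200, 100)); /* 1: older than one HZ */
        eop.time_stamp = 0;                                /* already cleaned */
        printf("%d\n", desc_looks_stuck(&eop, 1200, 100)); /* 0: nothing pending */
        return 0;
}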
@@ -2934,6 +2930,7 @@ err_tx_ring_allocation:
  **/
 static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 {
+        struct ixgbe_hw *hw = &adapter->hw;
         int err = 0;
         int vector, v_budget;
 
@@ -2948,12 +2945,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 
         /*
          * At the same time, hardware can only support a maximum of
-         * MAX_MSIX_COUNT vectors. With features such as RSS and VMDq,
-         * we can easily reach upwards of 64 Rx descriptor queues and
-         * 32 Tx queues. Thus, we cap it off in those rare cases where
-         * the cpu count also exceeds our vector limit.
+         * hw.mac->max_msix_vectors vectors. With features
+         * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
+         * descriptor queues supported by our device. Thus, we cap it off in
+         * those rare cases where the cpu count also exceeds our vector limit.
          */
-        v_budget = min(v_budget, MAX_MSIX_COUNT);
+        v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
 
         /* A failure in MSI-X entry allocation isn't fatal, but it does
          * mean we disable MSI-X capabilities of the adapter. */
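For reference, the capping logic that now keys off the per-device MSI-X limit can be sketched as below; the numbers and the budget_vectors() helper are illustrative only, not the driver's code:

#include <stdio.h>

/* Sketch only: ask for one vector per Rx/Tx queue plus one for
 * link/other causes, then clamp to what the device's MSI-X table offers. */
static int budget_vectors(int rx_queues, int tx_queues, int max_msix_vectors)
{
        int v_budget = rx_queues + tx_queues + 1;

        if (v_budget > max_msix_vectors)
                v_budget = max_msix_vectors;
        return v_budget;
}

int main(void)
{
        /* e.g. 16 Rx + 16 Tx queues on a part exposing only 18 vectors */
        printf("v_budget = %d\n", budget_vectors(16, 16, 18)); /* prints 18 */
        return 0;
}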
@@ -3169,11 +3166,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #endif
 
         /* default flow control settings */
-        hw->fc.requested_mode = ixgbe_fc_none;
+        hw->fc.requested_mode = ixgbe_fc_full;
+        hw->fc.current_mode = ixgbe_fc_full;    /* init for ethtool output */
         hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
         hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
         hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
         hw->fc.send_xon = true;
+        hw->fc.disable_fc_autoneg = false;
 
         /* enable itr by default in dynamic mode */
         adapter->itr_setting = 1;
@@ -3489,10 +3488,10 @@ err_up:
         ixgbe_release_hw_control(adapter);
         ixgbe_free_irq(adapter);
 err_req_irq:
-        ixgbe_free_all_rx_resources(adapter);
 err_setup_rx:
-        ixgbe_free_all_tx_resources(adapter);
+        ixgbe_free_all_rx_resources(adapter);
 err_setup_tx:
+        ixgbe_free_all_tx_resources(adapter);
         ixgbe_reset(adapter);
 
         return err;
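The relabelled error path above follows the usual goto-unwind convention: each failure jumps to the label that releases only what had already been set up, in reverse order of acquisition. A minimal stand-alone illustration of that convention (the setup_*/free_* helpers are placeholders, not the driver's functions, and the free helpers are assumed to tolerate partially initialised state, as ixgbe_free_all_*_resources() does):

#include <stdio.h>

static int setup_tx(void)     { return 0; }
static int setup_rx(void)     { return 0; }
static int request_irq_(void) { return -1; }    /* pretend this step fails */
static void free_rx(void)     { puts("free rx"); }
static void free_tx(void)     { puts("free tx"); }

static int open_sketch(void)
{
        int err;

        err = setup_tx();
        if (err)
                goto err_setup_tx;
        err = setup_rx();
        if (err)
                goto err_setup_rx;
        err = request_irq_();
        if (err)
                goto err_req_irq;
        return 0;

err_req_irq:
err_setup_rx:
        free_rx();
err_setup_tx:
        free_tx();
        return err;
}

int main(void)
{
        return open_sketch() ? 1 : 0;
}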
@@ -4163,32 +4162,39 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                         struct sk_buff *skb, unsigned int first)
 {
         struct ixgbe_tx_buffer *tx_buffer_info;
-        unsigned int len = skb->len;
+        unsigned int len = skb_headlen(skb);
         unsigned int offset = 0, size, count = 0, i;
         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
         unsigned int f;
-
-        len -= skb->data_len;
+        dma_addr_t *map;
 
         i = tx_ring->next_to_use;
 
+        if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+                dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+                return 0;
+        }
+
+        map = skb_shinfo(skb)->dma_maps;
+
         while (len) {
                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
                 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                 tx_buffer_info->length = size;
-                tx_buffer_info->dma = pci_map_single(adapter->pdev,
-                                                     skb->data + offset,
-                                                     size, PCI_DMA_TODEVICE);
+                tx_buffer_info->dma = map[0] + offset;
                 tx_buffer_info->time_stamp = jiffies;
                 tx_buffer_info->next_to_watch = i;
 
                 len -= size;
                 offset += size;
                 count++;
-                i++;
-                if (i == tx_ring->count)
-                        i = 0;
+
+                if (len) {
+                        i++;
+                        if (i == tx_ring->count)
+                                i = 0;
+                }
         }
 
         for (f = 0; f < nr_frags; f++) {
@@ -4196,33 +4202,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
                 frag = &skb_shinfo(skb)->frags[f];
                 len = frag->size;
-                offset = frag->page_offset;
+                offset = 0;
 
                 while (len) {
+                        i++;
+                        if (i == tx_ring->count)
+                                i = 0;
+
                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
                         size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                         tx_buffer_info->length = size;
-                        tx_buffer_info->dma = pci_map_page(adapter->pdev,
-                                                           frag->page,
-                                                           offset,
-                                                           size,
-                                                           PCI_DMA_TODEVICE);
+                        tx_buffer_info->dma = map[f + 1] + offset;
                         tx_buffer_info->time_stamp = jiffies;
                         tx_buffer_info->next_to_watch = i;
 
                         len -= size;
                         offset += size;
                         count++;
-                        i++;
-                        if (i == tx_ring->count)
-                                i = 0;
                 }
         }
-        if (i == 0)
-                i = tx_ring->count - 1;
-        else
-                i = i - 1;
+
         tx_ring->tx_buffer_info[i].skb = skb;
         tx_ring->tx_buffer_info[first].next_to_watch = i;
 
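A note on the index bookkeeping in the rewritten ixgbe_tx_map(): the head loop now advances i only when more data remains, and the fragment loop advances it before writing each descriptor, so after the loops i already names the last descriptor used and the old "step back by one" fixup disappears. A stand-alone sketch of just that arithmetic (the ring size, segment lengths and the MAX_PER_DESC stand-in for IXGBE_MAX_DATA_PER_TXD are made up):

#include <stdio.h>

#define RING_SIZE       8       /* made-up descriptor ring size */
#define MAX_PER_DESC    4096u   /* stand-in for IXGBE_MAX_DATA_PER_TXD */

int main(void)
{
        unsigned int head_len = 9000;           /* linear part of the skb */
        unsigned int frag_len[] = { 3000 };     /* one paged fragment */
        unsigned int i = 6;                     /* next_to_use */
        unsigned int count = 0, len, size, f;

        /* head: advance i only if more data is left for another descriptor */
        len = head_len;
        while (len) {
                size = len < MAX_PER_DESC ? len : MAX_PER_DESC;
                printf("desc %u <- %u bytes (head)\n", i, size);
                len -= size;
                count++;
                if (len) {
                        i++;
                        if (i == RING_SIZE)
                                i = 0;
                }
        }

        /* fragments: advance i before writing each descriptor */
        for (f = 0; f < 1; f++) {
                len = frag_len[f];
                while (len) {
                        i++;
                        if (i == RING_SIZE)
                                i = 0;
                        size = len < MAX_PER_DESC ? len : MAX_PER_DESC;
                        printf("desc %u <- %u bytes (frag %u)\n", i, size, f);
                        len -= size;
                        count++;
                }
        }

        /* i already points at the last used slot; no "i - 1" fixup needed */
        printf("last descriptor = %u, count = %u\n", i, count);
        return 0;
}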
@@ -4388,13 +4388,19 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
             (skb->ip_summed == CHECKSUM_PARTIAL))
                 tx_flags |= IXGBE_TX_FLAGS_CSUM;
 
-        ixgbe_tx_queue(adapter, tx_ring, tx_flags,
-                       ixgbe_tx_map(adapter, tx_ring, skb, first),
-                       skb->len, hdr_len);
+        count = ixgbe_tx_map(adapter, tx_ring, skb, first);
 
-        netdev->trans_start = jiffies;
+        if (count) {
+                ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
+                               hdr_len);
+                netdev->trans_start = jiffies;
+                ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
-        ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+        } else {
+                dev_kfree_skb_any(skb);
+                tx_ring->tx_buffer_info[first].time_stamp = 0;
+                tx_ring->next_to_use = first;
+        }
 
         return NETDEV_TX_OK;
 }
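The transmit path now checks the count returned by ixgbe_tx_map(): zero means skb_dma_map() failed, so the skb is dropped and the ring's next_to_use is rewound to first rather than queueing descriptors that were never mapped. A minimal user-space sketch of that contract (the tx_ring struct and map_segments() helper are hypothetical stand-ins, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 64

struct tx_ring {
        unsigned int next_to_use;
};

/* Hypothetical stand-in for ixgbe_tx_map(): returns the number of
 * descriptors filled, or 0 if the DMA mapping step failed. */
static unsigned int map_segments(struct tx_ring *ring, bool dma_fails)
{
        if (dma_fails)
                return 0;
        ring->next_to_use = (ring->next_to_use + 3) % RING_SIZE;
        return 3;
}

static void xmit_sketch(struct tx_ring *ring, bool dma_fails)
{
        unsigned int first = ring->next_to_use;
        unsigned int count = map_segments(ring, dma_fails);

        if (count)
                printf("queued %u descriptors, next_to_use = %u\n",
                       count, ring->next_to_use);
        else
                ring->next_to_use = first;      /* rewind: nothing was queued */
}

int main(void)
{
        struct tx_ring ring = { .next_to_use = 10 };

        xmit_sketch(&ring, false);      /* queued 3 descriptors */
        xmit_sketch(&ring, true);       /* mapping "fails": ring is left alone */
        printf("after failed map, next_to_use = %u\n", ring.next_to_use);
        return 0;
}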
@@ -4987,8 +4993,20 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
 
         return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
 }
+
 #endif /* CONFIG_IXGBE_DCA */
+#ifdef DEBUG
+/**
+ * ixgbe_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+        struct ixgbe_adapter *adapter = hw->back;
+        return adapter->netdev->name;
+}
 
+#endif
 module_exit(ixgbe_exit_module);
 
 /* ixgbe_main.c */