Diffstat (limited to 'drivers/net/ixgbevf/ixgbevf_main.c')
-rw-r--r--   drivers/net/ixgbevf/ixgbevf_main.c   190
1 file changed, 82 insertions(+), 108 deletions(-)
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 918c00359b0a..28d3cb21d376 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -44,19 +44,22 @@
 #include <net/ip6_checksum.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/prefetch.h>
 
 #include "ixgbevf.h"
 
 char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
-        "Intel(R) 82599 Virtual Function";
+        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "1.0.0-k0"
+#define DRV_VERSION "2.0.0-k2"
 const char ixgbevf_driver_version[] = DRV_VERSION;
-static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+static char ixgbevf_copyright[] =
+        "Copyright (c) 2009 - 2010 Intel Corporation.";
 
 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
-        [board_82599_vf] = &ixgbevf_vf_info,
+        [board_82599_vf] = &ixgbevf_82599_vf_info,
+        [board_X540_vf] = &ixgbevf_X540_vf_info,
 };
 
 /* ixgbevf_pci_tbl - PCI Device ID Table
@@ -70,6 +73,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
 static struct pci_device_id ixgbevf_pci_tbl[] = {
         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
         board_82599_vf},
+        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
+        board_X540_vf},
 
         /* required last entry */
         {0, }
@@ -103,7 +108,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
 }
 
 /*
- * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
  * @adapter: pointer to adapter struct
  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
  * @queue: queue to map the corresponding interrupt to
@@ -158,42 +163,6 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
         /* tx_buffer_info must be completely set up in the transmit path */
 }
 
-static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
-                                         struct ixgbevf_ring *tx_ring,
-                                         unsigned int eop)
-{
-        struct ixgbe_hw *hw = &adapter->hw;
-        u32 head, tail;
-
-        /* Detect a transmit hang in hardware, this serializes the
-         * check with the clearing of time_stamp and movement of eop */
-        head = readl(hw->hw_addr + tx_ring->head);
-        tail = readl(hw->hw_addr + tx_ring->tail);
-        adapter->detect_tx_hung = false;
-        if ((head != tail) &&
-            tx_ring->tx_buffer_info[eop].time_stamp &&
-            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
-                /* detected Tx unit hang */
-                union ixgbe_adv_tx_desc *tx_desc;
-                tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
-                printk(KERN_ERR "Detected Tx Unit Hang\n"
-                       " Tx Queue <%d>\n"
-                       " TDH, TDT <%x>, <%x>\n"
-                       " next_to_use <%x>\n"
-                       " next_to_clean <%x>\n"
-                       "tx_buffer_info[next_to_clean]\n"
-                       " time_stamp <%lx>\n"
-                       " jiffies <%lx>\n",
-                       tx_ring->queue_index,
-                       head, tail,
-                       tx_ring->next_to_use, eop,
-                       tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
-                return true;
-        }
-
-        return false;
-}
-
 #define IXGBE_MAX_TXD_PWR 14
 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
 
@@ -289,16 +258,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
 #endif
         }
 
-        if (adapter->detect_tx_hung) {
-                if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
-                        /* schedule immediate reset if we believe we hung */
-                        printk(KERN_INFO
-                               "tx hang %d detected, resetting adapter\n",
-                               adapter->tx_timeout_count + 1);
-                        ixgbevf_tx_timeout(adapter->netdev);
-                }
-        }
-
         /* re-arm the interrupt */
         if ((count >= tx_ring->work_limit) &&
             (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
@@ -308,10 +267,10 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
         tx_ring->total_bytes += total_bytes;
         tx_ring->total_packets += total_packets;
 
-        adapter->net_stats.tx_bytes += total_bytes;
-        adapter->net_stats.tx_packets += total_packets;
+        netdev->stats.tx_bytes += total_bytes;
+        netdev->stats.tx_packets += total_packets;
 
-        return (count < tx_ring->work_limit);
+        return count < tx_ring->work_limit;
 }
 
 /**
@@ -330,7 +289,6 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
         struct ixgbevf_adapter *adapter = q_vector->adapter;
         bool is_vlan = (status & IXGBE_RXD_STAT_VP);
         u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
-        int ret;
 
         if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                 if (adapter->vlgrp && is_vlan)
@@ -341,9 +299,9 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
                         napi_gro_receive(&q_vector->napi, skb);
         } else {
                 if (adapter->vlgrp && is_vlan)
-                        ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+                        vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                 else
-                        ret = netif_rx(skb);
+                        netif_rx(skb);
         }
 }
 
@@ -356,7 +314,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
 static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
                                        u32 status_err, struct sk_buff *skb)
 {
-        skb->ip_summed = CHECKSUM_NONE;
+        skb_checksum_none_assert(skb);
 
         /* Rx csum disabled */
         if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
@@ -639,8 +597,8 @@ next_desc:
 
         rx_ring->total_packets += total_rx_packets;
         rx_ring->total_bytes += total_rx_bytes;
-        adapter->net_stats.rx_bytes += total_rx_bytes;
-        adapter->net_stats.rx_packets += total_rx_packets;
+        adapter->netdev->stats.rx_bytes += total_rx_bytes;
+        adapter->netdev->stats.rx_packets += total_rx_packets;
 
         return cleaned;
 }
@@ -1013,7 +971,7 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
 }
 
 /**
- * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
  * @irq: unused
  * @data: pointer to our q_vector struct for this interrupt vector
  **/
@@ -1495,7 +1453,7 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
 
         if (adapter->vlgrp) {
                 u16 vid;
-                for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+                for (vid = 0; vid < VLAN_N_VID; vid++) {
                         if (!vlan_group_get_device(adapter->vlgrp, vid))
                                 continue;
                         ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
@@ -1503,6 +1461,34 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
         }
 }
 
+static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
+{
+        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+        struct ixgbe_hw *hw = &adapter->hw;
+        int count = 0;
+
+        if ((netdev_uc_count(netdev)) > 10) {
+                printk(KERN_ERR "Too many unicast filters - No Space\n");
+                return -ENOSPC;
+        }
+
+        if (!netdev_uc_empty(netdev)) {
+                struct netdev_hw_addr *ha;
+                netdev_for_each_uc_addr(ha, netdev) {
+                        hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
+                        udelay(200);
+                }
+        } else {
+                /*
+                 * If the list is empty then send message to PF driver to
+                 * clear all macvlans on this VF.
+                 */
+                hw->mac.ops.set_uc_addr(hw, 0, NULL);
+        }
+
+        return count;
+}
+
 /**
  * ixgbevf_set_rx_mode - Multicast set
  * @netdev: network interface device structure
@@ -1519,6 +1505,8 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
         /* reprogram multicast list */
         if (hw->mac.ops.update_mc_addr_list)
                 hw->mac.ops.update_mc_addr_list(hw, netdev);
+
+        ixgbevf_write_uc_addr_list(netdev);
 }
 
 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1661,6 +1649,11 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
                 j = adapter->rx_ring[i].reg_idx;
                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
                 rxdctl |= IXGBE_RXDCTL_ENABLE;
+                if (hw->mac.type == ixgbe_mac_X540_vf) {
+                        rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+                        rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
+                                   IXGBE_RXDCTL_RLPML_EN);
+                }
                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
                 ixgbevf_rx_desc_queue_enable(adapter, i);
         }
@@ -1963,7 +1956,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
 }
 
 /*
- * ixgbe_set_num_queues: Allocate queues for device, feature dependant
+ * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
  * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine. The order here is very
@@ -2212,7 +2205,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 
         hw->vendor_id = pdev->vendor;
         hw->device_id = pdev->device;
-        pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+        hw->revision_id = pdev->revision;
         hw->subsystem_vendor_id = pdev->subsystem_vendor;
         hw->subsystem_device_id = pdev->subsystem_device;
 
@@ -2297,7 +2290,7 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
                          adapter->stats.vfmprc);
 
         /* Fill out the OS statistics structure */
-        adapter->net_stats.multicast = adapter->stats.vfmprc -
+        adapter->netdev->stats.multicast = adapter->stats.vfmprc -
                 adapter->stats.base_vfmprc;
 }
 
@@ -2406,9 +2399,6 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
                                    10 : 1);
                         netif_carrier_on(netdev);
                         netif_tx_wake_all_queues(netdev);
-                } else {
-                        /* Force detection of hung controller */
-                        adapter->detect_tx_hung = true;
                 }
         } else {
                 adapter->link_up = false;
@@ -2423,9 +2413,6 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
         ixgbevf_update_stats(adapter);
 
 pf_has_reset:
-        /* Force detection of hung controller every watchdog period */
-        adapter->detect_tx_hung = true;
-
         /* Reset the timer */
         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
                 mod_timer(&adapter->watchdog_timer,
@@ -2488,10 +2475,9 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
         int size;
 
         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-        tx_ring->tx_buffer_info = vmalloc(size);
+        tx_ring->tx_buffer_info = vzalloc(size);
         if (!tx_ring->tx_buffer_info)
                 goto err;
-        memset(tx_ring->tx_buffer_info, 0, size);
 
         /* round up to nearest 4K */
         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -2555,14 +2541,13 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
         int size;
 
         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
-        rx_ring->rx_buffer_info = vmalloc(size);
+        rx_ring->rx_buffer_info = vzalloc(size);
         if (!rx_ring->rx_buffer_info) {
                 hw_dbg(&adapter->hw,
                        "Unable to vmalloc buffer memory for "
                        "the receive descriptor ring\n");
                 goto alloc_failed;
         }
-        memset(rx_ring->rx_buffer_info, 0, size);
 
         /* Round up to nearest 4K */
         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
@@ -3134,7 +3119,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
         tx_ring = &adapter->tx_ring[r_idx];
 
-        if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+        if (vlan_tx_tag_present(skb)) {
                 tx_flags |= vlan_tx_tag_get(skb);
                 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
@@ -3181,21 +3166,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 }
 
 /**
- * ixgbevf_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
-{
-        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
-        /* only return the current stats */
-        return &adapter->net_stats;
-}
-
-/**
  * ixgbevf_set_mac - Change the Ethernet Address of the NIC
  * @netdev: network interface device structure
  * @p: pointer to an address structure
@@ -3230,10 +3200,16 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 {
         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+        struct ixgbe_hw *hw = &adapter->hw;
         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+        int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
+        u32 msg[2];
+
+        if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+                max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
 
         /* MTU < 68 is an error and causes problems on some kernels */
-        if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+        if ((new_mtu < 68) || (max_frame > max_possible_frame))
                 return -EINVAL;
 
         hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
@@ -3241,6 +3217,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
         /* must set new MTU before calling down or up */
         netdev->mtu = new_mtu;
 
+        msg[0] = IXGBE_VF_SET_LPE;
+        msg[1] = max_frame;
+        hw->mbx.ops.write_posted(hw, msg, 2);
+
         if (netif_running(netdev))
                 ixgbevf_reinit_locked(adapter);
 
@@ -3272,7 +3252,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
         .ndo_open = &ixgbevf_open,
         .ndo_stop = &ixgbevf_close,
         .ndo_start_xmit = &ixgbevf_xmit_frame,
-        .ndo_get_stats = &ixgbevf_get_stats,
         .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
         .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
         .ndo_validate_addr = eth_validate_addr,
@@ -3286,8 +3265,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
 {
-        struct ixgbevf_adapter *adapter;
-        adapter = netdev_priv(dev);
         dev->netdev_ops = &ixgbe_netdev_ops;
         ixgbevf_set_ethtool_ops(dev);
         dev->watchdog_timeo = 5 * HZ;
@@ -3426,7 +3403,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
         }
 
         init_timer(&adapter->watchdog_timer);
-        adapter->watchdog_timer.function = &ixgbevf_watchdog;
+        adapter->watchdog_timer.function = ixgbevf_watchdog;
         adapter->watchdog_timer.data = (unsigned long)adapter;
 
         INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
@@ -3440,10 +3417,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
         if (hw->mac.ops.get_bus_info)
                 hw->mac.ops.get_bus_info(hw);
 
-
-        netif_carrier_off(netdev);
-        netif_tx_stop_all_queues(netdev);
-
         strcpy(netdev->name, "eth%d");
 
         err = register_netdev(netdev);
@@ -3452,6 +3425,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 
         adapter->netdev_registered = true;
 
+        netif_carrier_off(netdev);
+
         ixgbevf_init_last_counter_stats(adapter);
 
         /* print the MAC address */
@@ -3503,10 +3478,9 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
 
         del_timer_sync(&adapter->watchdog_timer);
 
+        cancel_work_sync(&adapter->reset_task);
         cancel_work_sync(&adapter->watchdog_task);
 
-        flush_scheduled_work();
-
         if (adapter->netdev_registered) {
                 unregister_netdev(netdev);
                 adapter->netdev_registered = false;
@@ -3536,9 +3510,9 @@ static struct pci_driver ixgbevf_driver = {
 };
 
 /**
- * ixgbe_init_module - Driver Registration Routine
+ * ixgbevf_init_module - Driver Registration Routine
  *
- * ixgbe_init_module is the first routine called when the driver is
+ * ixgbevf_init_module is the first routine called when the driver is
  * loaded. All it does is register with the PCI subsystem.
  **/
 static int __init ixgbevf_init_module(void)
@@ -3556,9 +3530,9 @@ static int __init ixgbevf_init_module(void)
 module_init(ixgbevf_init_module);
 
 /**
- * ixgbe_exit_module - Driver Exit Cleanup Routine
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
  *
- * ixgbe_exit_module is called just before the driver is removed
+ * ixgbevf_exit_module is called just before the driver is removed
  * from memory.
  **/
 static void __exit ixgbevf_exit_module(void)
@@ -3568,7 +3542,7 @@ static void __exit ixgbevf_exit_module(void)
 
 #ifdef DEBUG
 /**
- * ixgbe_get_hw_dev_name - return device name string
+ * ixgbevf_get_hw_dev_name - return device name string
  * used by hardware layer to print debugging information
  **/
 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)