Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--	drivers/net/ethernet/intel/e100.c			36
-rw-r--r--	drivers/net/ethernet/intel/igb/igb.h			8
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c		110
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c		6
4 files changed, 35 insertions, 125 deletions
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ec800b093e7e..d2bea3f07c73 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -870,7 +870,7 @@ err_unlock:
 }
 
 static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
-	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+	int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
 {
 	struct cb *cb;
 	unsigned long flags;
@@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
 	nic->cbs_avail--;
 	cb->skb = skb;
 
+	err = cb_prepare(nic, cb, skb);
+	if (err)
+		goto err_unlock;
+
 	if (unlikely(!nic->cbs_avail))
 		err = -ENOSPC;
 
-	cb_prepare(nic, cb, skb);
 
 	/* Order is important otherwise we'll be in a race with h/w:
 	 * set S-bit in current first, then clear S-bit in previous. */
@@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic)
 	nic->mii.mdio_write = mdio_write;
 }
 
-static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
 	struct config *config = &cb->u.config;
 	u8 *c = (u8 *)config;
@@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
 		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
 		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+	return 0;
 }
 
 /*************************************************************************
@@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
 	return fw;
 }
 
-static void e100_setup_ucode(struct nic *nic, struct cb *cb,
+static int e100_setup_ucode(struct nic *nic, struct cb *cb,
 			     struct sk_buff *skb)
 {
 	const struct firmware *fw = (void *)skb;
@@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb,
 	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
 
 	cb->command = cpu_to_le16(cb_ucode | cb_el);
+	return 0;
 }
 
 static inline int e100_load_ucode_wait(struct nic *nic)
@@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic)
 	return err;
 }
 
-static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
+static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
 			      struct sk_buff *skb)
 {
 	cb->command = cpu_to_le16(cb_iaaddr);
 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
+	return 0;
 }
 
-static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
 	cb->command = cpu_to_le16(cb_dump);
 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
 		offsetof(struct mem, dump_buf));
+	return 0;
 }
 
 static int e100_phy_check_without_mii(struct nic *nic)
@@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic)
 	return 0;
 }
 
-static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
 	struct net_device *netdev = nic->netdev;
 	struct netdev_hw_addr *ha;
@@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
 			ETH_ALEN);
 	}
+	return 0;
 }
 
 static void e100_set_multicast_list(struct net_device *netdev)
@@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data)
 		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
 }
 
-static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
+static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
 	struct sk_buff *skb)
 {
+	dma_addr_t dma_addr;
 	cb->command = nic->tx_command;
 
+	dma_addr = pci_map_single(nic->pdev,
+				  skb->data, skb->len, PCI_DMA_TODEVICE);
+	/* If we can't map the skb, have the upper layer try later */
+	if (pci_dma_mapping_error(nic->pdev, dma_addr))
+		return -ENOMEM;
+
 	/*
 	 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
 	 * testing, ie sending frames with bad CRC.
@@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
 	cb->u.tcb.tcb_byte_count = 0;
 	cb->u.tcb.threshold = nic->tx_threshold;
 	cb->u.tcb.tbd_count = 1;
-	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
-		skb->data, skb->len, PCI_DMA_TODEVICE));
-	/* check for mapping failure? */
+	cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
 	skb_tx_timestamp(skb);
+	return 0;
 }
 
 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
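
Note on the e100.c hunks above: every cb_prepare-style callback (e100_configure, e100_setup_ucode, e100_setup_iaaddr, e100_dump, e100_multi, e100_xmit_prepare) changes from void to int so that e100_xmit_prepare() can map the skb up front with pci_map_single(), report a mapping failure as -ENOMEM, and let e100_exec_cb() unwind through err_unlock before the command block is armed. Below is a minimal, standalone sketch of that error-propagation pattern; struct buf, xmit_prepare and exec_cb are illustrative stand-ins, not the driver's own code.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct nic / struct cb / struct sk_buff. */
struct cb  { int armed; };
struct buf { int map_should_fail; };

/* The prepare callback now returns an error code instead of void. */
static int xmit_prepare(struct cb *cb, struct buf *b)
{
	if (b->map_should_fail)	/* models pci_dma_mapping_error() */
		return -ENOMEM;	/* tell the caller to retry later */
	cb->armed = 1;
	return 0;
}

/* Mirrors e100_exec_cb(): call prepare first and bail out on error,
 * before the command block would be handed to the hardware. */
static int exec_cb(struct cb *cb, struct buf *b,
		   int (*prepare)(struct cb *, struct buf *))
{
	int err = prepare(cb, b);
	if (err)
		return err;	/* nothing armed; safe to retry */
	/* ... set S-bit in current, clear S-bit in previous, etc. ... */
	return 0;
}

int main(void)
{
	struct cb cb = { 0 };
	struct buf good = { 0 }, bad = { 1 };

	printf("good: %d\n", exec_cb(&cb, &good, xmit_prepare));	/* 0 */
	printf("bad:  %d\n", exec_cb(&cb, &bad, xmit_prepare));	/* -ENOMEM */
	return 0;
}

Compiled as an ordinary user-space program, it prints 0 for the buffer that maps and -ENOMEM for the one that does not, mirroring how the upper layer is now asked to try the transmit again later.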
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 25151401c2ab..ab577a763a20 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -284,18 +284,10 @@ struct igb_q_vector {
 enum e1000_ring_flags_t {
 	IGB_RING_FLAG_RX_SCTP_CSUM,
 	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
-	IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
 	IGB_RING_FLAG_TX_CTX_IDX,
 	IGB_RING_FLAG_TX_DETECT_HANG
 };
 
-#define ring_uses_build_skb(ring) \
-	test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define set_ring_build_skb_enabled(ring) \
-	set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define clear_ring_build_skb_enabled(ring) \
-	clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-
 #define IGB_TXD_DCMD	(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
 #define IGB_RX_DESC(R, i)	\
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8496adfc6a68..64f75291e3a5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 	wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
-static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
-				  struct igb_ring *rx_ring)
-{
-#define IGB_MAX_BUILD_SKB_SIZE \
-	(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
-	 (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
-
-	/* set build_skb flag */
-	if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
-		set_ring_build_skb_enabled(rx_ring);
-	else
-		clear_ring_build_skb_enabled(rx_ring);
-}
-
 /**
  * igb_configure_rx - Configure receive Unit after Reset
  * @adapter: board private structure
@@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *rx_ring = adapter->rx_ring[i];
-		igb_set_rx_buffer_len(adapter, rx_ring);
-		igb_configure_rx_ring(adapter, rx_ring);
-	}
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 	return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }
 
-static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
-					   union e1000_adv_rx_desc *rx_desc)
-{
-	struct igb_rx_buffer *rx_buffer;
-	struct sk_buff *skb;
-	struct page *page;
-	void *page_addr;
-	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = IGB_RX_BUFSZ;
-#else
-	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-				SKB_DATA_ALIGN(NET_SKB_PAD +
-					       NET_IP_ALIGN +
-					       size);
-#endif
-
-	/* If we spanned a buffer we have a huge mess so test for it */
-	BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
-
-	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-	page = rx_buffer->page;
-	prefetchw(page);
-
-	page_addr = page_address(page) + rx_buffer->page_offset;
-
-	/* prefetch first cache line of first page */
-	prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
-#if L1_CACHE_BYTES < 128
-	prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
-#endif
-
-	/* build an skb to around the page buffer */
-	skb = build_skb(page_addr, truesize);
-	if (unlikely(!skb)) {
-		rx_ring->rx_stats.alloc_failed++;
-		return NULL;
-	}
-
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      IGB_RX_BUFSZ,
-				      DMA_FROM_DEVICE);
-
-	/* update pointers within the skb to store the data */
-	skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
-	__skb_put(skb, size);
-
-	/* pull timestamp out of packet data */
-	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
-		__skb_pull(skb, IGB_TS_HDR_LEN);
-	}
-
-	if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
-		/* hand second half of page back to the ring */
-		igb_reuse_rx_page(rx_ring, rx_buffer);
-	} else {
-		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
-	}
-
-	/* clear contents of buffer_info */
-	rx_buffer->dma = 0;
-	rx_buffer->page = NULL;
-
-	return skb;
-}
-
 static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 					   union e1000_adv_rx_desc *rx_desc,
 					   struct sk_buff *skb)
@@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 		rmb();
 
 		/* retrieve a buffer from the ring */
-		if (ring_uses_build_skb(rx_ring))
-			skb = igb_build_rx_buffer(rx_ring, rx_desc);
-		else
-			skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+		skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
 		/* exit if we failed to retrieve a buffer */
 		if (!skb)
@@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 	return true;
 }
 
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
-	if (ring_uses_build_skb(rx_ring))
-		return NET_SKB_PAD + NET_IP_ALIGN;
-	else
-		return 0;
-}
-
 /**
  * igb_alloc_rx_buffers - Replace used receive buffers; packet split
  * @adapter: address of board private structure
@@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 		 * Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
 		 */
-		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
-						     bi->page_offset +
-						     igb_rx_offset(rx_ring));
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
 
 		rx_desc++;
 		bi++;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d44b4d21268c..97e33669c0b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
 	if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
 		return -EINVAL;
 	if (vlan || qos) {
+		if (adapter->vfinfo[vf].pf_vlan)
+			err = ixgbe_set_vf_vlan(adapter, false,
+						adapter->vfinfo[vf].pf_vlan,
+						vf);
+		if (err)
+			goto out;
 		err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
 		if (err)
 			goto out;
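
Note on the ixgbe_sriov.c hunk: ixgbe_ndo_set_vf_vlan() now removes a VF's previously configured PF VLAN (when adapter->vfinfo[vf].pf_vlan is set) before programming the new one, so changing a VF's VLAN no longer leaves the old filter installed. A minimal standalone sketch of that clear-then-set ordering follows; struct vf_info, set_vf_vlan and ndo_set_vf_vlan are hypothetical stand-ins, not the driver's API.

#include <errno.h>
#include <stdio.h>

/* Hypothetical per-VF state, standing in for adapter->vfinfo[vf]. */
struct vf_info { unsigned short pf_vlan; };

/* Stand-in for ixgbe_set_vf_vlan(): add or remove a VLAN filter. */
static int set_vf_vlan(struct vf_info *vf, int add, unsigned short vlan)
{
	if (vlan > 4095)
		return -EINVAL;
	vf->pf_vlan = add ? vlan : 0;
	return 0;
}

/* Mirrors the new ordering: drop the old PF VLAN (if any) first,
 * propagate any error, and only then program the new filter. */
static int ndo_set_vf_vlan(struct vf_info *vf, unsigned short vlan)
{
	int err = 0;

	if (vf->pf_vlan)
		err = set_vf_vlan(vf, 0, vf->pf_vlan);	/* clear stale filter */
	if (err)
		return err;
	return set_vf_vlan(vf, 1, vlan);		/* add new filter */
}

int main(void)
{
	struct vf_info vf = { .pf_vlan = 100 };

	printf("change 100 -> 200: %d (pf_vlan=%u)\n",
	       ndo_set_vf_vlan(&vf, 200), (unsigned)vf.pf_vlan);
	return 0;
}

The design point is purely the ordering: clearing the stale entry before adding the new one keeps the hardware VLAN filter table consistent with the administrator's latest request.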