Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/dsa/Kconfig                          |   6
-rw-r--r--  drivers/net/dsa/mv88e6171.c                      |   5
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c                  |   5
-rw-r--r--  drivers/net/ethernet/sfc/efx.c                   |  22
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c                |  10
-rw-r--r--  drivers/net/ethernet/sfc/farch.c                 |  27
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c                  |   2
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h            |  19
-rw-r--r--  drivers/net/ethernet/sfc/nic.h                   | 112
-rw-r--r--  drivers/net/ethernet/sfc/siena.c                 |   8
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c           | 269
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h     |   5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h     |   4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c |  80
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c               |   7
-rw-r--r--  drivers/net/usb/r8152.c                          |  92
-rw-r--r--  drivers/net/xen-netback/netback.c                |  26
17 files changed, 367 insertions, 332 deletions
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 2d1a55e980da..7cf8f4ac281f 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -37,13 +37,13 @@ config NET_DSA_MV88E6123_61_65 | |||
37 | ethernet switch chips. | 37 | ethernet switch chips. |
38 | 38 | ||
39 | config NET_DSA_MV88E6171 | 39 | config NET_DSA_MV88E6171 |
40 | tristate "Marvell 88E6171 ethernet switch chip support" | 40 | tristate "Marvell 88E6171/6172 ethernet switch chip support" |
41 | select NET_DSA | 41 | select NET_DSA |
42 | select NET_DSA_MV88E6XXX | 42 | select NET_DSA_MV88E6XXX |
43 | select NET_DSA_TAG_EDSA | 43 | select NET_DSA_TAG_EDSA |
44 | ---help--- | 44 | ---help--- |
45 | This enables support for the Marvell 88E6171 ethernet switch | 45 | This enables support for the Marvell 88E6171/6172 ethernet switch |
46 | chip. | 46 | chips. |
47 | 47 | ||
48 | config NET_DSA_MV88E6352 | 48 | config NET_DSA_MV88E6352 |
49 | tristate "Marvell 88E6176/88E6352 ethernet switch chip support" | 49 | tristate "Marvell 88E6176/88E6352 ethernet switch chip support" |
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 78d8e876f3aa..537eeedece21 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -1,4 +1,4 @@ | |||
1 | /* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support | 1 | /* net/dsa/mv88e6171.c - Marvell 88e6171/8826172 switch chip support |
2 | * Copyright (c) 2008-2009 Marvell Semiconductor | 2 | * Copyright (c) 2008-2009 Marvell Semiconductor |
3 | * Copyright (c) 2014 Claudio Leite <leitec@staticky.com> | 3 | * Copyright (c) 2014 Claudio Leite <leitec@staticky.com> |
4 | * | 4 | * |
@@ -29,6 +29,8 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr) | |||
29 | if (ret >= 0) { | 29 | if (ret >= 0) { |
30 | if ((ret & 0xfff0) == 0x1710) | 30 | if ((ret & 0xfff0) == 0x1710) |
31 | return "Marvell 88E6171"; | 31 | return "Marvell 88E6171"; |
32 | if ((ret & 0xfff0) == 0x1720) | ||
33 | return "Marvell 88E6172"; | ||
32 | } | 34 | } |
33 | 35 | ||
34 | return NULL; | 36 | return NULL; |
@@ -409,3 +411,4 @@ struct dsa_switch_driver mv88e6171_switch_driver = { | |||
409 | }; | 411 | }; |
410 | 412 | ||
411 | MODULE_ALIAS("platform:mv88e6171"); | 413 | MODULE_ALIAS("platform:mv88e6171"); |
414 | MODULE_ALIAS("platform:mv88e6172"); | ||
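The probe hook above tells the 88E6171 and the newly supported 88E6172 apart by masking off the revision nibble of the switch ID register and comparing the remaining product code. Below is a minimal standalone sketch of that decode; decode_switch_id() and the sample ID value are illustrative only, not part of the driver.

#include <stdio.h>

/* Illustrative only: 0x1710/0x1720 are the product codes matched after
 * masking off the low (revision) nibble of the switch ID register.
 */
static const char *decode_switch_id(int id)
{
	if (id < 0)
		return NULL;			/* register read failed */
	if ((id & 0xfff0) == 0x1710)
		return "Marvell 88E6171";
	if ((id & 0xfff0) == 0x1720)
		return "Marvell 88E6172";
	return NULL;				/* not a chip this driver handles */
}

int main(void)
{
	/* 0x1721 would be revision 1 of an 88E6172 */
	printf("%s\n", decode_switch_id(0x1721));
	return 0;
}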
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 002d4cdc319f..ff55a1983014 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3688,6 +3688,11 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { | |||
3688 | .ptp_write_host_time = efx_ef10_ptp_write_host_time, | 3688 | .ptp_write_host_time = efx_ef10_ptp_write_host_time, |
3689 | .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, | 3689 | .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, |
3690 | .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, | 3690 | .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, |
3691 | .sriov_init = efx_ef10_sriov_init, | ||
3692 | .sriov_fini = efx_ef10_sriov_fini, | ||
3693 | .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed, | ||
3694 | .sriov_wanted = efx_ef10_sriov_wanted, | ||
3695 | .sriov_reset = efx_ef10_sriov_reset, | ||
3691 | 3696 | ||
3692 | .revision = EFX_REV_HUNT_A0, | 3697 | .revision = EFX_REV_HUNT_A0, |
3693 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), | 3698 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b2cc590dd1dd..b49d04886d4f 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1314,7 +1314,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) | |||
1314 | /* If RSS is requested for the PF *and* VFs then we can't write RSS | 1314 | /* If RSS is requested for the PF *and* VFs then we can't write RSS |
1315 | * table entries that are inaccessible to VFs | 1315 | * table entries that are inaccessible to VFs |
1316 | */ | 1316 | */ |
1317 | if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 && | 1317 | if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && |
1318 | count > efx_vf_size(efx)) { | 1318 | count > efx_vf_size(efx)) { |
1319 | netif_warn(efx, probe, efx->net_dev, | 1319 | netif_warn(efx, probe, efx->net_dev, |
1320 | "Reducing number of RSS channels from %u to %u for " | 1320 | "Reducing number of RSS channels from %u to %u for " |
@@ -1426,7 +1426,9 @@ static int efx_probe_interrupts(struct efx_nic *efx) | |||
1426 | } | 1426 | } |
1427 | 1427 | ||
1428 | /* RSS might be usable on VFs even if it is disabled on the PF */ | 1428 | /* RSS might be usable on VFs even if it is disabled on the PF */ |
1429 | efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ? | 1429 | |
1430 | efx->rss_spread = ((efx->n_rx_channels > 1 || | ||
1431 | !efx->type->sriov_wanted(efx)) ? | ||
1430 | efx->n_rx_channels : efx_vf_size(efx)); | 1432 | efx->n_rx_channels : efx_vf_size(efx)); |
1431 | 1433 | ||
1432 | return 0; | 1434 | return 0; |
@@ -2166,7 +2168,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) | |||
2166 | } | 2168 | } |
2167 | 2169 | ||
2168 | ether_addr_copy(net_dev->dev_addr, new_addr); | 2170 | ether_addr_copy(net_dev->dev_addr, new_addr); |
2169 | efx_sriov_mac_address_changed(efx); | 2171 | efx->type->sriov_mac_address_changed(efx); |
2170 | 2172 | ||
2171 | /* Reconfigure the MAC */ | 2173 | /* Reconfigure the MAC */ |
2172 | mutex_lock(&efx->mac_lock); | 2174 | mutex_lock(&efx->mac_lock); |
@@ -2210,10 +2212,10 @@ static const struct net_device_ops efx_farch_netdev_ops = { | |||
2210 | .ndo_set_rx_mode = efx_set_rx_mode, | 2212 | .ndo_set_rx_mode = efx_set_rx_mode, |
2211 | .ndo_set_features = efx_set_features, | 2213 | .ndo_set_features = efx_set_features, |
2212 | #ifdef CONFIG_SFC_SRIOV | 2214 | #ifdef CONFIG_SFC_SRIOV |
2213 | .ndo_set_vf_mac = efx_sriov_set_vf_mac, | 2215 | .ndo_set_vf_mac = efx_siena_sriov_set_vf_mac, |
2214 | .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, | 2216 | .ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan, |
2215 | .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, | 2217 | .ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk, |
2216 | .ndo_get_vf_config = efx_sriov_get_vf_config, | 2218 | .ndo_get_vf_config = efx_siena_sriov_get_vf_config, |
2217 | #endif | 2219 | #endif |
2218 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2220 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2219 | .ndo_poll_controller = efx_netpoll, | 2221 | .ndo_poll_controller = efx_netpoll, |
@@ -2433,7 +2435,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) | |||
2433 | if (rc) | 2435 | if (rc) |
2434 | goto fail; | 2436 | goto fail; |
2435 | efx_restore_filters(efx); | 2437 | efx_restore_filters(efx); |
2436 | efx_sriov_reset(efx); | 2438 | efx->type->sriov_reset(efx); |
2437 | 2439 | ||
2438 | mutex_unlock(&efx->mac_lock); | 2440 | mutex_unlock(&efx->mac_lock); |
2439 | 2441 | ||
@@ -2826,7 +2828,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev) | |||
2826 | efx_disable_interrupts(efx); | 2828 | efx_disable_interrupts(efx); |
2827 | rtnl_unlock(); | 2829 | rtnl_unlock(); |
2828 | 2830 | ||
2829 | efx_sriov_fini(efx); | 2831 | efx->type->sriov_fini(efx); |
2830 | efx_unregister_netdev(efx); | 2832 | efx_unregister_netdev(efx); |
2831 | 2833 | ||
2832 | efx_mtd_remove(efx); | 2834 | efx_mtd_remove(efx); |
@@ -3023,7 +3025,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev, | |||
3023 | if (rc) | 3025 | if (rc) |
3024 | goto fail4; | 3026 | goto fail4; |
3025 | 3027 | ||
3026 | rc = efx_sriov_init(efx); | 3028 | rc = efx->type->sriov_init(efx); |
3027 | if (rc) | 3029 | if (rc) |
3028 | netif_err(efx, probe, efx->net_dev, | 3030 | netif_err(efx, probe, efx->net_dev, |
3029 | "SR-IOV can't be enabled rc %d\n", rc); | 3031 | "SR-IOV can't be enabled rc %d\n", rc); |
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 157037546d30..f166c8ef38a3 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2766,6 +2766,11 @@ const struct efx_nic_type falcon_a1_nic_type = { | |||
2766 | .mtd_write = falcon_mtd_write, | 2766 | .mtd_write = falcon_mtd_write, |
2767 | .mtd_sync = falcon_mtd_sync, | 2767 | .mtd_sync = falcon_mtd_sync, |
2768 | #endif | 2768 | #endif |
2769 | .sriov_init = efx_falcon_sriov_init, | ||
2770 | .sriov_fini = efx_falcon_sriov_fini, | ||
2771 | .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed, | ||
2772 | .sriov_wanted = efx_falcon_sriov_wanted, | ||
2773 | .sriov_reset = efx_falcon_sriov_reset, | ||
2769 | 2774 | ||
2770 | .revision = EFX_REV_FALCON_A1, | 2775 | .revision = EFX_REV_FALCON_A1, |
2771 | .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, | 2776 | .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, |
@@ -2862,6 +2867,11 @@ const struct efx_nic_type falcon_b0_nic_type = { | |||
2862 | .mtd_write = falcon_mtd_write, | 2867 | .mtd_write = falcon_mtd_write, |
2863 | .mtd_sync = falcon_mtd_sync, | 2868 | .mtd_sync = falcon_mtd_sync, |
2864 | #endif | 2869 | #endif |
2870 | .sriov_init = efx_falcon_sriov_init, | ||
2871 | .sriov_fini = efx_falcon_sriov_fini, | ||
2872 | .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed, | ||
2873 | .sriov_wanted = efx_falcon_sriov_wanted, | ||
2874 | .sriov_reset = efx_falcon_sriov_reset, | ||
2865 | 2875 | ||
2866 | .revision = EFX_REV_FALCON_B0, | 2876 | .revision = EFX_REV_FALCON_B0, |
2867 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, | 2877 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, |
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 6859437b59fb..75975328e020 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -226,6 +226,9 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, | |||
226 | struct efx_special_buffer *buffer, | 226 | struct efx_special_buffer *buffer, |
227 | unsigned int len) | 227 | unsigned int len) |
228 | { | 228 | { |
229 | #ifdef CONFIG_SFC_SRIOV | ||
230 | struct siena_nic_data *nic_data = efx->nic_data; | ||
231 | #endif | ||
229 | len = ALIGN(len, EFX_BUF_SIZE); | 232 | len = ALIGN(len, EFX_BUF_SIZE); |
230 | 233 | ||
231 | if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) | 234 | if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) |
@@ -237,8 +240,8 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, | |||
237 | buffer->index = efx->next_buffer_table; | 240 | buffer->index = efx->next_buffer_table; |
238 | efx->next_buffer_table += buffer->entries; | 241 | efx->next_buffer_table += buffer->entries; |
239 | #ifdef CONFIG_SFC_SRIOV | 242 | #ifdef CONFIG_SFC_SRIOV |
240 | BUG_ON(efx_sriov_enabled(efx) && | 243 | BUG_ON(efx_siena_sriov_enabled(efx) && |
241 | efx->vf_buftbl_base < efx->next_buffer_table); | 244 | nic_data->vf_buftbl_base < efx->next_buffer_table); |
242 | #endif | 245 | #endif |
243 | 246 | ||
244 | netif_dbg(efx, probe, efx->net_dev, | 247 | netif_dbg(efx, probe, efx->net_dev, |
@@ -667,7 +670,7 @@ static int efx_farch_do_flush(struct efx_nic *efx) | |||
667 | * the firmware (though we will still have to poll for | 670 | * the firmware (though we will still have to poll for |
668 | * completion). If that fails, fall back to the old scheme. | 671 | * completion). If that fails, fall back to the old scheme. |
669 | */ | 672 | */ |
670 | if (efx_sriov_enabled(efx)) { | 673 | if (efx_siena_sriov_enabled(efx)) { |
671 | rc = efx_mcdi_flush_rxqs(efx); | 674 | rc = efx_mcdi_flush_rxqs(efx); |
672 | if (!rc) | 675 | if (!rc) |
673 | goto wait; | 676 | goto wait; |
@@ -1195,13 +1198,13 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | |||
1195 | netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", | 1198 | netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", |
1196 | channel->channel, ev_sub_data); | 1199 | channel->channel, ev_sub_data); |
1197 | efx_farch_handle_tx_flush_done(efx, event); | 1200 | efx_farch_handle_tx_flush_done(efx, event); |
1198 | efx_sriov_tx_flush_done(efx, event); | 1201 | efx_siena_sriov_tx_flush_done(efx, event); |
1199 | break; | 1202 | break; |
1200 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: | 1203 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: |
1201 | netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", | 1204 | netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", |
1202 | channel->channel, ev_sub_data); | 1205 | channel->channel, ev_sub_data); |
1203 | efx_farch_handle_rx_flush_done(efx, event); | 1206 | efx_farch_handle_rx_flush_done(efx, event); |
1204 | efx_sriov_rx_flush_done(efx, event); | 1207 | efx_siena_sriov_rx_flush_done(efx, event); |
1205 | break; | 1208 | break; |
1206 | case FSE_AZ_EVQ_INIT_DONE_EV: | 1209 | case FSE_AZ_EVQ_INIT_DONE_EV: |
1207 | netif_dbg(efx, hw, efx->net_dev, | 1210 | netif_dbg(efx, hw, efx->net_dev, |
@@ -1240,7 +1243,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | |||
1240 | ev_sub_data); | 1243 | ev_sub_data); |
1241 | efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); | 1244 | efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); |
1242 | } else | 1245 | } else |
1243 | efx_sriov_desc_fetch_err(efx, ev_sub_data); | 1246 | efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); |
1244 | break; | 1247 | break; |
1245 | case FSE_BZ_TX_DSC_ERROR_EV: | 1248 | case FSE_BZ_TX_DSC_ERROR_EV: |
1246 | if (ev_sub_data < EFX_VI_BASE) { | 1249 | if (ev_sub_data < EFX_VI_BASE) { |
@@ -1250,7 +1253,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | |||
1250 | ev_sub_data); | 1253 | ev_sub_data); |
1251 | efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); | 1254 | efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); |
1252 | } else | 1255 | } else |
1253 | efx_sriov_desc_fetch_err(efx, ev_sub_data); | 1256 | efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); |
1254 | break; | 1257 | break; |
1255 | default: | 1258 | default: |
1256 | netif_vdbg(efx, hw, efx->net_dev, | 1259 | netif_vdbg(efx, hw, efx->net_dev, |
@@ -1315,7 +1318,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget) | |||
1315 | efx_farch_handle_driver_event(channel, &event); | 1318 | efx_farch_handle_driver_event(channel, &event); |
1316 | break; | 1319 | break; |
1317 | case FSE_CZ_EV_CODE_USER_EV: | 1320 | case FSE_CZ_EV_CODE_USER_EV: |
1318 | efx_sriov_event(channel, &event); | 1321 | efx_siena_sriov_event(channel, &event); |
1319 | break; | 1322 | break; |
1320 | case FSE_CZ_EV_CODE_MCDI_EV: | 1323 | case FSE_CZ_EV_CODE_MCDI_EV: |
1321 | efx_mcdi_process_event(channel, &event); | 1324 | efx_mcdi_process_event(channel, &event); |
@@ -1668,6 +1671,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) | |||
1668 | { | 1671 | { |
1669 | unsigned vi_count, buftbl_min; | 1672 | unsigned vi_count, buftbl_min; |
1670 | 1673 | ||
1674 | #ifdef CONFIG_SFC_SRIOV | ||
1675 | struct siena_nic_data *nic_data = efx->nic_data; | ||
1676 | #endif | ||
1677 | |||
1671 | /* Account for the buffer table entries backing the datapath channels | 1678 | /* Account for the buffer table entries backing the datapath channels |
1672 | * and the descriptor caches for those channels. | 1679 | * and the descriptor caches for those channels. |
1673 | */ | 1680 | */ |
@@ -1678,10 +1685,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) | |||
1678 | vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); | 1685 | vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); |
1679 | 1686 | ||
1680 | #ifdef CONFIG_SFC_SRIOV | 1687 | #ifdef CONFIG_SFC_SRIOV |
1681 | if (efx_sriov_wanted(efx)) { | 1688 | if (efx->type->sriov_wanted(efx)) { |
1682 | unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; | 1689 | unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; |
1683 | 1690 | ||
1684 | efx->vf_buftbl_base = buftbl_min; | 1691 | nic_data->vf_buftbl_base = buftbl_min; |
1685 | 1692 | ||
1686 | vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; | 1693 | vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; |
1687 | vi_count = max(vi_count, EFX_VI_BASE); | 1694 | vi_count = max(vi_count, EFX_VI_BASE); |
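farch.c is shared Falcon-architecture code, but the VF buffer-table base it checks now lives in Siena's per-NIC state, so the shared code pulls it out of efx->nic_data only when CONFIG_SFC_SRIOV is built in. A rough sketch of that pattern follows; the structures are trimmed-down stand-ins and the helper name is made up, not the driver's actual code.

#include <stdbool.h>

/* Trimmed stand-ins for the real sfc structures. */
struct siena_nic_data {
	unsigned int vf_buftbl_base;	/* first buffer-table entry reserved for VFs */
};

struct efx_nic {
	void *nic_data;			/* per-generation state; siena_nic_data on Siena */
	unsigned int next_buffer_table;
	unsigned int vf_init_count;
};

/* Mirrors the BUG_ON() check in efx_alloc_special_buffer(): only look at
 * the Siena-specific state when SR-IOV is compiled in and VFs are active.
 */
static bool pf_buffers_overlap_vf_region(struct efx_nic *efx)
{
#ifdef CONFIG_SFC_SRIOV
	struct siena_nic_data *nic_data = efx->nic_data;

	return efx->vf_init_count != 0 &&
	       nic_data->vf_buftbl_base < efx->next_buffer_table;
#else
	(void)efx;
	return false;
#endif
}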
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 5239cf9bdc56..d37928f01949 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1035,7 +1035,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, | |||
1035 | /* MAC stats are gather lazily. We can ignore this. */ | 1035 | /* MAC stats are gather lazily. We can ignore this. */ |
1036 | break; | 1036 | break; |
1037 | case MCDI_EVENT_CODE_FLR: | 1037 | case MCDI_EVENT_CODE_FLR: |
1038 | efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); | 1038 | efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); |
1039 | break; | 1039 | break; |
1040 | case MCDI_EVENT_CODE_PTP_RX: | 1040 | case MCDI_EVENT_CODE_PTP_RX: |
1041 | case MCDI_EVENT_CODE_PTP_FAULT: | 1041 | case MCDI_EVENT_CODE_PTP_FAULT: |
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9ede32064685..325dd94bca46 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -913,13 +913,6 @@ struct vfdi_status; | |||
913 | * @vf_count: Number of VFs intended to be enabled. | 913 | * @vf_count: Number of VFs intended to be enabled. |
914 | * @vf_init_count: Number of VFs that have been fully initialised. | 914 | * @vf_init_count: Number of VFs that have been fully initialised. |
915 | * @vi_scale: log2 number of vnics per VF. | 915 | * @vi_scale: log2 number of vnics per VF. |
916 | * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. | ||
917 | * @vfdi_status: Common VFDI status page to be dmad to VF address space. | ||
918 | * @local_addr_list: List of local addresses. Protected by %local_lock. | ||
919 | * @local_page_list: List of DMA addressable pages used to broadcast | ||
920 | * %local_addr_list. Protected by %local_lock. | ||
921 | * @local_lock: Mutex protecting %local_addr_list and %local_page_list. | ||
922 | * @peer_work: Work item to broadcast peer addresses to VMs. | ||
923 | * @ptp_data: PTP state data | 916 | * @ptp_data: PTP state data |
924 | * @vpd_sn: Serial number read from VPD | 917 | * @vpd_sn: Serial number read from VPD |
925 | * @monitor_work: Hardware monitor workitem | 918 | * @monitor_work: Hardware monitor workitem |
@@ -1060,17 +1053,10 @@ struct efx_nic { | |||
1060 | wait_queue_head_t flush_wq; | 1053 | wait_queue_head_t flush_wq; |
1061 | 1054 | ||
1062 | #ifdef CONFIG_SFC_SRIOV | 1055 | #ifdef CONFIG_SFC_SRIOV |
1063 | struct efx_channel *vfdi_channel; | ||
1064 | struct efx_vf *vf; | 1056 | struct efx_vf *vf; |
1065 | unsigned vf_count; | 1057 | unsigned vf_count; |
1066 | unsigned vf_init_count; | 1058 | unsigned vf_init_count; |
1067 | unsigned vi_scale; | 1059 | unsigned vi_scale; |
1068 | unsigned vf_buftbl_base; | ||
1069 | struct efx_buffer vfdi_status; | ||
1070 | struct list_head local_addr_list; | ||
1071 | struct list_head local_page_list; | ||
1072 | struct mutex local_lock; | ||
1073 | struct work_struct peer_work; | ||
1074 | #endif | 1060 | #endif |
1075 | 1061 | ||
1076 | struct efx_ptp_data *ptp_data; | 1062 | struct efx_ptp_data *ptp_data; |
@@ -1344,6 +1330,11 @@ struct efx_nic_type { | |||
1344 | int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp); | 1330 | int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp); |
1345 | int (*ptp_set_ts_config)(struct efx_nic *efx, | 1331 | int (*ptp_set_ts_config)(struct efx_nic *efx, |
1346 | struct hwtstamp_config *init); | 1332 | struct hwtstamp_config *init); |
1333 | int (*sriov_init)(struct efx_nic *efx); | ||
1334 | void (*sriov_fini)(struct efx_nic *efx); | ||
1335 | void (*sriov_mac_address_changed)(struct efx_nic *efx); | ||
1336 | bool (*sriov_wanted)(struct efx_nic *efx); | ||
1337 | void (*sriov_reset)(struct efx_nic *efx); | ||
1347 | 1338 | ||
1348 | int revision; | 1339 | int revision; |
1349 | unsigned int txd_ptr_tbl_base; | 1340 | unsigned int txd_ptr_tbl_base; |
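The new struct efx_nic_type members turn SR-IOV handling into a per-generation method table: callers such as efx.c now go through efx->type->sriov_init() instead of naming a Siena-specific function directly. A minimal sketch of that indirection is shown below with made-up names (my_nic, my_nic_type and the handlers are illustrative, not the sfc driver's types).

#include <stdio.h>

struct my_nic;				/* hypothetical stand-ins for the real sfc types */

struct my_nic_type {
	int  (*sriov_init)(struct my_nic *nic);
	bool (*sriov_wanted)(struct my_nic *nic);
};

struct my_nic {
	const struct my_nic_type *type;
	unsigned int vf_count;
};

static int siena_like_sriov_init(struct my_nic *nic)
{
	printf("enabling %u VFs\n", nic->vf_count);
	return 0;
}

static bool siena_like_sriov_wanted(struct my_nic *nic)
{
	return nic->vf_count != 0;
}

static const struct my_nic_type siena_like_type = {
	.sriov_init   = siena_like_sriov_init,
	.sriov_wanted = siena_like_sriov_wanted,
};

int main(void)
{
	struct my_nic nic = { .type = &siena_like_type, .vf_count = 4 };

	/* Mirrors efx->type->sriov_wanted(efx) / efx->type->sriov_init(efx) */
	if (nic.type->sriov_wanted(&nic))
		nic.type->sriov_init(&nic);
	return 0;
}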
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index f77cce034ad4..93d10cbbd1cf 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -378,12 +378,30 @@ enum { | |||
378 | 378 | ||
379 | /** | 379 | /** |
380 | * struct siena_nic_data - Siena NIC state | 380 | * struct siena_nic_data - Siena NIC state |
381 | * @efx: Pointer back to main interface structure | ||
381 | * @wol_filter_id: Wake-on-LAN packet filter id | 382 | * @wol_filter_id: Wake-on-LAN packet filter id |
382 | * @stats: Hardware statistics | 383 | * @stats: Hardware statistics |
384 | * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. | ||
385 | * @vfdi_status: Common VFDI status page to be dmad to VF address space. | ||
386 | * @local_addr_list: List of local addresses. Protected by %local_lock. | ||
387 | * @local_page_list: List of DMA addressable pages used to broadcast | ||
388 | * %local_addr_list. Protected by %local_lock. | ||
389 | * @local_lock: Mutex protecting %local_addr_list and %local_page_list. | ||
390 | * @peer_work: Work item to broadcast peer addresses to VMs. | ||
383 | */ | 391 | */ |
384 | struct siena_nic_data { | 392 | struct siena_nic_data { |
393 | struct efx_nic *efx; | ||
385 | int wol_filter_id; | 394 | int wol_filter_id; |
386 | u64 stats[SIENA_STAT_COUNT]; | 395 | u64 stats[SIENA_STAT_COUNT]; |
396 | #ifdef CONFIG_SFC_SRIOV | ||
397 | struct efx_channel *vfdi_channel; | ||
398 | unsigned vf_buftbl_base; | ||
399 | struct efx_buffer vfdi_status; | ||
400 | struct list_head local_addr_list; | ||
401 | struct list_head local_page_list; | ||
402 | struct mutex local_lock; | ||
403 | struct work_struct peer_work; | ||
404 | #endif | ||
387 | }; | 405 | }; |
388 | 406 | ||
389 | enum { | 407 | enum { |
@@ -522,62 +540,88 @@ struct efx_ef10_nic_data { | |||
522 | 540 | ||
523 | #ifdef CONFIG_SFC_SRIOV | 541 | #ifdef CONFIG_SFC_SRIOV |
524 | 542 | ||
525 | static inline bool efx_sriov_wanted(struct efx_nic *efx) | 543 | /* SIENA */ |
544 | static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) | ||
526 | { | 545 | { |
527 | return efx->vf_count != 0; | 546 | return efx->vf_count != 0; |
528 | } | 547 | } |
529 | static inline bool efx_sriov_enabled(struct efx_nic *efx) | 548 | |
549 | static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) | ||
530 | { | 550 | { |
531 | return efx->vf_init_count != 0; | 551 | return efx->vf_init_count != 0; |
532 | } | 552 | } |
553 | |||
533 | static inline unsigned int efx_vf_size(struct efx_nic *efx) | 554 | static inline unsigned int efx_vf_size(struct efx_nic *efx) |
534 | { | 555 | { |
535 | return 1 << efx->vi_scale; | 556 | return 1 << efx->vi_scale; |
536 | } | 557 | } |
537 | 558 | ||
538 | int efx_init_sriov(void); | 559 | int efx_init_sriov(void); |
539 | void efx_sriov_probe(struct efx_nic *efx); | 560 | void efx_siena_sriov_probe(struct efx_nic *efx); |
540 | int efx_sriov_init(struct efx_nic *efx); | 561 | int efx_siena_sriov_init(struct efx_nic *efx); |
541 | void efx_sriov_mac_address_changed(struct efx_nic *efx); | 562 | void efx_siena_sriov_mac_address_changed(struct efx_nic *efx); |
542 | void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); | 563 | void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); |
543 | void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); | 564 | void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); |
544 | void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event); | 565 | void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event); |
545 | void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); | 566 | void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); |
546 | void efx_sriov_flr(struct efx_nic *efx, unsigned flr); | 567 | void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr); |
547 | void efx_sriov_reset(struct efx_nic *efx); | 568 | void efx_siena_sriov_reset(struct efx_nic *efx); |
548 | void efx_sriov_fini(struct efx_nic *efx); | 569 | void efx_siena_sriov_fini(struct efx_nic *efx); |
549 | void efx_fini_sriov(void); | 570 | void efx_fini_sriov(void); |
550 | 571 | ||
572 | /* EF10 */ | ||
573 | static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; } | ||
574 | static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } | ||
575 | static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {} | ||
576 | static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} | ||
577 | static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {} | ||
578 | |||
551 | #else | 579 | #else |
552 | 580 | ||
553 | static inline bool efx_sriov_wanted(struct efx_nic *efx) { return false; } | 581 | /* SIENA */ |
554 | static inline bool efx_sriov_enabled(struct efx_nic *efx) { return false; } | 582 | static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; } |
583 | static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; } | ||
555 | static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; } | 584 | static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; } |
556 | |||
557 | static inline int efx_init_sriov(void) { return 0; } | 585 | static inline int efx_init_sriov(void) { return 0; } |
558 | static inline void efx_sriov_probe(struct efx_nic *efx) {} | 586 | static inline void efx_siena_sriov_probe(struct efx_nic *efx) {} |
559 | static inline int efx_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } | 587 | static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } |
560 | static inline void efx_sriov_mac_address_changed(struct efx_nic *efx) {} | 588 | static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {} |
561 | static inline void efx_sriov_tx_flush_done(struct efx_nic *efx, | 589 | static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, |
562 | efx_qword_t *event) {} | 590 | efx_qword_t *event) {} |
563 | static inline void efx_sriov_rx_flush_done(struct efx_nic *efx, | 591 | static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, |
564 | efx_qword_t *event) {} | 592 | efx_qword_t *event) {} |
565 | static inline void efx_sriov_event(struct efx_channel *channel, | 593 | static inline void efx_siena_sriov_event(struct efx_channel *channel, |
566 | efx_qword_t *event) {} | 594 | efx_qword_t *event) {} |
567 | static inline void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) {} | 595 | static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, |
568 | static inline void efx_sriov_flr(struct efx_nic *efx, unsigned flr) {} | 596 | unsigned dmaq) {} |
569 | static inline void efx_sriov_reset(struct efx_nic *efx) {} | 597 | static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {} |
570 | static inline void efx_sriov_fini(struct efx_nic *efx) {} | 598 | static inline void efx_siena_sriov_reset(struct efx_nic *efx) {} |
599 | static inline void efx_siena_sriov_fini(struct efx_nic *efx) {} | ||
571 | static inline void efx_fini_sriov(void) {} | 600 | static inline void efx_fini_sriov(void) {} |
572 | 601 | ||
602 | /* EF10 */ | ||
603 | static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; } | ||
604 | static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } | ||
605 | static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {} | ||
606 | static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} | ||
607 | static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {} | ||
608 | |||
573 | #endif | 609 | #endif |
574 | 610 | ||
575 | int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac); | 611 | /* FALCON */ |
576 | int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos); | 612 | static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; } |
577 | int efx_sriov_get_vf_config(struct net_device *dev, int vf, | 613 | static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } |
578 | struct ifla_vf_info *ivf); | 614 | static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {} |
579 | int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, | 615 | static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {} |
580 | bool spoofchk); | 616 | static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {} |
617 | |||
618 | int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac); | ||
619 | int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf, | ||
620 | u16 vlan, u8 qos); | ||
621 | int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf, | ||
622 | struct ifla_vf_info *ivf); | ||
623 | int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, | ||
624 | bool spoofchk); | ||
581 | 625 | ||
582 | struct ethtool_ts_info; | 626 | struct ethtool_ts_info; |
583 | int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); | 627 | int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); |
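nic.h now exposes generation-prefixed helpers: real Siena declarations when CONFIG_SFC_SRIOV is set, and trivial inline stubs otherwise (and for EF10/Falcon, which have no VFDI implementation here), so every efx_nic_type can populate the new method pointers unconditionally. Below is a hedged sketch of that stub pattern with hypothetical names; CONFIG_MY_SRIOV, my_nic and the my_*_sriov_* helpers stand in for the real symbols.

#include <errno.h>
#include <stdbool.h>

struct my_nic;				/* stands in for struct efx_nic */

#ifdef CONFIG_MY_SRIOV
bool my_siena_sriov_wanted(struct my_nic *nic);
int  my_siena_sriov_init(struct my_nic *nic);
#else
/* With SR-IOV compiled out, keep the same symbols available as trivial
 * inlines so the method tables still build and link unchanged.
 */
static inline bool my_siena_sriov_wanted(struct my_nic *nic) { return false; }
static inline int  my_siena_sriov_init(struct my_nic *nic) { return -EOPNOTSUPP; }
#endif

/* Generations without a VFDI implementation get permanent stubs. */
static inline bool my_ef10_sriov_wanted(struct my_nic *nic) { return false; }
static inline int  my_ef10_sriov_init(struct my_nic *nic) { return -EOPNOTSUPP; }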
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index ae696855f21a..3583f0208a6e 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -251,6 +251,7 @@ static int siena_probe_nic(struct efx_nic *efx) | |||
251 | nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); | 251 | nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); |
252 | if (!nic_data) | 252 | if (!nic_data) |
253 | return -ENOMEM; | 253 | return -ENOMEM; |
254 | nic_data->efx = efx; | ||
254 | efx->nic_data = nic_data; | 255 | efx->nic_data = nic_data; |
255 | 256 | ||
256 | if (efx_farch_fpga_ver(efx) != 0) { | 257 | if (efx_farch_fpga_ver(efx) != 0) { |
@@ -306,7 +307,7 @@ static int siena_probe_nic(struct efx_nic *efx) | |||
306 | if (rc) | 307 | if (rc) |
307 | goto fail5; | 308 | goto fail5; |
308 | 309 | ||
309 | efx_sriov_probe(efx); | 310 | efx_siena_sriov_probe(efx); |
310 | efx_ptp_defer_probe_with_channel(efx); | 311 | efx_ptp_defer_probe_with_channel(efx); |
311 | 312 | ||
312 | return 0; | 313 | return 0; |
@@ -996,6 +997,11 @@ const struct efx_nic_type siena_a0_nic_type = { | |||
996 | #endif | 997 | #endif |
997 | .ptp_write_host_time = siena_ptp_write_host_time, | 998 | .ptp_write_host_time = siena_ptp_write_host_time, |
998 | .ptp_set_ts_config = siena_ptp_set_ts_config, | 999 | .ptp_set_ts_config = siena_ptp_set_ts_config, |
1000 | .sriov_init = efx_siena_sriov_init, | ||
1001 | .sriov_fini = efx_siena_sriov_fini, | ||
1002 | .sriov_mac_address_changed = efx_siena_sriov_mac_address_changed, | ||
1003 | .sriov_wanted = efx_siena_sriov_wanted, | ||
1004 | .sriov_reset = efx_siena_sriov_reset, | ||
999 | 1005 | ||
1000 | .revision = EFX_REV_SIENA_A0, | 1006 | .revision = EFX_REV_SIENA_A0, |
1001 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, | 1007 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, |
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 43d2e64546ed..a8bbbad68a88 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -66,7 +66,7 @@ enum efx_vf_tx_filter_mode { | |||
66 | * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr, | 66 | * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr, |
67 | * @peer_page_addrs and @peer_page_count from simultaneous | 67 | * @peer_page_addrs and @peer_page_count from simultaneous |
68 | * updates by the VM and consumption by | 68 | * updates by the VM and consumption by |
69 | * efx_sriov_update_vf_addr() | 69 | * efx_siena_sriov_update_vf_addr() |
70 | * @peer_page_addrs: Pointer to an array of guest pages for local addresses. | 70 | * @peer_page_addrs: Pointer to an array of guest pages for local addresses. |
71 | * @peer_page_count: Number of entries in @peer_page_count. | 71 | * @peer_page_count: Number of entries in @peer_page_count. |
72 | * @evq0_addrs: Array of guest pages backing evq0. | 72 | * @evq0_addrs: Array of guest pages backing evq0. |
@@ -194,8 +194,8 @@ static unsigned abs_index(struct efx_vf *vf, unsigned index) | |||
194 | return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; | 194 | return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; |
195 | } | 195 | } |
196 | 196 | ||
197 | static int efx_sriov_cmd(struct efx_nic *efx, bool enable, | 197 | static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable, |
198 | unsigned *vi_scale_out, unsigned *vf_total_out) | 198 | unsigned *vi_scale_out, unsigned *vf_total_out) |
199 | { | 199 | { |
200 | MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN); | 200 | MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN); |
201 | MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN); | 201 | MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN); |
@@ -227,18 +227,20 @@ static int efx_sriov_cmd(struct efx_nic *efx, bool enable, | |||
227 | return 0; | 227 | return 0; |
228 | } | 228 | } |
229 | 229 | ||
230 | static void efx_sriov_usrev(struct efx_nic *efx, bool enabled) | 230 | static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled) |
231 | { | 231 | { |
232 | struct siena_nic_data *nic_data = efx->nic_data; | ||
232 | efx_oword_t reg; | 233 | efx_oword_t reg; |
233 | 234 | ||
234 | EFX_POPULATE_OWORD_2(reg, | 235 | EFX_POPULATE_OWORD_2(reg, |
235 | FRF_CZ_USREV_DIS, enabled ? 0 : 1, | 236 | FRF_CZ_USREV_DIS, enabled ? 0 : 1, |
236 | FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel); | 237 | FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel); |
237 | efx_writeo(efx, ®, FR_CZ_USR_EV_CFG); | 238 | efx_writeo(efx, ®, FR_CZ_USR_EV_CFG); |
238 | } | 239 | } |
239 | 240 | ||
240 | static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req, | 241 | static int efx_siena_sriov_memcpy(struct efx_nic *efx, |
241 | unsigned int count) | 242 | struct efx_memcpy_req *req, |
243 | unsigned int count) | ||
242 | { | 244 | { |
243 | MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1); | 245 | MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1); |
244 | MCDI_DECLARE_STRUCT_PTR(record); | 246 | MCDI_DECLARE_STRUCT_PTR(record); |
@@ -297,7 +299,7 @@ out: | |||
297 | /* The TX filter is entirely controlled by this driver, and is modified | 299 | /* The TX filter is entirely controlled by this driver, and is modified |
298 | * underneath the feet of the VF | 300 | * underneath the feet of the VF |
299 | */ | 301 | */ |
300 | static void efx_sriov_reset_tx_filter(struct efx_vf *vf) | 302 | static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf) |
301 | { | 303 | { |
302 | struct efx_nic *efx = vf->efx; | 304 | struct efx_nic *efx = vf->efx; |
303 | struct efx_filter_spec filter; | 305 | struct efx_filter_spec filter; |
@@ -341,7 +343,7 @@ static void efx_sriov_reset_tx_filter(struct efx_vf *vf) | |||
341 | } | 343 | } |
342 | 344 | ||
343 | /* The RX filter is managed here on behalf of the VF driver */ | 345 | /* The RX filter is managed here on behalf of the VF driver */ |
344 | static void efx_sriov_reset_rx_filter(struct efx_vf *vf) | 346 | static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf) |
345 | { | 347 | { |
346 | struct efx_nic *efx = vf->efx; | 348 | struct efx_nic *efx = vf->efx; |
347 | struct efx_filter_spec filter; | 349 | struct efx_filter_spec filter; |
@@ -380,22 +382,26 @@ static void efx_sriov_reset_rx_filter(struct efx_vf *vf) | |||
380 | } | 382 | } |
381 | } | 383 | } |
382 | 384 | ||
383 | static void __efx_sriov_update_vf_addr(struct efx_vf *vf) | 385 | static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf) |
384 | { | 386 | { |
385 | efx_sriov_reset_tx_filter(vf); | 387 | struct efx_nic *efx = vf->efx; |
386 | efx_sriov_reset_rx_filter(vf); | 388 | struct siena_nic_data *nic_data = efx->nic_data; |
387 | queue_work(vfdi_workqueue, &vf->efx->peer_work); | 389 | |
390 | efx_siena_sriov_reset_tx_filter(vf); | ||
391 | efx_siena_sriov_reset_rx_filter(vf); | ||
392 | queue_work(vfdi_workqueue, &nic_data->peer_work); | ||
388 | } | 393 | } |
389 | 394 | ||
390 | /* Push the peer list to this VF. The caller must hold status_lock to interlock | 395 | /* Push the peer list to this VF. The caller must hold status_lock to interlock |
391 | * with VFDI requests, and they must be serialised against manipulation of | 396 | * with VFDI requests, and they must be serialised against manipulation of |
392 | * local_page_list, either by acquiring local_lock or by running from | 397 | * local_page_list, either by acquiring local_lock or by running from |
393 | * efx_sriov_peer_work() | 398 | * efx_siena_sriov_peer_work() |
394 | */ | 399 | */ |
395 | static void __efx_sriov_push_vf_status(struct efx_vf *vf) | 400 | static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf) |
396 | { | 401 | { |
397 | struct efx_nic *efx = vf->efx; | 402 | struct efx_nic *efx = vf->efx; |
398 | struct vfdi_status *status = efx->vfdi_status.addr; | 403 | struct siena_nic_data *nic_data = efx->nic_data; |
404 | struct vfdi_status *status = nic_data->vfdi_status.addr; | ||
399 | struct efx_memcpy_req copy[4]; | 405 | struct efx_memcpy_req copy[4]; |
400 | struct efx_endpoint_page *epp; | 406 | struct efx_endpoint_page *epp; |
401 | unsigned int pos, count; | 407 | unsigned int pos, count; |
@@ -421,7 +427,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) | |||
421 | */ | 427 | */ |
422 | data_offset = offsetof(struct vfdi_status, version); | 428 | data_offset = offsetof(struct vfdi_status, version); |
423 | copy[1].from_rid = efx->pci_dev->devfn; | 429 | copy[1].from_rid = efx->pci_dev->devfn; |
424 | copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset; | 430 | copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset; |
425 | copy[1].to_rid = vf->pci_rid; | 431 | copy[1].to_rid = vf->pci_rid; |
426 | copy[1].to_addr = vf->status_addr + data_offset; | 432 | copy[1].to_addr = vf->status_addr + data_offset; |
427 | copy[1].length = status->length - data_offset; | 433 | copy[1].length = status->length - data_offset; |
@@ -429,7 +435,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) | |||
429 | /* Copy the peer pages */ | 435 | /* Copy the peer pages */ |
430 | pos = 2; | 436 | pos = 2; |
431 | count = 0; | 437 | count = 0; |
432 | list_for_each_entry(epp, &efx->local_page_list, link) { | 438 | list_for_each_entry(epp, &nic_data->local_page_list, link) { |
433 | if (count == vf->peer_page_count) { | 439 | if (count == vf->peer_page_count) { |
434 | /* The VF driver will know they need to provide more | 440 | /* The VF driver will know they need to provide more |
435 | * pages because peer_addr_count is too large. | 441 | * pages because peer_addr_count is too large. |
@@ -444,7 +450,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) | |||
444 | copy[pos].length = EFX_PAGE_SIZE; | 450 | copy[pos].length = EFX_PAGE_SIZE; |
445 | 451 | ||
446 | if (++pos == ARRAY_SIZE(copy)) { | 452 | if (++pos == ARRAY_SIZE(copy)) { |
447 | efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); | 453 | efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); |
448 | pos = 0; | 454 | pos = 0; |
449 | } | 455 | } |
450 | ++count; | 456 | ++count; |
@@ -456,7 +462,7 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) | |||
456 | copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status, | 462 | copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status, |
457 | generation_end); | 463 | generation_end); |
458 | copy[pos].length = sizeof(status->generation_end); | 464 | copy[pos].length = sizeof(status->generation_end); |
459 | efx_sriov_memcpy(efx, copy, pos + 1); | 465 | efx_siena_sriov_memcpy(efx, copy, pos + 1); |
460 | 466 | ||
461 | /* Notify the guest */ | 467 | /* Notify the guest */ |
462 | EFX_POPULATE_QWORD_3(event, | 468 | EFX_POPULATE_QWORD_3(event, |
@@ -469,8 +475,8 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) | |||
469 | &event); | 475 | &event); |
470 | } | 476 | } |
471 | 477 | ||
472 | static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset, | 478 | static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset, |
473 | u64 *addr, unsigned count) | 479 | u64 *addr, unsigned count) |
474 | { | 480 | { |
475 | efx_qword_t buf; | 481 | efx_qword_t buf; |
476 | unsigned pos; | 482 | unsigned pos; |
@@ -539,7 +545,7 @@ static int efx_vfdi_init_evq(struct efx_vf *vf) | |||
539 | return VFDI_RC_EINVAL; | 545 | return VFDI_RC_EINVAL; |
540 | } | 546 | } |
541 | 547 | ||
542 | efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count); | 548 | efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count); |
543 | 549 | ||
544 | EFX_POPULATE_OWORD_3(reg, | 550 | EFX_POPULATE_OWORD_3(reg, |
545 | FRF_CZ_TIMER_Q_EN, 1, | 551 | FRF_CZ_TIMER_Q_EN, 1, |
@@ -584,7 +590,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf) | |||
584 | } | 590 | } |
585 | if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask)) | 591 | if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask)) |
586 | ++vf->rxq_count; | 592 | ++vf->rxq_count; |
587 | efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count); | 593 | efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count); |
588 | 594 | ||
589 | label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL); | 595 | label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL); |
590 | EFX_POPULATE_OWORD_6(reg, | 596 | EFX_POPULATE_OWORD_6(reg, |
@@ -628,7 +634,7 @@ static int efx_vfdi_init_txq(struct efx_vf *vf) | |||
628 | if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask)) | 634 | if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask)) |
629 | ++vf->txq_count; | 635 | ++vf->txq_count; |
630 | mutex_unlock(&vf->txq_lock); | 636 | mutex_unlock(&vf->txq_lock); |
631 | efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count); | 637 | efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count); |
632 | 638 | ||
633 | eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON; | 639 | eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON; |
634 | 640 | ||
@@ -742,8 +748,8 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf) | |||
742 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, | 748 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, |
743 | vf_offset + index); | 749 | vf_offset + index); |
744 | } | 750 | } |
745 | efx_sriov_bufs(efx, vf->buftbl_base, NULL, | 751 | efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL, |
746 | EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx)); | 752 | EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx)); |
747 | efx_vfdi_flush_clear(vf); | 753 | efx_vfdi_flush_clear(vf); |
748 | 754 | ||
749 | vf->evq0_count = 0; | 755 | vf->evq0_count = 0; |
@@ -754,6 +760,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf) | |||
754 | static int efx_vfdi_insert_filter(struct efx_vf *vf) | 760 | static int efx_vfdi_insert_filter(struct efx_vf *vf) |
755 | { | 761 | { |
756 | struct efx_nic *efx = vf->efx; | 762 | struct efx_nic *efx = vf->efx; |
763 | struct siena_nic_data *nic_data = efx->nic_data; | ||
757 | struct vfdi_req *req = vf->buf.addr; | 764 | struct vfdi_req *req = vf->buf.addr; |
758 | unsigned vf_rxq = req->u.mac_filter.rxq; | 765 | unsigned vf_rxq = req->u.mac_filter.rxq; |
759 | unsigned flags; | 766 | unsigned flags; |
@@ -776,17 +783,20 @@ static int efx_vfdi_insert_filter(struct efx_vf *vf) | |||
776 | vf->rx_filter_qid = vf_rxq; | 783 | vf->rx_filter_qid = vf_rxq; |
777 | vf->rx_filtering = true; | 784 | vf->rx_filtering = true; |
778 | 785 | ||
779 | efx_sriov_reset_rx_filter(vf); | 786 | efx_siena_sriov_reset_rx_filter(vf); |
780 | queue_work(vfdi_workqueue, &efx->peer_work); | 787 | queue_work(vfdi_workqueue, &nic_data->peer_work); |
781 | 788 | ||
782 | return VFDI_RC_SUCCESS; | 789 | return VFDI_RC_SUCCESS; |
783 | } | 790 | } |
784 | 791 | ||
785 | static int efx_vfdi_remove_all_filters(struct efx_vf *vf) | 792 | static int efx_vfdi_remove_all_filters(struct efx_vf *vf) |
786 | { | 793 | { |
794 | struct efx_nic *efx = vf->efx; | ||
795 | struct siena_nic_data *nic_data = efx->nic_data; | ||
796 | |||
787 | vf->rx_filtering = false; | 797 | vf->rx_filtering = false; |
788 | efx_sriov_reset_rx_filter(vf); | 798 | efx_siena_sriov_reset_rx_filter(vf); |
789 | queue_work(vfdi_workqueue, &vf->efx->peer_work); | 799 | queue_work(vfdi_workqueue, &nic_data->peer_work); |
790 | 800 | ||
791 | return VFDI_RC_SUCCESS; | 801 | return VFDI_RC_SUCCESS; |
792 | } | 802 | } |
@@ -794,6 +804,7 @@ static int efx_vfdi_remove_all_filters(struct efx_vf *vf) | |||
794 | static int efx_vfdi_set_status_page(struct efx_vf *vf) | 804 | static int efx_vfdi_set_status_page(struct efx_vf *vf) |
795 | { | 805 | { |
796 | struct efx_nic *efx = vf->efx; | 806 | struct efx_nic *efx = vf->efx; |
807 | struct siena_nic_data *nic_data = efx->nic_data; | ||
797 | struct vfdi_req *req = vf->buf.addr; | 808 | struct vfdi_req *req = vf->buf.addr; |
798 | u64 page_count = req->u.set_status_page.peer_page_count; | 809 | u64 page_count = req->u.set_status_page.peer_page_count; |
799 | u64 max_page_count = | 810 | u64 max_page_count = |
@@ -809,7 +820,7 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf) | |||
809 | return VFDI_RC_EINVAL; | 820 | return VFDI_RC_EINVAL; |
810 | } | 821 | } |
811 | 822 | ||
812 | mutex_lock(&efx->local_lock); | 823 | mutex_lock(&nic_data->local_lock); |
813 | mutex_lock(&vf->status_lock); | 824 | mutex_lock(&vf->status_lock); |
814 | vf->status_addr = req->u.set_status_page.dma_addr; | 825 | vf->status_addr = req->u.set_status_page.dma_addr; |
815 | 826 | ||
@@ -828,9 +839,9 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf) | |||
828 | } | 839 | } |
829 | } | 840 | } |
830 | 841 | ||
831 | __efx_sriov_push_vf_status(vf); | 842 | __efx_siena_sriov_push_vf_status(vf); |
832 | mutex_unlock(&vf->status_lock); | 843 | mutex_unlock(&vf->status_lock); |
833 | mutex_unlock(&efx->local_lock); | 844 | mutex_unlock(&nic_data->local_lock); |
834 | 845 | ||
835 | return VFDI_RC_SUCCESS; | 846 | return VFDI_RC_SUCCESS; |
836 | } | 847 | } |
@@ -857,7 +868,7 @@ static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = { | |||
857 | [VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page, | 868 | [VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page, |
858 | }; | 869 | }; |
859 | 870 | ||
860 | static void efx_sriov_vfdi(struct work_struct *work) | 871 | static void efx_siena_sriov_vfdi(struct work_struct *work) |
861 | { | 872 | { |
862 | struct efx_vf *vf = container_of(work, struct efx_vf, req); | 873 | struct efx_vf *vf = container_of(work, struct efx_vf, req); |
863 | struct efx_nic *efx = vf->efx; | 874 | struct efx_nic *efx = vf->efx; |
@@ -872,7 +883,7 @@ static void efx_sriov_vfdi(struct work_struct *work) | |||
872 | copy[0].to_rid = efx->pci_dev->devfn; | 883 | copy[0].to_rid = efx->pci_dev->devfn; |
873 | copy[0].to_addr = vf->buf.dma_addr; | 884 | copy[0].to_addr = vf->buf.dma_addr; |
874 | copy[0].length = EFX_PAGE_SIZE; | 885 | copy[0].length = EFX_PAGE_SIZE; |
875 | rc = efx_sriov_memcpy(efx, copy, 1); | 886 | rc = efx_siena_sriov_memcpy(efx, copy, 1); |
876 | if (rc) { | 887 | if (rc) { |
877 | /* If we can't get the request, we can't reply to the caller */ | 888 | /* If we can't get the request, we can't reply to the caller */ |
878 | if (net_ratelimit()) | 889 | if (net_ratelimit()) |
@@ -916,7 +927,7 @@ static void efx_sriov_vfdi(struct work_struct *work) | |||
916 | copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op); | 927 | copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op); |
917 | copy[1].length = sizeof(req->op); | 928 | copy[1].length = sizeof(req->op); |
918 | 929 | ||
919 | (void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); | 930 | (void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); |
920 | } | 931 | } |
921 | 932 | ||
922 | 933 | ||
@@ -925,7 +936,8 @@ static void efx_sriov_vfdi(struct work_struct *work) | |||
925 | * event ring in guest memory with VFDI reset events, then (re-initialise) the | 936 | * event ring in guest memory with VFDI reset events, then (re-initialise) the |
926 | * event queue to raise an interrupt. The guest driver will then recover. | 937 | * event queue to raise an interrupt. The guest driver will then recover. |
927 | */ | 938 | */ |
928 | static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) | 939 | static void efx_siena_sriov_reset_vf(struct efx_vf *vf, |
940 | struct efx_buffer *buffer) | ||
929 | { | 941 | { |
930 | struct efx_nic *efx = vf->efx; | 942 | struct efx_nic *efx = vf->efx; |
931 | struct efx_memcpy_req copy_req[4]; | 943 | struct efx_memcpy_req copy_req[4]; |
@@ -961,7 +973,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) | |||
961 | copy_req[k].to_addr = vf->evq0_addrs[pos + k]; | 973 | copy_req[k].to_addr = vf->evq0_addrs[pos + k]; |
962 | copy_req[k].length = EFX_PAGE_SIZE; | 974 | copy_req[k].length = EFX_PAGE_SIZE; |
963 | } | 975 | } |
964 | rc = efx_sriov_memcpy(efx, copy_req, count); | 976 | rc = efx_siena_sriov_memcpy(efx, copy_req, count); |
965 | if (rc) { | 977 | if (rc) { |
966 | if (net_ratelimit()) | 978 | if (net_ratelimit()) |
967 | netif_err(efx, hw, efx->net_dev, | 979 | netif_err(efx, hw, efx->net_dev, |
@@ -974,7 +986,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) | |||
974 | /* Reinitialise, arm and trigger evq0 */ | 986 | /* Reinitialise, arm and trigger evq0 */ |
975 | abs_evq = abs_index(vf, 0); | 987 | abs_evq = abs_index(vf, 0); |
976 | buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0); | 988 | buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0); |
977 | efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count); | 989 | efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count); |
978 | 990 | ||
979 | EFX_POPULATE_OWORD_3(reg, | 991 | EFX_POPULATE_OWORD_3(reg, |
980 | FRF_CZ_TIMER_Q_EN, 1, | 992 | FRF_CZ_TIMER_Q_EN, 1, |
@@ -992,19 +1004,19 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer) | |||
992 | mutex_unlock(&vf->status_lock); | 1004 | mutex_unlock(&vf->status_lock); |
993 | } | 1005 | } |
994 | 1006 | ||
995 | static void efx_sriov_reset_vf_work(struct work_struct *work) | 1007 | static void efx_siena_sriov_reset_vf_work(struct work_struct *work) |
996 | { | 1008 | { |
997 | struct efx_vf *vf = container_of(work, struct efx_vf, req); | 1009 | struct efx_vf *vf = container_of(work, struct efx_vf, req); |
998 | struct efx_nic *efx = vf->efx; | 1010 | struct efx_nic *efx = vf->efx; |
999 | struct efx_buffer buf; | 1011 | struct efx_buffer buf; |
1000 | 1012 | ||
1001 | if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) { | 1013 | if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) { |
1002 | efx_sriov_reset_vf(vf, &buf); | 1014 | efx_siena_sriov_reset_vf(vf, &buf); |
1003 | efx_nic_free_buffer(efx, &buf); | 1015 | efx_nic_free_buffer(efx, &buf); |
1004 | } | 1016 | } |
1005 | } | 1017 | } |
1006 | 1018 | ||
1007 | static void efx_sriov_handle_no_channel(struct efx_nic *efx) | 1019 | static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx) |
1008 | { | 1020 | { |
1009 | netif_err(efx, drv, efx->net_dev, | 1021 | netif_err(efx, drv, efx->net_dev, |
1010 | "ERROR: IOV requires MSI-X and 1 additional interrupt" | 1022 | "ERROR: IOV requires MSI-X and 1 additional interrupt" |
@@ -1012,35 +1024,38 @@ static void efx_sriov_handle_no_channel(struct efx_nic *efx) | |||
1012 | efx->vf_count = 0; | 1024 | efx->vf_count = 0; |
1013 | } | 1025 | } |
1014 | 1026 | ||
1015 | static int efx_sriov_probe_channel(struct efx_channel *channel) | 1027 | static int efx_siena_sriov_probe_channel(struct efx_channel *channel) |
1016 | { | 1028 | { |
1017 | channel->efx->vfdi_channel = channel; | 1029 | struct siena_nic_data *nic_data = channel->efx->nic_data; |
1030 | nic_data->vfdi_channel = channel; | ||
1031 | |||
1018 | return 0; | 1032 | return 0; |
1019 | } | 1033 | } |
1020 | 1034 | ||
1021 | static void | 1035 | static void |
1022 | efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len) | 1036 | efx_siena_sriov_get_channel_name(struct efx_channel *channel, |
1037 | char *buf, size_t len) | ||
1023 | { | 1038 | { |
1024 | snprintf(buf, len, "%s-iov", channel->efx->name); | 1039 | snprintf(buf, len, "%s-iov", channel->efx->name); |
1025 | } | 1040 | } |
1026 | 1041 | ||
1027 | static const struct efx_channel_type efx_sriov_channel_type = { | 1042 | static const struct efx_channel_type efx_siena_sriov_channel_type = { |
1028 | .handle_no_channel = efx_sriov_handle_no_channel, | 1043 | .handle_no_channel = efx_siena_sriov_handle_no_channel, |
1029 | .pre_probe = efx_sriov_probe_channel, | 1044 | .pre_probe = efx_siena_sriov_probe_channel, |
1030 | .post_remove = efx_channel_dummy_op_void, | 1045 | .post_remove = efx_channel_dummy_op_void, |
1031 | .get_name = efx_sriov_get_channel_name, | 1046 | .get_name = efx_siena_sriov_get_channel_name, |
1032 | /* no copy operation; channel must not be reallocated */ | 1047 | /* no copy operation; channel must not be reallocated */ |
1033 | .keep_eventq = true, | 1048 | .keep_eventq = true, |
1034 | }; | 1049 | }; |
1035 | 1050 | ||
1036 | void efx_sriov_probe(struct efx_nic *efx) | 1051 | void efx_siena_sriov_probe(struct efx_nic *efx) |
1037 | { | 1052 | { |
1038 | unsigned count; | 1053 | unsigned count; |
1039 | 1054 | ||
1040 | if (!max_vfs) | 1055 | if (!max_vfs) |
1041 | return; | 1056 | return; |
1042 | 1057 | ||
1043 | if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count)) | 1058 | if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) |
1044 | return; | 1059 | return; |
1045 | if (count > 0 && count > max_vfs) | 1060 | if (count > 0 && count > max_vfs) |
1046 | count = max_vfs; | 1061 | count = max_vfs; |
@@ -1048,17 +1063,20 @@ void efx_sriov_probe(struct efx_nic *efx) | |||
1048 | /* efx_nic_dimension_resources() will reduce vf_count as appopriate */ | 1063 | /* efx_nic_dimension_resources() will reduce vf_count as appopriate */ |
1049 | efx->vf_count = count; | 1064 | efx->vf_count = count; |
1050 | 1065 | ||
1051 | efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type; | 1066 | efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type; |
1052 | } | 1067 | } |
1053 | 1068 | ||
1054 | /* Copy the list of individual addresses into the vfdi_status.peers | 1069 | /* Copy the list of individual addresses into the vfdi_status.peers |
1055 | * array and auxillary pages, protected by %local_lock. Drop that lock | 1070 | * array and auxillary pages, protected by %local_lock. Drop that lock |
1056 | * and then broadcast the address list to every VF. | 1071 | * and then broadcast the address list to every VF. |
1057 | */ | 1072 | */ |
1058 | static void efx_sriov_peer_work(struct work_struct *data) | 1073 | static void efx_siena_sriov_peer_work(struct work_struct *data) |
1059 | { | 1074 | { |
1060 | struct efx_nic *efx = container_of(data, struct efx_nic, peer_work); | 1075 | struct siena_nic_data *nic_data = container_of(data, |
1061 | struct vfdi_status *vfdi_status = efx->vfdi_status.addr; | 1076 | struct siena_nic_data, |
1077 | peer_work); | ||
1078 | struct efx_nic *efx = nic_data->efx; | ||
1079 | struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; | ||
1062 | struct efx_vf *vf; | 1080 | struct efx_vf *vf; |
1063 | struct efx_local_addr *local_addr; | 1081 | struct efx_local_addr *local_addr; |
1064 | struct vfdi_endpoint *peer; | 1082 | struct vfdi_endpoint *peer; |
@@ -1068,11 +1086,11 @@ static void efx_sriov_peer_work(struct work_struct *data) | |||
1068 | unsigned int peer_count; | 1086 | unsigned int peer_count; |
1069 | unsigned int pos; | 1087 | unsigned int pos; |
1070 | 1088 | ||
1071 | mutex_lock(&efx->local_lock); | 1089 | mutex_lock(&nic_data->local_lock); |
1072 | 1090 | ||
1073 | /* Move the existing peer pages off %local_page_list */ | 1091 | /* Move the existing peer pages off %local_page_list */ |
1074 | INIT_LIST_HEAD(&pages); | 1092 | INIT_LIST_HEAD(&pages); |
1075 | list_splice_tail_init(&efx->local_page_list, &pages); | 1093 | list_splice_tail_init(&nic_data->local_page_list, &pages); |
1076 | 1094 | ||
1077 | /* Populate the VF addresses starting from entry 1 (entry 0 is | 1095 | /* Populate the VF addresses starting from entry 1 (entry 0 is |
1078 | * the PF address) | 1096 | * the PF address) |
@@ -1094,7 +1112,7 @@ static void efx_sriov_peer_work(struct work_struct *data) | |||
1094 | } | 1112 | } |
1095 | 1113 | ||
1096 | /* Fill the remaining addresses */ | 1114 | /* Fill the remaining addresses */ |
1097 | list_for_each_entry(local_addr, &efx->local_addr_list, link) { | 1115 | list_for_each_entry(local_addr, &nic_data->local_addr_list, link) { |
1098 | ether_addr_copy(peer->mac_addr, local_addr->addr); | 1116 | ether_addr_copy(peer->mac_addr, local_addr->addr); |
1099 | peer->tci = 0; | 1117 | peer->tci = 0; |
1100 | ++peer; | 1118 | ++peer; |
@@ -1117,13 +1135,13 @@ static void efx_sriov_peer_work(struct work_struct *data) | |||
1117 | list_del(&epp->link); | 1135 | list_del(&epp->link); |
1118 | } | 1136 | } |
1119 | 1137 | ||
1120 | list_add_tail(&epp->link, &efx->local_page_list); | 1138 | list_add_tail(&epp->link, &nic_data->local_page_list); |
1121 | peer = (struct vfdi_endpoint *)epp->ptr; | 1139 | peer = (struct vfdi_endpoint *)epp->ptr; |
1122 | peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint); | 1140 | peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint); |
1123 | } | 1141 | } |
1124 | } | 1142 | } |
1125 | vfdi_status->peer_count = peer_count; | 1143 | vfdi_status->peer_count = peer_count; |
1126 | mutex_unlock(&efx->local_lock); | 1144 | mutex_unlock(&nic_data->local_lock); |
1127 | 1145 | ||
1128 | /* Free any now unused endpoint pages */ | 1146 | /* Free any now unused endpoint pages */ |
1129 | while (!list_empty(&pages)) { | 1147 | while (!list_empty(&pages)) { |
@@ -1141,25 +1159,26 @@ static void efx_sriov_peer_work(struct work_struct *data) | |||
1141 | 1159 | ||
1142 | mutex_lock(&vf->status_lock); | 1160 | mutex_lock(&vf->status_lock); |
1143 | if (vf->status_addr) | 1161 | if (vf->status_addr) |
1144 | __efx_sriov_push_vf_status(vf); | 1162 | __efx_siena_sriov_push_vf_status(vf); |
1145 | mutex_unlock(&vf->status_lock); | 1163 | mutex_unlock(&vf->status_lock); |
1146 | } | 1164 | } |
1147 | } | 1165 | } |
1148 | 1166 | ||
1149 | static void efx_sriov_free_local(struct efx_nic *efx) | 1167 | static void efx_siena_sriov_free_local(struct efx_nic *efx) |
1150 | { | 1168 | { |
1169 | struct siena_nic_data *nic_data = efx->nic_data; | ||
1151 | struct efx_local_addr *local_addr; | 1170 | struct efx_local_addr *local_addr; |
1152 | struct efx_endpoint_page *epp; | 1171 | struct efx_endpoint_page *epp; |
1153 | 1172 | ||
1154 | while (!list_empty(&efx->local_addr_list)) { | 1173 | while (!list_empty(&nic_data->local_addr_list)) { |
1155 | local_addr = list_first_entry(&efx->local_addr_list, | 1174 | local_addr = list_first_entry(&nic_data->local_addr_list, |
1156 | struct efx_local_addr, link); | 1175 | struct efx_local_addr, link); |
1157 | list_del(&local_addr->link); | 1176 | list_del(&local_addr->link); |
1158 | kfree(local_addr); | 1177 | kfree(local_addr); |
1159 | } | 1178 | } |
1160 | 1179 | ||
1161 | while (!list_empty(&efx->local_page_list)) { | 1180 | while (!list_empty(&nic_data->local_page_list)) { |
1162 | epp = list_first_entry(&efx->local_page_list, | 1181 | epp = list_first_entry(&nic_data->local_page_list, |
1163 | struct efx_endpoint_page, link); | 1182 | struct efx_endpoint_page, link); |
1164 | list_del(&epp->link); | 1183 | list_del(&epp->link); |
1165 | dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, | 1184 | dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, |
@@ -1168,7 +1187,7 @@ static void efx_sriov_free_local(struct efx_nic *efx) | |||
1168 | } | 1187 | } |
1169 | } | 1188 | } |
1170 | 1189 | ||
1171 | static int efx_sriov_vf_alloc(struct efx_nic *efx) | 1190 | static int efx_siena_sriov_vf_alloc(struct efx_nic *efx) |
1172 | { | 1191 | { |
1173 | unsigned index; | 1192 | unsigned index; |
1174 | struct efx_vf *vf; | 1193 | struct efx_vf *vf; |
@@ -1185,8 +1204,8 @@ static int efx_sriov_vf_alloc(struct efx_nic *efx) | |||
1185 | vf->rx_filter_id = -1; | 1204 | vf->rx_filter_id = -1; |
1186 | vf->tx_filter_mode = VF_TX_FILTER_AUTO; | 1205 | vf->tx_filter_mode = VF_TX_FILTER_AUTO; |
1187 | vf->tx_filter_id = -1; | 1206 | vf->tx_filter_id = -1; |
1188 | INIT_WORK(&vf->req, efx_sriov_vfdi); | 1207 | INIT_WORK(&vf->req, efx_siena_sriov_vfdi); |
1189 | INIT_WORK(&vf->reset_work, efx_sriov_reset_vf_work); | 1208 | INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work); |
1190 | init_waitqueue_head(&vf->flush_waitq); | 1209 | init_waitqueue_head(&vf->flush_waitq); |
1191 | mutex_init(&vf->status_lock); | 1210 | mutex_init(&vf->status_lock); |
1192 | mutex_init(&vf->txq_lock); | 1211 | mutex_init(&vf->txq_lock); |
@@ -1195,7 +1214,7 @@ static int efx_sriov_vf_alloc(struct efx_nic *efx) | |||
1195 | return 0; | 1214 | return 0; |
1196 | } | 1215 | } |
1197 | 1216 | ||
1198 | static void efx_sriov_vfs_fini(struct efx_nic *efx) | 1217 | static void efx_siena_sriov_vfs_fini(struct efx_nic *efx) |
1199 | { | 1218 | { |
1200 | struct efx_vf *vf; | 1219 | struct efx_vf *vf; |
1201 | unsigned int pos; | 1220 | unsigned int pos; |
@@ -1212,9 +1231,10 @@ static void efx_sriov_vfs_fini(struct efx_nic *efx) | |||
1212 | } | 1231 | } |
1213 | } | 1232 | } |
1214 | 1233 | ||
1215 | static int efx_sriov_vfs_init(struct efx_nic *efx) | 1234 | static int efx_siena_sriov_vfs_init(struct efx_nic *efx) |
1216 | { | 1235 | { |
1217 | struct pci_dev *pci_dev = efx->pci_dev; | 1236 | struct pci_dev *pci_dev = efx->pci_dev; |
1237 | struct siena_nic_data *nic_data = efx->nic_data; | ||
1218 | unsigned index, devfn, sriov, buftbl_base; | 1238 | unsigned index, devfn, sriov, buftbl_base; |
1219 | u16 offset, stride; | 1239 | u16 offset, stride; |
1220 | struct efx_vf *vf; | 1240 | struct efx_vf *vf; |
@@ -1227,7 +1247,7 @@ static int efx_sriov_vfs_init(struct efx_nic *efx) | |||
1227 | pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset); | 1247 | pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset); |
1228 | pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride); | 1248 | pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride); |
1229 | 1249 | ||
1230 | buftbl_base = efx->vf_buftbl_base; | 1250 | buftbl_base = nic_data->vf_buftbl_base; |
1231 | devfn = pci_dev->devfn + offset; | 1251 | devfn = pci_dev->devfn + offset; |
1232 | for (index = 0; index < efx->vf_count; ++index) { | 1252 | for (index = 0; index < efx->vf_count; ++index) { |
1233 | vf = efx->vf + index; | 1253 | vf = efx->vf + index; |
@@ -1253,13 +1273,14 @@ static int efx_sriov_vfs_init(struct efx_nic *efx) | |||
1253 | return 0; | 1273 | return 0; |
1254 | 1274 | ||
1255 | fail: | 1275 | fail: |
1256 | efx_sriov_vfs_fini(efx); | 1276 | efx_siena_sriov_vfs_fini(efx); |
1257 | return rc; | 1277 | return rc; |
1258 | } | 1278 | } |
1259 | 1279 | ||
1260 | int efx_sriov_init(struct efx_nic *efx) | 1280 | int efx_siena_sriov_init(struct efx_nic *efx) |
1261 | { | 1281 | { |
1262 | struct net_device *net_dev = efx->net_dev; | 1282 | struct net_device *net_dev = efx->net_dev; |
1283 | struct siena_nic_data *nic_data = efx->nic_data; | ||
1263 | struct vfdi_status *vfdi_status; | 1284 | struct vfdi_status *vfdi_status; |
1264 | int rc; | 1285 | int rc; |
1265 | 1286 | ||
@@ -1271,15 +1292,15 @@ int efx_sriov_init(struct efx_nic *efx) | |||
1271 | if (efx->vf_count == 0) | 1292 | if (efx->vf_count == 0) |
1272 | return 0; | 1293 | return 0; |
1273 | 1294 | ||
1274 | rc = efx_sriov_cmd(efx, true, NULL, NULL); | 1295 | rc = efx_siena_sriov_cmd(efx, true, NULL, NULL); |
1275 | if (rc) | 1296 | if (rc) |
1276 | goto fail_cmd; | 1297 | goto fail_cmd; |
1277 | 1298 | ||
1278 | rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status), | 1299 | rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status, |
1279 | GFP_KERNEL); | 1300 | sizeof(*vfdi_status), GFP_KERNEL); |
1280 | if (rc) | 1301 | if (rc) |
1281 | goto fail_status; | 1302 | goto fail_status; |
1282 | vfdi_status = efx->vfdi_status.addr; | 1303 | vfdi_status = nic_data->vfdi_status.addr; |
1283 | memset(vfdi_status, 0, sizeof(*vfdi_status)); | 1304 | memset(vfdi_status, 0, sizeof(*vfdi_status)); |
1284 | vfdi_status->version = 1; | 1305 | vfdi_status->version = 1; |
1285 | vfdi_status->length = sizeof(*vfdi_status); | 1306 | vfdi_status->length = sizeof(*vfdi_status); |
@@ -1289,16 +1310,16 @@ int efx_sriov_init(struct efx_nic *efx) | |||
1289 | vfdi_status->peer_count = 1 + efx->vf_count; | 1310 | vfdi_status->peer_count = 1 + efx->vf_count; |
1290 | vfdi_status->timer_quantum_ns = efx->timer_quantum_ns; | 1311 | vfdi_status->timer_quantum_ns = efx->timer_quantum_ns; |
1291 | 1312 | ||
1292 | rc = efx_sriov_vf_alloc(efx); | 1313 | rc = efx_siena_sriov_vf_alloc(efx); |
1293 | if (rc) | 1314 | if (rc) |
1294 | goto fail_alloc; | 1315 | goto fail_alloc; |
1295 | 1316 | ||
1296 | mutex_init(&efx->local_lock); | 1317 | mutex_init(&nic_data->local_lock); |
1297 | INIT_WORK(&efx->peer_work, efx_sriov_peer_work); | 1318 | INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work); |
1298 | INIT_LIST_HEAD(&efx->local_addr_list); | 1319 | INIT_LIST_HEAD(&nic_data->local_addr_list); |
1299 | INIT_LIST_HEAD(&efx->local_page_list); | 1320 | INIT_LIST_HEAD(&nic_data->local_page_list); |
1300 | 1321 | ||
1301 | rc = efx_sriov_vfs_init(efx); | 1322 | rc = efx_siena_sriov_vfs_init(efx); |
1302 | if (rc) | 1323 | if (rc) |
1303 | goto fail_vfs; | 1324 | goto fail_vfs; |
1304 | 1325 | ||
@@ -1307,7 +1328,7 @@ int efx_sriov_init(struct efx_nic *efx) | |||
1307 | efx->vf_init_count = efx->vf_count; | 1328 | efx->vf_init_count = efx->vf_count; |
1308 | rtnl_unlock(); | 1329 | rtnl_unlock(); |
1309 | 1330 | ||
1310 | efx_sriov_usrev(efx, true); | 1331 | efx_siena_sriov_usrev(efx, true); |
1311 | 1332 | ||
1312 | /* At this point we must be ready to accept VFDI requests */ | 1333 | /* At this point we must be ready to accept VFDI requests */ |
1313 | 1334 | ||
@@ -1321,34 +1342,35 @@ int efx_sriov_init(struct efx_nic *efx) | |||
1321 | return 0; | 1342 | return 0; |
1322 | 1343 | ||
1323 | fail_pci: | 1344 | fail_pci: |
1324 | efx_sriov_usrev(efx, false); | 1345 | efx_siena_sriov_usrev(efx, false); |
1325 | rtnl_lock(); | 1346 | rtnl_lock(); |
1326 | efx->vf_init_count = 0; | 1347 | efx->vf_init_count = 0; |
1327 | rtnl_unlock(); | 1348 | rtnl_unlock(); |
1328 | efx_sriov_vfs_fini(efx); | 1349 | efx_siena_sriov_vfs_fini(efx); |
1329 | fail_vfs: | 1350 | fail_vfs: |
1330 | cancel_work_sync(&efx->peer_work); | 1351 | cancel_work_sync(&nic_data->peer_work); |
1331 | efx_sriov_free_local(efx); | 1352 | efx_siena_sriov_free_local(efx); |
1332 | kfree(efx->vf); | 1353 | kfree(efx->vf); |
1333 | fail_alloc: | 1354 | fail_alloc: |
1334 | efx_nic_free_buffer(efx, &efx->vfdi_status); | 1355 | efx_nic_free_buffer(efx, &nic_data->vfdi_status); |
1335 | fail_status: | 1356 | fail_status: |
1336 | efx_sriov_cmd(efx, false, NULL, NULL); | 1357 | efx_siena_sriov_cmd(efx, false, NULL, NULL); |
1337 | fail_cmd: | 1358 | fail_cmd: |
1338 | return rc; | 1359 | return rc; |
1339 | } | 1360 | } |
1340 | 1361 | ||
1341 | void efx_sriov_fini(struct efx_nic *efx) | 1362 | void efx_siena_sriov_fini(struct efx_nic *efx) |
1342 | { | 1363 | { |
1343 | struct efx_vf *vf; | 1364 | struct efx_vf *vf; |
1344 | unsigned int pos; | 1365 | unsigned int pos; |
1366 | struct siena_nic_data *nic_data = efx->nic_data; | ||
1345 | 1367 | ||
1346 | if (efx->vf_init_count == 0) | 1368 | if (efx->vf_init_count == 0) |
1347 | return; | 1369 | return; |
1348 | 1370 | ||
1349 | /* Disable all interfaces to reconfiguration */ | 1371 | /* Disable all interfaces to reconfiguration */ |
1350 | BUG_ON(efx->vfdi_channel->enabled); | 1372 | BUG_ON(nic_data->vfdi_channel->enabled); |
1351 | efx_sriov_usrev(efx, false); | 1373 | efx_siena_sriov_usrev(efx, false); |
1352 | rtnl_lock(); | 1374 | rtnl_lock(); |
1353 | efx->vf_init_count = 0; | 1375 | efx->vf_init_count = 0; |
1354 | rtnl_unlock(); | 1376 | rtnl_unlock(); |
@@ -1359,19 +1381,19 @@ void efx_sriov_fini(struct efx_nic *efx) | |||
1359 | cancel_work_sync(&vf->req); | 1381 | cancel_work_sync(&vf->req); |
1360 | cancel_work_sync(&vf->reset_work); | 1382 | cancel_work_sync(&vf->reset_work); |
1361 | } | 1383 | } |
1362 | cancel_work_sync(&efx->peer_work); | 1384 | cancel_work_sync(&nic_data->peer_work); |
1363 | 1385 | ||
1364 | pci_disable_sriov(efx->pci_dev); | 1386 | pci_disable_sriov(efx->pci_dev); |
1365 | 1387 | ||
1366 | /* Tear down back-end state */ | 1388 | /* Tear down back-end state */ |
1367 | efx_sriov_vfs_fini(efx); | 1389 | efx_siena_sriov_vfs_fini(efx); |
1368 | efx_sriov_free_local(efx); | 1390 | efx_siena_sriov_free_local(efx); |
1369 | kfree(efx->vf); | 1391 | kfree(efx->vf); |
1370 | efx_nic_free_buffer(efx, &efx->vfdi_status); | 1392 | efx_nic_free_buffer(efx, &nic_data->vfdi_status); |
1371 | efx_sriov_cmd(efx, false, NULL, NULL); | 1393 | efx_siena_sriov_cmd(efx, false, NULL, NULL); |
1372 | } | 1394 | } |
1373 | 1395 | ||
1374 | void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event) | 1396 | void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event) |
1375 | { | 1397 | { |
1376 | struct efx_nic *efx = channel->efx; | 1398 | struct efx_nic *efx = channel->efx; |
1377 | struct efx_vf *vf; | 1399 | struct efx_vf *vf; |
@@ -1428,7 +1450,7 @@ error: | |||
1428 | vf->req_seqno = seq + 1; | 1450 | vf->req_seqno = seq + 1; |
1429 | } | 1451 | } |
1430 | 1452 | ||
1431 | void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i) | 1453 | void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i) |
1432 | { | 1454 | { |
1433 | struct efx_vf *vf; | 1455 | struct efx_vf *vf; |
1434 | 1456 | ||
@@ -1445,18 +1467,19 @@ void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i) | |||
1445 | vf->evq0_count = 0; | 1467 | vf->evq0_count = 0; |
1446 | } | 1468 | } |
1447 | 1469 | ||
1448 | void efx_sriov_mac_address_changed(struct efx_nic *efx) | 1470 | void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) |
1449 | { | 1471 | { |
1450 | struct vfdi_status *vfdi_status = efx->vfdi_status.addr; | 1472 | struct siena_nic_data *nic_data = efx->nic_data; |
1473 | struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; | ||
1451 | 1474 | ||
1452 | if (!efx->vf_init_count) | 1475 | if (!efx->vf_init_count) |
1453 | return; | 1476 | return; |
1454 | ether_addr_copy(vfdi_status->peers[0].mac_addr, | 1477 | ether_addr_copy(vfdi_status->peers[0].mac_addr, |
1455 | efx->net_dev->dev_addr); | 1478 | efx->net_dev->dev_addr); |
1456 | queue_work(vfdi_workqueue, &efx->peer_work); | 1479 | queue_work(vfdi_workqueue, &nic_data->peer_work); |
1457 | } | 1480 | } |
1458 | 1481 | ||
1459 | void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) | 1482 | void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) |
1460 | { | 1483 | { |
1461 | struct efx_vf *vf; | 1484 | struct efx_vf *vf; |
1462 | unsigned queue, qid; | 1485 | unsigned queue, qid; |
@@ -1475,7 +1498,7 @@ void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) | |||
1475 | wake_up(&vf->flush_waitq); | 1498 | wake_up(&vf->flush_waitq); |
1476 | } | 1499 | } |
1477 | 1500 | ||
1478 | void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) | 1501 | void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) |
1479 | { | 1502 | { |
1480 | struct efx_vf *vf; | 1503 | struct efx_vf *vf; |
1481 | unsigned ev_failed, queue, qid; | 1504 | unsigned ev_failed, queue, qid; |
@@ -1500,7 +1523,7 @@ void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) | |||
1500 | } | 1523 | } |
1501 | 1524 | ||
1502 | /* Called from napi. Schedule the reset work item */ | 1525 | /* Called from napi. Schedule the reset work item */ |
1503 | void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) | 1526 | void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) |
1504 | { | 1527 | { |
1505 | struct efx_vf *vf; | 1528 | struct efx_vf *vf; |
1506 | unsigned int rel; | 1529 | unsigned int rel; |
@@ -1516,7 +1539,7 @@ void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) | |||
1516 | } | 1539 | } |
1517 | 1540 | ||
1518 | /* Reset all VFs */ | 1541 | /* Reset all VFs */ |
1519 | void efx_sriov_reset(struct efx_nic *efx) | 1542 | void efx_siena_sriov_reset(struct efx_nic *efx) |
1520 | { | 1543 | { |
1521 | unsigned int vf_i; | 1544 | unsigned int vf_i; |
1522 | struct efx_buffer buf; | 1545 | struct efx_buffer buf; |
@@ -1527,15 +1550,15 @@ void efx_sriov_reset(struct efx_nic *efx) | |||
1527 | if (efx->vf_init_count == 0) | 1550 | if (efx->vf_init_count == 0) |
1528 | return; | 1551 | return; |
1529 | 1552 | ||
1530 | efx_sriov_usrev(efx, true); | 1553 | efx_siena_sriov_usrev(efx, true); |
1531 | (void)efx_sriov_cmd(efx, true, NULL, NULL); | 1554 | (void)efx_siena_sriov_cmd(efx, true, NULL, NULL); |
1532 | 1555 | ||
1533 | if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) | 1556 | if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) |
1534 | return; | 1557 | return; |
1535 | 1558 | ||
1536 | for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { | 1559 | for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { |
1537 | vf = efx->vf + vf_i; | 1560 | vf = efx->vf + vf_i; |
1538 | efx_sriov_reset_vf(vf, &buf); | 1561 | efx_siena_sriov_reset_vf(vf, &buf); |
1539 | } | 1562 | } |
1540 | 1563 | ||
1541 | efx_nic_free_buffer(efx, &buf); | 1564 | efx_nic_free_buffer(efx, &buf); |
@@ -1543,8 +1566,8 @@ void efx_sriov_reset(struct efx_nic *efx) | |||
1543 | 1566 | ||
1544 | int efx_init_sriov(void) | 1567 | int efx_init_sriov(void) |
1545 | { | 1568 | { |
1546 | /* A single threaded workqueue is sufficient. efx_sriov_vfdi() and | 1569 | /* A single threaded workqueue is sufficient. efx_siena_sriov_vfdi() and |
1547 | * efx_sriov_peer_work() spend almost all their time sleeping for | 1570 | * efx_siena_sriov_peer_work() spend almost all their time sleeping for |
1548 | * MCDI to complete anyway | 1571 | * MCDI to complete anyway |
1549 | */ | 1572 | */ |
1550 | vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi"); | 1573 | vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi"); |
@@ -1559,7 +1582,7 @@ void efx_fini_sriov(void) | |||
1559 | destroy_workqueue(vfdi_workqueue); | 1582 | destroy_workqueue(vfdi_workqueue); |
1560 | } | 1583 | } |
1561 | 1584 | ||
1562 | int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) | 1585 | int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) |
1563 | { | 1586 | { |
1564 | struct efx_nic *efx = netdev_priv(net_dev); | 1587 | struct efx_nic *efx = netdev_priv(net_dev); |
1565 | struct efx_vf *vf; | 1588 | struct efx_vf *vf; |
@@ -1570,14 +1593,14 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) | |||
1570 | 1593 | ||
1571 | mutex_lock(&vf->status_lock); | 1594 | mutex_lock(&vf->status_lock); |
1572 | ether_addr_copy(vf->addr.mac_addr, mac); | 1595 | ether_addr_copy(vf->addr.mac_addr, mac); |
1573 | __efx_sriov_update_vf_addr(vf); | 1596 | __efx_siena_sriov_update_vf_addr(vf); |
1574 | mutex_unlock(&vf->status_lock); | 1597 | mutex_unlock(&vf->status_lock); |
1575 | 1598 | ||
1576 | return 0; | 1599 | return 0; |
1577 | } | 1600 | } |
1578 | 1601 | ||
1579 | int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, | 1602 | int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, |
1580 | u16 vlan, u8 qos) | 1603 | u16 vlan, u8 qos) |
1581 | { | 1604 | { |
1582 | struct efx_nic *efx = netdev_priv(net_dev); | 1605 | struct efx_nic *efx = netdev_priv(net_dev); |
1583 | struct efx_vf *vf; | 1606 | struct efx_vf *vf; |
@@ -1590,14 +1613,14 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, | |||
1590 | mutex_lock(&vf->status_lock); | 1613 | mutex_lock(&vf->status_lock); |
1591 | tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT); | 1614 | tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT); |
1592 | vf->addr.tci = htons(tci); | 1615 | vf->addr.tci = htons(tci); |
1593 | __efx_sriov_update_vf_addr(vf); | 1616 | __efx_siena_sriov_update_vf_addr(vf); |
1594 | mutex_unlock(&vf->status_lock); | 1617 | mutex_unlock(&vf->status_lock); |
1595 | 1618 | ||
1596 | return 0; | 1619 | return 0; |
1597 | } | 1620 | } |
1598 | 1621 | ||
1599 | int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, | 1622 | int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, |
1600 | bool spoofchk) | 1623 | bool spoofchk) |
1601 | { | 1624 | { |
1602 | struct efx_nic *efx = netdev_priv(net_dev); | 1625 | struct efx_nic *efx = netdev_priv(net_dev); |
1603 | struct efx_vf *vf; | 1626 | struct efx_vf *vf; |
@@ -1620,8 +1643,8 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, | |||
1620 | return rc; | 1643 | return rc; |
1621 | } | 1644 | } |
1622 | 1645 | ||
1623 | int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, | 1646 | int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i, |
1624 | struct ifla_vf_info *ivi) | 1647 | struct ifla_vf_info *ivi) |
1625 | { | 1648 | { |
1626 | struct efx_nic *efx = netdev_priv(net_dev); | 1649 | struct efx_nic *efx = netdev_priv(net_dev); |
1627 | struct efx_vf *vf; | 1650 | struct efx_vf *vf; |
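The siena_sriov.c hunks above follow one pattern: SR-IOV bookkeeping that used to sit directly in struct efx_nic (the vfdi_status buffer, the local address and page lists, local_lock, peer_work and the VFDI channel pointer) moves into the Siena-specific struct siena_nic_data and is reached through efx->nic_data. Below is a minimal, compilable sketch of that pattern; all *_example names are illustrative stand-ins, not sfc code.

/* Generic per-device state keeps only an opaque pointer; the
 * implementation-specific SR-IOV fields live in a private structure
 * reached through it, as in the hunks above.
 */
#include <stdio.h>

struct nic_example {                 /* stands in for struct efx_nic */
	const char *name;
	void *nic_data;              /* implementation-private state */
};

struct siena_data_example {          /* stands in for struct siena_nic_data */
	struct nic_example *nic;     /* back-pointer, as peer_work needs */
	int vf_count;
	int vf_buftbl_base;
};

static void sriov_probe_example(struct nic_example *nic)
{
	/* Reach the private state the same way the driver now does:
	 * one cast of nic->nic_data at the top of the function. */
	struct siena_data_example *data = nic->nic_data;

	data->vf_count = 4;
	data->vf_buftbl_base = 128;
	printf("%s: %d VFs, buffer table base %d\n",
	       nic->name, data->vf_count, data->vf_buftbl_base);
}

int main(void)
{
	struct nic_example nic = { .name = "nic0" };
	struct siena_data_example data = { .nic = &nic };

	nic.nic_data = &data;
	sriov_probe_example(&nic);
	return 0;
}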
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 593e6c4144a7..12a174e01d56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
@@ -341,6 +341,9 @@ struct stmmac_desc_ops { | |||
341 | int (*get_rx_timestamp_status) (void *desc, u32 ats); | 341 | int (*get_rx_timestamp_status) (void *desc, u32 ats); |
342 | }; | 342 | }; |
343 | 343 | ||
344 | extern const struct stmmac_desc_ops enh_desc_ops; | ||
345 | extern const struct stmmac_desc_ops ndesc_ops; | ||
346 | |||
344 | struct stmmac_dma_ops { | 347 | struct stmmac_dma_ops { |
345 | /* DMA core initialization */ | 348 | /* DMA core initialization */ |
346 | int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb, | 349 | int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb, |
@@ -410,6 +413,8 @@ struct stmmac_hwtimestamp { | |||
410 | u64(*get_systime) (void __iomem *ioaddr); | 413 | u64(*get_systime) (void __iomem *ioaddr); |
411 | }; | 414 | }; |
412 | 415 | ||
416 | extern const struct stmmac_hwtimestamp stmmac_ptp; | ||
417 | |||
413 | struct mac_link { | 418 | struct mac_link { |
414 | int port; | 419 | int port; |
415 | int duplex; | 420 | int duplex; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 23aad9adef5a..709798b6aec3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
@@ -122,9 +122,7 @@ int stmmac_mdio_unregister(struct net_device *ndev); | |||
122 | int stmmac_mdio_register(struct net_device *ndev); | 122 | int stmmac_mdio_register(struct net_device *ndev); |
123 | int stmmac_mdio_reset(struct mii_bus *mii); | 123 | int stmmac_mdio_reset(struct mii_bus *mii); |
124 | void stmmac_set_ethtool_ops(struct net_device *netdev); | 124 | void stmmac_set_ethtool_ops(struct net_device *netdev); |
125 | extern const struct stmmac_desc_ops enh_desc_ops; | 125 | |
126 | extern const struct stmmac_desc_ops ndesc_ops; | ||
127 | extern const struct stmmac_hwtimestamp stmmac_ptp; | ||
128 | int stmmac_ptp_register(struct stmmac_priv *priv); | 126 | int stmmac_ptp_register(struct stmmac_priv *priv); |
129 | void stmmac_ptp_unregister(struct stmmac_priv *priv); | 127 | void stmmac_ptp_unregister(struct stmmac_priv *priv); |
130 | int stmmac_resume(struct net_device *ndev); | 128 | int stmmac_resume(struct net_device *ndev); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index e17a970eaf2b..5084699baeab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | |||
@@ -71,64 +71,46 @@ static void stmmac_default_data(void) | |||
71 | static int stmmac_pci_probe(struct pci_dev *pdev, | 71 | static int stmmac_pci_probe(struct pci_dev *pdev, |
72 | const struct pci_device_id *id) | 72 | const struct pci_device_id *id) |
73 | { | 73 | { |
74 | int ret = 0; | 74 | struct stmmac_priv *priv; |
75 | void __iomem *addr = NULL; | ||
76 | struct stmmac_priv *priv = NULL; | ||
77 | int i; | 75 | int i; |
76 | int ret; | ||
78 | 77 | ||
79 | /* Enable pci device */ | 78 | /* Enable pci device */ |
80 | ret = pci_enable_device(pdev); | 79 | ret = pcim_enable_device(pdev); |
81 | if (ret) { | 80 | if (ret) { |
82 | pr_err("%s : ERROR: failed to enable %s device\n", __func__, | 81 | dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", |
83 | pci_name(pdev)); | 82 | __func__); |
84 | return ret; | 83 | return ret; |
85 | } | 84 | } |
86 | if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) { | ||
87 | pr_err("%s: ERROR: failed to get PCI region\n", __func__); | ||
88 | ret = -ENODEV; | ||
89 | goto err_out_req_reg_failed; | ||
90 | } | ||
91 | 85 | ||
92 | /* Get the base address of device */ | 86 | /* Get the base address of device */ |
93 | for (i = 0; i <= 5; i++) { | 87 | for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { |
94 | if (pci_resource_len(pdev, i) == 0) | 88 | if (pci_resource_len(pdev, i) == 0) |
95 | continue; | 89 | continue; |
96 | addr = pci_iomap(pdev, i, 0); | 90 | ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev)); |
97 | if (addr == NULL) { | 91 | if (ret) |
98 | pr_err("%s: ERROR: cannot map register memory aborting", | 92 | return ret; |
99 | __func__); | ||
100 | ret = -EIO; | ||
101 | goto err_out_map_failed; | ||
102 | } | ||
103 | break; | 93 | break; |
104 | } | 94 | } |
95 | |||
105 | pci_set_master(pdev); | 96 | pci_set_master(pdev); |
106 | 97 | ||
107 | stmmac_default_data(); | 98 | stmmac_default_data(); |
108 | 99 | ||
109 | priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr); | 100 | priv = stmmac_dvr_probe(&pdev->dev, &plat_dat, |
101 | pcim_iomap_table(pdev)[i]); | ||
110 | if (IS_ERR(priv)) { | 102 | if (IS_ERR(priv)) { |
111 | pr_err("%s: main driver probe failed", __func__); | 103 | dev_err(&pdev->dev, "%s: main driver probe failed\n", __func__); |
112 | ret = PTR_ERR(priv); | 104 | return PTR_ERR(priv); |
113 | goto err_out; | ||
114 | } | 105 | } |
115 | priv->dev->irq = pdev->irq; | 106 | priv->dev->irq = pdev->irq; |
116 | priv->wol_irq = pdev->irq; | 107 | priv->wol_irq = pdev->irq; |
117 | 108 | ||
118 | pci_set_drvdata(pdev, priv->dev); | 109 | pci_set_drvdata(pdev, priv->dev); |
119 | 110 | ||
120 | pr_debug("STMMAC platform driver registration completed"); | 111 | dev_dbg(&pdev->dev, "STMMAC PCI driver registration completed\n"); |
121 | 112 | ||
122 | return 0; | 113 | return 0; |
123 | |||
124 | err_out: | ||
125 | pci_clear_master(pdev); | ||
126 | err_out_map_failed: | ||
127 | pci_release_regions(pdev); | ||
128 | err_out_req_reg_failed: | ||
129 | pci_disable_device(pdev); | ||
130 | |||
131 | return ret; | ||
132 | } | 114 | } |
133 | 115 | ||
134 | /** | 116 | /** |
@@ -141,39 +123,30 @@ err_out_req_reg_failed: | |||
141 | static void stmmac_pci_remove(struct pci_dev *pdev) | 123 | static void stmmac_pci_remove(struct pci_dev *pdev) |
142 | { | 124 | { |
143 | struct net_device *ndev = pci_get_drvdata(pdev); | 125 | struct net_device *ndev = pci_get_drvdata(pdev); |
144 | struct stmmac_priv *priv = netdev_priv(ndev); | ||
145 | 126 | ||
146 | stmmac_dvr_remove(ndev); | 127 | stmmac_dvr_remove(ndev); |
147 | |||
148 | pci_iounmap(pdev, priv->ioaddr); | ||
149 | pci_release_regions(pdev); | ||
150 | pci_disable_device(pdev); | ||
151 | } | 128 | } |
152 | 129 | ||
153 | #ifdef CONFIG_PM | 130 | #ifdef CONFIG_PM_SLEEP |
154 | static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state) | 131 | static int stmmac_pci_suspend(struct device *dev) |
155 | { | 132 | { |
133 | struct pci_dev *pdev = to_pci_dev(dev); | ||
156 | struct net_device *ndev = pci_get_drvdata(pdev); | 134 | struct net_device *ndev = pci_get_drvdata(pdev); |
157 | int ret; | ||
158 | |||
159 | ret = stmmac_suspend(ndev); | ||
160 | pci_save_state(pdev); | ||
161 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
162 | 135 | ||
163 | return ret; | 136 | return stmmac_suspend(ndev); |
164 | } | 137 | } |
165 | 138 | ||
166 | static int stmmac_pci_resume(struct pci_dev *pdev) | 139 | static int stmmac_pci_resume(struct device *dev) |
167 | { | 140 | { |
141 | struct pci_dev *pdev = to_pci_dev(dev); | ||
168 | struct net_device *ndev = pci_get_drvdata(pdev); | 142 | struct net_device *ndev = pci_get_drvdata(pdev); |
169 | 143 | ||
170 | pci_set_power_state(pdev, PCI_D0); | ||
171 | pci_restore_state(pdev); | ||
172 | |||
173 | return stmmac_resume(ndev); | 144 | return stmmac_resume(ndev); |
174 | } | 145 | } |
175 | #endif | 146 | #endif |
176 | 147 | ||
148 | static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); | ||
149 | |||
177 | #define STMMAC_VENDOR_ID 0x700 | 150 | #define STMMAC_VENDOR_ID 0x700 |
178 | #define STMMAC_DEVICE_ID 0x1108 | 151 | #define STMMAC_DEVICE_ID 0x1108 |
179 | 152 | ||
@@ -190,10 +163,9 @@ struct pci_driver stmmac_pci_driver = { | |||
190 | .id_table = stmmac_id_table, | 163 | .id_table = stmmac_id_table, |
191 | .probe = stmmac_pci_probe, | 164 | .probe = stmmac_pci_probe, |
192 | .remove = stmmac_pci_remove, | 165 | .remove = stmmac_pci_remove, |
193 | #ifdef CONFIG_PM | 166 | .driver = { |
194 | .suspend = stmmac_pci_suspend, | 167 | .pm = &stmmac_pm_ops, |
195 | .resume = stmmac_pci_resume, | 168 | }, |
196 | #endif | ||
197 | }; | 169 | }; |
198 | 170 | ||
199 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); | 171 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); |
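The stmmac_pci.c hunks above replace the hand-rolled PCI setup and its goto-based unwind with managed helpers (pcim_enable_device(), pcim_iomap_regions()), which is why remove() shrinks to a single stmmac_dvr_remove() call, and they swap the legacy PCI .suspend/.resume callbacks for a dev_pm_ops table built with SIMPLE_DEV_PM_OPS. The standalone sketch below models, in plain C, why managed acquisition lets the error paths disappear; it only mimics the devres idea, it is not the kernel implementation, and all names are hypothetical.

/* A "managed" acquisition registers its own release action, so a probe
 * that fails part-way simply returns and the registered actions run
 * later in reverse order.
 */
#include <stdio.h>

#define MAX_ACTIONS 8

typedef void (*release_fn)(void);

static release_fn actions[MAX_ACTIONS];
static int nr_actions;

static void add_action(release_fn fn) { actions[nr_actions++] = fn; }

static void release_all(void)
{
	while (nr_actions)
		actions[--nr_actions]();     /* reverse order, like devres */
}

static void disable_device(void) { puts("device disabled"); }
static void unmap_bar(void)      { puts("BAR unmapped"); }

static int managed_enable(void) { add_action(disable_device); return 0; }
static int managed_iomap(void)  { add_action(unmap_bar);      return 0; }

static int probe_example(void)
{
	int ret;

	ret = managed_enable();
	if (ret)
		return ret;     /* nothing acquired yet, nothing to undo */

	ret = managed_iomap();
	if (ret)
		return ret;     /* the earlier action cleans up for us */

	puts("probe complete");
	return 0;
}

int main(void)
{
	if (probe_example() == 0)
		puts("device bound");
	release_all();          /* what the driver core does at remove time
				 * (or after a failed probe) */
	return 0;
}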
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index e7bb63b2d525..5c5fb59adf76 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c | |||
@@ -627,7 +627,7 @@ static void maybe_tx_wakeup(struct vnet_port *port) | |||
627 | struct vio_dring_state *dr; | 627 | struct vio_dring_state *dr; |
628 | 628 | ||
629 | dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | 629 | dr = &port->vio.drings[VIO_DRIVER_TX_RING]; |
630 | netif_tx_wake_queue(txq); | 630 | netif_tx_wake_queue(txq); |
631 | } | 631 | } |
632 | __netif_tx_unlock(txq); | 632 | __netif_tx_unlock(txq); |
633 | } | 633 | } |
@@ -691,7 +691,6 @@ ldc_ctrl: | |||
691 | pkt->end_idx = -1; | 691 | pkt->end_idx = -1; |
692 | goto napi_resume; | 692 | goto napi_resume; |
693 | } | 693 | } |
694 | ldc_read: | ||
695 | err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); | 694 | err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); |
696 | if (unlikely(err < 0)) { | 695 | if (unlikely(err < 0)) { |
697 | if (err == -ECONNRESET) | 696 | if (err == -ECONNRESET) |
@@ -722,8 +721,8 @@ napi_resume: | |||
722 | err = vnet_rx(port, &msgbuf, &npkts, budget); | 721 | err = vnet_rx(port, &msgbuf, &npkts, budget); |
723 | if (npkts >= budget) | 722 | if (npkts >= budget) |
724 | break; | 723 | break; |
725 | if (npkts == 0 && err != -ECONNRESET) | 724 | if (npkts == 0) |
726 | goto ldc_read; | 725 | break; |
727 | } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { | 726 | } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { |
728 | err = vnet_ack(port, &msgbuf); | 727 | err = vnet_ack(port, &msgbuf); |
729 | if (err > 0) | 728 | if (err > 0) |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index fd41675bc038..66b139a8b6ca 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -461,11 +461,7 @@ enum rtl8152_flags { | |||
461 | 461 | ||
462 | /* Define these values to match your device */ | 462 | /* Define these values to match your device */ |
463 | #define VENDOR_ID_REALTEK 0x0bda | 463 | #define VENDOR_ID_REALTEK 0x0bda |
464 | #define PRODUCT_ID_RTL8152 0x8152 | ||
465 | #define PRODUCT_ID_RTL8153 0x8153 | ||
466 | |||
467 | #define VENDOR_ID_SAMSUNG 0x04e8 | 464 | #define VENDOR_ID_SAMSUNG 0x04e8 |
468 | #define PRODUCT_ID_SAMSUNG 0xa101 | ||
469 | 465 | ||
470 | #define MCU_TYPE_PLA 0x0100 | 466 | #define MCU_TYPE_PLA 0x0100 |
471 | #define MCU_TYPE_USB 0x0000 | 467 | #define MCU_TYPE_USB 0x0000 |
@@ -3742,66 +3738,43 @@ static void rtl8153_unload(struct r8152 *tp) | |||
3742 | r8153_power_cut_en(tp, false); | 3738 | r8153_power_cut_en(tp, false); |
3743 | } | 3739 | } |
3744 | 3740 | ||
3745 | static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) | 3741 | static int rtl_ops_init(struct r8152 *tp) |
3746 | { | 3742 | { |
3747 | struct rtl_ops *ops = &tp->rtl_ops; | 3743 | struct rtl_ops *ops = &tp->rtl_ops; |
3748 | int ret = -ENODEV; | 3744 | int ret = 0; |
3749 | 3745 | ||
3750 | switch (id->idVendor) { | 3746 | switch (tp->version) { |
3751 | case VENDOR_ID_REALTEK: | 3747 | case RTL_VER_01: |
3752 | switch (id->idProduct) { | 3748 | case RTL_VER_02: |
3753 | case PRODUCT_ID_RTL8152: | 3749 | ops->init = r8152b_init; |
3754 | ops->init = r8152b_init; | 3750 | ops->enable = rtl8152_enable; |
3755 | ops->enable = rtl8152_enable; | 3751 | ops->disable = rtl8152_disable; |
3756 | ops->disable = rtl8152_disable; | 3752 | ops->up = rtl8152_up; |
3757 | ops->up = rtl8152_up; | 3753 | ops->down = rtl8152_down; |
3758 | ops->down = rtl8152_down; | 3754 | ops->unload = rtl8152_unload; |
3759 | ops->unload = rtl8152_unload; | 3755 | ops->eee_get = r8152_get_eee; |
3760 | ops->eee_get = r8152_get_eee; | 3756 | ops->eee_set = r8152_set_eee; |
3761 | ops->eee_set = r8152_set_eee; | ||
3762 | ret = 0; | ||
3763 | break; | ||
3764 | case PRODUCT_ID_RTL8153: | ||
3765 | ops->init = r8153_init; | ||
3766 | ops->enable = rtl8153_enable; | ||
3767 | ops->disable = rtl8153_disable; | ||
3768 | ops->up = rtl8153_up; | ||
3769 | ops->down = rtl8153_down; | ||
3770 | ops->unload = rtl8153_unload; | ||
3771 | ops->eee_get = r8153_get_eee; | ||
3772 | ops->eee_set = r8153_set_eee; | ||
3773 | ret = 0; | ||
3774 | break; | ||
3775 | default: | ||
3776 | break; | ||
3777 | } | ||
3778 | break; | 3757 | break; |
3779 | 3758 | ||
3780 | case VENDOR_ID_SAMSUNG: | 3759 | case RTL_VER_03: |
3781 | switch (id->idProduct) { | 3760 | case RTL_VER_04: |
3782 | case PRODUCT_ID_SAMSUNG: | 3761 | case RTL_VER_05: |
3783 | ops->init = r8153_init; | 3762 | ops->init = r8153_init; |
3784 | ops->enable = rtl8153_enable; | 3763 | ops->enable = rtl8153_enable; |
3785 | ops->disable = rtl8153_disable; | 3764 | ops->disable = rtl8153_disable; |
3786 | ops->up = rtl8153_up; | 3765 | ops->up = rtl8153_up; |
3787 | ops->down = rtl8153_down; | 3766 | ops->down = rtl8153_down; |
3788 | ops->unload = rtl8153_unload; | 3767 | ops->unload = rtl8153_unload; |
3789 | ops->eee_get = r8153_get_eee; | 3768 | ops->eee_get = r8153_get_eee; |
3790 | ops->eee_set = r8153_set_eee; | 3769 | ops->eee_set = r8153_set_eee; |
3791 | ret = 0; | ||
3792 | break; | ||
3793 | default: | ||
3794 | break; | ||
3795 | } | ||
3796 | break; | 3770 | break; |
3797 | 3771 | ||
3798 | default: | 3772 | default: |
3773 | ret = -ENODEV; | ||
3774 | netif_err(tp, probe, tp->netdev, "Unknown Device\n"); | ||
3799 | break; | 3775 | break; |
3800 | } | 3776 | } |
3801 | 3777 | ||
3802 | if (ret) | ||
3803 | netif_err(tp, probe, tp->netdev, "Unknown Device\n"); | ||
3804 | |||
3805 | return ret; | 3778 | return ret; |
3806 | } | 3779 | } |
3807 | 3780 | ||
@@ -3833,7 +3806,8 @@ static int rtl8152_probe(struct usb_interface *intf, | |||
3833 | tp->netdev = netdev; | 3806 | tp->netdev = netdev; |
3834 | tp->intf = intf; | 3807 | tp->intf = intf; |
3835 | 3808 | ||
3836 | ret = rtl_ops_init(tp, id); | 3809 | r8152b_get_version(tp); |
3810 | ret = rtl_ops_init(tp); | ||
3837 | if (ret) | 3811 | if (ret) |
3838 | goto out; | 3812 | goto out; |
3839 | 3813 | ||
@@ -3866,11 +3840,9 @@ static int rtl8152_probe(struct usb_interface *intf, | |||
3866 | tp->mii.phy_id_mask = 0x3f; | 3840 | tp->mii.phy_id_mask = 0x3f; |
3867 | tp->mii.reg_num_mask = 0x1f; | 3841 | tp->mii.reg_num_mask = 0x1f; |
3868 | tp->mii.phy_id = R8152_PHY_ID; | 3842 | tp->mii.phy_id = R8152_PHY_ID; |
3869 | tp->mii.supports_gmii = 0; | ||
3870 | 3843 | ||
3871 | intf->needs_remote_wakeup = 1; | 3844 | intf->needs_remote_wakeup = 1; |
3872 | 3845 | ||
3873 | r8152b_get_version(tp); | ||
3874 | tp->rtl_ops.init(tp); | 3846 | tp->rtl_ops.init(tp); |
3875 | set_ethernet_addr(tp); | 3847 | set_ethernet_addr(tp); |
3876 | 3848 | ||
@@ -3922,9 +3894,9 @@ static void rtl8152_disconnect(struct usb_interface *intf) | |||
3922 | 3894 | ||
3923 | /* table of devices that work with this driver */ | 3895 | /* table of devices that work with this driver */ |
3924 | static struct usb_device_id rtl8152_table[] = { | 3896 | static struct usb_device_id rtl8152_table[] = { |
3925 | {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, | 3897 | {USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, |
3926 | {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)}, | 3898 | {USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, |
3927 | {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)}, | 3899 | {USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, |
3928 | {} | 3900 | {} |
3929 | }; | 3901 | }; |
3930 | 3902 | ||
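In the r8152.c hunks above, rtl_ops_init() now selects its operations table from the chip version detected by r8152b_get_version() instead of from the USB vendor/product ID, so re-badged devices (such as the Samsung dongle still listed in rtl8152_table) pick up the right ops without a per-vendor switch. Below is a small sketch of that dispatch-by-version shape; the version constants and ops members are illustrative, not the driver's.

/* Select an ops table by detected hardware version rather than by the
 * ID the device enumerated with.
 */
#include <stdio.h>

enum ver_example { VER_01, VER_02, VER_03, VER_04, VER_05, VER_UNKNOWN };

struct ops_example {
	const char *family;
	void (*init)(void);
};

static void init_8152_example(void) { puts("8152-class init"); }
static void init_8153_example(void) { puts("8153-class init"); }

static int ops_init_example(enum ver_example ver, struct ops_example *ops)
{
	switch (ver) {
	case VER_01:
	case VER_02:
		ops->family = "rtl8152";
		ops->init = init_8152_example;
		return 0;
	case VER_03:
	case VER_04:
	case VER_05:
		ops->family = "rtl8153";
		ops->init = init_8153_example;
		return 0;
	default:
		return -1;      /* unknown device */
	}
}

int main(void)
{
	struct ops_example ops;

	if (ops_init_example(VER_04, &ops) == 0) {
		printf("using %s ops\n", ops.family);
		ops.init();
	}
	return 0;
}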
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 45755f9aa3f9..4a509f715fe8 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -82,6 +82,16 @@ MODULE_PARM_DESC(max_queues, | |||
82 | static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; | 82 | static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; |
83 | module_param(fatal_skb_slots, uint, 0444); | 83 | module_param(fatal_skb_slots, uint, 0444); |
84 | 84 | ||
85 | /* The amount to copy out of the first guest Tx slot into the skb's | ||
86 | * linear area. If the first slot has more data, it will be mapped | ||
87 | * and put into the first frag. | ||
88 | * | ||
89 | * This is sized to avoid pulling headers from the frags for most | ||
90 | * TCP/IP packets. | ||
91 | */ | ||
92 | #define XEN_NETBACK_TX_COPY_LEN 128 | ||
93 | |||
94 | |||
85 | static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, | 95 | static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, |
86 | u8 status); | 96 | u8 status); |
87 | 97 | ||
@@ -125,13 +135,6 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf) | |||
125 | pending_tx_info[0]); | 135 | pending_tx_info[0]); |
126 | } | 136 | } |
127 | 137 | ||
128 | /* This is a minimum size for the linear area to avoid lots of | ||
129 | * calls to __pskb_pull_tail() as we set up checksum offsets. The | ||
130 | * value 128 was chosen as it covers all IPv4 and most likely | ||
131 | * IPv6 headers. | ||
132 | */ | ||
133 | #define PKT_PROT_LEN 128 | ||
134 | |||
135 | static u16 frag_get_pending_idx(skb_frag_t *frag) | 138 | static u16 frag_get_pending_idx(skb_frag_t *frag) |
136 | { | 139 | { |
137 | return (u16)frag->page_offset; | 140 | return (u16)frag->page_offset; |
@@ -1446,9 +1449,9 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, | |||
1446 | index = pending_index(queue->pending_cons); | 1449 | index = pending_index(queue->pending_cons); |
1447 | pending_idx = queue->pending_ring[index]; | 1450 | pending_idx = queue->pending_ring[index]; |
1448 | 1451 | ||
1449 | data_len = (txreq.size > PKT_PROT_LEN && | 1452 | data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN && |
1450 | ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? | 1453 | ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? |
1451 | PKT_PROT_LEN : txreq.size; | 1454 | XEN_NETBACK_TX_COPY_LEN : txreq.size; |
1452 | 1455 | ||
1453 | skb = xenvif_alloc_skb(data_len); | 1456 | skb = xenvif_alloc_skb(data_len); |
1454 | if (unlikely(skb == NULL)) { | 1457 | if (unlikely(skb == NULL)) { |
@@ -1653,11 +1656,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) | |||
1653 | } | 1656 | } |
1654 | } | 1657 | } |
1655 | 1658 | ||
1656 | if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) { | ||
1657 | int target = min_t(int, skb->len, PKT_PROT_LEN); | ||
1658 | __pskb_pull_tail(skb, target - skb_headlen(skb)); | ||
1659 | } | ||
1660 | |||
1661 | skb->dev = queue->vif->dev; | 1659 | skb->dev = queue->vif->dev; |
1662 | skb->protocol = eth_type_trans(skb, skb->dev); | 1660 | skb->protocol = eth_type_trans(skb, skb->dev); |
1663 | skb_reset_network_header(skb); | 1661 | skb_reset_network_header(skb); |
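The netback hunks above rename PKT_PROT_LEN to XEN_NETBACK_TX_COPY_LEN and shift its meaning: it is now the amount copied out of the first guest Tx slot into the skb's linear area, with any remaining data mapped into the first frag, which is why the later __pskb_pull_tail() fixup in xenvif_tx_submit() could be dropped. A standalone sketch of the data_len calculation from the hunk follows; the request structure and the slot limit are simplified stand-ins, not the Xen definitions.

/* Copy at most the first TX_COPY_LEN bytes into the linear area and
 * leave the rest of a large, multi-slot packet to be mapped as a frag.
 */
#include <stdio.h>

#define TX_COPY_LEN_EXAMPLE 128   /* mirrors XEN_NETBACK_TX_COPY_LEN */
#define LEGACY_SLOTS_MAX    18    /* stand-in for XEN_NETBK_LEGACY_SLOTS_MAX */

struct txreq_example {
	unsigned int size;        /* bytes in the first guest slot */
};

static unsigned int linear_len_example(const struct txreq_example *txreq,
				       int nr_slots)
{
	/* Copy only the head when the slot is large and within the slot
	 * limit; a small packet is copied whole. */
	if (txreq->size > TX_COPY_LEN_EXAMPLE && nr_slots < LEGACY_SLOTS_MAX)
		return TX_COPY_LEN_EXAMPLE;
	return txreq->size;
}

int main(void)
{
	struct txreq_example small = { .size = 60 };
	struct txreq_example big   = { .size = 1514 };

	printf("small packet: copy %u bytes\n", linear_len_example(&small, 1));
	printf("large packet: copy %u bytes\n", linear_len_example(&big, 3));
	return 0;
}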