Diffstat (limited to 'drivers/net/ethernet/sfc/ef10.c')
-rw-r--r-- | drivers/net/ethernet/sfc/ef10.c | 1265
1 file changed, 1056 insertions, 209 deletions
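The diff below, against the sfc EF10 driver, splits several operations (memory BAR lookup, MAC address retrieval, statistics updates, probe) into PF and VF variants and selects between them through the NIC-type method table (for example efx->type->mem_bar, efx->type->is_vf and efx->type->get_mac_address). As rough orientation only, here is a standalone C sketch of that dispatch pattern; it is not driver code, and every name in it is illustrative rather than taken from the kernel sources.

/* Illustrative sketch only (not kernel code): shared driver logic calls
 * through a per-NIC-type method table, so the PF and VF personalities can
 * plug in different callbacks while the common paths stay identical.
 */
#include <stdio.h>
#include <string.h>

struct nic;

struct nic_type {
	int is_vf;                                  /* like efx->type->is_vf */
	int mem_bar;                                /* like efx->type->mem_bar */
	int (*get_mac_address)(struct nic *n, unsigned char *mac);
};

struct nic {
	const struct nic_type *type;
	unsigned char perm_addr[6];
};

/* The PF reads its own MAC; the VF asks for the address of its assigned vport. */
static int pf_get_mac(struct nic *n, unsigned char *mac)
{
	(void)n;
	memcpy(mac, "\x00\x0f\x53\x00\x00\x01", 6); /* made-up value */
	return 0;
}

static int vf_get_mac(struct nic *n, unsigned char *mac)
{
	(void)n;
	memcpy(mac, "\x00\x0f\x53\x00\x00\x02", 6); /* made-up value */
	return 0;
}

static const struct nic_type pf_type = { 0, 2, pf_get_mac };
static const struct nic_type vf_type = { 1, 0, vf_get_mac };

/* Shared probe path: no PF assumptions, everything goes via n->type. */
static int probe(struct nic *n)
{
	return n->type->get_mac_address(n, n->perm_addr);
}

int main(void)
{
	struct nic pf = { .type = &pf_type };
	struct nic vf = { .type = &vf_type };

	probe(&pf);
	probe(&vf);
	printf("pf: is_vf=%d mem_bar=%d\n", pf.type->is_vf, pf.type->mem_bar);
	printf("vf: is_vf=%d mem_bar=%d\n", vf.type->is_vf, vf.type->mem_bar);
	return 0;
}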
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index fbb6cfa0f5f1..605cc8948594 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -15,6 +15,7 @@
15 | #include "nic.h" | 15 | #include "nic.h" |
16 | #include "workarounds.h" | 16 | #include "workarounds.h" |
17 | #include "selftest.h" | 17 | #include "selftest.h" |
18 | #include "ef10_sriov.h" | ||
18 | #include <linux/in.h> | 19 | #include <linux/in.h> |
19 | #include <linux/jhash.h> | 20 | #include <linux/jhash.h> |
20 | #include <linux/wait.h> | 21 | #include <linux/wait.h> |
@@ -30,6 +31,9 @@ enum {
30 | 31 | ||
31 | /* The reserved RSS context value */ | 32 | /* The reserved RSS context value */ |
32 | #define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff | 33 | #define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff |
34 | /* The maximum size of a shared RSS context */ | ||
35 | /* TODO: this should really be from the mcdi protocol export */ | ||
36 | #define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL | ||
33 | 37 | ||
34 | /* The filter table(s) are managed by firmware and we have write-only | 38 | /* The filter table(s) are managed by firmware and we have write-only |
35 | * access. When removing filters we must identify them to the | 39 | * access. When removing filters we must identify them to the |
@@ -77,7 +81,6 @@ struct efx_ef10_filter_table {
77 | /* An arbitrary search limit for the software hash table */ | 81 | /* An arbitrary search limit for the software hash table */ |
78 | #define EFX_EF10_FILTER_SEARCH_LIMIT 200 | 82 | #define EFX_EF10_FILTER_SEARCH_LIMIT 200 |
79 | 83 | ||
80 | static void efx_ef10_rx_push_rss_config(struct efx_nic *efx); | ||
81 | static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); | 84 | static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); |
82 | static void efx_ef10_filter_table_remove(struct efx_nic *efx); | 85 | static void efx_ef10_filter_table_remove(struct efx_nic *efx); |
83 | 86 | ||
@@ -92,9 +95,55 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
92 | 95 | ||
93 | static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) | 96 | static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) |
94 | { | 97 | { |
95 | return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]); | 98 | int bar; |
99 | |||
100 | bar = efx->type->mem_bar; | ||
101 | return resource_size(&efx->pci_dev->resource[bar]); | ||
102 | } | ||
103 | |||
104 | static bool efx_ef10_is_vf(struct efx_nic *efx) | ||
105 | { | ||
106 | return efx->type->is_vf; | ||
107 | } | ||
108 | |||
109 | static int efx_ef10_get_pf_index(struct efx_nic *efx) | ||
110 | { | ||
111 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); | ||
112 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
113 | size_t outlen; | ||
114 | int rc; | ||
115 | |||
116 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, | ||
117 | sizeof(outbuf), &outlen); | ||
118 | if (rc) | ||
119 | return rc; | ||
120 | if (outlen < sizeof(outbuf)) | ||
121 | return -EIO; | ||
122 | |||
123 | nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF); | ||
124 | return 0; | ||
96 | } | 125 | } |
97 | 126 | ||
127 | #ifdef CONFIG_SFC_SRIOV | ||
128 | static int efx_ef10_get_vf_index(struct efx_nic *efx) | ||
129 | { | ||
130 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); | ||
131 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
132 | size_t outlen; | ||
133 | int rc; | ||
134 | |||
135 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, | ||
136 | sizeof(outbuf), &outlen); | ||
137 | if (rc) | ||
138 | return rc; | ||
139 | if (outlen < sizeof(outbuf)) | ||
140 | return -EIO; | ||
141 | |||
142 | nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF); | ||
143 | return 0; | ||
144 | } | ||
145 | #endif | ||
146 | |||
98 | static int efx_ef10_init_datapath_caps(struct efx_nic *efx) | 147 | static int efx_ef10_init_datapath_caps(struct efx_nic *efx) |
99 | { | 148 | { |
100 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); | 149 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); |
@@ -117,6 +166,13 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
117 | nic_data->datapath_caps = | 166 | nic_data->datapath_caps = |
118 | MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); | 167 | MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); |
119 | 168 | ||
169 | /* record the DPCPU firmware IDs to determine VEB vswitching support. | ||
170 | */ | ||
171 | nic_data->rx_dpcpu_fw_id = | ||
172 | MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); | ||
173 | nic_data->tx_dpcpu_fw_id = | ||
174 | MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); | ||
175 | |||
120 | if (!(nic_data->datapath_caps & | 176 | if (!(nic_data->datapath_caps & |
121 | (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { | 177 | (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { |
122 | netif_err(efx, drv, efx->net_dev, | 178 | netif_err(efx, drv, efx->net_dev, |
@@ -147,7 +203,7 @@ static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
147 | return rc > 0 ? rc : -ERANGE; | 203 | return rc > 0 ? rc : -ERANGE; |
148 | } | 204 | } |
149 | 205 | ||
150 | static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address) | 206 | static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address) |
151 | { | 207 | { |
152 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); | 208 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); |
153 | size_t outlen; | 209 | size_t outlen; |
@@ -167,9 +223,66 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
167 | return 0; | 223 | return 0; |
168 | } | 224 | } |
169 | 225 | ||
226 | static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address) | ||
227 | { | ||
228 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN); | ||
229 | MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX); | ||
230 | size_t outlen; | ||
231 | int num_addrs, rc; | ||
232 | |||
233 | MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, | ||
234 | EVB_PORT_ID_ASSIGNED); | ||
235 | rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf, | ||
236 | sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); | ||
237 | |||
238 | if (rc) | ||
239 | return rc; | ||
240 | if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) | ||
241 | return -EIO; | ||
242 | |||
243 | num_addrs = MCDI_DWORD(outbuf, | ||
244 | VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT); | ||
245 | |||
246 | WARN_ON(num_addrs != 1); | ||
247 | |||
248 | ether_addr_copy(mac_address, | ||
249 | MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR)); | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | static ssize_t efx_ef10_show_link_control_flag(struct device *dev, | ||
255 | struct device_attribute *attr, | ||
256 | char *buf) | ||
257 | { | ||
258 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | ||
259 | |||
260 | return sprintf(buf, "%d\n", | ||
261 | ((efx->mcdi->fn_flags) & | ||
262 | (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) | ||
263 | ? 1 : 0); | ||
264 | } | ||
265 | |||
266 | static ssize_t efx_ef10_show_primary_flag(struct device *dev, | ||
267 | struct device_attribute *attr, | ||
268 | char *buf) | ||
269 | { | ||
270 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | ||
271 | |||
272 | return sprintf(buf, "%d\n", | ||
273 | ((efx->mcdi->fn_flags) & | ||
274 | (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) | ||
275 | ? 1 : 0); | ||
276 | } | ||
277 | |||
278 | static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag, | ||
279 | NULL); | ||
280 | static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); | ||
281 | |||
170 | static int efx_ef10_probe(struct efx_nic *efx) | 282 | static int efx_ef10_probe(struct efx_nic *efx) |
171 | { | 283 | { |
172 | struct efx_ef10_nic_data *nic_data; | 284 | struct efx_ef10_nic_data *nic_data; |
285 | struct net_device *net_dev = efx->net_dev; | ||
173 | int i, rc; | 286 | int i, rc; |
174 | 287 | ||
175 | /* We can have one VI for each 8K region. However, until we | 288 | /* We can have one VI for each 8K region. However, until we |
@@ -178,7 +291,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
178 | efx->max_channels = | 291 | efx->max_channels = |
179 | min_t(unsigned int, | 292 | min_t(unsigned int, |
180 | EFX_MAX_CHANNELS, | 293 | EFX_MAX_CHANNELS, |
181 | resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / | 294 | efx_ef10_mem_map_size(efx) / |
182 | (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); | 295 | (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); |
183 | if (WARN_ON(efx->max_channels == 0)) | 296 | if (WARN_ON(efx->max_channels == 0)) |
184 | return -EIO; | 297 | return -EIO; |
@@ -188,6 +301,9 @@ static int efx_ef10_probe(struct efx_nic *efx)
188 | return -ENOMEM; | 301 | return -ENOMEM; |
189 | efx->nic_data = nic_data; | 302 | efx->nic_data = nic_data; |
190 | 303 | ||
304 | /* we assume later that we can copy from this buffer in dwords */ | ||
305 | BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4); | ||
306 | |||
191 | rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, | 307 | rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, |
192 | 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); | 308 | 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); |
193 | if (rc) | 309 | if (rc) |
@@ -209,6 +325,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
209 | 325 | ||
210 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | 326 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
211 | 327 | ||
328 | nic_data->vport_id = EVB_PORT_ID_ASSIGNED; | ||
329 | |||
212 | /* In case we're recovering from a crash (kexec), we want to | 330 | /* In case we're recovering from a crash (kexec), we want to |
213 | * cancel any outstanding request by the previous user of this | 331 | * cancel any outstanding request by the previous user of this |
214 | * function. We send a special message using the least | 332 | * function. We send a special message using the least |
@@ -230,45 +348,85 @@ static int efx_ef10_probe(struct efx_nic *efx)
230 | if (rc) | 348 | if (rc) |
231 | goto fail3; | 349 | goto fail3; |
232 | 350 | ||
351 | rc = device_create_file(&efx->pci_dev->dev, | ||
352 | &dev_attr_link_control_flag); | ||
353 | if (rc) | ||
354 | goto fail3; | ||
355 | |||
356 | rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag); | ||
357 | if (rc) | ||
358 | goto fail4; | ||
359 | |||
360 | rc = efx_ef10_get_pf_index(efx); | ||
361 | if (rc) | ||
362 | goto fail5; | ||
363 | |||
233 | rc = efx_ef10_init_datapath_caps(efx); | 364 | rc = efx_ef10_init_datapath_caps(efx); |
234 | if (rc < 0) | 365 | if (rc < 0) |
235 | goto fail3; | 366 | goto fail5; |
236 | 367 | ||
237 | efx->rx_packet_len_offset = | 368 | efx->rx_packet_len_offset = |
238 | ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; | 369 | ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; |
239 | 370 | ||
240 | rc = efx_mcdi_port_get_number(efx); | 371 | rc = efx_mcdi_port_get_number(efx); |
241 | if (rc < 0) | 372 | if (rc < 0) |
242 | goto fail3; | 373 | goto fail5; |
243 | efx->port_num = rc; | 374 | efx->port_num = rc; |
375 | net_dev->dev_port = rc; | ||
244 | 376 | ||
245 | rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr); | 377 | rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); |
246 | if (rc) | 378 | if (rc) |
247 | goto fail3; | 379 | goto fail5; |
248 | 380 | ||
249 | rc = efx_ef10_get_sysclk_freq(efx); | 381 | rc = efx_ef10_get_sysclk_freq(efx); |
250 | if (rc < 0) | 382 | if (rc < 0) |
251 | goto fail3; | 383 | goto fail5; |
252 | efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ | 384 | efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ |
253 | 385 | ||
254 | /* Check whether firmware supports bug 35388 workaround */ | 386 | /* Check whether firmware supports bug 35388 workaround. |
387 | * First try to enable it, then if we get EPERM, just | ||
388 | * ask if it's already enabled | ||
389 | */ | ||
255 | rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); | 390 | rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); |
256 | if (rc == 0) | 391 | if (rc == 0) { |
257 | nic_data->workaround_35388 = true; | 392 | nic_data->workaround_35388 = true; |
258 | else if (rc != -ENOSYS && rc != -ENOENT) | 393 | } else if (rc == -EPERM) { |
259 | goto fail3; | 394 | unsigned int enabled; |
395 | |||
396 | rc = efx_mcdi_get_workarounds(efx, NULL, &enabled); | ||
397 | if (rc) | ||
398 | goto fail3; | ||
399 | nic_data->workaround_35388 = enabled & | ||
400 | MC_CMD_GET_WORKAROUNDS_OUT_BUG35388; | ||
401 | } else if (rc != -ENOSYS && rc != -ENOENT) { | ||
402 | goto fail5; | ||
403 | } | ||
260 | netif_dbg(efx, probe, efx->net_dev, | 404 | netif_dbg(efx, probe, efx->net_dev, |
261 | "workaround for bug 35388 is %sabled\n", | 405 | "workaround for bug 35388 is %sabled\n", |
262 | nic_data->workaround_35388 ? "en" : "dis"); | 406 | nic_data->workaround_35388 ? "en" : "dis"); |
263 | 407 | ||
264 | rc = efx_mcdi_mon_probe(efx); | 408 | rc = efx_mcdi_mon_probe(efx); |
265 | if (rc) | 409 | if (rc && rc != -EPERM) |
266 | goto fail3; | 410 | goto fail5; |
267 | 411 | ||
268 | efx_ptp_probe(efx, NULL); | 412 | efx_ptp_probe(efx, NULL); |
269 | 413 | ||
414 | #ifdef CONFIG_SFC_SRIOV | ||
415 | if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) { | ||
416 | struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; | ||
417 | struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); | ||
418 | |||
419 | efx_pf->type->get_mac_address(efx_pf, nic_data->port_id); | ||
420 | } else | ||
421 | #endif | ||
422 | ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr); | ||
423 | |||
270 | return 0; | 424 | return 0; |
271 | 425 | ||
426 | fail5: | ||
427 | device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); | ||
428 | fail4: | ||
429 | device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); | ||
272 | fail3: | 430 | fail3: |
273 | efx_mcdi_fini(efx); | 431 | efx_mcdi_fini(efx); |
274 | fail2: | 432 | fail2: |
@@ -281,7 +439,7 @@ fail1:
281 | 439 | ||
282 | static int efx_ef10_free_vis(struct efx_nic *efx) | 440 | static int efx_ef10_free_vis(struct efx_nic *efx) |
283 | { | 441 | { |
284 | MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0); | 442 | MCDI_DECLARE_BUF_ERR(outbuf); |
285 | size_t outlen; | 443 | size_t outlen; |
286 | int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0, | 444 | int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0, |
287 | outbuf, sizeof(outbuf), &outlen); | 445 | outbuf, sizeof(outbuf), &outlen); |
@@ -352,9 +510,9 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
352 | static int efx_ef10_link_piobufs(struct efx_nic *efx) | 510 | static int efx_ef10_link_piobufs(struct efx_nic *efx) |
353 | { | 511 | { |
354 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | 512 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
355 | MCDI_DECLARE_BUF(inbuf, | 513 | _MCDI_DECLARE_BUF(inbuf, |
356 | max(MC_CMD_LINK_PIOBUF_IN_LEN, | 514 | max(MC_CMD_LINK_PIOBUF_IN_LEN, |
357 | MC_CMD_UNLINK_PIOBUF_IN_LEN)); | 515 | MC_CMD_UNLINK_PIOBUF_IN_LEN)); |
358 | struct efx_channel *channel; | 516 | struct efx_channel *channel; |
359 | struct efx_tx_queue *tx_queue; | 517 | struct efx_tx_queue *tx_queue; |
360 | unsigned int offset, index; | 518 | unsigned int offset, index; |
@@ -363,6 +521,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
363 | BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); | 521 | BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); |
364 | BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); | 522 | BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); |
365 | 523 | ||
524 | memset(inbuf, 0, sizeof(inbuf)); | ||
525 | |||
366 | /* Link a buffer to each VI in the write-combining mapping */ | 526 | /* Link a buffer to each VI in the write-combining mapping */ |
367 | for (index = 0; index < nic_data->n_piobufs; ++index) { | 527 | for (index = 0; index < nic_data->n_piobufs; ++index) { |
368 | MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, | 528 | MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, |
@@ -475,6 +635,25 @@ static void efx_ef10_remove(struct efx_nic *efx)
475 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | 635 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
476 | int rc; | 636 | int rc; |
477 | 637 | ||
638 | #ifdef CONFIG_SFC_SRIOV | ||
639 | struct efx_ef10_nic_data *nic_data_pf; | ||
640 | struct pci_dev *pci_dev_pf; | ||
641 | struct efx_nic *efx_pf; | ||
642 | struct ef10_vf *vf; | ||
643 | |||
644 | if (efx->pci_dev->is_virtfn) { | ||
645 | pci_dev_pf = efx->pci_dev->physfn; | ||
646 | if (pci_dev_pf) { | ||
647 | efx_pf = pci_get_drvdata(pci_dev_pf); | ||
648 | nic_data_pf = efx_pf->nic_data; | ||
649 | vf = nic_data_pf->vf + nic_data->vf_index; | ||
650 | vf->efx = NULL; | ||
651 | } else | ||
652 | netif_info(efx, drv, efx->net_dev, | ||
653 | "Could not get the PF id from VF\n"); | ||
654 | } | ||
655 | #endif | ||
656 | |||
478 | efx_ptp_remove(efx); | 657 | efx_ptp_remove(efx); |
479 | 658 | ||
480 | efx_mcdi_mon_remove(efx); | 659 | efx_mcdi_mon_remove(efx); |
@@ -490,11 +669,120 @@ static void efx_ef10_remove(struct efx_nic *efx)
490 | if (!nic_data->must_restore_piobufs) | 669 | if (!nic_data->must_restore_piobufs) |
491 | efx_ef10_free_piobufs(efx); | 670 | efx_ef10_free_piobufs(efx); |
492 | 671 | ||
672 | device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); | ||
673 | device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); | ||
674 | |||
493 | efx_mcdi_fini(efx); | 675 | efx_mcdi_fini(efx); |
494 | efx_nic_free_buffer(efx, &nic_data->mcdi_buf); | 676 | efx_nic_free_buffer(efx, &nic_data->mcdi_buf); |
495 | kfree(nic_data); | 677 | kfree(nic_data); |
496 | } | 678 | } |
497 | 679 | ||
680 | static int efx_ef10_probe_pf(struct efx_nic *efx) | ||
681 | { | ||
682 | return efx_ef10_probe(efx); | ||
683 | } | ||
684 | |||
685 | int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) | ||
686 | { | ||
687 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); | ||
688 | |||
689 | MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); | ||
690 | return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), | ||
691 | NULL, 0, NULL); | ||
692 | } | ||
693 | |||
694 | int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) | ||
695 | { | ||
696 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); | ||
697 | |||
698 | MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); | ||
699 | return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), | ||
700 | NULL, 0, NULL); | ||
701 | } | ||
702 | |||
703 | int efx_ef10_vport_add_mac(struct efx_nic *efx, | ||
704 | unsigned int port_id, u8 *mac) | ||
705 | { | ||
706 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); | ||
707 | |||
708 | MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); | ||
709 | ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); | ||
710 | |||
711 | return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, | ||
712 | sizeof(inbuf), NULL, 0, NULL); | ||
713 | } | ||
714 | |||
715 | int efx_ef10_vport_del_mac(struct efx_nic *efx, | ||
716 | unsigned int port_id, u8 *mac) | ||
717 | { | ||
718 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); | ||
719 | |||
720 | MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); | ||
721 | ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); | ||
722 | |||
723 | return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, | ||
724 | sizeof(inbuf), NULL, 0, NULL); | ||
725 | } | ||
726 | |||
727 | #ifdef CONFIG_SFC_SRIOV | ||
728 | static int efx_ef10_probe_vf(struct efx_nic *efx) | ||
729 | { | ||
730 | int rc; | ||
731 | struct pci_dev *pci_dev_pf; | ||
732 | |||
733 | /* If the parent PF has no VF data structure, it doesn't know about this | ||
734 | * VF so fail probe. The VF needs to be re-created. This can happen | ||
735 | * if the PF driver is unloaded while the VF is assigned to a guest. | ||
736 | */ | ||
737 | pci_dev_pf = efx->pci_dev->physfn; | ||
738 | if (pci_dev_pf) { | ||
739 | struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); | ||
740 | struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data; | ||
741 | |||
742 | if (!nic_data_pf->vf) { | ||
743 | netif_info(efx, drv, efx->net_dev, | ||
744 | "The VF cannot link to its parent PF; " | ||
745 | "please destroy and re-create the VF\n"); | ||
746 | return -EBUSY; | ||
747 | } | ||
748 | } | ||
749 | |||
750 | rc = efx_ef10_probe(efx); | ||
751 | if (rc) | ||
752 | return rc; | ||
753 | |||
754 | rc = efx_ef10_get_vf_index(efx); | ||
755 | if (rc) | ||
756 | goto fail; | ||
757 | |||
758 | if (efx->pci_dev->is_virtfn) { | ||
759 | if (efx->pci_dev->physfn) { | ||
760 | struct efx_nic *efx_pf = | ||
761 | pci_get_drvdata(efx->pci_dev->physfn); | ||
762 | struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data; | ||
763 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
764 | |||
765 | nic_data_p->vf[nic_data->vf_index].efx = efx; | ||
766 | nic_data_p->vf[nic_data->vf_index].pci_dev = | ||
767 | efx->pci_dev; | ||
768 | } else | ||
769 | netif_info(efx, drv, efx->net_dev, | ||
770 | "Could not get the PF id from VF\n"); | ||
771 | } | ||
772 | |||
773 | return 0; | ||
774 | |||
775 | fail: | ||
776 | efx_ef10_remove(efx); | ||
777 | return rc; | ||
778 | } | ||
779 | #else | ||
780 | static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused))) | ||
781 | { | ||
782 | return 0; | ||
783 | } | ||
784 | #endif | ||
785 | |||
498 | static int efx_ef10_alloc_vis(struct efx_nic *efx, | 786 | static int efx_ef10_alloc_vis(struct efx_nic *efx, |
499 | unsigned int min_vis, unsigned int max_vis) | 787 | unsigned int min_vis, unsigned int max_vis) |
500 | { | 788 | { |
@@ -687,7 +975,9 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
687 | nic_data->must_restore_piobufs = false; | 975 | nic_data->must_restore_piobufs = false; |
688 | } | 976 | } |
689 | 977 | ||
690 | efx_ef10_rx_push_rss_config(efx); | 978 | /* don't fail init if RSS setup doesn't work */ |
979 | efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table); | ||
980 | |||
691 | return 0; | 981 | return 0; |
692 | } | 982 | } |
693 | 983 | ||
@@ -702,6 +992,14 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
702 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | 992 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
703 | } | 993 | } |
704 | 994 | ||
995 | static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) | ||
996 | { | ||
997 | if (reason == RESET_TYPE_MC_FAILURE) | ||
998 | return RESET_TYPE_DATAPATH; | ||
999 | |||
1000 | return efx_mcdi_map_reset_reason(reason); | ||
1001 | } | ||
1002 | |||
705 | static int efx_ef10_map_reset_flags(u32 *flags) | 1003 | static int efx_ef10_map_reset_flags(u32 *flags) |
706 | { | 1004 | { |
707 | enum { | 1005 | enum { |
@@ -760,93 +1058,112 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
760 | [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } | 1058 | [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } |
761 | 1059 | ||
762 | static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { | 1060 | static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { |
763 | EF10_DMA_STAT(tx_bytes, TX_BYTES), | 1061 | EF10_DMA_STAT(port_tx_bytes, TX_BYTES), |
764 | EF10_DMA_STAT(tx_packets, TX_PKTS), | 1062 | EF10_DMA_STAT(port_tx_packets, TX_PKTS), |
765 | EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS), | 1063 | EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), |
766 | EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS), | 1064 | EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), |
767 | EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), | 1065 | EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), |
768 | EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), | 1066 | EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), |
769 | EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), | 1067 | EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), |
770 | EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS), | 1068 | EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), |
771 | EF10_DMA_STAT(tx_64, TX_64_PKTS), | 1069 | EF10_DMA_STAT(port_tx_64, TX_64_PKTS), |
772 | EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), | 1070 | EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), |
773 | EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), | 1071 | EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), |
774 | EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), | 1072 | EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), |
775 | EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), | 1073 | EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), |
776 | EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), | 1074 | EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), |
777 | EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), | 1075 | EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), |
778 | EF10_DMA_STAT(rx_bytes, RX_BYTES), | 1076 | EF10_DMA_STAT(port_rx_bytes, RX_BYTES), |
779 | EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES), | 1077 | EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), |
780 | EF10_OTHER_STAT(rx_good_bytes), | 1078 | EF10_OTHER_STAT(port_rx_good_bytes), |
781 | EF10_OTHER_STAT(rx_bad_bytes), | 1079 | EF10_OTHER_STAT(port_rx_bad_bytes), |
782 | EF10_DMA_STAT(rx_packets, RX_PKTS), | 1080 | EF10_DMA_STAT(port_rx_packets, RX_PKTS), |
783 | EF10_DMA_STAT(rx_good, RX_GOOD_PKTS), | 1081 | EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), |
784 | EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), | 1082 | EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), |
785 | EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS), | 1083 | EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), |
786 | EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS), | 1084 | EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), |
787 | EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), | 1085 | EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), |
788 | EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), | 1086 | EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), |
789 | EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), | 1087 | EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), |
790 | EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), | 1088 | EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), |
791 | EF10_DMA_STAT(rx_64, RX_64_PKTS), | 1089 | EF10_DMA_STAT(port_rx_64, RX_64_PKTS), |
792 | EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS), | 1090 | EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), |
793 | EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS), | 1091 | EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), |
794 | EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS), | 1092 | EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), |
795 | EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS), | 1093 | EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), |
796 | EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), | 1094 | EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), |
797 | EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), | 1095 | EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), |
798 | EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS), | 1096 | EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), |
799 | EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS), | 1097 | EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), |
800 | EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS), | 1098 | EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), |
801 | EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), | 1099 | EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), |
802 | EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), | 1100 | EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), |
803 | EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), | 1101 | EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), |
804 | GENERIC_SW_STAT(rx_nodesc_trunc), | 1102 | GENERIC_SW_STAT(rx_nodesc_trunc), |
805 | GENERIC_SW_STAT(rx_noskb_drops), | 1103 | GENERIC_SW_STAT(rx_noskb_drops), |
806 | EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), | 1104 | EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), |
807 | EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), | 1105 | EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), |
808 | EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), | 1106 | EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), |
809 | EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), | 1107 | EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), |
810 | EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB), | 1108 | EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), |
811 | EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB), | 1109 | EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), |
812 | EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING), | 1110 | EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), |
813 | EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), | 1111 | EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), |
814 | EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), | 1112 | EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), |
815 | EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS), | 1113 | EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), |
816 | EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS), | 1114 | EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), |
817 | EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS), | 1115 | EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), |
1116 | EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), | ||
1117 | EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), | ||
1118 | EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), | ||
1119 | EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), | ||
1120 | EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), | ||
1121 | EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), | ||
1122 | EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), | ||
1123 | EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES), | ||
1124 | EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), | ||
1125 | EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), | ||
1126 | EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), | ||
1127 | EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), | ||
1128 | EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), | ||
1129 | EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), | ||
1130 | EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), | ||
1131 | EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), | ||
1132 | EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), | ||
1133 | EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), | ||
818 | }; | 1134 | }; |
819 | 1135 | ||
820 | #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ | 1136 | #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ |
821 | (1ULL << EF10_STAT_tx_packets) | \ | 1137 | (1ULL << EF10_STAT_port_tx_packets) | \ |
822 | (1ULL << EF10_STAT_tx_pause) | \ | 1138 | (1ULL << EF10_STAT_port_tx_pause) | \ |
823 | (1ULL << EF10_STAT_tx_unicast) | \ | 1139 | (1ULL << EF10_STAT_port_tx_unicast) | \ |
824 | (1ULL << EF10_STAT_tx_multicast) | \ | 1140 | (1ULL << EF10_STAT_port_tx_multicast) | \ |
825 | (1ULL << EF10_STAT_tx_broadcast) | \ | 1141 | (1ULL << EF10_STAT_port_tx_broadcast) | \ |
826 | (1ULL << EF10_STAT_rx_bytes) | \ | 1142 | (1ULL << EF10_STAT_port_rx_bytes) | \ |
827 | (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \ | 1143 | (1ULL << \ |
828 | (1ULL << EF10_STAT_rx_good_bytes) | \ | 1144 | EF10_STAT_port_rx_bytes_minus_good_bytes) | \ |
829 | (1ULL << EF10_STAT_rx_bad_bytes) | \ | 1145 | (1ULL << EF10_STAT_port_rx_good_bytes) | \ |
830 | (1ULL << EF10_STAT_rx_packets) | \ | 1146 | (1ULL << EF10_STAT_port_rx_bad_bytes) | \ |
831 | (1ULL << EF10_STAT_rx_good) | \ | 1147 | (1ULL << EF10_STAT_port_rx_packets) | \ |
832 | (1ULL << EF10_STAT_rx_bad) | \ | 1148 | (1ULL << EF10_STAT_port_rx_good) | \ |
833 | (1ULL << EF10_STAT_rx_pause) | \ | 1149 | (1ULL << EF10_STAT_port_rx_bad) | \ |
834 | (1ULL << EF10_STAT_rx_control) | \ | 1150 | (1ULL << EF10_STAT_port_rx_pause) | \ |
835 | (1ULL << EF10_STAT_rx_unicast) | \ | 1151 | (1ULL << EF10_STAT_port_rx_control) | \ |
836 | (1ULL << EF10_STAT_rx_multicast) | \ | 1152 | (1ULL << EF10_STAT_port_rx_unicast) | \ |
837 | (1ULL << EF10_STAT_rx_broadcast) | \ | 1153 | (1ULL << EF10_STAT_port_rx_multicast) | \ |
838 | (1ULL << EF10_STAT_rx_lt64) | \ | 1154 | (1ULL << EF10_STAT_port_rx_broadcast) | \ |
839 | (1ULL << EF10_STAT_rx_64) | \ | 1155 | (1ULL << EF10_STAT_port_rx_lt64) | \ |
840 | (1ULL << EF10_STAT_rx_65_to_127) | \ | 1156 | (1ULL << EF10_STAT_port_rx_64) | \ |
841 | (1ULL << EF10_STAT_rx_128_to_255) | \ | 1157 | (1ULL << EF10_STAT_port_rx_65_to_127) | \ |
842 | (1ULL << EF10_STAT_rx_256_to_511) | \ | 1158 | (1ULL << EF10_STAT_port_rx_128_to_255) | \ |
843 | (1ULL << EF10_STAT_rx_512_to_1023) | \ | 1159 | (1ULL << EF10_STAT_port_rx_256_to_511) | \ |
844 | (1ULL << EF10_STAT_rx_1024_to_15xx) | \ | 1160 | (1ULL << EF10_STAT_port_rx_512_to_1023) |\ |
845 | (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \ | 1161 | (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ |
846 | (1ULL << EF10_STAT_rx_gtjumbo) | \ | 1162 | (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ |
847 | (1ULL << EF10_STAT_rx_bad_gtjumbo) | \ | 1163 | (1ULL << EF10_STAT_port_rx_gtjumbo) | \ |
848 | (1ULL << EF10_STAT_rx_overflow) | \ | 1164 | (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ |
849 | (1ULL << EF10_STAT_rx_nodesc_drops) | \ | 1165 | (1ULL << EF10_STAT_port_rx_overflow) | \ |
1166 | (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ | ||
850 | (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ | 1167 | (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ |
851 | (1ULL << GENERIC_STAT_rx_noskb_drops)) | 1168 | (1ULL << GENERIC_STAT_rx_noskb_drops)) |
852 | 1169 | ||
@@ -854,39 +1171,39 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
854 | * switchable port we do not expose these because they might not | 1171 | * switchable port we do not expose these because they might not |
855 | * include all the packets they should. | 1172 | * include all the packets they should. |
856 | */ | 1173 | */ |
857 | #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \ | 1174 | #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ |
858 | (1ULL << EF10_STAT_tx_lt64) | \ | 1175 | (1ULL << EF10_STAT_port_tx_lt64) | \ |
859 | (1ULL << EF10_STAT_tx_64) | \ | 1176 | (1ULL << EF10_STAT_port_tx_64) | \ |
860 | (1ULL << EF10_STAT_tx_65_to_127) | \ | 1177 | (1ULL << EF10_STAT_port_tx_65_to_127) |\ |
861 | (1ULL << EF10_STAT_tx_128_to_255) | \ | 1178 | (1ULL << EF10_STAT_port_tx_128_to_255) |\ |
862 | (1ULL << EF10_STAT_tx_256_to_511) | \ | 1179 | (1ULL << EF10_STAT_port_tx_256_to_511) |\ |
863 | (1ULL << EF10_STAT_tx_512_to_1023) | \ | 1180 | (1ULL << EF10_STAT_port_tx_512_to_1023) |\ |
864 | (1ULL << EF10_STAT_tx_1024_to_15xx) | \ | 1181 | (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ |
865 | (1ULL << EF10_STAT_tx_15xx_to_jumbo)) | 1182 | (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) |
866 | 1183 | ||
867 | /* These statistics are only provided by the 40G MAC. For a 10G/40G | 1184 | /* These statistics are only provided by the 40G MAC. For a 10G/40G |
868 | * switchable port we do expose these because the errors will otherwise | 1185 | * switchable port we do expose these because the errors will otherwise |
869 | * be silent. | 1186 | * be silent. |
870 | */ | 1187 | */ |
871 | #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ | 1188 | #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ |
872 | (1ULL << EF10_STAT_rx_length_error)) | 1189 | (1ULL << EF10_STAT_port_rx_length_error)) |
873 | 1190 | ||
874 | /* These statistics are only provided if the firmware supports the | 1191 | /* These statistics are only provided if the firmware supports the |
875 | * capability PM_AND_RXDP_COUNTERS. | 1192 | * capability PM_AND_RXDP_COUNTERS. |
876 | */ | 1193 | */ |
877 | #define HUNT_PM_AND_RXDP_STAT_MASK ( \ | 1194 | #define HUNT_PM_AND_RXDP_STAT_MASK ( \ |
878 | (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \ | 1195 | (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ |
879 | (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \ | 1196 | (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ |
880 | (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \ | 1197 | (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ |
881 | (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \ | 1198 | (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ |
882 | (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \ | 1199 | (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ |
883 | (1ULL << EF10_STAT_rx_pm_discard_qbb) | \ | 1200 | (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ |
884 | (1ULL << EF10_STAT_rx_pm_discard_mapping) | \ | 1201 | (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ |
885 | (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \ | 1202 | (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ |
886 | (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \ | 1203 | (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ |
887 | (1ULL << EF10_STAT_rx_dp_streaming_packets) | \ | 1204 | (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ |
888 | (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \ | 1205 | (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ |
889 | (1ULL << EF10_STAT_rx_dp_hlb_wait)) | 1206 | (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) |
890 | 1207 | ||
891 | static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) | 1208 | static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) |
892 | { | 1209 | { |
@@ -894,6 +1211,10 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
894 | u32 port_caps = efx_mcdi_phy_get_caps(efx); | 1211 | u32 port_caps = efx_mcdi_phy_get_caps(efx); |
895 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | 1212 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
896 | 1213 | ||
1214 | if (!(efx->mcdi->fn_flags & | ||
1215 | 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) | ||
1216 | return 0; | ||
1217 | |||
897 | if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) | 1218 | if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) |
898 | raw_mask |= HUNT_40G_EXTRA_STAT_MASK; | 1219 | raw_mask |= HUNT_40G_EXTRA_STAT_MASK; |
899 | else | 1220 | else |
@@ -908,13 +1229,28 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
908 | 1229 | ||
909 | static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) | 1230 | static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) |
910 | { | 1231 | { |
911 | u64 raw_mask = efx_ef10_raw_stat_mask(efx); | 1232 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
1233 | u64 raw_mask[2]; | ||
1234 | |||
1235 | raw_mask[0] = efx_ef10_raw_stat_mask(efx); | ||
1236 | |||
1237 | /* Only show vadaptor stats when EVB capability is present */ | ||
1238 | if (nic_data->datapath_caps & | ||
1239 | (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { | ||
1240 | raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); | ||
1241 | raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1; | ||
1242 | } else { | ||
1243 | raw_mask[1] = 0; | ||
1244 | } | ||
912 | 1245 | ||
913 | #if BITS_PER_LONG == 64 | 1246 | #if BITS_PER_LONG == 64 |
914 | mask[0] = raw_mask; | 1247 | mask[0] = raw_mask[0]; |
1248 | mask[1] = raw_mask[1]; | ||
915 | #else | 1249 | #else |
916 | mask[0] = raw_mask & 0xffffffff; | 1250 | mask[0] = raw_mask[0] & 0xffffffff; |
917 | mask[1] = raw_mask >> 32; | 1251 | mask[1] = raw_mask[0] >> 32; |
1252 | mask[2] = raw_mask[1] & 0xffffffff; | ||
1253 | mask[3] = raw_mask[1] >> 32; | ||
918 | #endif | 1254 | #endif |
919 | } | 1255 | } |
920 | 1256 | ||
@@ -927,7 +1263,51 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
927 | mask, names); | 1263 | mask, names); |
928 | } | 1264 | } |
929 | 1265 | ||
930 | static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) | 1266 | static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, |
1267 | struct rtnl_link_stats64 *core_stats) | ||
1268 | { | ||
1269 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); | ||
1270 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
1271 | u64 *stats = nic_data->stats; | ||
1272 | size_t stats_count = 0, index; | ||
1273 | |||
1274 | efx_ef10_get_stat_mask(efx, mask); | ||
1275 | |||
1276 | if (full_stats) { | ||
1277 | for_each_set_bit(index, mask, EF10_STAT_COUNT) { | ||
1278 | if (efx_ef10_stat_desc[index].name) { | ||
1279 | *full_stats++ = stats[index]; | ||
1280 | ++stats_count; | ||
1281 | } | ||
1282 | } | ||
1283 | } | ||
1284 | |||
1285 | if (core_stats) { | ||
1286 | core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + | ||
1287 | stats[EF10_STAT_rx_multicast] + | ||
1288 | stats[EF10_STAT_rx_broadcast]; | ||
1289 | core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + | ||
1290 | stats[EF10_STAT_tx_multicast] + | ||
1291 | stats[EF10_STAT_tx_broadcast]; | ||
1292 | core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + | ||
1293 | stats[EF10_STAT_rx_multicast_bytes] + | ||
1294 | stats[EF10_STAT_rx_broadcast_bytes]; | ||
1295 | core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + | ||
1296 | stats[EF10_STAT_tx_multicast_bytes] + | ||
1297 | stats[EF10_STAT_tx_broadcast_bytes]; | ||
1298 | core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + | ||
1299 | stats[GENERIC_STAT_rx_noskb_drops]; | ||
1300 | core_stats->multicast = stats[EF10_STAT_rx_multicast]; | ||
1301 | core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; | ||
1302 | core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; | ||
1303 | core_stats->rx_errors = core_stats->rx_crc_errors; | ||
1304 | core_stats->tx_errors = stats[EF10_STAT_tx_bad]; | ||
1305 | } | ||
1306 | |||
1307 | return stats_count; | ||
1308 | } | ||
1309 | |||
1310 | static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) | ||
931 | { | 1311 | { |
932 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | 1312 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
933 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); | 1313 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
@@ -952,67 +1332,114 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
952 | return -EAGAIN; | 1332 | return -EAGAIN; |
953 | 1333 | ||
954 | /* Update derived statistics */ | 1334 | /* Update derived statistics */ |
955 | efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]); | 1335 | efx_nic_fix_nodesc_drop_stat(efx, |
956 | stats[EF10_STAT_rx_good_bytes] = | 1336 | &stats[EF10_STAT_port_rx_nodesc_drops]); |
957 | stats[EF10_STAT_rx_bytes] - | 1337 | stats[EF10_STAT_port_rx_good_bytes] = |
958 | stats[EF10_STAT_rx_bytes_minus_good_bytes]; | 1338 | stats[EF10_STAT_port_rx_bytes] - |
959 | efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes], | 1339 | stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; |
960 | stats[EF10_STAT_rx_bytes_minus_good_bytes]); | 1340 | efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], |
1341 | stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); | ||
961 | efx_update_sw_stats(efx, stats); | 1342 | efx_update_sw_stats(efx, stats); |
962 | return 0; | 1343 | return 0; |
963 | } | 1344 | } |
964 | 1345 | ||
965 | 1346 | ||
966 | static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, | 1347 | static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, |
967 | struct rtnl_link_stats64 *core_stats) | 1348 | struct rtnl_link_stats64 *core_stats) |
968 | { | 1349 | { |
969 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); | ||
970 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
971 | u64 *stats = nic_data->stats; | ||
972 | size_t stats_count = 0, index; | ||
973 | int retry; | 1350 | int retry; |
974 | 1351 | ||
975 | efx_ef10_get_stat_mask(efx, mask); | ||
976 | |||
977 | /* If we're unlucky enough to read statistics during the DMA, wait | 1352 | /* If we're unlucky enough to read statistics during the DMA, wait |
978 | * up to 10ms for it to finish (typically takes <500us) | 1353 | * up to 10ms for it to finish (typically takes <500us) |
979 | */ | 1354 | */ |
980 | for (retry = 0; retry < 100; ++retry) { | 1355 | for (retry = 0; retry < 100; ++retry) { |
981 | if (efx_ef10_try_update_nic_stats(efx) == 0) | 1356 | if (efx_ef10_try_update_nic_stats_pf(efx) == 0) |
982 | break; | 1357 | break; |
983 | udelay(100); | 1358 | udelay(100); |
984 | } | 1359 | } |
985 | 1360 | ||
986 | if (full_stats) { | 1361 | return efx_ef10_update_stats_common(efx, full_stats, core_stats); |
987 | for_each_set_bit(index, mask, EF10_STAT_COUNT) { | 1362 | } |
988 | if (efx_ef10_stat_desc[index].name) { | 1363 | |
989 | *full_stats++ = stats[index]; | 1364 | static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) |
990 | ++stats_count; | 1365 | { |
991 | } | 1366 | MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); |
992 | } | 1367 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
1368 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); | ||
1369 | __le64 generation_start, generation_end; | ||
1370 | u64 *stats = nic_data->stats; | ||
1371 | u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64); | ||
1372 | struct efx_buffer stats_buf; | ||
1373 | __le64 *dma_stats; | ||
1374 | int rc; | ||
1375 | |||
1376 | spin_unlock_bh(&efx->stats_lock); | ||
1377 | |||
1378 | if (in_interrupt()) { | ||
1379 | /* If in atomic context, cannot update stats. Just update the | ||
1380 | * software stats and return so the caller can continue. | ||
1381 | */ | ||
1382 | spin_lock_bh(&efx->stats_lock); | ||
1383 | efx_update_sw_stats(efx, stats); | ||
1384 | return 0; | ||
993 | } | 1385 | } |
994 | 1386 | ||
995 | if (core_stats) { | 1387 | efx_ef10_get_stat_mask(efx, mask); |
996 | core_stats->rx_packets = stats[EF10_STAT_rx_packets]; | 1388 | |
997 | core_stats->tx_packets = stats[EF10_STAT_tx_packets]; | 1389 | rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); |
998 | core_stats->rx_bytes = stats[EF10_STAT_rx_bytes]; | 1390 | if (rc) { |
999 | core_stats->tx_bytes = stats[EF10_STAT_tx_bytes]; | 1391 | spin_lock_bh(&efx->stats_lock); |
1000 | core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] + | 1392 | return rc; |
1001 | stats[GENERIC_STAT_rx_nodesc_trunc] + | ||
1002 | stats[GENERIC_STAT_rx_noskb_drops]; | ||
1003 | core_stats->multicast = stats[EF10_STAT_rx_multicast]; | ||
1004 | core_stats->rx_length_errors = | ||
1005 | stats[EF10_STAT_rx_gtjumbo] + | ||
1006 | stats[EF10_STAT_rx_length_error]; | ||
1007 | core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; | ||
1008 | core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error]; | ||
1009 | core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; | ||
1010 | core_stats->rx_errors = (core_stats->rx_length_errors + | ||
1011 | core_stats->rx_crc_errors + | ||
1012 | core_stats->rx_frame_errors); | ||
1013 | } | 1393 | } |
1014 | 1394 | ||
1015 | return stats_count; | 1395 | dma_stats = stats_buf.addr; |
1396 | dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; | ||
1397 | |||
1398 | MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); | ||
1399 | MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, | ||
1400 | MAC_STATS_IN_DMA, 1); | ||
1401 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); | ||
1402 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | ||
1403 | |||
1404 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), | ||
1405 | NULL, 0, NULL); | ||
1406 | spin_lock_bh(&efx->stats_lock); | ||
1407 | if (rc) { | ||
1408 | /* Expect ENOENT if DMA queues have not been set up */ | ||
1409 | if (rc != -ENOENT || atomic_read(&efx->active_queues)) | ||
1410 | efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, | ||
1411 | sizeof(inbuf), NULL, 0, rc); | ||
1412 | goto out; | ||
1413 | } | ||
1414 | |||
1415 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; | ||
1416 | if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { | ||
1417 | WARN_ON_ONCE(1); | ||
1418 | goto out; | ||
1419 | } | ||
1420 | rmb(); | ||
1421 | efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, | ||
1422 | stats, stats_buf.addr, false); | ||
1423 | rmb(); | ||
1424 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; | ||
1425 | if (generation_end != generation_start) { | ||
1426 | rc = -EAGAIN; | ||
1427 | goto out; | ||
1428 | } | ||
1429 | |||
1430 | efx_update_sw_stats(efx, stats); | ||
1431 | out: | ||
1432 | efx_nic_free_buffer(efx, &stats_buf); | ||
1433 | return rc; | ||
1434 | } | ||
1435 | |||
1436 | static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, | ||
1437 | struct rtnl_link_stats64 *core_stats) | ||
1438 | { | ||
1439 | if (efx_ef10_try_update_nic_stats_vf(efx)) | ||
1440 | return 0; | ||
1441 | |||
1442 | return efx_ef10_update_stats_common(efx, full_stats, core_stats); | ||
1016 | } | 1443 | } |
1017 | 1444 | ||
1018 | static void efx_ef10_push_irq_moderation(struct efx_channel *channel) | 1445 | static void efx_ef10_push_irq_moderation(struct efx_channel *channel) |
@@ -1044,6 +1471,14 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
1044 | } | 1471 | } |
1045 | } | 1472 | } |
1046 | 1473 | ||
1474 | static void efx_ef10_get_wol_vf(struct efx_nic *efx, | ||
1475 | struct ethtool_wolinfo *wol) {} | ||
1476 | |||
1477 | static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type) | ||
1478 | { | ||
1479 | return -EOPNOTSUPP; | ||
1480 | } | ||
1481 | |||
1047 | static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) | 1482 | static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) |
1048 | { | 1483 | { |
1049 | wol->supported = 0; | 1484 | wol->supported = 0; |
@@ -1123,13 +1558,17 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
1123 | /* All our allocations have been reset */ | 1558 | /* All our allocations have been reset */ |
1124 | efx_ef10_reset_mc_allocations(efx); | 1559 | efx_ef10_reset_mc_allocations(efx); |
1125 | 1560 | ||
1561 | /* Driver-created vswitches and vports must be re-created */ | ||
1562 | nic_data->must_probe_vswitching = true; | ||
1563 | nic_data->vport_id = EVB_PORT_ID_ASSIGNED; | ||
1564 | |||
1126 | /* The datapath firmware might have been changed */ | 1565 | /* The datapath firmware might have been changed */ |
1127 | nic_data->must_check_datapath_caps = true; | 1566 | nic_data->must_check_datapath_caps = true; |
1128 | 1567 | ||
1129 | /* MAC statistics have been cleared on the NIC; clear the local | 1568 | /* MAC statistics have been cleared on the NIC; clear the local |
1130 | * statistic that we update with efx_update_diff_stat(). | 1569 | * statistic that we update with efx_update_diff_stat(). |
1131 | */ | 1570 | */ |
1132 | nic_data->stats[EF10_STAT_rx_bad_bytes] = 0; | 1571 | nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; |
1133 | 1572 | ||
1134 | return -EIO; | 1573 | return -EIO; |
1135 | } | 1574 | } |
@@ -1232,16 +1671,17 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
1232 | { | 1671 | { |
1233 | MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / | 1672 | MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / |
1234 | EFX_BUF_SIZE)); | 1673 | EFX_BUF_SIZE)); |
1235 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN); | ||
1236 | bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; | 1674 | bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; |
1237 | size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; | 1675 | size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; |
1238 | struct efx_channel *channel = tx_queue->channel; | 1676 | struct efx_channel *channel = tx_queue->channel; |
1239 | struct efx_nic *efx = tx_queue->efx; | 1677 | struct efx_nic *efx = tx_queue->efx; |
1240 | size_t inlen, outlen; | 1678 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
1679 | size_t inlen; | ||
1241 | dma_addr_t dma_addr; | 1680 | dma_addr_t dma_addr; |
1242 | efx_qword_t *txd; | 1681 | efx_qword_t *txd; |
1243 | int rc; | 1682 | int rc; |
1244 | int i; | 1683 | int i; |
1684 | BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0); | ||
1245 | 1685 | ||
1246 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); | 1686 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); |
1247 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); | 1687 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); |
@@ -1251,7 +1691,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
1251 | INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, | 1691 | INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, |
1252 | INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); | 1692 | INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); |
1253 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); | 1693 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); |
1254 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | 1694 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id); |
1255 | 1695 | ||
1256 | dma_addr = tx_queue->txd.buf.dma_addr; | 1696 | dma_addr = tx_queue->txd.buf.dma_addr; |
1257 | 1697 | ||
@@ -1266,7 +1706,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
1266 | inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); | 1706 | inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); |
1267 | 1707 | ||
1268 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen, | 1708 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen, |
1269 | outbuf, sizeof(outbuf), &outlen); | 1709 | NULL, 0, NULL); |
1270 | if (rc) | 1710 | if (rc) |
1271 | goto fail; | 1711 | goto fail; |
1272 | 1712 | ||
@@ -1299,7 +1739,7 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
1299 | static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) | 1739 | static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) |
1300 | { | 1740 | { |
1301 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); | 1741 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); |
1302 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN); | 1742 | MCDI_DECLARE_BUF_ERR(outbuf); |
1303 | struct efx_nic *efx = tx_queue->efx; | 1743 | struct efx_nic *efx = tx_queue->efx; |
1304 | size_t outlen; | 1744 | size_t outlen; |
1305 | int rc; | 1745 | int rc; |
@@ -1378,19 +1818,33 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
1378 | } | 1818 | } |
1379 | } | 1819 | } |
1380 | 1820 | ||
1381 | static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context) | 1821 | static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context, |
1822 | bool exclusive, unsigned *context_size) | ||
1382 | { | 1823 | { |
1383 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); | 1824 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); |
1384 | MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); | 1825 | MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); |
1826 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
1385 | size_t outlen; | 1827 | size_t outlen; |
1386 | int rc; | 1828 | int rc; |
1829 | u32 alloc_type = exclusive ? | ||
1830 | MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE : | ||
1831 | MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED; | ||
1832 | unsigned rss_spread = exclusive ? | ||
1833 | efx->rss_spread : | ||
1834 | min(rounddown_pow_of_two(efx->rss_spread), | ||
1835 | EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE); | ||
1836 | |||
1837 | if (!exclusive && rss_spread == 1) { | ||
1838 | *context = EFX_EF10_RSS_CONTEXT_INVALID; | ||
1839 | if (context_size) | ||
1840 | *context_size = 1; | ||
1841 | return 0; | ||
1842 | } | ||
1387 | 1843 | ||
1388 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, | 1844 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, |
1389 | EVB_PORT_ID_ASSIGNED); | 1845 | nic_data->vport_id); |
1390 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, | 1846 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); |
1391 | MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE); | 1847 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread); |
1392 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, | ||
1393 | EFX_MAX_CHANNELS); | ||
1394 | 1848 | ||
1395 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), | 1849 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), |
1396 | outbuf, sizeof(outbuf), &outlen); | 1850 | outbuf, sizeof(outbuf), &outlen); |
@@ -1402,6 +1856,9 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context) | |||
1402 | 1856 | ||
1403 | *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); | 1857 | *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); |
1404 | 1858 | ||
1859 | if (context_size) | ||
1860 | *context_size = rss_spread; | ||
1861 | |||
1405 | return 0; | 1862 | return 0; |
1406 | } | 1863 | } |
1407 | 1864 | ||
@@ -1418,7 +1875,8 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) | |||
1418 | WARN_ON(rc != 0); | 1875 | WARN_ON(rc != 0); |
1419 | } | 1876 | } |
1420 | 1877 | ||
1421 | static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context) | 1878 | static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, |
1879 | const u32 *rx_indir_table) | ||
1422 | { | 1880 | { |
1423 | MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); | 1881 | MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); |
1424 | MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); | 1882 | MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); |
@@ -1432,7 +1890,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context) | |||
1432 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) | 1890 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) |
1433 | MCDI_PTR(tablebuf, | 1891 | MCDI_PTR(tablebuf, |
1434 | RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = | 1892 | RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = |
1435 | (u8) efx->rx_indir_table[i]; | 1893 | (u8) rx_indir_table[i]; |
1436 | 1894 | ||
1437 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, | 1895 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, |
1438 | sizeof(tablebuf), NULL, 0, NULL); | 1896 | sizeof(tablebuf), NULL, 0, NULL); |
@@ -1460,27 +1918,119 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) | |||
1460 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | 1918 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
1461 | } | 1919 | } |
1462 | 1920 | ||
1463 | static void efx_ef10_rx_push_rss_config(struct efx_nic *efx) | 1921 | static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, |
1922 | unsigned *context_size) | ||
1464 | { | 1923 | { |
1924 | u32 new_rx_rss_context; | ||
1465 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | 1925 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
1466 | int rc; | 1926 | int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, |
1927 | false, context_size); | ||
1467 | 1928 | ||
1468 | netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n"); | 1929 | if (rc != 0) |
1930 | return rc; | ||
1469 | 1931 | ||
1470 | if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) { | 1932 | nic_data->rx_rss_context = new_rx_rss_context; |
1471 | rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context); | 1933 | nic_data->rx_rss_context_exclusive = false; |
1472 | if (rc != 0) | 1934 | efx_set_default_rx_indir_table(efx); |
1473 | goto fail; | 1935 | return 0; |
1936 | } | ||
1937 | |||
1938 | static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, | ||
1939 | const u32 *rx_indir_table) | ||
1940 | { | ||
1941 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
1942 | int rc; | ||
1943 | u32 new_rx_rss_context; | ||
1944 | |||
1945 | if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID || | ||
1946 | !nic_data->rx_rss_context_exclusive) { | ||
1947 | rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, | ||
1948 | true, NULL); | ||
1949 | if (rc == -EOPNOTSUPP) | ||
1950 | return rc; | ||
1951 | else if (rc != 0) | ||
1952 | goto fail1; | ||
1953 | } else { | ||
1954 | new_rx_rss_context = nic_data->rx_rss_context; | ||
1474 | } | 1955 | } |
1475 | 1956 | ||
1476 | rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context); | 1957 | rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, |
1958 | rx_indir_table); | ||
1477 | if (rc != 0) | 1959 | if (rc != 0) |
1478 | goto fail; | 1960 | goto fail2; |
1479 | 1961 | ||
1480 | return; | 1962 | if (nic_data->rx_rss_context != new_rx_rss_context) |
1963 | efx_ef10_rx_free_indir_table(efx); | ||
1964 | nic_data->rx_rss_context = new_rx_rss_context; | ||
1965 | nic_data->rx_rss_context_exclusive = true; | ||
1966 | if (rx_indir_table != efx->rx_indir_table) | ||
1967 | memcpy(efx->rx_indir_table, rx_indir_table, | ||
1968 | sizeof(efx->rx_indir_table)); | ||
1969 | return 0; | ||
1481 | 1970 | ||
1482 | fail: | 1971 | fail2: |
1972 | if (new_rx_rss_context != nic_data->rx_rss_context) | ||
1973 | efx_ef10_free_rss_context(efx, new_rx_rss_context); | ||
1974 | fail1: | ||
1483 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 1975 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1976 | return rc; | ||
1977 | } | ||
1978 | |||
1979 | static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, | ||
1980 | const u32 *rx_indir_table) | ||
1981 | { | ||
1982 | int rc; | ||
1983 | |||
1984 | if (efx->rss_spread == 1) | ||
1985 | return 0; | ||
1986 | |||
1987 | rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table); | ||
1988 | |||
1989 | if (rc == -ENOBUFS && !user) { | ||
1990 | unsigned context_size; | ||
1991 | bool mismatch = false; | ||
1992 | size_t i; | ||
1993 | |||
1994 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch; | ||
1995 | i++) | ||
1996 | mismatch = rx_indir_table[i] != | ||
1997 | ethtool_rxfh_indir_default(i, efx->rss_spread); | ||
1998 | |||
1999 | rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size); | ||
2000 | if (rc == 0) { | ||
2001 | if (context_size != efx->rss_spread) | ||
2002 | netif_warn(efx, probe, efx->net_dev, | ||
2003 | "Could not allocate an exclusive RSS" | ||
2004 | " context; allocated a shared one of" | ||
2005 | " different size." | ||
2006 | " Wanted %u, got %u.\n", | ||
2007 | efx->rss_spread, context_size); | ||
2008 | else if (mismatch) | ||
2009 | netif_warn(efx, probe, efx->net_dev, | ||
2010 | "Could not allocate an exclusive RSS" | ||
2011 | " context; allocated a shared one but" | ||
2012 | " could not apply custom" | ||
2013 | " indirection.\n"); | ||
2014 | else | ||
2015 | netif_info(efx, probe, efx->net_dev, | ||
2016 | "Could not allocate an exclusive RSS" | ||
2017 | " context; allocated a shared one.\n"); | ||
2018 | } | ||
2019 | } | ||
2020 | return rc; | ||
2021 | } | ||
2022 | |||
2023 | static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, | ||
2024 | const u32 *rx_indir_table | ||
2025 | __attribute__ ((unused))) | ||
2026 | { | ||
2027 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
2028 | |||
2029 | if (user) | ||
2030 | return -EOPNOTSUPP; | ||
2031 | if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) | ||
2032 | return 0; | ||
2033 | return efx_ef10_rx_push_shared_rss_config(efx, NULL); | ||
1484 | } | 2034 | } |
1485 | 2035 | ||
1486 | static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) | 2036 | static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) |
@@ -1496,14 +2046,15 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) | |||
1496 | MCDI_DECLARE_BUF(inbuf, | 2046 | MCDI_DECLARE_BUF(inbuf, |
1497 | MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / | 2047 | MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / |
1498 | EFX_BUF_SIZE)); | 2048 | EFX_BUF_SIZE)); |
1499 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN); | ||
1500 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | 2049 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
1501 | size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; | 2050 | size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; |
1502 | struct efx_nic *efx = rx_queue->efx; | 2051 | struct efx_nic *efx = rx_queue->efx; |
1503 | size_t inlen, outlen; | 2052 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
2053 | size_t inlen; | ||
1504 | dma_addr_t dma_addr; | 2054 | dma_addr_t dma_addr; |
1505 | int rc; | 2055 | int rc; |
1506 | int i; | 2056 | int i; |
2057 | BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0); | ||
1507 | 2058 | ||
1508 | rx_queue->scatter_n = 0; | 2059 | rx_queue->scatter_n = 0; |
1509 | rx_queue->scatter_len = 0; | 2060 | rx_queue->scatter_len = 0; |
@@ -1517,7 +2068,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) | |||
1517 | INIT_RXQ_IN_FLAG_PREFIX, 1, | 2068 | INIT_RXQ_IN_FLAG_PREFIX, 1, |
1518 | INIT_RXQ_IN_FLAG_TIMESTAMP, 1); | 2069 | INIT_RXQ_IN_FLAG_TIMESTAMP, 1); |
1519 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); | 2070 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); |
1520 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | 2071 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id); |
1521 | 2072 | ||
1522 | dma_addr = rx_queue->rxd.buf.dma_addr; | 2073 | dma_addr = rx_queue->rxd.buf.dma_addr; |
1523 | 2074 | ||
@@ -1532,7 +2083,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) | |||
1532 | inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); | 2083 | inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); |
1533 | 2084 | ||
1534 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, | 2085 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, |
1535 | outbuf, sizeof(outbuf), &outlen); | 2086 | NULL, 0, NULL); |
1536 | if (rc) | 2087 | if (rc) |
1537 | netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", | 2088 | netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", |
1538 | efx_rx_queue_index(rx_queue)); | 2089 | efx_rx_queue_index(rx_queue)); |
@@ -1541,7 +2092,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) | |||
1541 | static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) | 2092 | static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) |
1542 | { | 2093 | { |
1543 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); | 2094 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); |
1544 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN); | 2095 | MCDI_DECLARE_BUF_ERR(outbuf); |
1545 | struct efx_nic *efx = rx_queue->efx; | 2096 | struct efx_nic *efx = rx_queue->efx; |
1546 | size_t outlen; | 2097 | size_t outlen; |
1547 | int rc; | 2098 | int rc; |
@@ -1703,7 +2254,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel) | |||
1703 | static void efx_ef10_ev_fini(struct efx_channel *channel) | 2254 | static void efx_ef10_ev_fini(struct efx_channel *channel) |
1704 | { | 2255 | { |
1705 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); | 2256 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); |
1706 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN); | 2257 | MCDI_DECLARE_BUF_ERR(outbuf); |
1707 | struct efx_nic *efx = channel->efx; | 2258 | struct efx_nic *efx = channel->efx; |
1708 | size_t outlen; | 2259 | size_t outlen; |
1709 | int rc; | 2260 | int rc; |
@@ -2286,11 +2837,12 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, | |||
2286 | match_fields); | 2837 | match_fields); |
2287 | } | 2838 | } |
2288 | 2839 | ||
2289 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); | 2840 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id); |
2290 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, | 2841 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, |
2291 | spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? | 2842 | spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? |
2292 | MC_CMD_FILTER_OP_IN_RX_DEST_DROP : | 2843 | MC_CMD_FILTER_OP_IN_RX_DEST_DROP : |
2293 | MC_CMD_FILTER_OP_IN_RX_DEST_HOST); | 2844 | MC_CMD_FILTER_OP_IN_RX_DEST_HOST); |
2845 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0); | ||
2294 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, | 2846 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, |
2295 | MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); | 2847 | MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); |
2296 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, | 2848 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, |
@@ -3055,6 +3607,9 @@ fail: | |||
3055 | return rc; | 3607 | return rc; |
3056 | } | 3608 | } |
3057 | 3609 | ||
3610 | /* Caller must hold efx->filter_sem for read if race against | ||
3611 | * efx_ef10_filter_table_remove() is possible | ||
3612 | */ | ||
3058 | static void efx_ef10_filter_table_restore(struct efx_nic *efx) | 3613 | static void efx_ef10_filter_table_restore(struct efx_nic *efx) |
3059 | { | 3614 | { |
3060 | struct efx_ef10_filter_table *table = efx->filter_state; | 3615 | struct efx_ef10_filter_table *table = efx->filter_state; |
@@ -3064,9 +3619,14 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) | |||
3064 | bool failed = false; | 3619 | bool failed = false; |
3065 | int rc; | 3620 | int rc; |
3066 | 3621 | ||
3622 | WARN_ON(!rwsem_is_locked(&efx->filter_sem)); | ||
3623 | |||
3067 | if (!nic_data->must_restore_filters) | 3624 | if (!nic_data->must_restore_filters) |
3068 | return; | 3625 | return; |
3069 | 3626 | ||
3627 | if (!table) | ||
3628 | return; | ||
3629 | |||
3070 | spin_lock_bh(&efx->filter_lock); | 3630 | spin_lock_bh(&efx->filter_lock); |
3071 | 3631 | ||
3072 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | 3632 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { |
@@ -3102,6 +3662,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) | |||
3102 | nic_data->must_restore_filters = false; | 3662 | nic_data->must_restore_filters = false; |
3103 | } | 3663 | } |
3104 | 3664 | ||
3665 | /* Caller must hold efx->filter_sem for write */ | ||
3105 | static void efx_ef10_filter_table_remove(struct efx_nic *efx) | 3666 | static void efx_ef10_filter_table_remove(struct efx_nic *efx) |
3106 | { | 3667 | { |
3107 | struct efx_ef10_filter_table *table = efx->filter_state; | 3668 | struct efx_ef10_filter_table *table = efx->filter_state; |
@@ -3110,6 +3671,10 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) | |||
3110 | unsigned int filter_idx; | 3671 | unsigned int filter_idx; |
3111 | int rc; | 3672 | int rc; |
3112 | 3673 | ||
3674 | efx->filter_state = NULL; | ||
3675 | if (!table) | ||
3676 | return; | ||
3677 | |||
3113 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { | 3678 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { |
3114 | spec = efx_ef10_filter_entry_spec(table, filter_idx); | 3679 | spec = efx_ef10_filter_entry_spec(table, filter_idx); |
3115 | if (!spec) | 3680 | if (!spec) |
@@ -3135,6 +3700,9 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) | |||
3135 | kfree(table); | 3700 | kfree(table); |
3136 | } | 3701 | } |
3137 | 3702 | ||
3703 | /* Caller must hold efx->filter_sem for read if race against | ||
3704 | * efx_ef10_filter_table_remove() is possible | ||
3705 | */ | ||
3138 | static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) | 3706 | static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) |
3139 | { | 3707 | { |
3140 | struct efx_ef10_filter_table *table = efx->filter_state; | 3708 | struct efx_ef10_filter_table *table = efx->filter_state; |
@@ -3149,6 +3717,9 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) | |||
3149 | if (!efx_dev_registered(efx)) | 3717 | if (!efx_dev_registered(efx)) |
3150 | return; | 3718 | return; |
3151 | 3719 | ||
3720 | if (!table) | ||
3721 | return; | ||
3722 | |||
3152 | /* Mark old filters that may need to be removed */ | 3723 | /* Mark old filters that may need to be removed */ |
3153 | spin_lock_bh(&efx->filter_lock); | 3724 | spin_lock_bh(&efx->filter_lock); |
3154 | n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count; | 3725 | n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count; |
@@ -3280,6 +3851,149 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) | |||
3280 | WARN_ON(remove_failed); | 3851 | WARN_ON(remove_failed); |
3281 | } | 3852 | } |
3282 | 3853 | ||
3854 | static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) | ||
3855 | { | ||
3856 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
3857 | u8 mac_old[ETH_ALEN]; | ||
3858 | int rc, rc2; | ||
3859 | |||
3860 | /* Only reconfigure a PF-created vport */ | ||
3861 | if (is_zero_ether_addr(nic_data->vport_mac)) | ||
3862 | return 0; | ||
3863 | |||
3864 | efx_device_detach_sync(efx); | ||
3865 | efx_net_stop(efx->net_dev); | ||
3866 | down_write(&efx->filter_sem); | ||
3867 | efx_ef10_filter_table_remove(efx); | ||
3868 | up_write(&efx->filter_sem); | ||
3869 | |||
3870 | rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id); | ||
3871 | if (rc) | ||
3872 | goto restore_filters; | ||
3873 | |||
3874 | ether_addr_copy(mac_old, nic_data->vport_mac); | ||
3875 | rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id, | ||
3876 | nic_data->vport_mac); | ||
3877 | if (rc) | ||
3878 | goto restore_vadaptor; | ||
3879 | |||
3880 | rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, | ||
3881 | efx->net_dev->dev_addr); | ||
3882 | if (!rc) { | ||
3883 | ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); | ||
3884 | } else { | ||
3885 | rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old); | ||
3886 | if (rc2) { | ||
3887 | /* Failed to add original MAC, so clear vport_mac */ | ||
3888 | eth_zero_addr(nic_data->vport_mac); | ||
3889 | goto reset_nic; | ||
3890 | } | ||
3891 | } | ||
3892 | |||
3893 | restore_vadaptor: | ||
3894 | rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); | ||
3895 | if (rc2) | ||
3896 | goto reset_nic; | ||
3897 | restore_filters: | ||
3898 | down_write(&efx->filter_sem); | ||
3899 | rc2 = efx_ef10_filter_table_probe(efx); | ||
3900 | up_write(&efx->filter_sem); | ||
3901 | if (rc2) | ||
3902 | goto reset_nic; | ||
3903 | |||
3904 | rc2 = efx_net_open(efx->net_dev); | ||
3905 | if (rc2) | ||
3906 | goto reset_nic; | ||
3907 | |||
3908 | netif_device_attach(efx->net_dev); | ||
3909 | |||
3910 | return rc; | ||
3911 | |||
3912 | reset_nic: | ||
3913 | netif_err(efx, drv, efx->net_dev, | ||
3914 | "Failed to restore when changing MAC address - scheduling reset\n"); | ||
3915 | efx_schedule_reset(efx, RESET_TYPE_DATAPATH); | ||
3916 | |||
3917 | return rc ? rc : rc2; | ||
3918 | } | ||
3919 | |||
3920 | static int efx_ef10_set_mac_address(struct efx_nic *efx) | ||
3921 | { | ||
3922 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); | ||
3923 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
3924 | bool was_enabled = efx->port_enabled; | ||
3925 | int rc; | ||
3926 | |||
3927 | efx_device_detach_sync(efx); | ||
3928 | efx_net_stop(efx->net_dev); | ||
3929 | down_write(&efx->filter_sem); | ||
3930 | efx_ef10_filter_table_remove(efx); | ||
3931 | |||
3932 | ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), | ||
3933 | efx->net_dev->dev_addr); | ||
3934 | MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, | ||
3935 | nic_data->vport_id); | ||
3936 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, | ||
3937 | sizeof(inbuf), NULL, 0, NULL); | ||
3938 | |||
3939 | efx_ef10_filter_table_probe(efx); | ||
3940 | up_write(&efx->filter_sem); | ||
3941 | if (was_enabled) | ||
3942 | efx_net_open(efx->net_dev); | ||
3943 | netif_device_attach(efx->net_dev); | ||
3944 | |||
3945 | #ifdef CONFIG_SFC_SRIOV | ||
3946 | if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { | ||
3947 | struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; | ||
3948 | |||
3949 | if (rc == -EPERM) { | ||
3950 | struct efx_nic *efx_pf; | ||
3951 | |||
3952 | /* Switch to PF and change MAC address on vport */ | ||
3953 | efx_pf = pci_get_drvdata(pci_dev_pf); | ||
3954 | |||
3955 | rc = efx_ef10_sriov_set_vf_mac(efx_pf, | ||
3956 | nic_data->vf_index, | ||
3957 | efx->net_dev->dev_addr); | ||
3958 | } else if (!rc) { | ||
3959 | struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); | ||
3960 | struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; | ||
3961 | unsigned int i; | ||
3962 | |||
3963 | /* MAC address successfully changed by VF (with MAC | ||
3964 | * spoofing) so update the parent PF if possible. | ||
3965 | */ | ||
3966 | for (i = 0; i < efx_pf->vf_count; ++i) { | ||
3967 | struct ef10_vf *vf = nic_data->vf + i; | ||
3968 | |||
3969 | if (vf->efx == efx) { | ||
3970 | ether_addr_copy(vf->mac, | ||
3971 | efx->net_dev->dev_addr); | ||
3972 | return 0; | ||
3973 | } | ||
3974 | } | ||
3975 | } | ||
3976 | } else | ||
3977 | #endif | ||
3978 | if (rc == -EPERM) { | ||
3979 | netif_err(efx, drv, efx->net_dev, | ||
3980 | "Cannot change MAC address; use sfboot to enable" | ||
3981 | " mac-spoofing on this interface\n"); | ||
3982 | } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { | ||
3983 | /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC | ||
3984 | * fall-back to the method of changing the MAC address on the | ||
3985 | * vport. This only applies to PFs because such versions of | ||
3986 | * MCFW do not support VFs. | ||
3987 | */ | ||
3988 | rc = efx_ef10_vport_set_mac_address(efx); | ||
3989 | } else { | ||
3990 | efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, | ||
3991 | sizeof(inbuf), NULL, 0, rc); | ||
3992 | } | ||
3993 | |||
3994 | return rc; | ||
3995 | } | ||
3996 | |||
3283 | static int efx_ef10_mac_reconfigure(struct efx_nic *efx) | 3997 | static int efx_ef10_mac_reconfigure(struct efx_nic *efx) |
3284 | { | 3998 | { |
3285 | efx_ef10_filter_sync_rx_mode(efx); | 3999 | efx_ef10_filter_sync_rx_mode(efx); |
@@ -3287,6 +4001,13 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx) | |||
3287 | return efx_mcdi_set_mac(efx); | 4001 | return efx_mcdi_set_mac(efx); |
3288 | } | 4002 | } |
3289 | 4003 | ||
4004 | static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx) | ||
4005 | { | ||
4006 | efx_ef10_filter_sync_rx_mode(efx); | ||
4007 | |||
4008 | return 0; | ||
4009 | } | ||
4010 | |||
3290 | static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) | 4011 | static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) |
3291 | { | 4012 | { |
3292 | MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); | 4013 | MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); |
@@ -3494,6 +4215,9 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) | |||
3494 | _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); | 4215 | _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); |
3495 | } | 4216 | } |
3496 | 4217 | ||
4218 | static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx, | ||
4219 | u32 host_time) {} | ||
4220 | |||
3497 | static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, | 4221 | static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, |
3498 | bool temp) | 4222 | bool temp) |
3499 | { | 4223 | { |
@@ -3571,6 +4295,12 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, | |||
3571 | return 0; | 4295 | return 0; |
3572 | } | 4296 | } |
3573 | 4297 | ||
4298 | static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx, | ||
4299 | struct hwtstamp_config *init) | ||
4300 | { | ||
4301 | return -EOPNOTSUPP; | ||
4302 | } | ||
4303 | |||
3574 | static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, | 4304 | static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, |
3575 | struct hwtstamp_config *init) | 4305 | struct hwtstamp_config *init) |
3576 | { | 4306 | { |
@@ -3607,14 +4337,118 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, | |||
3607 | } | 4337 | } |
3608 | } | 4338 | } |
3609 | 4339 | ||
4340 | const struct efx_nic_type efx_hunt_a0_vf_nic_type = { | ||
4341 | .is_vf = true, | ||
4342 | .mem_bar = EFX_MEM_VF_BAR, | ||
4343 | .mem_map_size = efx_ef10_mem_map_size, | ||
4344 | .probe = efx_ef10_probe_vf, | ||
4345 | .remove = efx_ef10_remove, | ||
4346 | .dimension_resources = efx_ef10_dimension_resources, | ||
4347 | .init = efx_ef10_init_nic, | ||
4348 | .fini = efx_port_dummy_op_void, | ||
4349 | .map_reset_reason = efx_ef10_map_reset_reason, | ||
4350 | .map_reset_flags = efx_ef10_map_reset_flags, | ||
4351 | .reset = efx_ef10_reset, | ||
4352 | .probe_port = efx_mcdi_port_probe, | ||
4353 | .remove_port = efx_mcdi_port_remove, | ||
4354 | .fini_dmaq = efx_ef10_fini_dmaq, | ||
4355 | .prepare_flr = efx_ef10_prepare_flr, | ||
4356 | .finish_flr = efx_port_dummy_op_void, | ||
4357 | .describe_stats = efx_ef10_describe_stats, | ||
4358 | .update_stats = efx_ef10_update_stats_vf, | ||
4359 | .start_stats = efx_port_dummy_op_void, | ||
4360 | .pull_stats = efx_port_dummy_op_void, | ||
4361 | .stop_stats = efx_port_dummy_op_void, | ||
4362 | .set_id_led = efx_mcdi_set_id_led, | ||
4363 | .push_irq_moderation = efx_ef10_push_irq_moderation, | ||
4364 | .reconfigure_mac = efx_ef10_mac_reconfigure_vf, | ||
4365 | .check_mac_fault = efx_mcdi_mac_check_fault, | ||
4366 | .reconfigure_port = efx_mcdi_port_reconfigure, | ||
4367 | .get_wol = efx_ef10_get_wol_vf, | ||
4368 | .set_wol = efx_ef10_set_wol_vf, | ||
4369 | .resume_wol = efx_port_dummy_op_void, | ||
4370 | .mcdi_request = efx_ef10_mcdi_request, | ||
4371 | .mcdi_poll_response = efx_ef10_mcdi_poll_response, | ||
4372 | .mcdi_read_response = efx_ef10_mcdi_read_response, | ||
4373 | .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, | ||
4374 | .irq_enable_master = efx_port_dummy_op_void, | ||
4375 | .irq_test_generate = efx_ef10_irq_test_generate, | ||
4376 | .irq_disable_non_ev = efx_port_dummy_op_void, | ||
4377 | .irq_handle_msi = efx_ef10_msi_interrupt, | ||
4378 | .irq_handle_legacy = efx_ef10_legacy_interrupt, | ||
4379 | .tx_probe = efx_ef10_tx_probe, | ||
4380 | .tx_init = efx_ef10_tx_init, | ||
4381 | .tx_remove = efx_ef10_tx_remove, | ||
4382 | .tx_write = efx_ef10_tx_write, | ||
4383 | .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, | ||
4384 | .rx_probe = efx_ef10_rx_probe, | ||
4385 | .rx_init = efx_ef10_rx_init, | ||
4386 | .rx_remove = efx_ef10_rx_remove, | ||
4387 | .rx_write = efx_ef10_rx_write, | ||
4388 | .rx_defer_refill = efx_ef10_rx_defer_refill, | ||
4389 | .ev_probe = efx_ef10_ev_probe, | ||
4390 | .ev_init = efx_ef10_ev_init, | ||
4391 | .ev_fini = efx_ef10_ev_fini, | ||
4392 | .ev_remove = efx_ef10_ev_remove, | ||
4393 | .ev_process = efx_ef10_ev_process, | ||
4394 | .ev_read_ack = efx_ef10_ev_read_ack, | ||
4395 | .ev_test_generate = efx_ef10_ev_test_generate, | ||
4396 | .filter_table_probe = efx_ef10_filter_table_probe, | ||
4397 | .filter_table_restore = efx_ef10_filter_table_restore, | ||
4398 | .filter_table_remove = efx_ef10_filter_table_remove, | ||
4399 | .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, | ||
4400 | .filter_insert = efx_ef10_filter_insert, | ||
4401 | .filter_remove_safe = efx_ef10_filter_remove_safe, | ||
4402 | .filter_get_safe = efx_ef10_filter_get_safe, | ||
4403 | .filter_clear_rx = efx_ef10_filter_clear_rx, | ||
4404 | .filter_count_rx_used = efx_ef10_filter_count_rx_used, | ||
4405 | .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, | ||
4406 | .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, | ||
4407 | #ifdef CONFIG_RFS_ACCEL | ||
4408 | .filter_rfs_insert = efx_ef10_filter_rfs_insert, | ||
4409 | .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, | ||
4410 | #endif | ||
4411 | #ifdef CONFIG_SFC_MTD | ||
4412 | .mtd_probe = efx_port_dummy_op_int, | ||
4413 | #endif | ||
4414 | .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, | ||
4415 | .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, | ||
4416 | #ifdef CONFIG_SFC_SRIOV | ||
4417 | .vswitching_probe = efx_ef10_vswitching_probe_vf, | ||
4418 | .vswitching_restore = efx_ef10_vswitching_restore_vf, | ||
4419 | .vswitching_remove = efx_ef10_vswitching_remove_vf, | ||
4420 | .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id, | ||
4421 | #endif | ||
4422 | .get_mac_address = efx_ef10_get_mac_address_vf, | ||
4423 | .set_mac_address = efx_ef10_set_mac_address, | ||
4424 | |||
4425 | .revision = EFX_REV_HUNT_A0, | ||
4426 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), | ||
4427 | .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, | ||
4428 | .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, | ||
4429 | .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, | ||
4430 | .can_rx_scatter = true, | ||
4431 | .always_rx_scatter = true, | ||
4432 | .max_interrupt_mode = EFX_INT_MODE_MSIX, | ||
4433 | .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, | ||
4434 | .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | ||
4435 | NETIF_F_RXHASH | NETIF_F_NTUPLE), | ||
4436 | .mcdi_max_ver = 2, | ||
4437 | .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, | ||
4438 | .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | | ||
4439 | 1 << HWTSTAMP_FILTER_ALL, | ||
4440 | }; | ||
4441 | |||
3610 | const struct efx_nic_type efx_hunt_a0_nic_type = { | 4442 | const struct efx_nic_type efx_hunt_a0_nic_type = { |
4443 | .is_vf = false, | ||
4444 | .mem_bar = EFX_MEM_BAR, | ||
3611 | .mem_map_size = efx_ef10_mem_map_size, | 4445 | .mem_map_size = efx_ef10_mem_map_size, |
3612 | .probe = efx_ef10_probe, | 4446 | .probe = efx_ef10_probe_pf, |
3613 | .remove = efx_ef10_remove, | 4447 | .remove = efx_ef10_remove, |
3614 | .dimension_resources = efx_ef10_dimension_resources, | 4448 | .dimension_resources = efx_ef10_dimension_resources, |
3615 | .init = efx_ef10_init_nic, | 4449 | .init = efx_ef10_init_nic, |
3616 | .fini = efx_port_dummy_op_void, | 4450 | .fini = efx_port_dummy_op_void, |
3617 | .map_reset_reason = efx_mcdi_map_reset_reason, | 4451 | .map_reset_reason = efx_ef10_map_reset_reason, |
3618 | .map_reset_flags = efx_ef10_map_reset_flags, | 4452 | .map_reset_flags = efx_ef10_map_reset_flags, |
3619 | .reset = efx_ef10_reset, | 4453 | .reset = efx_ef10_reset, |
3620 | .probe_port = efx_mcdi_port_probe, | 4454 | .probe_port = efx_mcdi_port_probe, |
@@ -3623,7 +4457,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { | |||
3623 | .prepare_flr = efx_ef10_prepare_flr, | 4457 | .prepare_flr = efx_ef10_prepare_flr, |
3624 | .finish_flr = efx_port_dummy_op_void, | 4458 | .finish_flr = efx_port_dummy_op_void, |
3625 | .describe_stats = efx_ef10_describe_stats, | 4459 | .describe_stats = efx_ef10_describe_stats, |
3626 | .update_stats = efx_ef10_update_stats, | 4460 | .update_stats = efx_ef10_update_stats_pf, |
3627 | .start_stats = efx_mcdi_mac_start_stats, | 4461 | .start_stats = efx_mcdi_mac_start_stats, |
3628 | .pull_stats = efx_mcdi_mac_pull_stats, | 4462 | .pull_stats = efx_mcdi_mac_pull_stats, |
3629 | .stop_stats = efx_mcdi_mac_stop_stats, | 4463 | .stop_stats = efx_mcdi_mac_stop_stats, |
@@ -3650,7 +4484,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { | |||
3650 | .tx_init = efx_ef10_tx_init, | 4484 | .tx_init = efx_ef10_tx_init, |
3651 | .tx_remove = efx_ef10_tx_remove, | 4485 | .tx_remove = efx_ef10_tx_remove, |
3652 | .tx_write = efx_ef10_tx_write, | 4486 | .tx_write = efx_ef10_tx_write, |
3653 | .rx_push_rss_config = efx_ef10_rx_push_rss_config, | 4487 | .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, |
3654 | .rx_probe = efx_ef10_rx_probe, | 4488 | .rx_probe = efx_ef10_rx_probe, |
3655 | .rx_init = efx_ef10_rx_init, | 4489 | .rx_init = efx_ef10_rx_init, |
3656 | .rx_remove = efx_ef10_rx_remove, | 4490 | .rx_remove = efx_ef10_rx_remove, |
@@ -3689,11 +4523,24 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { | |||
3689 | .ptp_write_host_time = efx_ef10_ptp_write_host_time, | 4523 | .ptp_write_host_time = efx_ef10_ptp_write_host_time, |
3690 | .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, | 4524 | .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, |
3691 | .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, | 4525 | .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, |
4526 | #ifdef CONFIG_SFC_SRIOV | ||
4527 | .sriov_configure = efx_ef10_sriov_configure, | ||
3692 | .sriov_init = efx_ef10_sriov_init, | 4528 | .sriov_init = efx_ef10_sriov_init, |
3693 | .sriov_fini = efx_ef10_sriov_fini, | 4529 | .sriov_fini = efx_ef10_sriov_fini, |
3694 | .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed, | ||
3695 | .sriov_wanted = efx_ef10_sriov_wanted, | 4530 | .sriov_wanted = efx_ef10_sriov_wanted, |
3696 | .sriov_reset = efx_ef10_sriov_reset, | 4531 | .sriov_reset = efx_ef10_sriov_reset, |
4532 | .sriov_flr = efx_ef10_sriov_flr, | ||
4533 | .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, | ||
4534 | .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, | ||
4535 | .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, | ||
4536 | .sriov_get_vf_config = efx_ef10_sriov_get_vf_config, | ||
4537 | .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state, | ||
4538 | .vswitching_probe = efx_ef10_vswitching_probe_pf, | ||
4539 | .vswitching_restore = efx_ef10_vswitching_restore_pf, | ||
4540 | .vswitching_remove = efx_ef10_vswitching_remove_pf, | ||
4541 | #endif | ||
4542 | .get_mac_address = efx_ef10_get_mac_address_pf, | ||
4543 | .set_mac_address = efx_ef10_set_mac_address, | ||
3697 | 4544 | ||
3698 | .revision = EFX_REV_HUNT_A0, | 4545 | .revision = EFX_REV_HUNT_A0, |
3699 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), | 4546 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), |