diff options
author:    David S. Miller <davem@davemloft.net>  2012-07-18 12:08:36 -0400
committer: David S. Miller <davem@davemloft.net>  2012-07-18 12:08:36 -0400
commit:    54399a78c95f1be0ae9bf6587cbddbfc641aab9c (patch)
tree:      19b942f2f76ac87a508df1161091b088fcb616ad /drivers
parent:    d3818c92afabecfe6b8e5d2e3734c8753522987c (diff)
parent:    c2dbab39db1c3c2ccbdbb2c6bac6f07cc7a7c1f6 (diff)
Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next
Ben Hutchings says:
====================
1. Fix potential badness when running a self-test with SR-IOV enabled.
2. Fix calculation of some interface statistics that could run backward.
3. Miscellaneous cleanup.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/ethernet/sfc/efx.c | 10 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/enum.h | 8 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/ethtool.c | 2 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/falcon.c | 35 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/falcon_xmac.c | 12 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/filter.c | 2 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/mcdi.c | 11 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/net_driver.h | 9 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/nic.c | 11 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/nic.h | 18 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/rx.c | 22 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/selftest.c | 64 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/siena.c | 37 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/tx.c | 93 |
14 files changed, 181 insertions(+), 153 deletions(-)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index b95f2e1b33f0..70554a1b2b02 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -1103,8 +1103,8 @@ static int efx_init_io(struct efx_nic *efx) | |||
1103 | * masks event though they reject 46 bit masks. | 1103 | * masks event though they reject 46 bit masks. |
1104 | */ | 1104 | */ |
1105 | while (dma_mask > 0x7fffffffUL) { | 1105 | while (dma_mask > 0x7fffffffUL) { |
1106 | if (pci_dma_supported(pci_dev, dma_mask)) { | 1106 | if (dma_supported(&pci_dev->dev, dma_mask)) { |
1107 | rc = pci_set_dma_mask(pci_dev, dma_mask); | 1107 | rc = dma_set_mask(&pci_dev->dev, dma_mask); |
1108 | if (rc == 0) | 1108 | if (rc == 0) |
1109 | break; | 1109 | break; |
1110 | } | 1110 | } |
@@ -1117,10 +1117,10 @@ static int efx_init_io(struct efx_nic *efx) | |||
1117 | } | 1117 | } |
1118 | netif_dbg(efx, probe, efx->net_dev, | 1118 | netif_dbg(efx, probe, efx->net_dev, |
1119 | "using DMA mask %llx\n", (unsigned long long) dma_mask); | 1119 | "using DMA mask %llx\n", (unsigned long long) dma_mask); |
1120 | rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); | 1120 | rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask); |
1121 | if (rc) { | 1121 | if (rc) { |
1122 | /* pci_set_consistent_dma_mask() is not *allowed* to | 1122 | /* dma_set_coherent_mask() is not *allowed* to |
1123 | * fail with a mask that pci_set_dma_mask() accepted, | 1123 | * fail with a mask that dma_set_mask() accepted, |
1124 | * but just in case... | 1124 | * but just in case... |
1125 | */ | 1125 | */ |
1126 | netif_err(efx, probe, efx->net_dev, | 1126 | netif_err(efx, probe, efx->net_dev, |
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h index d725a8fbe1a6..182dbe2cc6e4 100644 --- a/drivers/net/ethernet/sfc/enum.h +++ b/drivers/net/ethernet/sfc/enum.h | |||
@@ -136,10 +136,10 @@ enum efx_loopback_mode { | |||
136 | * | 136 | * |
137 | * Reset methods are numbered in order of increasing scope. | 137 | * Reset methods are numbered in order of increasing scope. |
138 | * | 138 | * |
139 | * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts | 139 | * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only) |
140 | * @RESET_TYPE_ALL: reset everything but PCI core blocks | 140 | * @RESET_TYPE_ALL: Reset datapath, MAC and PHY |
141 | * @RESET_TYPE_WORLD: reset everything, save & restore PCI config | 141 | * @RESET_TYPE_WORLD: Reset as much as possible |
142 | * @RESET_TYPE_DISABLE: disable NIC | 142 | * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled |
143 | * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog | 143 | * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog |
144 | * @RESET_TYPE_INT_ERROR: reset due to internal error | 144 | * @RESET_TYPE_INT_ERROR: reset due to internal error |
145 | * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors | 145 | * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors |
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 03ded364c8da..10536f93b561 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c | |||
@@ -453,7 +453,7 @@ static void efx_ethtool_get_strings(struct net_device *net_dev, | |||
453 | switch (string_set) { | 453 | switch (string_set) { |
454 | case ETH_SS_STATS: | 454 | case ETH_SS_STATS: |
455 | for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) | 455 | for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) |
456 | strncpy(ethtool_strings[i].name, | 456 | strlcpy(ethtool_strings[i].name, |
457 | efx_ethtool_stats[i].name, | 457 | efx_ethtool_stats[i].name, |
458 | sizeof(ethtool_strings[i].name)); | 458 | sizeof(ethtool_strings[i].name)); |
459 | break; | 459 | break; |
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 3a1ca2bd1548..12b573a8e82b 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c | |||
@@ -25,9 +25,12 @@ | |||
25 | #include "io.h" | 25 | #include "io.h" |
26 | #include "phy.h" | 26 | #include "phy.h" |
27 | #include "workarounds.h" | 27 | #include "workarounds.h" |
28 | #include "selftest.h" | ||
28 | 29 | ||
29 | /* Hardware control for SFC4000 (aka Falcon). */ | 30 | /* Hardware control for SFC4000 (aka Falcon). */ |
30 | 31 | ||
32 | static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); | ||
33 | |||
31 | static const unsigned int | 34 | static const unsigned int |
32 | /* "Large" EEPROM device: Atmel AT25640 or similar | 35 | /* "Large" EEPROM device: Atmel AT25640 or similar |
33 | * 8 KB, 16-bit address, 32 B write block */ | 36 | * 8 KB, 16-bit address, 32 B write block */ |
@@ -1034,10 +1037,34 @@ static const struct efx_nic_register_test falcon_b0_register_tests[] = { | |||
1034 | EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, | 1037 | EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, |
1035 | }; | 1038 | }; |
1036 | 1039 | ||
/* Run the register self-test on a Falcon B0 NIC.
 *
 * The XMAC registers need the 312 clock supplied by the PHY, so the
 * port is first moved into a loopback mode (preferring XGMII) and
 * reconfigured under mac_lock.  The datapath is then taken down with
 * efx_reset_down() before the destructive register test runs, and the
 * NIC is reset and brought back up afterwards.
 *
 * @efx:   NIC to test
 * @tests: result block; ->registers is set to 1 on pass, -1 on failure
 *
 * Returns 0 on success, or the first error from the reset/recovery
 * path.  NOTE(review): this routine is expected to reset the NIC as
 * part of the ->test_chip contract (see net_driver.h kernel-doc).
 */
static int
falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
	int rc, rc2;

	mutex_lock(&efx->mac_lock);
	if (efx->loopback_modes) {
		/* We need the 312 clock from the PHY to test the XMAC
		 * registers, so move into XGMII loopback if available */
		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
			efx->loopback_mode = LOOPBACK_XGMII;
		else
			efx->loopback_mode = __ffs(efx->loopback_modes);
	}
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	/* Free up all consumers of SRAM (including all the queues)
	 * before running the destructive register test */
	efx_reset_down(efx, reset_method);

	/* ethtool convention for self-test results: 1 = pass, -1 = fail */
	tests->registers =
		efx_nic_test_registers(efx, falcon_b0_register_tests,
				       ARRAY_SIZE(falcon_b0_register_tests))
		? -1 : 1;

	/* Reset the chip to recover, then restore the datapath; report
	 * the first failure from either step */
	rc = falcon_reset_hw(efx, reset_method);
	rc2 = efx_reset_up(efx, reset_method, rc == 0);
	return rc ? rc : rc2;
}
1042 | 1069 | ||
1043 | /************************************************************************** | 1070 | /************************************************************************** |
@@ -1818,7 +1845,7 @@ const struct efx_nic_type falcon_b0_nic_type = { | |||
1818 | .get_wol = falcon_get_wol, | 1845 | .get_wol = falcon_get_wol, |
1819 | .set_wol = falcon_set_wol, | 1846 | .set_wol = falcon_set_wol, |
1820 | .resume_wol = efx_port_dummy_op_void, | 1847 | .resume_wol = efx_port_dummy_op_void, |
1821 | .test_registers = falcon_b0_test_registers, | 1848 | .test_chip = falcon_b0_test_chip, |
1822 | .test_nvram = falcon_test_nvram, | 1849 | .test_nvram = falcon_test_nvram, |
1823 | 1850 | ||
1824 | .revision = EFX_REV_FALCON_B0, | 1851 | .revision = EFX_REV_FALCON_B0, |
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c index 6106ef15dee3..8333865d4c95 100644 --- a/drivers/net/ethernet/sfc/falcon_xmac.c +++ b/drivers/net/ethernet/sfc/falcon_xmac.c | |||
@@ -341,12 +341,12 @@ void falcon_update_stats_xmac(struct efx_nic *efx) | |||
341 | FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error); | 341 | FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error); |
342 | 342 | ||
343 | /* Update derived statistics */ | 343 | /* Update derived statistics */ |
344 | mac_stats->tx_good_bytes = | 344 | efx_update_diff_stat(&mac_stats->tx_good_bytes, |
345 | (mac_stats->tx_bytes - mac_stats->tx_bad_bytes - | 345 | mac_stats->tx_bytes - mac_stats->tx_bad_bytes - |
346 | mac_stats->tx_control * 64); | 346 | mac_stats->tx_control * 64); |
347 | mac_stats->rx_bad_bytes = | 347 | efx_update_diff_stat(&mac_stats->rx_bad_bytes, |
348 | (mac_stats->rx_bytes - mac_stats->rx_good_bytes - | 348 | mac_stats->rx_bytes - mac_stats->rx_good_bytes - |
349 | mac_stats->rx_control * 64); | 349 | mac_stats->rx_control * 64); |
350 | } | 350 | } |
351 | 351 | ||
352 | void falcon_poll_xmac(struct efx_nic *efx) | 352 | void falcon_poll_xmac(struct efx_nic *efx) |
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index fea7f7300675..c3fd61f0a95c 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c | |||
@@ -662,7 +662,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, | |||
662 | struct efx_filter_table *table = efx_filter_spec_table(state, spec); | 662 | struct efx_filter_table *table = efx_filter_spec_table(state, spec); |
663 | struct efx_filter_spec *saved_spec; | 663 | struct efx_filter_spec *saved_spec; |
664 | efx_oword_t filter; | 664 | efx_oword_t filter; |
665 | unsigned int filter_idx, depth; | 665 | unsigned int filter_idx, depth = 0; |
666 | u32 key; | 666 | u32 key; |
667 | int rc; | 667 | int rc; |
668 | 668 | ||
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 17b6463e459c..fc5e7bbcbc9e 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c | |||
/* Reboot the MC firmware out of its assertion handler.
 *
 * Issues MC_CMD_REBOOT with the AFTER_ASSERTION flag so the command is
 * a no-op if the MC has already rebooted.  Best-effort: the return
 * value is deliberately discarded (see comment below).
 */
static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	u8 inbuf[MC_CMD_REBOOT_IN_LEN];

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot.  We set a flag that makes the command a no-op if it
	 * has already done so.  We don't know what return code to
	 * expect (0 or -EIO), so ignore it.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
			    NULL, 0, NULL);
}
1011 | 1016 | ||
1012 | int efx_mcdi_handle_assertion(struct efx_nic *efx) | 1017 | int efx_mcdi_handle_assertion(struct efx_nic *efx) |
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index a1965c07d1e3..cd9c0a989692 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h | |||
@@ -68,6 +68,8 @@ | |||
68 | #define EFX_TXQ_TYPES 4 | 68 | #define EFX_TXQ_TYPES 4 |
69 | #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) | 69 | #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) |
70 | 70 | ||
71 | struct efx_self_tests; | ||
72 | |||
71 | /** | 73 | /** |
72 | * struct efx_special_buffer - An Efx special buffer | 74 | * struct efx_special_buffer - An Efx special buffer |
73 | * @addr: CPU base address of the buffer | 75 | * @addr: CPU base address of the buffer |
@@ -100,7 +102,7 @@ struct efx_special_buffer { | |||
100 | * @len: Length of this fragment. | 102 | * @len: Length of this fragment. |
101 | * This field is zero when the queue slot is empty. | 103 | * This field is zero when the queue slot is empty. |
102 | * @continuation: True if this fragment is not the end of a packet. | 104 | * @continuation: True if this fragment is not the end of a packet. |
103 | * @unmap_single: True if pci_unmap_single should be used. | 105 | * @unmap_single: True if dma_unmap_single should be used. |
104 | * @unmap_len: Length of this fragment to unmap | 106 | * @unmap_len: Length of this fragment to unmap |
105 | */ | 107 | */ |
106 | struct efx_tx_buffer { | 108 | struct efx_tx_buffer { |
@@ -901,7 +903,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) | |||
901 | * @get_wol: Get WoL configuration from driver state | 903 | * @get_wol: Get WoL configuration from driver state |
902 | * @set_wol: Push WoL configuration to the NIC | 904 | * @set_wol: Push WoL configuration to the NIC |
903 | * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) | 905 | * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) |
904 | * @test_registers: Test read/write functionality of control registers | 906 | * @test_chip: Test registers. Should use efx_nic_test_registers(), and is |
907 | * expected to reset the NIC. | ||
905 | * @test_nvram: Test validity of NVRAM contents | 908 | * @test_nvram: Test validity of NVRAM contents |
906 | * @revision: Hardware architecture revision | 909 | * @revision: Hardware architecture revision |
907 | * @mem_map_size: Memory BAR mapped size | 910 | * @mem_map_size: Memory BAR mapped size |
@@ -946,7 +949,7 @@ struct efx_nic_type { | |||
946 | void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); | 949 | void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); |
947 | int (*set_wol)(struct efx_nic *efx, u32 type); | 950 | int (*set_wol)(struct efx_nic *efx, u32 type); |
948 | void (*resume_wol)(struct efx_nic *efx); | 951 | void (*resume_wol)(struct efx_nic *efx); |
949 | int (*test_registers)(struct efx_nic *efx); | 952 | int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests); |
950 | int (*test_nvram)(struct efx_nic *efx); | 953 | int (*test_nvram)(struct efx_nic *efx); |
951 | 954 | ||
952 | int revision; | 955 | int revision; |
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 4a9a5beec8fc..326d799762d6 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c | |||
@@ -124,9 +124,6 @@ int efx_nic_test_registers(struct efx_nic *efx, | |||
124 | unsigned address = 0, i, j; | 124 | unsigned address = 0, i, j; |
125 | efx_oword_t mask, imask, original, reg, buf; | 125 | efx_oword_t mask, imask, original, reg, buf; |
126 | 126 | ||
127 | /* Falcon should be in loopback to isolate the XMAC from the PHY */ | ||
128 | WARN_ON(!LOOPBACK_INTERNAL(efx)); | ||
129 | |||
130 | for (i = 0; i < n_regs; ++i) { | 127 | for (i = 0; i < n_regs; ++i) { |
131 | address = regs[i].address; | 128 | address = regs[i].address; |
132 | mask = imask = regs[i].mask; | 129 | mask = imask = regs[i].mask; |
@@ -308,8 +305,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | |||
308 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, | 305 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, |
309 | unsigned int len) | 306 | unsigned int len) |
310 | { | 307 | { |
311 | buffer->addr = pci_alloc_consistent(efx->pci_dev, len, | 308 | buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, |
312 | &buffer->dma_addr); | 309 | &buffer->dma_addr, GFP_ATOMIC); |
313 | if (!buffer->addr) | 310 | if (!buffer->addr) |
314 | return -ENOMEM; | 311 | return -ENOMEM; |
315 | buffer->len = len; | 312 | buffer->len = len; |
@@ -320,8 +317,8 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, | |||
320 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) | 317 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) |
321 | { | 318 | { |
322 | if (buffer->addr) { | 319 | if (buffer->addr) { |
323 | pci_free_consistent(efx->pci_dev, buffer->len, | 320 | dma_free_coherent(&efx->pci_dev->dev, buffer->len, |
324 | buffer->addr, buffer->dma_addr); | 321 | buffer->addr, buffer->dma_addr); |
325 | buffer->addr = NULL; | 322 | buffer->addr = NULL; |
326 | } | 323 | } |
327 | } | 324 | } |
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index f48ccf6bb3b9..bab5cd9f5740 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h | |||
@@ -294,6 +294,24 @@ extern bool falcon_xmac_check_fault(struct efx_nic *efx); | |||
294 | extern int falcon_reconfigure_xmac(struct efx_nic *efx); | 294 | extern int falcon_reconfigure_xmac(struct efx_nic *efx); |
295 | extern void falcon_update_stats_xmac(struct efx_nic *efx); | 295 | extern void falcon_update_stats_xmac(struct efx_nic *efx); |
296 | 296 | ||
297 | /* Some statistics are computed as A - B where A and B each increase | ||
298 | * linearly with some hardware counter(s) and the counters are read | ||
299 | * asynchronously. If the counters contributing to B are always read | ||
300 | * after those contributing to A, the computed value may be lower than | ||
301 | * the true value by some variable amount, and may decrease between | ||
302 | * subsequent computations. | ||
303 | * | ||
304 | * We should never allow statistics to decrease or to exceed the true | ||
305 | * value. Since the computed value will never be greater than the | ||
306 | * true value, we can achieve this by only storing the computed value | ||
307 | * when it increases. | ||
308 | */ | ||
309 | static inline void efx_update_diff_stat(u64 *stat, u64 diff) | ||
310 | { | ||
311 | if ((s64)(diff - *stat) > 0) | ||
312 | *stat = diff; | ||
313 | } | ||
314 | |||
297 | /* Interrupts and test events */ | 315 | /* Interrupts and test events */ |
298 | extern int efx_nic_init_interrupt(struct efx_nic *efx); | 316 | extern int efx_nic_init_interrupt(struct efx_nic *efx); |
299 | extern void efx_nic_enable_interrupts(struct efx_nic *efx); | 317 | extern void efx_nic_enable_interrupts(struct efx_nic *efx); |
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index fca61fea38e0..719319b89d7a 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
@@ -155,11 +155,11 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue) | |||
155 | rx_buf->len = skb_len - NET_IP_ALIGN; | 155 | rx_buf->len = skb_len - NET_IP_ALIGN; |
156 | rx_buf->flags = 0; | 156 | rx_buf->flags = 0; |
157 | 157 | ||
158 | rx_buf->dma_addr = pci_map_single(efx->pci_dev, | 158 | rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev, |
159 | skb->data, rx_buf->len, | 159 | skb->data, rx_buf->len, |
160 | PCI_DMA_FROMDEVICE); | 160 | DMA_FROM_DEVICE); |
161 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, | 161 | if (unlikely(dma_mapping_error(&efx->pci_dev->dev, |
162 | rx_buf->dma_addr))) { | 162 | rx_buf->dma_addr))) { |
163 | dev_kfree_skb_any(skb); | 163 | dev_kfree_skb_any(skb); |
164 | rx_buf->u.skb = NULL; | 164 | rx_buf->u.skb = NULL; |
165 | return -EIO; | 165 | return -EIO; |
@@ -200,10 +200,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) | |||
200 | efx->rx_buffer_order); | 200 | efx->rx_buffer_order); |
201 | if (unlikely(page == NULL)) | 201 | if (unlikely(page == NULL)) |
202 | return -ENOMEM; | 202 | return -ENOMEM; |
203 | dma_addr = pci_map_page(efx->pci_dev, page, 0, | 203 | dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0, |
204 | efx_rx_buf_size(efx), | 204 | efx_rx_buf_size(efx), |
205 | PCI_DMA_FROMDEVICE); | 205 | DMA_FROM_DEVICE); |
206 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { | 206 | if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) { |
207 | __free_pages(page, efx->rx_buffer_order); | 207 | __free_pages(page, efx->rx_buffer_order); |
208 | return -EIO; | 208 | return -EIO; |
209 | } | 209 | } |
@@ -247,14 +247,14 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx, | |||
247 | 247 | ||
248 | state = page_address(rx_buf->u.page); | 248 | state = page_address(rx_buf->u.page); |
249 | if (--state->refcnt == 0) { | 249 | if (--state->refcnt == 0) { |
250 | pci_unmap_page(efx->pci_dev, | 250 | dma_unmap_page(&efx->pci_dev->dev, |
251 | state->dma_addr, | 251 | state->dma_addr, |
252 | efx_rx_buf_size(efx), | 252 | efx_rx_buf_size(efx), |
253 | PCI_DMA_FROMDEVICE); | 253 | DMA_FROM_DEVICE); |
254 | } | 254 | } |
255 | } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) { | 255 | } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) { |
256 | pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, | 256 | dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr, |
257 | rx_buf->len, PCI_DMA_FROMDEVICE); | 257 | rx_buf->len, DMA_FROM_DEVICE); |
258 | } | 258 | } |
259 | } | 259 | } |
260 | 260 | ||
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index de4c0069f5b2..96068d15b601 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c | |||
@@ -120,19 +120,6 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) | |||
120 | return rc; | 120 | return rc; |
121 | } | 121 | } |
122 | 122 | ||
123 | static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) | ||
124 | { | ||
125 | int rc = 0; | ||
126 | |||
127 | /* Test register access */ | ||
128 | if (efx->type->test_registers) { | ||
129 | rc = efx->type->test_registers(efx); | ||
130 | tests->registers = rc ? -1 : 1; | ||
131 | } | ||
132 | |||
133 | return rc; | ||
134 | } | ||
135 | |||
136 | /************************************************************************** | 123 | /************************************************************************** |
137 | * | 124 | * |
138 | * Interrupt and event queue testing | 125 | * Interrupt and event queue testing |
@@ -488,7 +475,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue, | |||
488 | skb = state->skbs[i]; | 475 | skb = state->skbs[i]; |
489 | if (skb && !skb_shared(skb)) | 476 | if (skb && !skb_shared(skb)) |
490 | ++tx_done; | 477 | ++tx_done; |
491 | dev_kfree_skb_any(skb); | 478 | dev_kfree_skb(skb); |
492 | } | 479 | } |
493 | 480 | ||
494 | netif_tx_unlock_bh(efx->net_dev); | 481 | netif_tx_unlock_bh(efx->net_dev); |
@@ -699,8 +686,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, | |||
699 | { | 686 | { |
700 | enum efx_loopback_mode loopback_mode = efx->loopback_mode; | 687 | enum efx_loopback_mode loopback_mode = efx->loopback_mode; |
701 | int phy_mode = efx->phy_mode; | 688 | int phy_mode = efx->phy_mode; |
702 | enum reset_type reset_method = RESET_TYPE_INVISIBLE; | 689 | int rc_test = 0, rc_reset, rc; |
703 | int rc_test = 0, rc_reset = 0, rc; | ||
704 | 690 | ||
705 | efx_selftest_async_cancel(efx); | 691 | efx_selftest_async_cancel(efx); |
706 | 692 | ||
@@ -737,44 +723,26 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, | |||
737 | */ | 723 | */ |
738 | netif_device_detach(efx->net_dev); | 724 | netif_device_detach(efx->net_dev); |
739 | 725 | ||
740 | mutex_lock(&efx->mac_lock); | 726 | if (efx->type->test_chip) { |
741 | if (efx->loopback_modes) { | 727 | rc_reset = efx->type->test_chip(efx, tests); |
742 | /* We need the 312 clock from the PHY to test the XMAC | 728 | if (rc_reset) { |
743 | * registers, so move into XGMII loopback if available */ | 729 | netif_err(efx, hw, efx->net_dev, |
744 | if (efx->loopback_modes & (1 << LOOPBACK_XGMII)) | 730 | "Unable to recover from chip test\n"); |
745 | efx->loopback_mode = LOOPBACK_XGMII; | 731 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); |
746 | else | 732 | return rc_reset; |
747 | efx->loopback_mode = __ffs(efx->loopback_modes); | 733 | } |
748 | } | ||
749 | |||
750 | __efx_reconfigure_port(efx); | ||
751 | mutex_unlock(&efx->mac_lock); | ||
752 | |||
753 | /* free up all consumers of SRAM (including all the queues) */ | ||
754 | efx_reset_down(efx, reset_method); | ||
755 | |||
756 | rc = efx_test_chip(efx, tests); | ||
757 | if (rc && !rc_test) | ||
758 | rc_test = rc; | ||
759 | 734 | ||
760 | /* reset the chip to recover from the register test */ | 735 | if ((tests->registers < 0) && !rc_test) |
761 | rc_reset = efx->type->reset(efx, reset_method); | 736 | rc_test = -EIO; |
737 | } | ||
762 | 738 | ||
763 | /* Ensure that the phy is powered and out of loopback | 739 | /* Ensure that the phy is powered and out of loopback |
764 | * for the bist and loopback tests */ | 740 | * for the bist and loopback tests */ |
741 | mutex_lock(&efx->mac_lock); | ||
765 | efx->phy_mode &= ~PHY_MODE_LOW_POWER; | 742 | efx->phy_mode &= ~PHY_MODE_LOW_POWER; |
766 | efx->loopback_mode = LOOPBACK_NONE; | 743 | efx->loopback_mode = LOOPBACK_NONE; |
767 | 744 | __efx_reconfigure_port(efx); | |
768 | rc = efx_reset_up(efx, reset_method, rc_reset == 0); | 745 | mutex_unlock(&efx->mac_lock); |
769 | if (rc && !rc_reset) | ||
770 | rc_reset = rc; | ||
771 | |||
772 | if (rc_reset) { | ||
773 | netif_err(efx, drv, efx->net_dev, | ||
774 | "Unable to recover from chip test\n"); | ||
775 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | ||
776 | return rc_reset; | ||
777 | } | ||
778 | 746 | ||
779 | rc = efx_test_phy(efx, tests, flags); | 747 | rc = efx_test_phy(efx, tests, flags); |
780 | if (rc && !rc_test) | 748 | if (rc && !rc_test) |
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 9f8d7cea3967..6bafd216e55e 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c | |||
@@ -25,10 +25,12 @@ | |||
25 | #include "workarounds.h" | 25 | #include "workarounds.h" |
26 | #include "mcdi.h" | 26 | #include "mcdi.h" |
27 | #include "mcdi_pcol.h" | 27 | #include "mcdi_pcol.h" |
28 | #include "selftest.h" | ||
28 | 29 | ||
29 | /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ | 30 | /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ |
30 | 31 | ||
31 | static void siena_init_wol(struct efx_nic *efx); | 32 | static void siena_init_wol(struct efx_nic *efx); |
33 | static int siena_reset_hw(struct efx_nic *efx, enum reset_type method); | ||
32 | 34 | ||
33 | 35 | ||
34 | static void siena_push_irq_moderation(struct efx_channel *channel) | 36 | static void siena_push_irq_moderation(struct efx_channel *channel) |
@@ -154,10 +156,29 @@ static const struct efx_nic_register_test siena_register_tests[] = { | |||
154 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, | 156 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, |
155 | }; | 157 | }; |
156 | 158 | ||
157 | static int siena_test_registers(struct efx_nic *efx) | 159 | static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) |
158 | { | 160 | { |
159 | return efx_nic_test_registers(efx, siena_register_tests, | 161 | enum reset_type reset_method = reset_method; |
160 | ARRAY_SIZE(siena_register_tests)); | 162 | int rc, rc2; |
163 | |||
164 | efx_reset_down(efx, reset_method); | ||
165 | |||
166 | /* Reset the chip immediately so that it is completely | ||
167 | * quiescent regardless of what any VF driver does. | ||
168 | */ | ||
169 | rc = siena_reset_hw(efx, reset_method); | ||
170 | if (rc) | ||
171 | goto out; | ||
172 | |||
173 | tests->registers = | ||
174 | efx_nic_test_registers(efx, siena_register_tests, | ||
175 | ARRAY_SIZE(siena_register_tests)) | ||
176 | ? -1 : 1; | ||
177 | |||
178 | rc = siena_reset_hw(efx, reset_method); | ||
179 | out: | ||
180 | rc2 = efx_reset_up(efx, reset_method, rc == 0); | ||
181 | return rc ? rc : rc2; | ||
161 | } | 182 | } |
162 | 183 | ||
163 | /************************************************************************** | 184 | /************************************************************************** |
@@ -437,8 +458,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) | |||
437 | 458 | ||
438 | MAC_STAT(tx_bytes, TX_BYTES); | 459 | MAC_STAT(tx_bytes, TX_BYTES); |
439 | MAC_STAT(tx_bad_bytes, TX_BAD_BYTES); | 460 | MAC_STAT(tx_bad_bytes, TX_BAD_BYTES); |
440 | mac_stats->tx_good_bytes = (mac_stats->tx_bytes - | 461 | efx_update_diff_stat(&mac_stats->tx_good_bytes, |
441 | mac_stats->tx_bad_bytes); | 462 | mac_stats->tx_bytes - mac_stats->tx_bad_bytes); |
442 | MAC_STAT(tx_packets, TX_PKTS); | 463 | MAC_STAT(tx_packets, TX_PKTS); |
443 | MAC_STAT(tx_bad, TX_BAD_FCS_PKTS); | 464 | MAC_STAT(tx_bad, TX_BAD_FCS_PKTS); |
444 | MAC_STAT(tx_pause, TX_PAUSE_PKTS); | 465 | MAC_STAT(tx_pause, TX_PAUSE_PKTS); |
@@ -471,8 +492,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) | |||
471 | MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS); | 492 | MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS); |
472 | MAC_STAT(rx_bytes, RX_BYTES); | 493 | MAC_STAT(rx_bytes, RX_BYTES); |
473 | MAC_STAT(rx_bad_bytes, RX_BAD_BYTES); | 494 | MAC_STAT(rx_bad_bytes, RX_BAD_BYTES); |
474 | mac_stats->rx_good_bytes = (mac_stats->rx_bytes - | 495 | efx_update_diff_stat(&mac_stats->rx_good_bytes, |
475 | mac_stats->rx_bad_bytes); | 496 | mac_stats->rx_bytes - mac_stats->rx_bad_bytes); |
476 | MAC_STAT(rx_packets, RX_PKTS); | 497 | MAC_STAT(rx_packets, RX_PKTS); |
477 | MAC_STAT(rx_good, RX_GOOD_PKTS); | 498 | MAC_STAT(rx_good, RX_GOOD_PKTS); |
478 | MAC_STAT(rx_bad, RX_BAD_FCS_PKTS); | 499 | MAC_STAT(rx_bad, RX_BAD_FCS_PKTS); |
@@ -649,7 +670,7 @@ const struct efx_nic_type siena_a0_nic_type = { | |||
649 | .get_wol = siena_get_wol, | 670 | .get_wol = siena_get_wol, |
650 | .set_wol = siena_set_wol, | 671 | .set_wol = siena_set_wol, |
651 | .resume_wol = siena_init_wol, | 672 | .resume_wol = siena_init_wol, |
652 | .test_registers = siena_test_registers, | 673 | .test_chip = siena_test_chip, |
653 | .test_nvram = efx_mcdi_nvram_test_all, | 674 | .test_nvram = efx_mcdi_nvram_test_all, |
654 | 675 | ||
655 | .revision = EFX_REV_SIENA_A0, | 676 | .revision = EFX_REV_SIENA_A0, |
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 94d0365b31cd..9b225a7769f7 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c | |||
@@ -36,15 +36,15 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, | |||
36 | unsigned int *bytes_compl) | 36 | unsigned int *bytes_compl) |
37 | { | 37 | { |
38 | if (buffer->unmap_len) { | 38 | if (buffer->unmap_len) { |
39 | struct pci_dev *pci_dev = tx_queue->efx->pci_dev; | 39 | struct device *dma_dev = &tx_queue->efx->pci_dev->dev; |
40 | dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - | 40 | dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - |
41 | buffer->unmap_len); | 41 | buffer->unmap_len); |
42 | if (buffer->unmap_single) | 42 | if (buffer->unmap_single) |
43 | pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len, | 43 | dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, |
44 | PCI_DMA_TODEVICE); | 44 | DMA_TO_DEVICE); |
45 | else | 45 | else |
46 | pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len, | 46 | dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, |
47 | PCI_DMA_TODEVICE); | 47 | DMA_TO_DEVICE); |
48 | buffer->unmap_len = 0; | 48 | buffer->unmap_len = 0; |
49 | buffer->unmap_single = false; | 49 | buffer->unmap_single = false; |
50 | } | 50 | } |
@@ -138,7 +138,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) | |||
138 | netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) | 138 | netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) |
139 | { | 139 | { |
140 | struct efx_nic *efx = tx_queue->efx; | 140 | struct efx_nic *efx = tx_queue->efx; |
141 | struct pci_dev *pci_dev = efx->pci_dev; | 141 | struct device *dma_dev = &efx->pci_dev->dev; |
142 | struct efx_tx_buffer *buffer; | 142 | struct efx_tx_buffer *buffer; |
143 | skb_frag_t *fragment; | 143 | skb_frag_t *fragment; |
144 | unsigned int len, unmap_len = 0, fill_level, insert_ptr; | 144 | unsigned int len, unmap_len = 0, fill_level, insert_ptr; |
@@ -167,17 +167,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) | |||
167 | fill_level = tx_queue->insert_count - tx_queue->old_read_count; | 167 | fill_level = tx_queue->insert_count - tx_queue->old_read_count; |
168 | q_space = efx->txq_entries - 1 - fill_level; | 168 | q_space = efx->txq_entries - 1 - fill_level; |
169 | 169 | ||
170 | /* Map for DMA. Use pci_map_single rather than pci_map_page | 170 | /* Map for DMA. Use dma_map_single rather than dma_map_page |
171 | * since this is more efficient on machines with sparse | 171 | * since this is more efficient on machines with sparse |
172 | * memory. | 172 | * memory. |
173 | */ | 173 | */ |
174 | unmap_single = true; | 174 | unmap_single = true; |
175 | dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE); | 175 | dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE); |
176 | 176 | ||
177 | /* Process all fragments */ | 177 | /* Process all fragments */ |
178 | while (1) { | 178 | while (1) { |
179 | if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr))) | 179 | if (unlikely(dma_mapping_error(dma_dev, dma_addr))) |
180 | goto pci_err; | 180 | goto dma_err; |
181 | 181 | ||
182 | /* Store fields for marking in the per-fragment final | 182 | /* Store fields for marking in the per-fragment final |
183 | * descriptor */ | 183 | * descriptor */ |
@@ -246,7 +246,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) | |||
246 | i++; | 246 | i++; |
247 | /* Map for DMA */ | 247 | /* Map for DMA */ |
248 | unmap_single = false; | 248 | unmap_single = false; |
249 | dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len, | 249 | dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, |
250 | DMA_TO_DEVICE); | 250 | DMA_TO_DEVICE); |
251 | } | 251 | } |
252 | 252 | ||
@@ -261,7 +261,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) | |||
261 | 261 | ||
262 | return NETDEV_TX_OK; | 262 | return NETDEV_TX_OK; |
263 | 263 | ||
264 | pci_err: | 264 | dma_err: |
265 | netif_err(efx, tx_err, efx->net_dev, | 265 | netif_err(efx, tx_err, efx->net_dev, |
266 | " TX queue %d could not map skb with %d bytes %d " | 266 | " TX queue %d could not map skb with %d bytes %d " |
267 | "fragments for DMA\n", tx_queue->queue, skb->len, | 267 | "fragments for DMA\n", tx_queue->queue, skb->len, |
@@ -284,11 +284,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) | |||
284 | /* Free the fragment we were mid-way through pushing */ | 284 | /* Free the fragment we were mid-way through pushing */ |
285 | if (unmap_len) { | 285 | if (unmap_len) { |
286 | if (unmap_single) | 286 | if (unmap_single) |
287 | pci_unmap_single(pci_dev, unmap_addr, unmap_len, | 287 | dma_unmap_single(dma_dev, unmap_addr, unmap_len, |
288 | PCI_DMA_TODEVICE); | 288 | DMA_TO_DEVICE); |
289 | else | 289 | else |
290 | pci_unmap_page(pci_dev, unmap_addr, unmap_len, | 290 | dma_unmap_page(dma_dev, unmap_addr, unmap_len, |
291 | PCI_DMA_TODEVICE); | 291 | DMA_TO_DEVICE); |
292 | } | 292 | } |
293 | 293 | ||
294 | return rc; | 294 | return rc; |
@@ -651,17 +651,8 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb) | |||
651 | EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != | 651 | EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != |
652 | protocol); | 652 | protocol); |
653 | if (protocol == htons(ETH_P_8021Q)) { | 653 | if (protocol == htons(ETH_P_8021Q)) { |
654 | /* Find the encapsulated protocol; reset network header | ||
655 | * and transport header based on that. */ | ||
656 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; | 654 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; |
657 | protocol = veh->h_vlan_encapsulated_proto; | 655 | protocol = veh->h_vlan_encapsulated_proto; |
658 | skb_set_network_header(skb, sizeof(*veh)); | ||
659 | if (protocol == htons(ETH_P_IP)) | ||
660 | skb_set_transport_header(skb, sizeof(*veh) + | ||
661 | 4 * ip_hdr(skb)->ihl); | ||
662 | else if (protocol == htons(ETH_P_IPV6)) | ||
663 | skb_set_transport_header(skb, sizeof(*veh) + | ||
664 | sizeof(struct ipv6hdr)); | ||
665 | } | 656 | } |
666 | 657 | ||
667 | if (protocol == htons(ETH_P_IP)) { | 658 | if (protocol == htons(ETH_P_IP)) { |
@@ -684,20 +675,19 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb) | |||
684 | */ | 675 | */ |
685 | static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) | 676 | static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) |
686 | { | 677 | { |
687 | 678 | struct device *dma_dev = &tx_queue->efx->pci_dev->dev; | |
688 | struct pci_dev *pci_dev = tx_queue->efx->pci_dev; | ||
689 | struct efx_tso_header *tsoh; | 679 | struct efx_tso_header *tsoh; |
690 | dma_addr_t dma_addr; | 680 | dma_addr_t dma_addr; |
691 | u8 *base_kva, *kva; | 681 | u8 *base_kva, *kva; |
692 | 682 | ||
693 | base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); | 683 | base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC); |
694 | if (base_kva == NULL) { | 684 | if (base_kva == NULL) { |
695 | netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, | 685 | netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, |
696 | "Unable to allocate page for TSO headers\n"); | 686 | "Unable to allocate page for TSO headers\n"); |
697 | return -ENOMEM; | 687 | return -ENOMEM; |
698 | } | 688 | } |
699 | 689 | ||
700 | /* pci_alloc_consistent() allocates pages. */ | 690 | /* dma_alloc_coherent() allocates pages. */ |
701 | EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); | 691 | EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); |
702 | 692 | ||
703 | for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { | 693 | for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { |
@@ -714,7 +704,7 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) | |||
714 | /* Free up a TSO header, and all others in the same page. */ | 704 | /* Free up a TSO header, and all others in the same page. */ |
715 | static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, | 705 | static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, |
716 | struct efx_tso_header *tsoh, | 706 | struct efx_tso_header *tsoh, |
717 | struct pci_dev *pci_dev) | 707 | struct device *dma_dev) |
718 | { | 708 | { |
719 | struct efx_tso_header **p; | 709 | struct efx_tso_header **p; |
720 | unsigned long base_kva; | 710 | unsigned long base_kva; |
@@ -731,7 +721,7 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, | |||
731 | p = &(*p)->next; | 721 | p = &(*p)->next; |
732 | } | 722 | } |
733 | 723 | ||
734 | pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); | 724 | dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma); |
735 | } | 725 | } |
736 | 726 | ||
737 | static struct efx_tso_header * | 727 | static struct efx_tso_header * |
@@ -743,11 +733,11 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) | |||
743 | if (unlikely(!tsoh)) | 733 | if (unlikely(!tsoh)) |
744 | return NULL; | 734 | return NULL; |
745 | 735 | ||
746 | tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, | 736 | tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, |
747 | TSOH_BUFFER(tsoh), header_len, | 737 | TSOH_BUFFER(tsoh), header_len, |
748 | PCI_DMA_TODEVICE); | 738 | DMA_TO_DEVICE); |
749 | if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev, | 739 | if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, |
750 | tsoh->dma_addr))) { | 740 | tsoh->dma_addr))) { |
751 | kfree(tsoh); | 741 | kfree(tsoh); |
752 | return NULL; | 742 | return NULL; |
753 | } | 743 | } |
@@ -759,9 +749,9 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) | |||
759 | static void | 749 | static void |
760 | efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) | 750 | efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) |
761 | { | 751 | { |
762 | pci_unmap_single(tx_queue->efx->pci_dev, | 752 | dma_unmap_single(&tx_queue->efx->pci_dev->dev, |
763 | tsoh->dma_addr, tsoh->unmap_len, | 753 | tsoh->dma_addr, tsoh->unmap_len, |
764 | PCI_DMA_TODEVICE); | 754 | DMA_TO_DEVICE); |
765 | kfree(tsoh); | 755 | kfree(tsoh); |
766 | } | 756 | } |
767 | 757 | ||
@@ -892,13 +882,13 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) | |||
892 | unmap_addr = (buffer->dma_addr + buffer->len - | 882 | unmap_addr = (buffer->dma_addr + buffer->len - |
893 | buffer->unmap_len); | 883 | buffer->unmap_len); |
894 | if (buffer->unmap_single) | 884 | if (buffer->unmap_single) |
895 | pci_unmap_single(tx_queue->efx->pci_dev, | 885 | dma_unmap_single(&tx_queue->efx->pci_dev->dev, |
896 | unmap_addr, buffer->unmap_len, | 886 | unmap_addr, buffer->unmap_len, |
897 | PCI_DMA_TODEVICE); | 887 | DMA_TO_DEVICE); |
898 | else | 888 | else |
899 | pci_unmap_page(tx_queue->efx->pci_dev, | 889 | dma_unmap_page(&tx_queue->efx->pci_dev->dev, |
900 | unmap_addr, buffer->unmap_len, | 890 | unmap_addr, buffer->unmap_len, |
901 | PCI_DMA_TODEVICE); | 891 | DMA_TO_DEVICE); |
902 | buffer->unmap_len = 0; | 892 | buffer->unmap_len = 0; |
903 | } | 893 | } |
904 | buffer->len = 0; | 894 | buffer->len = 0; |
@@ -927,7 +917,6 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb) | |||
927 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); | 917 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); |
928 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); | 918 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); |
929 | 919 | ||
930 | st->packet_space = st->full_packet_size; | ||
931 | st->out_len = skb->len - st->header_len; | 920 | st->out_len = skb->len - st->header_len; |
932 | st->unmap_len = 0; | 921 | st->unmap_len = 0; |
933 | st->unmap_single = false; | 922 | st->unmap_single = false; |
@@ -954,9 +943,9 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, | |||
954 | int hl = st->header_len; | 943 | int hl = st->header_len; |
955 | int len = skb_headlen(skb) - hl; | 944 | int len = skb_headlen(skb) - hl; |
956 | 945 | ||
957 | st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl, | 946 | st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl, |
958 | len, PCI_DMA_TODEVICE); | 947 | len, DMA_TO_DEVICE); |
959 | if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) { | 948 | if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { |
960 | st->unmap_single = true; | 949 | st->unmap_single = true; |
961 | st->unmap_len = len; | 950 | st->unmap_len = len; |
962 | st->in_len = len; | 951 | st->in_len = len; |
@@ -1008,7 +997,7 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, | |||
1008 | buffer->continuation = !end_of_packet; | 997 | buffer->continuation = !end_of_packet; |
1009 | 998 | ||
1010 | if (st->in_len == 0) { | 999 | if (st->in_len == 0) { |
1011 | /* Transfer ownership of the pci mapping */ | 1000 | /* Transfer ownership of the DMA mapping */ |
1012 | buffer->unmap_len = st->unmap_len; | 1001 | buffer->unmap_len = st->unmap_len; |
1013 | buffer->unmap_single = st->unmap_single; | 1002 | buffer->unmap_single = st->unmap_single; |
1014 | st->unmap_len = 0; | 1003 | st->unmap_len = 0; |
@@ -1181,18 +1170,18 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, | |||
1181 | 1170 | ||
1182 | mem_err: | 1171 | mem_err: |
1183 | netif_err(efx, tx_err, efx->net_dev, | 1172 | netif_err(efx, tx_err, efx->net_dev, |
1184 | "Out of memory for TSO headers, or PCI mapping error\n"); | 1173 | "Out of memory for TSO headers, or DMA mapping error\n"); |
1185 | dev_kfree_skb_any(skb); | 1174 | dev_kfree_skb_any(skb); |
1186 | 1175 | ||
1187 | unwind: | 1176 | unwind: |
1188 | /* Free the DMA mapping we were in the process of writing out */ | 1177 | /* Free the DMA mapping we were in the process of writing out */ |
1189 | if (state.unmap_len) { | 1178 | if (state.unmap_len) { |
1190 | if (state.unmap_single) | 1179 | if (state.unmap_single) |
1191 | pci_unmap_single(efx->pci_dev, state.unmap_addr, | 1180 | dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, |
1192 | state.unmap_len, PCI_DMA_TODEVICE); | 1181 | state.unmap_len, DMA_TO_DEVICE); |
1193 | else | 1182 | else |
1194 | pci_unmap_page(efx->pci_dev, state.unmap_addr, | 1183 | dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr, |
1195 | state.unmap_len, PCI_DMA_TODEVICE); | 1184 | state.unmap_len, DMA_TO_DEVICE); |
1196 | } | 1185 | } |
1197 | 1186 | ||
1198 | efx_enqueue_unwind(tx_queue); | 1187 | efx_enqueue_unwind(tx_queue); |
@@ -1216,5 +1205,5 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue) | |||
1216 | 1205 | ||
1217 | while (tx_queue->tso_headers_free != NULL) | 1206 | while (tx_queue->tso_headers_free != NULL) |
1218 | efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, | 1207 | efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, |
1219 | tx_queue->efx->pci_dev); | 1208 | &tx_queue->efx->pci_dev->dev); |
1220 | } | 1209 | } |