author	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-01 14:52:27 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-01 14:52:27 -0500
commit	6d04dfc8966019b8b0977b2cb942351f13d2b178
tree	2d4f239c1daff620704b77a992c1e70ce1ce6b08
parent	2883aaea363f7a897ff06d2e6c73ae7aae285bcb
parent	06425c308b92eaf60767bc71d359f4cbc7a561f8
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix handling of interrupt status in stmmac driver. Just because we
    have masked the event from generating interrupts, doesn't mean the
    bit won't still be set in the interrupt status register. From
    Alexey Brodkin.

 2) Fix DMA API debugging splats in gianfar driver, from Arseny Solokha.

 3) Fix off-by-one error in __ip6_append_data(), from Vlad Yasevich.

 4) cls_flow does not match on icmpv6 codes properly, from Simon
    Horman.

 5) Initial MAC address can be set incorrectly in some scenarios, from
    Ivan Vecera.

 6) Packet header pointer arithmetic fix in ip6_tnl_parse_tlv_enc_lim(),
    from Dan Carpenter.

 7) Fix divide by zero in __tcp_select_window(), from Eric Dumazet.

 8) Fix crash in iwlwifi when unregistering thermal zone, from Jens
    Axboe.

 9) Check for DMA mapping errors in starfire driver, from Alexey
    Khoroshilov.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (31 commits)
  tcp: fix 0 divide in __tcp_select_window()
  ipv6: pointer math error in ip6_tnl_parse_tlv_enc_lim()
  net: fix ndo_features_check/ndo_fix_features comment ordering
  net/sched: matchall: Fix configuration race
  be2net: fix initial MAC setting
  ipv6: fix flow labels when the traffic class is non-0
  net: thunderx: avoid dereferencing xcv when NULL
  net/sched: cls_flower: Correct matching on ICMPv6 code
  ipv6: Paritially checksum full MTU frames
  net/mlx4_core: Avoid command timeouts during VF driver device shutdown
  gianfar: synchronize DMA API usage by free_skb_rx_queue w/ gfar_new_page
  net: ethtool: add support for 2500BaseT and 5000BaseT link modes
  can: bcm: fix hrtimer/tasklet termination in bcm op removal
  net: adaptec: starfire: add checks for dma mapping errors
  net: phy: micrel: KSZ8795 do not set SUPPORTED_[Asym_]Pause
  can: Fix kernel panic at security_sock_rcv_skb
  net: macb: Fix 64 bit addressing support for GEM
  stmmac: Discard masked flags in interrupt status register
  net/mlx5e: Check ets capability before ets query FW command
  net/mlx5e: Fix update of hash function/key via ethtool
  ...
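Several of the fixes in this pull (most directly "net: adaptec: starfire:
add checks for dma mapping errors") share one defensive pattern: check the
result of a PCI DMA mapping before posting the address to the hardware,
because pci_map_single() can fail. A minimal sketch of that pattern,
using the same pci_map_single()/pci_dma_mapping_error() calls the starfire
diff below adds; the helper name and its arguments are illustrative only,
not code from any of these drivers:

	/* Map an RX buffer and refuse to use the mapping if it failed. */
	static int example_map_rx_buf(struct pci_dev *pdev, struct sk_buff *skb,
				      unsigned int len, dma_addr_t *mapping)
	{
		*mapping = pci_map_single(pdev, skb->data, len,
					  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, *mapping)) {
			/* Never hand a failed mapping to the NIC. */
			dev_kfree_skb(skb);
			return -ENOMEM;
		}
		/* Caller may now write *mapping into an RX descriptor. */
		return 0;
	}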
 MAINTAINERS                                                 |   1
 drivers/bcma/bcma_private.h                                 |   3
 drivers/bcma/driver_chipcommon.c                            |  11
 drivers/bcma/driver_mips.c                                  |   3
 drivers/net/ethernet/adaptec/starfire.c                     |  45
 drivers/net/ethernet/cadence/macb.c                         | 188
 drivers/net/ethernet/cadence/macb.h                         |  20
 drivers/net/ethernet/cavium/thunder/thunder_xcv.c           |   3
 drivers/net/ethernet/emulex/benet/be_main.c                 |  33
 drivers/net/ethernet/freescale/gianfar.c                    |   4
 drivers/net/ethernet/mellanox/mlx4/catas.c                  |   2
 drivers/net/ethernet/mellanox/mlx4/intf.c                   |  12
 drivers/net/ethernet/mellanox/mlx4/mlx4.h                   |   1
 drivers/net/ethernet/mellanox/mlx5/core/cmd.c               |   2
 drivers/net/ethernet/mellanox/mlx5/core/en.h                |   7
 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c          |  11
 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c        |  41
 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c             |   2
 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c     |   2
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c           | 202
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c             |  13
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c           |  10
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  |  36
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c            |   2
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c           |   2
 drivers/net/ethernet/mellanox/mlx5/core/main.c              |   2
 drivers/net/ethernet/mellanox/mlx5/core/port.c              |   4
 drivers/net/ethernet/mellanox/mlx5/core/vport.c             |   2
 drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c        |   4
 drivers/net/phy/micrel.c                                    |   2
 drivers/net/wireless/intel/iwlwifi/iwl-8000.c               |   2
 drivers/net/wireless/intel/iwlwifi/mvm/sta.c                |   7
 drivers/net/wireless/intel/iwlwifi/mvm/tt.c                 |  12
 include/linux/can/core.h                                    |   7
 include/linux/netdevice.h                                   |  29
 include/net/ipv6.h                                          |   5
 include/uapi/linux/ethtool.h                                |   4
 net/can/af_can.c                                            |  12
 net/can/af_can.h                                            |   3
 net/can/bcm.c                                               |  27
 net/can/gw.c                                                |   2
 net/can/raw.c                                               |   4
 net/ipv4/tcp_output.c                                       |   6
 net/ipv6/ip6_output.c                                       |   2
 net/ipv6/ip6_tunnel.c                                       |   2
 net/sched/cls_flower.c                                      |   4
 net/sched/cls_matchall.c                                    | 127
 47 files changed, 555 insertions(+), 370 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 5f10c28b2e15..3960e7faaa99 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10195,7 +10195,6 @@ F: drivers/media/tuners/qt1010*
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:	QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 L:	linux-wireless@vger.kernel.org
-L:	ath9k-devel@lists.ath9k.org
 W:	http://wireless.kernel.org/en/users/Drivers/ath9k
 S:	Supported
 F:	drivers/net/wireless/ath/ath9k/
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index f642c4264c27..168fa175d65a 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
 
 /* driver_chipcommon_b.c */
 int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index b4f6520e74f0..62f5bfa5065d 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -15,8 +15,6 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-
 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
 					 u32 mask, u32 value)
 {
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
 	if (cc->capabilities & BCMA_CC_CAP_PMU)
 		bcma_pmu_early_init(cc);
 
-	if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
-		bcma_chipco_serial_init(cc);
-
 	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
 		bcma_core_chipcommon_flash_detect(cc);
 
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
 	return res;
 }
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 {
-#if IS_BUILTIN(CONFIG_BCM47XX)
 	unsigned int irq;
 	u32 baud_base;
 	u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 		ports[i].baud_base = baud_base;
 		ports[i].reg_shift = 0;
 	}
-#endif /* CONFIG_BCM47XX */
 }
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 96f171328200..89af807cf29c 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
 
 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
 {
+	struct bcma_bus *bus = mcore->core->bus;
+
 	if (mcore->early_setup_done)
 		return;
 
+	bcma_chipco_serial_init(&bus->drv_cc);
 	bcma_core_mips_nvram_init(mcore);
 
 	mcore->early_setup_done = true;
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index c12d2618eebf..3872ab96b80a 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;
 		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[i].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[i].skb = NULL;
+			break;
+		}
 		/* Grrr, we cannot offset to correctly align the IP header. */
 		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 	}
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
 	unsigned int entry;
+	unsigned int prev_tx;
 	u32 status;
-	int i;
+	int i, j;
 
 	/*
 	 * be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@
 	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+	prev_tx = np->cur_tx;
 	entry = np->cur_tx % TX_RING_SIZE;
 	for (i = 0; i < skb_num_frags(skb); i++) {
 		int wrap_ring = 0;
@@ -1234,6 +1242,11 @@
 					       skb_frag_size(this_frag),
 					       PCI_DMA_TODEVICE);
 		}
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->tx_info[entry].mapping)) {
+			dev->stats.tx_dropped++;
+			goto err_out;
+		}
 
 		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
 		np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@
 	netif_stop_queue(dev);
 
 	return NETDEV_TX_OK;
-}
 
+err_out:
+	entry = prev_tx % TX_RING_SIZE;
+	np->tx_info[entry].skb = NULL;
+	if (i > 0) {
+		pci_unmap_single(np->pci_dev,
+				 np->tx_info[entry].mapping,
+				 skb_first_frag_len(skb),
+				 PCI_DMA_TODEVICE);
+		np->tx_info[entry].mapping = 0;
+		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+		for (j = 1; j < i; j++) {
+			pci_unmap_single(np->pci_dev,
+					 np->tx_info[entry].mapping,
+					 skb_frag_size(
+						&skb_shinfo(skb)->frags[j-1]),
+					 PCI_DMA_TODEVICE);
+			entry++;
+		}
+	}
+	dev_kfree_skb_any(skb);
+	np->cur_tx = prev_tx;
+	return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
 			break;	/* Better luck next round. */
 		np->rx_info[entry].mapping =
 			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[entry].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[entry].skb = NULL;
+			break;
+		}
 		np->rx_ring[entry].rxaddr =
 			cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
 	}
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c0fb80acc2da..baba2db9d9c2 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -43,13 +43,13 @@
 #define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_RX_RING_SIZE	64
 #define MAX_RX_RING_SIZE	8192
-#define RX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
+#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
 				 * (bp)->rx_ring_size)
 
 #define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_TX_RING_SIZE	64
 #define MAX_TX_RING_SIZE	4096
-#define TX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
+#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
 				 * (bp)->tx_ring_size)
 
 /* level of occupied TX descriptors under which we wake up TX process */
@@ -78,6 +78,37 @@
  */
 #define MACB_HALT_TIMEOUT	1230
 
+/* DMA buffer descriptor might be different size
+ * depends on hardware configuration.
+ */
+static unsigned int macb_dma_desc_get_size(struct macb *bp)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
+#endif
+	return sizeof(struct macb_dma_desc);
+}
+
+static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	/* Dma buffer descriptor is 4 words length (instead of 2 words)
+	 * for 64b GEM.
+	 */
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		idx <<= 1;
+#endif
+	return idx;
+}
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
+{
+	return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+}
+#endif
+
 /* Ring buffer accessors */
 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 {
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 					  unsigned int index)
 {
-	return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
+	index = macb_tx_ring_wrap(queue->bp, index);
+	index = macb_adj_dma_desc_idx(queue->bp, index);
+	return &queue->tx_ring[index];
 }
 
 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 	dma_addr_t offset;
 
 	offset = macb_tx_ring_wrap(queue->bp, index) *
-		 sizeof(struct macb_dma_desc);
+		 macb_dma_desc_get_size(queue->bp);
 
 	return queue->tx_ring_dma + offset;
 }
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 
 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 {
-	return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
+	index = macb_rx_ring_wrap(bp, index);
+	index = macb_adj_dma_desc_idx(bp, index);
+	return &bp->rx_ring[index];
 }
 
 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
 	}
 }
 
-static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
+static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
 {
-	desc->addr = (u32)addr;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	desc->addrh = (u32)(addr >> 32);
+	struct macb_dma_desc_64 *desc_64;
+
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+		desc_64 = macb_64b_desc(bp, desc);
+		desc_64->addrh = upper_32_bits(addr);
+	}
 #endif
+	desc->addr = lower_32_bits(addr);
+}
+
+static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
+{
+	dma_addr_t addr = 0;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	struct macb_dma_desc_64 *desc_64;
+
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+		desc_64 = macb_64b_desc(bp, desc);
+		addr = ((u64)(desc_64->addrh) << 32);
+	}
+#endif
+	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+	return addr;
 }
 
 static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
 
 	/* Set end of TX queue */
 	desc = macb_tx_desc(queue, 0);
-	macb_set_addr(desc, 0);
+	macb_set_addr(bp, desc, 0);
 	desc->ctrl = MACB_BIT(TX_USED);
 
 	/* Make descriptor updates visible to hardware */
 	wmb();
 
 	/* Reinitialize the TX desc queue */
-	queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 	/* Make TX ring reflect state of hardware */
 	queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
 	unsigned int		entry;
 	struct sk_buff		*skb;
 	dma_addr_t		paddr;
+	struct macb_dma_desc *desc;
 
 	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
 			  bp->rx_ring_size) > 0) {
@@ -759,6 +816,7 @@
 		rmb();
 
 		bp->rx_prepared_head++;
+		desc = macb_rx_desc(bp, entry);
 
 		if (!bp->rx_skbuff[entry]) {
 			/* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@
 
 			if (entry == bp->rx_ring_size - 1)
 				paddr |= MACB_BIT(RX_WRAP);
-			macb_set_addr(&(bp->rx_ring[entry]), paddr);
-			bp->rx_ring[entry].ctrl = 0;
+			macb_set_addr(bp, desc, paddr);
+			desc->ctrl = 0;
 
 			/* properly align Ethernet header */
 			skb_reserve(skb, NET_IP_ALIGN);
 		} else {
-			bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
-			bp->rx_ring[entry].ctrl = 0;
+			desc->addr &= ~MACB_BIT(RX_USED);
+			desc->ctrl = 0;
 		}
 	}
 
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
 		bool rxused;
 
 		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
-		desc = &bp->rx_ring[entry];
+		desc = macb_rx_desc(bp, entry);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
 		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
-		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		addr |= ((u64)(desc->addrh) << 32);
-#endif
+		addr = macb_get_addr(bp, desc);
 		ctrl = desc->ctrl;
 
 		if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 static inline void macb_init_rx_ring(struct macb *bp)
 {
 	dma_addr_t addr;
+	struct macb_dma_desc *desc = NULL;
 	int i;
 
 	addr = bp->rx_buffers_dma;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		bp->rx_ring[i].addr = addr;
-		bp->rx_ring[i].ctrl = 0;
+		desc = macb_rx_desc(bp, i);
+		macb_set_addr(bp, desc, addr);
+		desc->ctrl = 0;
 		addr += bp->rx_buffer_size;
 	}
-	bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+	desc->addr |= MACB_BIT(RX_WRAP);
 	bp->rx_tail = 0;
 }
 
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
 
 	for (tail = bp->rx_tail; budget > 0; tail++) {
 		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
-		u32 addr, ctrl;
+		u32 ctrl;
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		addr = desc->addr;
 		ctrl = desc->ctrl;
 
-		if (!(addr & MACB_BIT(RX_USED)))
+		if (!(desc->addr & MACB_BIT(RX_USED)))
 			break;
 
 		if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 	i = tx_head;
 	entry = macb_tx_ring_wrap(bp, i);
 	ctrl = MACB_BIT(TX_USED);
-	desc = &queue->tx_ring[entry];
+	desc = macb_tx_desc(queue, entry);
 	desc->ctrl = ctrl;
 
 	if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 		i--;
 		entry = macb_tx_ring_wrap(bp, i);
 		tx_skb = &queue->tx_skb[entry];
-		desc = &queue->tx_ring[entry];
+		desc = macb_tx_desc(queue, entry);
 
 		ctrl = (u32)tx_skb->size;
 		if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 			ctrl |= MACB_BF(MSS_MFS, mss_mfs);
 
 		/* Set TX buffer descriptor */
-		macb_set_addr(desc, tx_skb->mapping);
+		macb_set_addr(bp, desc, tx_skb->mapping);
 		/* desc->addr must be visible to hardware before clearing
 		 * 'TX_USED' bit in desc->ctrl.
 		 */
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
 		if (!skb)
 			continue;
 
-		desc = &bp->rx_ring[i];
-		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		addr |= ((u64)(desc->addrh) << 32);
-#endif
+		desc = macb_rx_desc(bp, i);
+		addr = macb_get_addr(bp, desc);
+
 		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
 				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
 static void gem_init_rings(struct macb *bp)
 {
 	struct macb_queue *queue;
+	struct macb_dma_desc *desc = NULL;
 	unsigned int q;
 	int i;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		for (i = 0; i < bp->tx_ring_size; i++) {
-			queue->tx_ring[i].addr = 0;
-			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+			desc = macb_tx_desc(queue, i);
+			macb_set_addr(bp, desc, 0);
+			desc->ctrl = MACB_BIT(TX_USED);
 		}
-		queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+		desc->ctrl |= MACB_BIT(TX_WRAP);
 		queue->tx_head = 0;
 		queue->tx_tail = 0;
 	}
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
 static void macb_init_rings(struct macb *bp)
 {
 	int i;
+	struct macb_dma_desc *desc = NULL;
 
 	macb_init_rx_ring(bp);
 
 	for (i = 0; i < bp->tx_ring_size; i++) {
-		bp->queues[0].tx_ring[i].addr = 0;
-		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+		desc = macb_tx_desc(&bp->queues[0], i);
+		macb_set_addr(bp, desc, 0);
+		desc->ctrl = MACB_BIT(TX_USED);
 	}
 	bp->queues[0].tx_head = 0;
 	bp->queues[0].tx_tail = 0;
-	bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+	desc->ctrl |= MACB_BIT(TX_WRAP);
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
 		dmacfg &= ~GEM_BIT(TXCOEN);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	dmacfg |= GEM_BIT(ADDR64);
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		dmacfg |= GEM_BIT(ADDR64);
 #endif
 	netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
 		   dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
 	macb_configure_dma(bp);
 
 	/* Initialize TX and RX buffers */
-	macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+	macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
 #endif
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+		if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 
 		/* Enable interrupts */
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR  = GEM_IMR(hw_q - 1);
 			queue->TBQP = GEM_TBQP(hw_q - 1);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue->TBQPH = GEM_TBQPH(hw_q -1);
+			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+				queue->TBQPH = GEM_TBQPH(hw_q - 1);
 #endif
 		} else {
 			/* queue0 uses legacy registers */
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR  = MACB_IMR;
 			queue->TBQP = MACB_TBQP;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue->TBQPH = MACB_TBQPH;
+			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+				queue->TBQPH = MACB_TBQPH;
 #endif
 		}
 
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev)
 static int at91ether_start(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_dma_desc *desc;
 	dma_addr_t addr;
 	u32 ctl;
 	int i;
 
 	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
 					 (AT91ETHER_MAX_RX_DESCR *
-					  sizeof(struct macb_dma_desc)),
+					  macb_dma_desc_get_size(lp)),
 					 &lp->rx_ring_dma, GFP_KERNEL);
 	if (!lp->rx_ring)
 		return -ENOMEM;
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
 	if (!lp->rx_buffers) {
 		dma_free_coherent(&lp->pdev->dev,
 				  AT91ETHER_MAX_RX_DESCR *
-				  sizeof(struct macb_dma_desc),
+				  macb_dma_desc_get_size(lp),
 				  lp->rx_ring, lp->rx_ring_dma);
 		lp->rx_ring = NULL;
 		return -ENOMEM;
@@ -2756,13 +2820,14 @@
 
 	addr = lp->rx_buffers_dma;
 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
-		lp->rx_ring[i].addr = addr;
-		lp->rx_ring[i].ctrl = 0;
+		desc = macb_rx_desc(lp, i);
+		macb_set_addr(lp, desc, addr);
+		desc->ctrl = 0;
 		addr += AT91ETHER_MAX_RBUFF_SZ;
 	}
 
 	/* Set the Wrap bit on the last descriptor */
-	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+	desc->addr |= MACB_BIT(RX_WRAP);
 
 	/* Reset buffer index */
 	lp->rx_tail = 0;
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
 
 	dma_free_coherent(&lp->pdev->dev,
 			  AT91ETHER_MAX_RX_DESCR *
-			  sizeof(struct macb_dma_desc),
+			  macb_dma_desc_get_size(lp),
 			  lp->rx_ring, lp->rx_ring_dma);
 	lp->rx_ring = NULL;
 
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void at91ether_rx(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_dma_desc *desc;
 	unsigned char *p_recv;
 	struct sk_buff *skb;
 	unsigned int pktlen;
 
-	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+	desc = macb_rx_desc(lp, lp->rx_tail);
+	while (desc->addr & MACB_BIT(RX_USED)) {
 		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
-		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
 		skb = netdev_alloc_skb(dev, pktlen + 2);
 		if (skb) {
 			skb_reserve(skb, 2);
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev)
 			lp->stats.rx_dropped++;
 		}
 
-		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
 			lp->stats.multicast++;
 
 		/* reset ownership bit */
-		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+		desc->addr &= ~MACB_BIT(RX_USED);
 
 		/* wrap after last buffer */
 		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
 			lp->rx_tail = 0;
 		else
 			lp->rx_tail++;
+
+		desc = macb_rx_desc(lp, lp->rx_tail);
 	}
 }
 
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
 	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
+	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
 		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+		bp->hw_dma_cap = HW_DMA_CAP_64B;
+	} else
+		bp->hw_dma_cap = HW_DMA_CAP_32B;
 #endif
 
 	spin_lock_init(&bp->lock);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d67adad67be1..fc8550a5d47f 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -385,6 +385,8 @@
 /* Bitfields in DCFG6. */
 #define GEM_PBUF_LSO_OFFSET			27
 #define GEM_PBUF_LSO_SIZE			1
+#define GEM_DAW64_OFFSET			23
+#define GEM_DAW64_SIZE				1
 
 /* Constants for CLK */
 #define MACB_CLK_DIV8				0
@@ -487,12 +489,20 @@
 struct macb_dma_desc {
 	u32	addr;
 	u32	ctrl;
+};
+
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	u32	addrh;
-	u32	resvd;
-#endif
+enum macb_hw_dma_cap {
+	HW_DMA_CAP_32B,
+	HW_DMA_CAP_64B,
 };
 
+struct macb_dma_desc_64 {
+	u32 addrh;
+	u32 resvd;
+};
+#endif
+
 /* DMA descriptor bitfields */
 #define MACB_RX_USED_OFFSET			0
 #define MACB_RX_USED_SIZE			1
@@ -874,6 +884,10 @@ struct macb {
 	unsigned int		jumbo_max_len;
 
 	u32	wol;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	enum macb_hw_dma_cap hw_dma_cap;
+#endif
 };
 
 static inline bool macb_is_gem(struct macb *bp)
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 67befedef709..578c7f8f11bf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
 	int speed = 2;
 
 	if (!xcv) {
-		dev_err(&xcv->pdev->dev,
-			"XCV init not done, probe may have failed\n");
+		pr_err("XCV init not done, probe may have failed\n");
 		return;
 	}
 
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 1a7f8ad7b9c6..cd49a54c538d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
 		status = -EPERM;
 		goto err;
 	}
-done:
+
+	/* Remember currently programmed MAC */
 	ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
 	return 0;
@@ -3618,8 +3620,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
 {
 	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
 	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT))
+	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
 		be_dev_mac_del(adapter, adapter->pmac_id[0]);
+		eth_zero_addr(adapter->dev_mac);
+	}
 
 	be_clear_uc_list(adapter);
 	be_clear_mc_list(adapter);
@@ -3773,12 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
 	if (status)
 		return status;
 
-	/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
-	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+	/* Normally this condition usually true as the ->dev_mac is zeroed.
+	 * But on BE3 VFs the initial MAC is pre-programmed by PF and
+	 * subsequent be_dev_mac_add() can fail (after fresh boot)
+	 */
+	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+		int old_pmac_id = -1;
+
+		/* Remember old programmed MAC if any - can happen on BE3 VF */
+		if (!is_zero_ether_addr(adapter->dev_mac))
+			old_pmac_id = adapter->pmac_id[0];
+
 		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
 		if (status)
 			return status;
+
+		/* Delete the old programmed MAC as we successfully programmed
+		 * a new MAC
+		 */
+		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+			be_dev_mac_del(adapter, old_pmac_id);
+
 		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
 	}
 
@@ -4552,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter)
 
 		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
 		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+		/* Initial MAC for BE3 VFs is already programmed by PF */
+		if (BEx_chip(adapter) && be_virtfn(adapter))
+			memcpy(adapter->dev_mac, mac, ETH_ALEN);
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index c1b671667920..957bfc220978 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 		if (!rxb->page)
 			continue;
 
-		dma_unmap_single(rx_queue->dev, rxb->dma,
+		dma_unmap_page(rx_queue->dev, rxb->dma,
 				 PAGE_SIZE, DMA_FROM_DEVICE);
 		__free_page(rxb->page);
 
 		rxb->page = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c7e939945259..53daa6ca5d83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
 	return -ETIMEDOUT;
 }
 
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
 {
 	return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
 		(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0e8b7c44931f..8258d08acd8c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
 		return;
 
 	mlx4_stop_catas_poll(dev);
+	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+	    mlx4_is_slave(dev)) {
+		/* In mlx4_remove_one on a VF */
+		u32 slave_read =
+			swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+		if (mlx4_comm_internal_err(slave_read)) {
+			mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+				 __func__);
+			mlx4_enter_error_state(dev->persist);
+		}
+	}
 	mutex_lock(&intf_mutex);
 
 	list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 88ee7d8a5923..086920b615af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
 
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
 		    enum mlx4_port_type *type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3797cc7c1288..caa837e5e2b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	if (cmd->cmdif_rev > CMD_IF_REV) {
 		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
 			CMD_IF_REV, cmd->cmdif_rev);
-		err = -ENOTSUPP;
+		err = -EOPNOTSUPP;
 		goto err_free_page;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 951dbd58594d..d5ecb8f53fd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
 
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
 
 static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 #else
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index f0b460f47f29..0523ed47f597 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 	int i;
 
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
 	for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
 	int err;
 
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	err = mlx5e_dbcnl_validate_ets(netdev, ets);
 	if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct ieee_ets ets;
 	struct ieee_pfc pfc;
-	int err = -ENOTSUPP;
+	int err = -EOPNOTSUPP;
 	int i;
 
 	if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 
+	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+		netdev_err(netdev, "%s, ets is not supported\n", __func__);
+		return;
+	}
+
 	if (priority >= CEE_DCBX_MAX_PRIO) {
 		netdev_err(netdev,
 			   "%s, priority is out of range\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 5197817e4b2f..bb67863aa361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -595,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
 	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
 	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -620,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 	int i;
 
 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&priv->state_lock);
 
@@ -980,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 
 static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
 	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-	int i;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+	int tt;
 
 	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
 
-	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-		mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(tirc, 0, ctxlen);
+		mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+	}
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -996,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	bool hash_changed = false;
 	void *in;
 
 	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1017,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
 	}
 
-	if (key)
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+	    hfunc != priv->params.rss_hfunc) {
+		priv->params.rss_hfunc = hfunc;
+		hash_changed = true;
+	}
+
+	if (key) {
 		memcpy(priv->params.toeplitz_hash_key, key,
 		       sizeof(priv->params.toeplitz_hash_key));
+		hash_changed = hash_changed ||
+			       priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+	}
 
-	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-		priv->params.rss_hfunc = hfunc;
-
-	mlx5e_modify_tirs_hash(priv, in, inlen);
+	if (hash_changed)
+		mlx5e_modify_tirs_hash(priv, in, inlen);
 
 	mutex_unlock(&priv->state_lock);
 
@@ -1296,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	u32 mlx5_wol_mode;
 
 	if (!wol_supported)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (wol->wolopts & ~wol_supported)
 		return -EINVAL;
@@ -1426,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 
 	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
 	    !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (!rx_mode_changed)
 		return 0;
@@ -1452,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 	bool reset;
 
 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
 		netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1fe80de5d68f..a0e5a69402b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 					       MLX5_FLOW_NAMESPACE_KERNEL);
 
 	if (!priv->fs.ns)
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	err = mlx5e_arfs_create_tables(priv);
 	if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index d088effd7160..f33f72d0237c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
 		ns = mlx5_get_flow_namespace(priv->mdev,
 					     MLX5_FLOW_NAMESPACE_ETHTOOL);
 		if (!ns)
-			return ERR_PTR(-ENOTSUPP);
+			return ERR_PTR(-EOPNOTSUPP);
 
 		table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
 						       flow_table_properties_nic_receive.log_max_ft_size)),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2b7dd315020c..f14ca3385fdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
 	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }
 
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt)
 {
+	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
 	MLX5_SET(tirc, tirc, rx_hash_fn,
 		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
 	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
 		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
 	}
+
+	switch (tt) {
+	case MLX5E_TT_IPV4_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+
+	case MLX5E_TT_IPV6:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2128 MLX5_L3_PROT_TYPE_IPV6);
2129 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2130 MLX5_HASH_IP);
2131 break;
2132 default:
2133 WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
2134 }
2038} 2135}
2039 2136
2040static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) 2137static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
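
The mlx5e_build_indir_tir_ctx_hash() introduced above folds the per-traffic-type RX hash field selection into the same routine that programs the hash function and key, so updating one no longer leaves the other behind. The long switch amounts to a lookup from traffic type to the tuple (L3 protocol, L4 protocol, hashed fields); a self-contained sketch of that reading, with illustrative names rather than the driver's:

#include <stdio.h>

enum tt { TT_IPV4_TCP, TT_IPV6_TCP, TT_IPV4, TT_IPV6, TT_MAX };

#define SEL_SRC_IP	(1u << 0)
#define SEL_DST_IP	(1u << 1)
#define SEL_L4_SPORT	(1u << 2)
#define SEL_L4_DPORT	(1u << 3)

#define HASH_IP		(SEL_SRC_IP | SEL_DST_IP)
#define HASH_IP_L4PORTS	(HASH_IP | SEL_L4_SPORT | SEL_L4_DPORT)

struct hash_cfg { const char *l3; const char *l4; unsigned int fields; };

static const struct hash_cfg cfg[TT_MAX] = {
	[TT_IPV4_TCP] = { "IPv4", "TCP", HASH_IP_L4PORTS },
	[TT_IPV6_TCP] = { "IPv6", "TCP", HASH_IP_L4PORTS },
	[TT_IPV4]     = { "IPv4", NULL,  HASH_IP },
	[TT_IPV6]     = { "IPv6", NULL,  HASH_IP },
};

int main(void)
{
	for (int t = 0; t < TT_MAX; t++)
		printf("tt=%d l3=%s l4=%s fields=0x%x\n", t, cfg[t].l3,
		       cfg[t].l4 ? cfg[t].l4 : "-", cfg[t].fields);
	return 0;
}
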
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
2404static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, 2501static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
2405 enum mlx5e_traffic_types tt) 2502 enum mlx5e_traffic_types tt)
2406{ 2503{
2407 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2408
2409 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); 2504 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2410 2505
2411#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
2412 MLX5_HASH_FIELD_SEL_DST_IP)
2413
2414#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
2415 MLX5_HASH_FIELD_SEL_DST_IP |\
2416 MLX5_HASH_FIELD_SEL_L4_SPORT |\
2417 MLX5_HASH_FIELD_SEL_L4_DPORT)
2418
2419#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
2420 MLX5_HASH_FIELD_SEL_DST_IP |\
2421 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2422
2423 mlx5e_build_tir_ctx_lro(tirc, priv); 2506 mlx5e_build_tir_ctx_lro(tirc, priv);
2424 2507
2425 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); 2508 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2426 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); 2509 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2427 mlx5e_build_tir_ctx_hash(tirc, priv); 2510 mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
2428
2429 switch (tt) {
2430 case MLX5E_TT_IPV4_TCP:
2431 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2432 MLX5_L3_PROT_TYPE_IPV4);
2433 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2434 MLX5_L4_PROT_TYPE_TCP);
2435 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2436 MLX5_HASH_IP_L4PORTS);
2437 break;
2438
2439 case MLX5E_TT_IPV6_TCP:
2440 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2441 MLX5_L3_PROT_TYPE_IPV6);
2442 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2443 MLX5_L4_PROT_TYPE_TCP);
2444 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2445 MLX5_HASH_IP_L4PORTS);
2446 break;
2447
2448 case MLX5E_TT_IPV4_UDP:
2449 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2450 MLX5_L3_PROT_TYPE_IPV4);
2451 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2452 MLX5_L4_PROT_TYPE_UDP);
2453 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2454 MLX5_HASH_IP_L4PORTS);
2455 break;
2456
2457 case MLX5E_TT_IPV6_UDP:
2458 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2459 MLX5_L3_PROT_TYPE_IPV6);
2460 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2461 MLX5_L4_PROT_TYPE_UDP);
2462 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2463 MLX5_HASH_IP_L4PORTS);
2464 break;
2465
2466 case MLX5E_TT_IPV4_IPSEC_AH:
2467 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2468 MLX5_L3_PROT_TYPE_IPV4);
2469 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2470 MLX5_HASH_IP_IPSEC_SPI);
2471 break;
2472
2473 case MLX5E_TT_IPV6_IPSEC_AH:
2474 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2475 MLX5_L3_PROT_TYPE_IPV6);
2476 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2477 MLX5_HASH_IP_IPSEC_SPI);
2478 break;
2479
2480 case MLX5E_TT_IPV4_IPSEC_ESP:
2481 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2482 MLX5_L3_PROT_TYPE_IPV4);
2483 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2484 MLX5_HASH_IP_IPSEC_SPI);
2485 break;
2486
2487 case MLX5E_TT_IPV6_IPSEC_ESP:
2488 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2489 MLX5_L3_PROT_TYPE_IPV6);
2490 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2491 MLX5_HASH_IP_IPSEC_SPI);
2492 break;
2493
2494 case MLX5E_TT_IPV4:
2495 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2496 MLX5_L3_PROT_TYPE_IPV4);
2497 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2498 MLX5_HASH_IP);
2499 break;
2500
2501 case MLX5E_TT_IPV6:
2502 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2503 MLX5_L3_PROT_TYPE_IPV6);
2504 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2505 MLX5_HASH_IP);
2506 break;
2507 default:
2508 WARN_ONCE(true,
2509 "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
2510 }
2511} 2511}
2512 2512
2513static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, 2513static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
3331static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 3331static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3332{ 3332{
3333 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 3333 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
3334 return -ENOTSUPP; 3334 return -EOPNOTSUPP;
3335 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) || 3335 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
3336 !MLX5_CAP_GEN(mdev, nic_flow_table) || 3336 !MLX5_CAP_GEN(mdev, nic_flow_table) ||
3337 !MLX5_CAP_ETH(mdev, csum_cap) || 3337 !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3343 < 3) { 3343 < 3) {
3344 mlx5_core_warn(mdev, 3344 mlx5_core_warn(mdev,
3345 "Not creating net device, some required device capabilities are missing\n"); 3345 "Not creating net device, some required device capabilities are missing\n");
3346 return -ENOTSUPP; 3346 return -EOPNOTSUPP;
3347 } 3347 }
3348 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable)) 3348 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
3349 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n"); 3349 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 46bef6a26a8c..c5282b6aba8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
663 __be32 *saddr, 663 __be32 *saddr,
664 int *out_ttl) 664 int *out_ttl)
665{ 665{
666 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
666 struct rtable *rt; 667 struct rtable *rt;
667 struct neighbour *n = NULL; 668 struct neighbour *n = NULL;
668 int ttl; 669 int ttl;
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
677#else 678#else
678 return -EOPNOTSUPP; 679 return -EOPNOTSUPP;
679#endif 680#endif
680 681 /* if the egress device isn't on the same HW e-switch, we use the uplink */
681 if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) { 682 if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
682 pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__); 683 *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
683 ip_rt_put(rt); 684 else
684 return -EOPNOTSUPP; 685 *out_dev = rt->dst.dev;
685 }
686 686
687 ttl = ip4_dst_hoplimit(&rt->dst); 687 ttl = ip4_dst_hoplimit(&rt->dst);
688 n = dst_neigh_lookup(&rt->dst, &fl4->daddr); 688 n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
693 *out_n = n; 693 *out_n = n;
694 *saddr = fl4->saddr; 694 *saddr = fl4->saddr;
695 *out_ttl = ttl; 695 *out_ttl = ttl;
696 *out_dev = rt->dst.dev;
697 696
698 return 0; 697 return 0;
699} 698}
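
The en_tc.c change above turns a hard failure into a fallback: per the new comment, if the route's egress device is not on the same hardware e-switch, the uplink device is used instead of refusing the offload. A toy model of that decision (all names invented for illustration):

#include <stdio.h>

struct dev { const char *name; int esw_id; };

static struct dev *pick_out_dev(const struct dev *self, struct dev *route_dev,
				struct dev *uplink)
{
	/* same e-switch: use the routed device; otherwise fall back */
	return route_dev->esw_id == self->esw_id ? route_dev : uplink;
}

int main(void)
{
	struct dev self   = { "rep0",    1 };
	struct dev other  = { "eth3",    2 };	/* different e-switch */
	struct dev uplink = { "uplink0", 1 };

	printf("out_dev = %s\n", pick_out_dev(&self, &other, &uplink)->name);
	return 0;
}
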
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f14d9c9ba773..d0c8bf014453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
133 133
134 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || 134 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
135 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) 135 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
136 return -ENOTSUPP; 136 return -EOPNOTSUPP;
137 137
138 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n", 138 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
139 vport, vlan, qos, set_flags); 139 vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
353 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); 353 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
354 if (!root_ns) { 354 if (!root_ns) {
355 esw_warn(dev, "Failed to get FDB flow namespace\n"); 355 esw_warn(dev, "Failed to get FDB flow namespace\n");
356 return -ENOMEM; 356 return -EOPNOTSUPP;
357 } 357 }
358 358
359 flow_group_in = mlx5_vzalloc(inlen); 359 flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
962 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); 962 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
963 if (!root_ns) { 963 if (!root_ns) {
964 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); 964 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
965 return -EIO; 965 return -EOPNOTSUPP;
966 } 966 }
967 967
968 flow_group_in = mlx5_vzalloc(inlen); 968 flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1079 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); 1079 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
1080 if (!root_ns) { 1080 if (!root_ns) {
1081 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); 1081 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
1082 return -EIO; 1082 return -EOPNOTSUPP;
1083 } 1083 }
1084 1084
1085 flow_group_in = mlx5_vzalloc(inlen); 1085 flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
1630 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || 1630 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
1631 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { 1631 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1632 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); 1632 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1633 return -ENOTSUPP; 1633 return -EOPNOTSUPP;
1634 } 1634 }
1635 1635
1636 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support)) 1636 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 03293ed1cc22..595f7c7383b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
166 return 0; 166 return 0;
167 167
168out_notsupp: 168out_notsupp:
169 return -ENOTSUPP; 169 return -EOPNOTSUPP;
170} 170}
171 171
172int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, 172int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
424 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); 424 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
425 if (!root_ns) { 425 if (!root_ns) {
426 esw_warn(dev, "Failed to get FDB flow namespace\n"); 426 esw_warn(dev, "Failed to get FDB flow namespace\n");
427 err = -EOPNOTSUPP;
427 goto ns_err; 428 goto ns_err;
428 } 429 }
429 430
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
535 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 536 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
536 if (!ns) { 537 if (!ns) {
537 esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 538 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
538 return -ENOMEM; 539 return -EOPNOTSUPP;
539 } 540 }
540 541
541 ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0); 542 ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
655 esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); 656 esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
656 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); 657 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
657 if (err1) 658 if (err1)
658 esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err); 659 esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
659 } 660 }
660 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { 661 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
661 if (mlx5_eswitch_inline_mode_get(esw, 662 if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
674 int vport; 675 int vport;
675 int err; 676 int err;
676 677
678 /* disable PF RoCE so missed packets don't go through RoCE steering */
679 mlx5_dev_list_lock();
680 mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
681 mlx5_dev_list_unlock();
682
677 err = esw_create_offloads_fdb_table(esw, nvports); 683 err = esw_create_offloads_fdb_table(esw, nvports);
678 if (err) 684 if (err)
679 return err; 685 goto create_fdb_err;
680 686
681 err = esw_create_offloads_table(esw); 687 err = esw_create_offloads_table(esw);
682 if (err) 688 if (err)
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
696 goto err_reps; 702 goto err_reps;
697 } 703 }
698 704
699 /* disable PF RoCE so missed packets don't go through RoCE steering */
700 mlx5_dev_list_lock();
701 mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
702 mlx5_dev_list_unlock();
703
704 return 0; 705 return 0;
705 706
706err_reps: 707err_reps:
@@ -717,6 +718,13 @@ create_fg_err:
717 718
718create_ft_err: 719create_ft_err:
719 esw_destroy_offloads_fdb_table(esw); 720 esw_destroy_offloads_fdb_table(esw);
721
722create_fdb_err:
723 /* enable back PF RoCE */
724 mlx5_dev_list_lock();
725 mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
726 mlx5_dev_list_unlock();
727
720 return err; 728 return err;
721} 729}
722 730
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
724{ 732{
725 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; 733 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
726 734
727 /* enable back PF RoCE */
728 mlx5_dev_list_lock();
729 mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
730 mlx5_dev_list_unlock();
731
732 mlx5_eswitch_disable_sriov(esw); 735 mlx5_eswitch_disable_sriov(esw);
733 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); 736 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
734 if (err) { 737 if (err) {
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
738 esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); 741 esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
739 } 742 }
740 743
744 /* enable back PF RoCE */
745 mlx5_dev_list_lock();
746 mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
747 mlx5_dev_list_unlock();
748
741 return err; 749 return err;
742} 750}
743 751
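
The esw_offloads_init()/esw_offloads_stop() hunks above move the PF RoCE disable ahead of FDB table creation (so packets missed during setup don't go through RoCE steering) and mirror it in the teardown and the new create_fdb_err unwind label. A compilable sketch of the underlying pattern, with stub names standing in for the real calls: once a side effect moves to the front of an init sequence, every later error label must undo it.

#include <stdio.h>

static void disable_roce(void)  { puts("RoCE off"); }
static void enable_roce(void)   { puts("RoCE back on"); }
static int  create_fdb(void)    { return 0; }
static void destroy_fdb(void)   { }
static int  create_tables(void) { return -1; }	/* force the unwind path */

static int init_offloads(void)
{
	int err;

	disable_roce();			/* side effect now happens first... */

	err = create_fdb();
	if (err)
		goto create_fdb_err;

	err = create_tables();
	if (err)
		goto create_ft_err;

	return 0;

create_ft_err:
	destroy_fdb();
create_fdb_err:
	enable_roce();			/* ...so every error path must undo it */
	return err;
}

int main(void)
{
	return init_offloads() ? 1 : 0;
}
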
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index c4478ecd8056..b53fc85a2375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
322 flow_table_properties_nic_receive. 322 flow_table_properties_nic_receive.
323 flow_modify_en); 323 flow_modify_en);
324 if (!atomic_mod_cap) 324 if (!atomic_mod_cap)
325 return -ENOTSUPP; 325 return -EOPNOTSUPP;
326 opmod = 1; 326 opmod = 1;
327 327
328 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); 328 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0ac7a2fc916c..6346a8f5883b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
1822 struct mlx5_flow_table *ft; 1822 struct mlx5_flow_table *ft;
1823 1823
1824 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR); 1824 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
1825 if (!ns) 1825 if (WARN_ON(!ns))
1826 return -EINVAL; 1826 return -EINVAL;
1827 ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0); 1827 ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
1828 if (IS_ERR(ft)) { 1828 if (IS_ERR(ft)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d01e9f21d469..3c315eb8d270 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -807,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
807 return 0; 807 return 0;
808 } 808 }
809 809
810 return -ENOTSUPP; 810 return -EOPNOTSUPP;
811} 811}
812 812
813 813
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index d2ec9d232a70..fd12e0a377a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
620 u32 out[MLX5_ST_SZ_DW(qtct_reg)]; 620 u32 out[MLX5_ST_SZ_DW(qtct_reg)];
621 621
622 if (!MLX5_CAP_GEN(mdev, ets)) 622 if (!MLX5_CAP_GEN(mdev, ets))
623 return -ENOTSUPP; 623 return -EOPNOTSUPP;
624 624
625 return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out), 625 return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
626 MLX5_REG_QETCR, 0, 1); 626 MLX5_REG_QETCR, 0, 1);
@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
632 u32 in[MLX5_ST_SZ_DW(qtct_reg)]; 632 u32 in[MLX5_ST_SZ_DW(qtct_reg)];
633 633
634 if (!MLX5_CAP_GEN(mdev, ets)) 634 if (!MLX5_CAP_GEN(mdev, ets))
635 return -ENOTSUPP; 635 return -EOPNOTSUPP;
636 636
637 memset(in, 0, sizeof(in)); 637 memset(in, 0, sizeof(in));
638 return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen, 638 return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 269e4401c342..7129c30a2ab4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
532 if (!MLX5_CAP_GEN(mdev, vport_group_manager)) 532 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
533 return -EACCES; 533 return -EACCES;
534 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) 534 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
535 return -ENOTSUPP; 535 return -EOPNOTSUPP;
536 536
537 in = mlx5_vzalloc(inlen); 537 in = mlx5_vzalloc(inlen);
538 if (!in) 538 if (!in)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index be3c91c7f211..5484fd726d5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
305{ 305{
306 void __iomem *ioaddr = hw->pcsr; 306 void __iomem *ioaddr = hw->pcsr;
307 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); 307 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
308 u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
308 int ret = 0; 309 int ret = 0;
309 310
311 /* Discard masked bits */
312 intr_status &= ~intr_mask;
313
310 /* Not used events (e.g. MMC interrupts) are not handled. */ 314 /* Not used events (e.g. MMC interrupts) are not handled. */
311 if ((intr_status & GMAC_INT_STATUS_MMCTIS)) 315 if ((intr_status & GMAC_INT_STATUS_MMCTIS))
312 x->mmc_tx_irq_n++; 316 x->mmc_tx_irq_n++;
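
The stmmac fix above encodes a general rule for interrupt handlers: masking an event only stops it from raising an interrupt, it does not stop the status bit from latching, so the handler must discard masked bits before acting on the register. A minimal demo with made-up register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t intr_status = 0x0000000f;	/* four events latched in status */
	uint32_t intr_mask   = 0x0000000c;	/* two of them are masked */

	intr_status &= ~intr_mask;		/* keep only unmasked events */
	printf("events to handle: 0x%08x\n", intr_status);	/* 0x00000003 */
	return 0;
}
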
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e55809c5beb7..6742070ca676 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1012,7 +1012,7 @@ static struct phy_driver ksphy_driver[] = {
1012 .phy_id = PHY_ID_KSZ8795, 1012 .phy_id = PHY_ID_KSZ8795,
1013 .phy_id_mask = MICREL_PHY_ID_MASK, 1013 .phy_id_mask = MICREL_PHY_ID_MASK,
1014 .name = "Micrel KSZ8795", 1014 .name = "Micrel KSZ8795",
1015 .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause), 1015 .features = PHY_BASIC_FEATURES,
1016 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 1016 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
1017 .config_init = kszphy_config_init, 1017 .config_init = kszphy_config_init,
1018 .config_aneg = ksz8873mll_config_aneg, 1018 .config_aneg = ksz8873mll_config_aneg,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index d02ca1491d16..8d3e53fac1da 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -91,7 +91,7 @@
91 91
92#define IWL8000_FW_PRE "iwlwifi-8000C-" 92#define IWL8000_FW_PRE "iwlwifi-8000C-"
93#define IWL8000_MODULE_FIRMWARE(api) \ 93#define IWL8000_MODULE_FIRMWARE(api) \
94 IWL8000_FW_PRE "-" __stringify(api) ".ucode" 94 IWL8000_FW_PRE __stringify(api) ".ucode"
95 95
96#define IWL8265_FW_PRE "iwlwifi-8265-" 96#define IWL8265_FW_PRE "iwlwifi-8265-"
97#define IWL8265_MODULE_FIRMWARE(api) \ 97#define IWL8265_MODULE_FIRMWARE(api) \
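
The iwl-8000.c one-liner above fixes a doubled dash: IWL8000_FW_PRE already ends in '-', so the extra "-" in the macro produced firmware names like "iwlwifi-8000C--34.ucode" that never match a file on disk. A small standalone demo of the string pasting involved (macro names copied from the hunk; the API number 34 is chosen arbitrarily):

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define IWL8000_FW_PRE "iwlwifi-8000C-"
#define FW_NAME_OLD(api) IWL8000_FW_PRE "-" __stringify(api) ".ucode"
#define FW_NAME_NEW(api) IWL8000_FW_PRE __stringify(api) ".ucode"

int main(void)
{
	printf("old: %s\n", FW_NAME_OLD(34));	/* iwlwifi-8000C--34.ucode */
	printf("new: %s\n", FW_NAME_NEW(34));	/* iwlwifi-8000C-34.ucode */
	return 0;
}
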
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 636c8b03e318..09e9e2e3ed04 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1164,9 +1164,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1164 .frame_limit = IWL_FRAME_LIMIT, 1164 .frame_limit = IWL_FRAME_LIMIT,
1165 }; 1165 };
1166 1166
1167 /* Make sure reserved queue is still marked as such (or allocated) */ 1167 /* Make sure reserved queue is still marked as such (if allocated) */
1168 mvm->queue_info[mvm_sta->reserved_queue].status = 1168 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1169 IWL_MVM_QUEUE_RESERVED; 1169 mvm->queue_info[mvm_sta->reserved_queue].status =
1170 IWL_MVM_QUEUE_RESERVED;
1170 1171
1171 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { 1172 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1172 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; 1173 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 63a051be832e..bec7d9c46087 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
843 return; 843 return;
844 844
845 IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); 845 IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
846 thermal_zone_device_unregister(mvm->tz_device.tzone); 846 if (mvm->tz_device.tzone) {
847 mvm->tz_device.tzone = NULL; 847 thermal_zone_device_unregister(mvm->tz_device.tzone);
848 mvm->tz_device.tzone = NULL;
849 }
848} 850}
849 851
850static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) 852static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
853 return; 855 return;
854 856
855 IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); 857 IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
856 thermal_cooling_device_unregister(mvm->cooling_dev.cdev); 858 if (mvm->cooling_dev.cdev) {
857 mvm->cooling_dev.cdev = NULL; 859 thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
860 mvm->cooling_dev.cdev = NULL;
861 }
858} 862}
859#endif /* CONFIG_THERMAL */ 863#endif /* CONFIG_THERMAL */
860 864
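
Both iwlwifi unregister paths above gain the same guard: call the unregister routine only if registration actually happened, and clear the pointer afterwards so the teardown is idempotent. A userspace model of that shape (types and names are stand-ins, not the driver's API):

#include <stdio.h>
#include <stdlib.h>

struct tz { int dummy; };

static void tz_unregister(struct tz *t)
{
	printf("unregistering\n");
	free(t);
}

static void safe_unregister(struct tz **tzp)
{
	if (*tzp) {			/* skip if registration never happened */
		tz_unregister(*tzp);
		*tzp = NULL;		/* make a second call a no-op */
	}
}

int main(void)
{
	struct tz *t = malloc(sizeof(*t));	/* "registration" succeeded */

	safe_unregister(&t);	/* real unregister */
	safe_unregister(&t);	/* harmless repeat, no crash */
	return 0;
}
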
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index a0875001b13c..df08a41d5be5 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -45,10 +45,9 @@ struct can_proto {
45extern int can_proto_register(const struct can_proto *cp); 45extern int can_proto_register(const struct can_proto *cp);
46extern void can_proto_unregister(const struct can_proto *cp); 46extern void can_proto_unregister(const struct can_proto *cp);
47 47
48extern int can_rx_register(struct net_device *dev, canid_t can_id, 48int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
49 canid_t mask, 49 void (*func)(struct sk_buff *, void *),
50 void (*func)(struct sk_buff *, void *), 50 void *data, char *ident, struct sock *sk);
51 void *data, char *ident);
52 51
53extern void can_rx_unregister(struct net_device *dev, canid_t can_id, 52extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
54 canid_t mask, 53 canid_t mask,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9bde9558b596..70ad0291d517 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -866,11 +866,15 @@ struct netdev_xdp {
866 * of useless work if you return NETDEV_TX_BUSY. 866 * of useless work if you return NETDEV_TX_BUSY.
867 * Required; cannot be NULL. 867 * Required; cannot be NULL.
868 * 868 *
869 * netdev_features_t (*ndo_fix_features)(struct net_device *dev, 869 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
870 * netdev_features_t features); 870 * struct net_device *dev
871 * Adjusts the requested feature flags according to device-specific 871 * netdev_features_t features);
872 * constraints, and returns the resulting flags. Must not modify 872 * Called by core transmit path to determine if device is capable of
873 * the device state. 873 * performing offload operations on a given packet. This is to give
874 * the device an opportunity to implement any restrictions that cannot
875 * be otherwise expressed by feature flags. The check is called with
876 * the set of features that the stack has calculated and it returns
877 * those the driver believes to be appropriate.
874 * 878 *
875 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 879 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
876 * void *accel_priv, select_queue_fallback_t fallback); 880 * void *accel_priv, select_queue_fallback_t fallback);
@@ -1028,6 +1032,12 @@ struct netdev_xdp {
1028 * Called to release previously enslaved netdev. 1032 * Called to release previously enslaved netdev.
1029 * 1033 *
1030 * Feature/offload setting functions. 1034 * Feature/offload setting functions.
1035 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1036 * netdev_features_t features);
1037 * Adjusts the requested feature flags according to device-specific
1038 * constraints, and returns the resulting flags. Must not modify
1039 * the device state.
1040 *
1031 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); 1041 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
1032 * Called to update device configuration to new features. Passed 1042 * Called to update device configuration to new features. Passed
1033 * feature set might be less than what was returned by ndo_fix_features()). 1043 * feature set might be less than what was returned by ndo_fix_features()).
@@ -1100,15 +1110,6 @@ struct netdev_xdp {
1100 * Callback to use for xmit over the accelerated station. This 1110 * Callback to use for xmit over the accelerated station. This
1101 * is used in place of ndo_start_xmit on accelerated net 1111 * is used in place of ndo_start_xmit on accelerated net
1102 * devices. 1112 * devices.
1103 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1104 * struct net_device *dev
1105 * netdev_features_t features);
1106 * Called by core transmit path to determine if device is capable of
1107 * performing offload operations on a given packet. This is to give
1108 * the device an opportunity to implement any restrictions that cannot
1109 * be otherwise expressed by feature flags. The check is called with
1110 * the set of features that the stack has calculated and it returns
1111 * those the driver believes to be appropriate.
1112 * int (*ndo_set_tx_maxrate)(struct net_device *dev, 1113 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1113 * int queue_index, u32 maxrate); 1114 * int queue_index, u32 maxrate);
1114 * Called when a user wants to set a max-rate limitation of specific 1115 * Called when a user wants to set a max-rate limitation of specific
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 7afe991e900e..dbf0abba33b8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
776{ 776{
777 u32 hash; 777 u32 hash;
778 778
779 /* @flowlabel may include more than a flow label, eg, the traffic class.
780 * Here we want only the flow label value.
781 */
782 flowlabel &= IPV6_FLOWLABEL_MASK;
783
779 if (flowlabel || 784 if (flowlabel ||
780 net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF || 785 net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
781 (!autolabel && 786 (!autolabel &&
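
The ip6_make_flowlabel() fix above relies on the layout of the IPv6 flow-info word: the low 20 bits are the flow label and the bits above them carry the traffic class, so a non-zero traffic class used to make a zero label look non-zero. A host-order demo of the masking (0x000FFFFF mirrors IPV6_FLOWLABEL_MASK, which in the kernel is a __be32 constant):

#include <stdint.h>
#include <stdio.h>

#define FLOWLABEL_MASK 0x000FFFFFu	/* low 20 bits: the label itself */

int main(void)
{
	uint32_t flowinfo = 0x00300123u;	/* traffic-class bits above label 0x00123 */

	printf("raw flowinfo: 0x%08x\n", flowinfo);
	printf("label only:   0x%05x\n", flowinfo & FLOWLABEL_MASK);
	return 0;
}
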
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index f0db7788f887..3dc91a46e8b8 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1384,6 +1384,8 @@ enum ethtool_link_mode_bit_indices {
1384 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, 1384 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
1385 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, 1385 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
1386 ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, 1386 ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
1387 ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47,
1388 ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48,
1387 1389
1388 1390
1389 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit 1391 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
@@ -1393,7 +1395,7 @@ enum ethtool_link_mode_bit_indices {
1393 */ 1395 */
1394 1396
1395 __ETHTOOL_LINK_MODE_LAST 1397 __ETHTOOL_LINK_MODE_LAST
1396 = ETHTOOL_LINK_MODE_10000baseER_Full_BIT, 1398 = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
1397}; 1399};
1398 1400
1399#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ 1401#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 1108079d934f..5488e4a6ccd0 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
445 * @func: callback function on filter match 445 * @func: callback function on filter match
446 * @data: returned parameter for callback function 446 * @data: returned parameter for callback function
447 * @ident: string for calling module identification 447 * @ident: string for calling module identification
448 * @sk: socket pointer (might be NULL)
448 * 449 *
449 * Description: 450 * Description:
450 * Invokes the callback function with the received sk_buff and the given 451 * Invokes the callback function with the received sk_buff and the given
@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
468 */ 469 */
469int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, 470int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
470 void (*func)(struct sk_buff *, void *), void *data, 471 void (*func)(struct sk_buff *, void *), void *data,
471 char *ident) 472 char *ident, struct sock *sk)
472{ 473{
473 struct receiver *r; 474 struct receiver *r;
474 struct hlist_head *rl; 475 struct hlist_head *rl;
@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
496 r->func = func; 497 r->func = func;
497 r->data = data; 498 r->data = data;
498 r->ident = ident; 499 r->ident = ident;
500 r->sk = sk;
499 501
500 hlist_add_head_rcu(&r->list, rl); 502 hlist_add_head_rcu(&r->list, rl);
501 d->entries++; 503 d->entries++;
@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
520static void can_rx_delete_receiver(struct rcu_head *rp) 522static void can_rx_delete_receiver(struct rcu_head *rp)
521{ 523{
522 struct receiver *r = container_of(rp, struct receiver, rcu); 524 struct receiver *r = container_of(rp, struct receiver, rcu);
525 struct sock *sk = r->sk;
523 526
524 kmem_cache_free(rcv_cache, r); 527 kmem_cache_free(rcv_cache, r);
528 if (sk)
529 sock_put(sk);
525} 530}
526 531
527/** 532/**
@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
596 spin_unlock(&can_rcvlists_lock); 601 spin_unlock(&can_rcvlists_lock);
597 602
598 /* schedule the receiver item for deletion */ 603 /* schedule the receiver item for deletion */
599 if (r) 604 if (r) {
605 if (r->sk)
606 sock_hold(r->sk);
600 call_rcu(&r->rcu, can_rx_delete_receiver); 607 call_rcu(&r->rcu, can_rx_delete_receiver);
608 }
601} 609}
602EXPORT_SYMBOL(can_rx_unregister); 610EXPORT_SYMBOL(can_rx_unregister);
603 611
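
The af_can.c hunks above close a use-after-free: a receiver's callback may dereference its socket, so the socket must outlive the receiver until the RCU grace period that frees it has passed. can_rx_unregister() now takes an extra reference before deferring, and the RCU callback drops it only after the receiver is freed. A userspace toy of that reference-count ordering (all types are stand-ins, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

struct sock { int refcnt; };

static void sock_hold(struct sock *sk) { sk->refcnt++; }

static void sock_put(struct sock *sk)
{
	if (--sk->refcnt == 0) {
		printf("socket freed\n");
		free(sk);
	}
}

struct receiver { struct sock *sk; };

/* stands in for the deferred can_rx_delete_receiver() callback */
static void receiver_free(struct receiver *r)
{
	struct sock *sk = r->sk;

	free(r);
	if (sk)
		sock_put(sk);	/* reference dropped only after r is gone */
}

int main(void)
{
	struct sock *sk = malloc(sizeof(*sk));
	struct receiver *r = malloc(sizeof(*r));

	sk->refcnt = 1;		/* owned by the protocol */
	r->sk = sk;

	sock_hold(sk);		/* unregister: pin sk before deferring */
	sock_put(sk);		/* protocol closes the socket; sk survives */
	receiver_free(r);	/* deferred free runs later, still safe */
	return 0;
}
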
diff --git a/net/can/af_can.h b/net/can/af_can.h
index fca0fe9fc45a..b86f5129e838 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -50,13 +50,14 @@
50 50
51struct receiver { 51struct receiver {
52 struct hlist_node list; 52 struct hlist_node list;
53 struct rcu_head rcu;
54 canid_t can_id; 53 canid_t can_id;
55 canid_t mask; 54 canid_t mask;
56 unsigned long matches; 55 unsigned long matches;
57 void (*func)(struct sk_buff *, void *); 56 void (*func)(struct sk_buff *, void *);
58 void *data; 57 void *data;
59 char *ident; 58 char *ident;
59 struct sock *sk;
60 struct rcu_head rcu;
60}; 61};
61 62
62#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) 63#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 21ac75390e3d..95d13b233c65 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
734 734
735static void bcm_remove_op(struct bcm_op *op) 735static void bcm_remove_op(struct bcm_op *op)
736{ 736{
737 hrtimer_cancel(&op->timer); 737 if (op->tsklet.func) {
738 hrtimer_cancel(&op->thrtimer); 738 while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
739 739 test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
740 if (op->tsklet.func) 740 hrtimer_active(&op->timer)) {
741 tasklet_kill(&op->tsklet); 741 hrtimer_cancel(&op->timer);
742 tasklet_kill(&op->tsklet);
743 }
744 }
742 745
743 if (op->thrtsklet.func) 746 if (op->thrtsklet.func) {
744 tasklet_kill(&op->thrtsklet); 747 while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
748 test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
749 hrtimer_active(&op->thrtimer)) {
750 hrtimer_cancel(&op->thrtimer);
751 tasklet_kill(&op->thrtsklet);
752 }
753 }
745 754
746 if ((op->frames) && (op->frames != &op->sframe)) 755 if ((op->frames) && (op->frames != &op->sframe))
747 kfree(op->frames); 756 kfree(op->frames);
@@ -1216,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1216 err = can_rx_register(dev, op->can_id, 1225 err = can_rx_register(dev, op->can_id,
1217 REGMASK(op->can_id), 1226 REGMASK(op->can_id),
1218 bcm_rx_handler, op, 1227 bcm_rx_handler, op,
1219 "bcm"); 1228 "bcm", sk);
1220 1229
1221 op->rx_reg_dev = dev; 1230 op->rx_reg_dev = dev;
1222 dev_put(dev); 1231 dev_put(dev);
@@ -1225,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1225 } else 1234 } else
1226 err = can_rx_register(NULL, op->can_id, 1235 err = can_rx_register(NULL, op->can_id,
1227 REGMASK(op->can_id), 1236 REGMASK(op->can_id),
1228 bcm_rx_handler, op, "bcm"); 1237 bcm_rx_handler, op, "bcm", sk);
1229 if (err) { 1238 if (err) {
1230 /* this bcm rx op is broken -> remove it */ 1239 /* this bcm rx op is broken -> remove it */
1231 list_del(&op->list); 1240 list_del(&op->list);
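
The bcm_remove_op() rework above loops because a single cancel is not enough when the hrtimer and its tasklet can reschedule each other: the hunk keeps cancelling both until neither is scheduled, running, nor armed. A single-threaded model of why the loop is needed (the one-shot re-arm budget stands in for a last expiry racing with removal):

#include <stdbool.h>
#include <stdio.h>

static bool timer_active, task_pending;
static int rearm_budget = 1;	/* the task re-arms the timer once */

static void timer_cancel(void)
{
	if (timer_active) {
		timer_active = false;
		task_pending = true;	/* expiry already queued the task */
	}
}

static void task_kill(void)
{
	if (task_pending) {
		task_pending = false;
		if (rearm_budget-- > 0)
			timer_active = true;	/* the race: task re-arms */
	}
}

int main(void)
{
	timer_active = true;

	while (timer_active || task_pending) {	/* loop until quiescent */
		timer_cancel();
		task_kill();
	}
	printf("teardown safe\n");
	return 0;
}
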
diff --git a/net/can/gw.c b/net/can/gw.c
index a54ab0c82104..7056a1a2bb70 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
442{ 442{
443 return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id, 443 return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
444 gwj->ccgw.filter.can_mask, can_can_gw_rcv, 444 gwj->ccgw.filter.can_mask, can_can_gw_rcv,
445 gwj, "gw"); 445 gwj, "gw", NULL);
446} 446}
447 447
448static inline void cgw_unregister_filter(struct cgw_job *gwj) 448static inline void cgw_unregister_filter(struct cgw_job *gwj)
diff --git a/net/can/raw.c b/net/can/raw.c
index b075f028d7e2..6dc546a06673 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
190 for (i = 0; i < count; i++) { 190 for (i = 0; i < count; i++) {
191 err = can_rx_register(dev, filter[i].can_id, 191 err = can_rx_register(dev, filter[i].can_id,
192 filter[i].can_mask, 192 filter[i].can_mask,
193 raw_rcv, sk, "raw"); 193 raw_rcv, sk, "raw", sk);
194 if (err) { 194 if (err) {
195 /* clean up successfully registered filters */ 195 /* clean up successfully registered filters */
196 while (--i >= 0) 196 while (--i >= 0)
@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
211 211
212 if (err_mask) 212 if (err_mask)
213 err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG, 213 err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
214 raw_rcv, sk, "raw"); 214 raw_rcv, sk, "raw", sk);
215 215
216 return err; 216 return err;
217} 217}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1d5331a1b1dc..8ce50dc3ab8c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2518,9 +2518,11 @@ u32 __tcp_select_window(struct sock *sk)
2518 int full_space = min_t(int, tp->window_clamp, allowed_space); 2518 int full_space = min_t(int, tp->window_clamp, allowed_space);
2519 int window; 2519 int window;
2520 2520
2521 if (mss > full_space) 2521 if (unlikely(mss > full_space)) {
2522 mss = full_space; 2522 mss = full_space;
2523 2523 if (mss <= 0)
2524 return 0;
2525 }
2524 if (free_space < (full_space >> 1)) { 2526 if (free_space < (full_space >> 1)) {
2525 icsk->icsk_ack.quick = 0; 2527 icsk->icsk_ack.quick = 0;
2526 2528
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2c0df09e9036..b6a94ff0bbd0 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1344,7 +1344,7 @@ emsgsize:
1344 */ 1344 */
1345 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP && 1345 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
1346 headersize == sizeof(struct ipv6hdr) && 1346 headersize == sizeof(struct ipv6hdr) &&
1347 length < mtu - headersize && 1347 length <= mtu - headersize &&
1348 !(flags & MSG_MORE) && 1348 !(flags & MSG_MORE) &&
1349 rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) 1349 rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
1350 csummode = CHECKSUM_PARTIAL; 1350 csummode = CHECKSUM_PARTIAL;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index ff8ee06491c3..75fac933c209 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -441,7 +441,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
441 if (i + sizeof(*tel) > optlen) 441 if (i + sizeof(*tel) > optlen)
442 break; 442 break;
443 443
444 tel = (struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i; 444 tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
445 /* return index of option if found and valid */ 445 /* return index of option if found and valid */
446 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && 446 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
447 tel->length == 1) 447 tel->length == 1)
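
The ip6_tnl_parse_tlv_enc_lim() one-liner above is a classic precedence bug: a cast binds tighter than '+', so "(struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i" advances in units of the struct size rather than bytes. A standalone demonstration with a two-byte stand-in struct:

#include <stdio.h>

struct tlv { unsigned char type, length; };	/* sizeof == 2, no padding */

int main(void)
{
	unsigned char buf[16] = { 0 };
	unsigned char *data = buf;
	int off = 4;

	struct tlv *wrong = (struct tlv *) data + off;	/* data + off * sizeof(struct tlv) */
	struct tlv *right = (struct tlv *)(data + off);	/* data + off bytes */

	printf("wrong: +%td bytes\n", (unsigned char *)wrong - data);	/* +8 */
	printf("right: +%td bytes\n", (unsigned char *)right - data);	/* +4 */
	return 0;
}
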
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 970db7a41684..5752789acc13 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -568,9 +568,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
568 &mask->icmp.type, 568 &mask->icmp.type,
569 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 569 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
570 sizeof(key->icmp.type)); 570 sizeof(key->icmp.type));
571 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, 571 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
572 &mask->icmp.code, 572 &mask->icmp.code,
573 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 573 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
574 sizeof(key->icmp.code)); 574 sizeof(key->icmp.code));
575 } 575 }
576 576
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index f935429bd5ef..b12bc2abea93 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -16,16 +16,11 @@
16#include <net/sch_generic.h> 16#include <net/sch_generic.h>
17#include <net/pkt_cls.h> 17#include <net/pkt_cls.h>
18 18
19struct cls_mall_filter { 19struct cls_mall_head {
20 struct tcf_exts exts; 20 struct tcf_exts exts;
21 struct tcf_result res; 21 struct tcf_result res;
22 u32 handle; 22 u32 handle;
23 struct rcu_head rcu;
24 u32 flags; 23 u32 flags;
25};
26
27struct cls_mall_head {
28 struct cls_mall_filter *filter;
29 struct rcu_head rcu; 24 struct rcu_head rcu;
30}; 25};
31 26
@@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
33 struct tcf_result *res) 28 struct tcf_result *res)
34{ 29{
35 struct cls_mall_head *head = rcu_dereference_bh(tp->root); 30 struct cls_mall_head *head = rcu_dereference_bh(tp->root);
36 struct cls_mall_filter *f = head->filter;
37 31
38 if (tc_skip_sw(f->flags)) 32 if (tc_skip_sw(head->flags))
39 return -1; 33 return -1;
40 34
41 return tcf_exts_exec(skb, &f->exts, res); 35 return tcf_exts_exec(skb, &head->exts, res);
42} 36}
43 37
44static int mall_init(struct tcf_proto *tp) 38static int mall_init(struct tcf_proto *tp)
45{ 39{
46 struct cls_mall_head *head;
47
48 head = kzalloc(sizeof(*head), GFP_KERNEL);
49 if (!head)
50 return -ENOBUFS;
51
52 rcu_assign_pointer(tp->root, head);
53
54 return 0; 40 return 0;
55} 41}
56 42
57static void mall_destroy_filter(struct rcu_head *head) 43static void mall_destroy_rcu(struct rcu_head *rcu)
58{ 44{
59 struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu); 45 struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
46 rcu);
60 47
61 tcf_exts_destroy(&f->exts); 48 tcf_exts_destroy(&head->exts);
62 49 kfree(head);
63 kfree(f);
64} 50}
65 51
66static int mall_replace_hw_filter(struct tcf_proto *tp, 52static int mall_replace_hw_filter(struct tcf_proto *tp,
67 struct cls_mall_filter *f, 53 struct cls_mall_head *head,
68 unsigned long cookie) 54 unsigned long cookie)
69{ 55{
70 struct net_device *dev = tp->q->dev_queue->dev; 56 struct net_device *dev = tp->q->dev_queue->dev;
@@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
74 offload.type = TC_SETUP_MATCHALL; 60 offload.type = TC_SETUP_MATCHALL;
75 offload.cls_mall = &mall_offload; 61 offload.cls_mall = &mall_offload;
76 offload.cls_mall->command = TC_CLSMATCHALL_REPLACE; 62 offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
77 offload.cls_mall->exts = &f->exts; 63 offload.cls_mall->exts = &head->exts;
78 offload.cls_mall->cookie = cookie; 64 offload.cls_mall->cookie = cookie;
79 65
80 return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, 66 return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
@@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
82} 68}
83 69
84static void mall_destroy_hw_filter(struct tcf_proto *tp, 70static void mall_destroy_hw_filter(struct tcf_proto *tp,
85 struct cls_mall_filter *f, 71 struct cls_mall_head *head,
86 unsigned long cookie) 72 unsigned long cookie)
87{ 73{
88 struct net_device *dev = tp->q->dev_queue->dev; 74 struct net_device *dev = tp->q->dev_queue->dev;
@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
103{ 89{
104 struct cls_mall_head *head = rtnl_dereference(tp->root); 90 struct cls_mall_head *head = rtnl_dereference(tp->root);
105 struct net_device *dev = tp->q->dev_queue->dev; 91 struct net_device *dev = tp->q->dev_queue->dev;
106 struct cls_mall_filter *f = head->filter;
107 92
108 if (!force && f) 93 if (!head)
109 return false; 94 return true;
110 95
111 if (f) { 96 if (tc_should_offload(dev, tp, head->flags))
112 if (tc_should_offload(dev, tp, f->flags)) 97 mall_destroy_hw_filter(tp, head, (unsigned long) head);
113 mall_destroy_hw_filter(tp, f, (unsigned long) f);
114 98
115 call_rcu(&f->rcu, mall_destroy_filter); 99 call_rcu(&head->rcu, mall_destroy_rcu);
116 }
117 kfree_rcu(head, rcu);
118 return true; 100 return true;
119} 101}
120 102
121static unsigned long mall_get(struct tcf_proto *tp, u32 handle) 103static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
122{ 104{
123 struct cls_mall_head *head = rtnl_dereference(tp->root); 105 return 0UL;
124 struct cls_mall_filter *f = head->filter;
125
126 if (f && f->handle == handle)
127 return (unsigned long) f;
128 return 0;
129} 106}
130 107
131static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { 108static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
134}; 111};
135 112
136static int mall_set_parms(struct net *net, struct tcf_proto *tp, 113static int mall_set_parms(struct net *net, struct tcf_proto *tp,
137 struct cls_mall_filter *f, 114 struct cls_mall_head *head,
138 unsigned long base, struct nlattr **tb, 115 unsigned long base, struct nlattr **tb,
139 struct nlattr *est, bool ovr) 116 struct nlattr *est, bool ovr)
140{ 117{
@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
147 return err; 124 return err;
148 125
149 if (tb[TCA_MATCHALL_CLASSID]) { 126 if (tb[TCA_MATCHALL_CLASSID]) {
150 f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]); 127 head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
151 tcf_bind_filter(tp, &f->res, base); 128 tcf_bind_filter(tp, &head->res, base);
152 } 129 }
153 130
154 tcf_exts_change(tp, &f->exts, &e); 131 tcf_exts_change(tp, &head->exts, &e);
155 132
156 return 0; 133 return 0;
157} 134}
@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
162 unsigned long *arg, bool ovr) 139 unsigned long *arg, bool ovr)
163{ 140{
164 struct cls_mall_head *head = rtnl_dereference(tp->root); 141 struct cls_mall_head *head = rtnl_dereference(tp->root);
165 struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
166 struct net_device *dev = tp->q->dev_queue->dev; 142 struct net_device *dev = tp->q->dev_queue->dev;
167 struct cls_mall_filter *f;
168 struct nlattr *tb[TCA_MATCHALL_MAX + 1]; 143 struct nlattr *tb[TCA_MATCHALL_MAX + 1];
144 struct cls_mall_head *new;
169 u32 flags = 0; 145 u32 flags = 0;
170 int err; 146 int err;
171 147
172 if (!tca[TCA_OPTIONS]) 148 if (!tca[TCA_OPTIONS])
173 return -EINVAL; 149 return -EINVAL;
174 150
175 if (head->filter) 151 if (head)
176 return -EBUSY; 152 return -EEXIST;
177
178 if (fold)
179 return -EINVAL;
180 153
181 err = nla_parse_nested(tb, TCA_MATCHALL_MAX, 154 err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
182 tca[TCA_OPTIONS], mall_policy); 155 tca[TCA_OPTIONS], mall_policy);
@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
189 return -EINVAL; 162 return -EINVAL;
190 } 163 }
191 164
192 f = kzalloc(sizeof(*f), GFP_KERNEL); 165 new = kzalloc(sizeof(*new), GFP_KERNEL);
193 if (!f) 166 if (!new)
194 return -ENOBUFS; 167 return -ENOBUFS;
195 168
196 tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0); 169 tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
197 170
198 if (!handle) 171 if (!handle)
199 handle = 1; 172 handle = 1;
200 f->handle = handle; 173 new->handle = handle;
201 f->flags = flags; 174 new->flags = flags;
202 175
203 err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); 176 err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
204 if (err) 177 if (err)
205 goto errout; 178 goto errout;
206 179
207 if (tc_should_offload(dev, tp, flags)) { 180 if (tc_should_offload(dev, tp, flags)) {
208 err = mall_replace_hw_filter(tp, f, (unsigned long) f); 181 err = mall_replace_hw_filter(tp, new, (unsigned long) new);
209 if (err) { 182 if (err) {
210 if (tc_skip_sw(flags)) 183 if (tc_skip_sw(flags))
211 goto errout; 184 goto errout;
@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
214 } 187 }
215 } 188 }
216 189
217 *arg = (unsigned long) f; 190 *arg = (unsigned long) head;
218 rcu_assign_pointer(head->filter, f); 191 rcu_assign_pointer(tp->root, new);
219 192 if (head)
193 call_rcu(&head->rcu, mall_destroy_rcu);
220 return 0; 194 return 0;
221 195
222errout: 196errout:
223 kfree(f); 197 kfree(new);
224 return err; 198 return err;
225} 199}
226 200
227static int mall_delete(struct tcf_proto *tp, unsigned long arg) 201static int mall_delete(struct tcf_proto *tp, unsigned long arg)
228{ 202{
229 struct cls_mall_head *head = rtnl_dereference(tp->root); 203 return -EOPNOTSUPP;
230 struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
231 struct net_device *dev = tp->q->dev_queue->dev;
232
233 if (tc_should_offload(dev, tp, f->flags))
234 mall_destroy_hw_filter(tp, f, (unsigned long) f);
235
236 RCU_INIT_POINTER(head->filter, NULL);
237 tcf_unbind_filter(tp, &f->res);
238 call_rcu(&f->rcu, mall_destroy_filter);
239 return 0;
240} 204}
241 205
242static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg) 206static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
243{ 207{
244 struct cls_mall_head *head = rtnl_dereference(tp->root); 208 struct cls_mall_head *head = rtnl_dereference(tp->root);
245 struct cls_mall_filter *f = head->filter;
246 209
247 if (arg->count < arg->skip) 210 if (arg->count < arg->skip)
248 goto skip; 211 goto skip;
249 if (arg->fn(tp, (unsigned long) f, arg) < 0) 212 if (arg->fn(tp, (unsigned long) head, arg) < 0)
250 arg->stop = 1; 213 arg->stop = 1;
251skip: 214skip:
252 arg->count++; 215 arg->count++;
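
The matchall rework in the hunks above and below removes the two-level head->filter indirection under which mall_classify() could observe a head whose filter pointer was still NULL. The replacement follows the usual RCU publish pattern: fully initialise the new head, swap the root pointer once, and defer-free the old one. A single-writer userspace model of that ordering (rcu_assign_pointer/call_rcu become a plain store and an immediate free here):

#include <stdio.h>
#include <stdlib.h>

struct head { int handle; };

static struct head *root;	/* stands in for RCU-protected tp->root */

static int replace_head(int handle)
{
	struct head *old = root;
	struct head *new = malloc(sizeof(*new));

	if (!new)
		return -1;
	new->handle = handle;	/* fully initialise before publishing */

	root = new;		/* rcu_assign_pointer() in the kernel */
	free(old);		/* call_rcu() in the kernel; free(NULL) is ok */
	return 0;
}

int main(void)
{
	replace_head(1);
	replace_head(2);
	printf("handle=%d\n", root->handle);
	free(root);
	return 0;
}
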
@@ -255,28 +218,28 @@ skip:
255static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, 218static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
256 struct sk_buff *skb, struct tcmsg *t) 219 struct sk_buff *skb, struct tcmsg *t)
257{ 220{
258 struct cls_mall_filter *f = (struct cls_mall_filter *) fh; 221 struct cls_mall_head *head = (struct cls_mall_head *) fh;
259 struct nlattr *nest; 222 struct nlattr *nest;
260 223
261 if (!f) 224 if (!head)
262 return skb->len; 225 return skb->len;
263 226
264 t->tcm_handle = f->handle; 227 t->tcm_handle = head->handle;
265 228
266 nest = nla_nest_start(skb, TCA_OPTIONS); 229 nest = nla_nest_start(skb, TCA_OPTIONS);
267 if (!nest) 230 if (!nest)
268 goto nla_put_failure; 231 goto nla_put_failure;
269 232
270 if (f->res.classid && 233 if (head->res.classid &&
271 nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid)) 234 nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
272 goto nla_put_failure; 235 goto nla_put_failure;
273 236
274 if (tcf_exts_dump(skb, &f->exts)) 237 if (tcf_exts_dump(skb, &head->exts))
275 goto nla_put_failure; 238 goto nla_put_failure;
276 239
277 nla_nest_end(skb, nest); 240 nla_nest_end(skb, nest);
278 241
279 if (tcf_exts_dump_stats(skb, &f->exts) < 0) 242 if (tcf_exts_dump_stats(skb, &head->exts) < 0)
280 goto nla_put_failure; 243 goto nla_put_failure;
281 244
282 return skb->len; 245 return skb->len;