author	David S. Miller <davem@davemloft.net>	2012-06-13 00:59:18 -0400
committer	David S. Miller <davem@davemloft.net>	2012-06-13 00:59:18 -0400
commit	43b03f1f6d6832d744918947d185a7aee89d1e0f (patch)
tree	53a8c47d67dfcd23450d4068d08ccf2b7714c5e8
parent	2da45db2bdd432a9dca825099c791f5c851f92b9 (diff)
parent	5ee31c6898ea5537fcea160999d60dc63bc0c305 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	MAINTAINERS
	drivers/net/wireless/iwlwifi/pcie/trans.c

The iwlwifi conflict was resolved by keeping the code added in 'net'
that turns off the buggy chip feature.

The MAINTAINERS conflict was merely overlapping changes, one change
updated all the wireless web site URLs and the other changed some
GIT trees to be Johannes's instead of John's.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	MAINTAINERS | 12
-rw-r--r--	drivers/bcma/driver_pci.c | 6
-rw-r--r--	drivers/net/bonding/bond_main.c | 9
-rw-r--r--	drivers/net/bonding/bond_sysfs.c | 8
-rw-r--r--	drivers/net/dummy.c | 4
-rw-r--r--	drivers/net/ethernet/broadcom/tg3.c | 3
-rw-r--r--	drivers/net/ethernet/emulex/benet/be_main.c | 5
-rw-r--r--	drivers/net/ethernet/marvell/sky2.c | 10
-rw-r--r--	drivers/net/ethernet/nxp/lpc_eth.c | 11
-rw-r--r--	drivers/net/ethernet/realtek/r8169.c | 6
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/Kconfig | 1
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac.h | 64
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 23
-rw-r--r--	drivers/net/ethernet/sun/niu.c | 12
-rw-r--r--	drivers/net/ethernet/tile/Kconfig | 2
-rw-r--r--	drivers/net/ethernet/tile/Makefile | 4
-rw-r--r--	drivers/net/ethernet/tile/tilegx.c | 1898
-rw-r--r--	drivers/net/phy/mdio_bus.c | 2
-rw-r--r--	drivers/net/virtio_net.c | 19
-rw-r--r--	drivers/net/wireless/b43/b43.h | 4
-rw-r--r--	drivers/net/wireless/b43/main.c | 19
-rw-r--r--	drivers/net/wireless/ipw2x00/ipw2100.c | 20
-rw-r--r--	drivers/net/wireless/iwlwifi/dvm/sta.c | 2
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-prph.h | 1
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/6000.c | 23
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/trans.c | 6
-rw-r--r--	drivers/net/wireless/mac80211_hwsim.c | 1
-rw-r--r--	drivers/net/wireless/rtl818x/rtl8187/leds.c | 2
-rw-r--r--	include/linux/netfilter/xt_HMARK.h | 5
-rw-r--r--	include/linux/tcp.h | 20
-rw-r--r--	include/net/route.h | 2
-rw-r--r--	include/net/sch_generic.h | 7
-rw-r--r--	net/appletalk/ddp.c | 4
-rw-r--r--	net/core/filter.c | 4
-rw-r--r--	net/core/neighbour.c | 14
-rw-r--r--	net/core/skbuff.c | 2
-rw-r--r--	net/ipv4/ip_forward.c | 1
-rw-r--r--	net/ipv4/ipmr.c | 1
-rw-r--r--	net/ipv6/ip6_fib.c | 2
-rw-r--r--	net/ipv6/ip6_output.c | 1
-rw-r--r--	net/ipv6/ip6mr.c | 2
-rw-r--r--	net/l2tp/l2tp_eth.c | 2
-rw-r--r--	net/l2tp/l2tp_ip.c | 9
-rw-r--r--	net/mac80211/mlme.c | 2
-rw-r--r--	net/netfilter/nf_conntrack_h323_main.c | 5
-rw-r--r--	net/netfilter/xt_HMARK.c | 72
-rw-r--r--	net/nfc/llcp/sock.c | 3
47 files changed, 2180 insertions, 155 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index c5fd905206e7..3075a2a29511 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1800,6 +1800,9 @@ F: include/linux/cfag12864b.h
 CFG80211 and NL80211
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linux-wireless@vger.kernel.org
+W:	http://wireless.kernel.org/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:	Maintained
 F:	include/linux/nl80211.h
 F:	include/net/cfg80211.h
@@ -4340,7 +4343,8 @@ MAC80211
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linux-wireless@vger.kernel.org
 W:	http://wireless.kernel.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:	Maintained
 F:	Documentation/networking/mac80211-injection.txt
 F:	include/net/mac80211.h
@@ -4351,7 +4355,8 @@ M:	Stefano Brivio <stefano.brivio@polimi.it>
 M:	Mattias Nissler <mattias.nissler@gmx.de>
 L:	linux-wireless@vger.kernel.org
 W:	http://wireless.kernel.org/en/developers/Documentation/mac80211/RateControl/PID
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:	Maintained
 F:	net/mac80211/rc80211_pid*
 
@@ -5695,6 +5700,9 @@ F:	include/linux/remoteproc.h
 RFKILL
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linux-wireless@vger.kernel.org
+W:	http://wireless.kernel.org/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:	Maintained
 F:	Documentation/rfkill.txt
 F:	net/rfkill/
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 9a96f14c8f47..c32ebd537abe 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -232,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
 int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
 			  bool enable)
 {
-	struct pci_dev *pdev = pc->core->bus->host_pci;
+	struct pci_dev *pdev;
 	u32 coremask, tmp;
 	int err = 0;
 
-	if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
+	if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
 		/* This bcma device is not on a PCI host-bus. So the IRQs are
 		 * not routed through the PCI core.
 		 * So we must not enable routing through the PCI core. */
 		goto out;
 	}
 
+	pdev = pc->core->bus->host_pci;
+
 	err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
 	if (err)
 		goto out;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9e2301eef386..af506321500b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -76,6 +76,7 @@
 #include <net/route.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <net/pkt_sched.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
 	return next;
 }
 
-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
-
 /**
  * bond_dev_queue_xmit - Prepare skb for xmit.
  *
@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
 	skb->dev = slave_dev;
 
-	skb->queue_mapping = bond_queue_mapping(skb);
+	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+		     sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
+	skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
 
 	if (unlikely(netpoll_tx_running(slave_dev)))
 		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -4174,7 +4175,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
 	/*
 	 * Save the original txq to restore before passing to the driver
 	 */
-	bond_queue_mapping(skb) = skb->queue_mapping;
+	qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
 
 	if (unlikely(txq >= dev->real_num_tx_queues)) {
 		do {
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index aef42f045320..485bedb8278c 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d,
 		}
 	}
 
-	pr_info("%s: Unable to set %.*s as primary slave.\n",
-		bond->dev->name, (int)strlen(buf) - 1, buf);
+	strncpy(bond->params.primary, ifname, IFNAMSIZ);
+	bond->params.primary[IFNAMSIZ - 1] = 0;
+
+	pr_info("%s: Recording %s as primary, "
+		"but it has not been enslaved to %s yet.\n",
+		bond->dev->name, ifname, bond->dev->name);
 out:
 	write_unlock_bh(&bond->curr_slave_lock);
 	read_unlock(&bond->lock);
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 442d91a2747b..bab0158f1cc3 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -187,8 +187,10 @@ static int __init dummy_init_module(void)
 	rtnl_lock();
 	err = __rtnl_link_register(&dummy_link_ops);
 
-	for (i = 0; i < numdummies && !err; i++)
+	for (i = 0; i < numdummies && !err; i++) {
 		err = dummy_init_one();
+		cond_resched();
+	}
 	if (err < 0)
 		__rtnl_link_unregister(&dummy_link_ops);
 	rtnl_unlock();
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index edeeb516807a..e47ff8be1d7b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 		}
 	}
 
-	if (tg3_flag(tp, 5755_PLUS))
+	if (tg3_flag(tp, 5755_PLUS) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 		tg3_flag_set(tp, SHORT_DMA_BUG);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 896f283967d4..5a34503b6a14 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -736,6 +736,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
 	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
 	if (copied) {
+		int gso_segs = skb_shinfo(skb)->gso_segs;
+
 		/* record the sent skb in the sent_skb table */
 		BUG_ON(txo->sent_skb_list[start]);
 		txo->sent_skb_list[start] = skb;
@@ -753,8 +755,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
 		be_txq_notify(adapter, txq->id, wrb_cnt);
 
-		be_tx_stats_update(txo, wrb_cnt, copied,
-				   skb_shinfo(skb)->gso_segs, stopped);
+		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
 	} else {
 		txq->head = start;
 		dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cace36f2ab92..28a54451a3e5 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
 	struct sky2_port *sky2 = netdev_priv(dev);
 	netdev_features_t changed = dev->features ^ features;
 
-	if (changed & NETIF_F_RXCSUM) {
-		bool on = features & NETIF_F_RXCSUM;
-		sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
-			     on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+	if ((changed & NETIF_F_RXCSUM) &&
+	    !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
+		sky2_write32(sky2->hw,
+			     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+			     (features & NETIF_F_RXCSUM)
+			     ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
 	}
 
 	if (changed & NETIF_F_RXHASH)
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8d2666fcffd7..083d6715335c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -946,16 +946,16 @@ static void __lpc_handle_xmit(struct net_device *ndev)
 			/* Update stats */
 			ndev->stats.tx_packets++;
 			ndev->stats.tx_bytes += skb->len;
-
-			/* Free buffer */
-			dev_kfree_skb_irq(skb);
 		}
+		dev_kfree_skb_irq(skb);
 
 		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
 	}
 
-	if (netif_queue_stopped(ndev))
-		netif_wake_queue(ndev);
+	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
+		if (netif_queue_stopped(ndev))
+			netif_wake_queue(ndev);
+	}
 }
 
 static int __lpc_handle_recv(struct net_device *ndev, int budget)
@@ -1320,6 +1320,7 @@ static const struct net_device_ops lpc_netdev_ops = {
 	.ndo_set_rx_mode	= lpc_eth_set_multicast_list,
 	.ndo_do_ioctl		= lpc_eth_ioctl,
 	.ndo_set_mac_address	= lpc_set_mac_address,
+	.ndo_change_mtu		= eth_change_mtu,
 };
 
 static int lpc_eth_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 9757ce3543a0..7260aa79466a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5889,11 +5889,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
 	if (status & LinkChg)
 		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
 
-	napi_disable(&tp->napi);
-	rtl_irq_disable(tp);
-
-	napi_enable(&tp->napi);
-	napi_schedule(&tp->napi);
+	rtl_irq_enable_all(tp);
 }
 
 static void rtl_task(struct work_struct *work)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 0076f770e637..9f448279e12a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -15,6 +15,7 @@ if STMMAC_ETH
 config STMMAC_PLATFORM
 	bool "STMMAC Platform bus support"
 	depends on STMMAC_ETH
+	default y
 	---help---
 	  This selects the platform specific bus support for
 	  the stmmac device driver. This is the driver used
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 6d07ba2c8661..dc20c56efc9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -26,6 +26,7 @@
 #include <linux/clk.h>
 #include <linux/stmmac.h>
 #include <linux/phy.h>
+#include <linux/pci.h>
 #include "common.h"
 #ifdef CONFIG_STMMAC_TIMER
 #include "stmmac_timer.h"
@@ -95,8 +96,6 @@ extern int stmmac_mdio_register(struct net_device *ndev);
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
-extern struct pci_driver stmmac_pci_driver;
-extern struct platform_driver stmmac_pltfr_driver;
 int stmmac_freeze(struct net_device *ndev);
 int stmmac_restore(struct net_device *ndev);
 int stmmac_resume(struct net_device *ndev);
@@ -110,7 +109,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 static inline int stmmac_clk_enable(struct stmmac_priv *priv)
 {
 	if (!IS_ERR(priv->stmmac_clk))
-		return clk_enable(priv->stmmac_clk);
+		return clk_prepare_enable(priv->stmmac_clk);
 
 	return 0;
 }
@@ -120,7 +119,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv)
 	if (IS_ERR(priv->stmmac_clk))
 		return;
 
-	clk_disable(priv->stmmac_clk);
+	clk_disable_unprepare(priv->stmmac_clk);
 }
 static inline int stmmac_clk_get(struct stmmac_priv *priv)
 {
@@ -144,3 +143,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv)
 	return 0;
 }
 #endif /* CONFIG_HAVE_CLK */
+
+
+#ifdef CONFIG_STMMAC_PLATFORM
+extern struct platform_driver stmmac_pltfr_driver;
+static inline int stmmac_register_platform(void)
+{
+	int err;
+
+	err = platform_driver_register(&stmmac_pltfr_driver);
+	if (err)
+		pr_err("stmmac: failed to register the platform driver\n");
+
+	return err;
+}
+static inline void stmmac_unregister_platform(void)
+{
+	platform_driver_unregister(&stmmac_pltfr_driver);
+}
+#else
+static inline int stmmac_register_platform(void)
+{
+	pr_debug("stmmac: do not register the platf driver\n");
+
+	return -EINVAL;
+}
+static inline void stmmac_unregister_platform(void)
+{
+}
+#endif /* CONFIG_STMMAC_PLATFORM */
+
+#ifdef CONFIG_STMMAC_PCI
+extern struct pci_driver stmmac_pci_driver;
+static inline int stmmac_register_pci(void)
+{
+	int err;
+
+	err = pci_register_driver(&stmmac_pci_driver);
+	if (err)
+		pr_err("stmmac: failed to register the PCI driver\n");
+
+	return err;
+}
+static inline void stmmac_unregister_pci(void)
+{
+	pci_unregister_driver(&stmmac_pci_driver);
+}
+#else
+static inline int stmmac_register_pci(void)
+{
+	pr_debug("stmmac: do not register the PCI driver\n");
+
+	return -EINVAL;
+}
+static inline void stmmac_unregister_pci(void)
+{
+}
+#endif /* CONFIG_STMMAC_PCI */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8899e105da9f..590e95b4cbfa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -42,7 +42,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/prefetch.h>
-#include <linux/pci.h>
 #ifdef CONFIG_STMMAC_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -2093,25 +2092,29 @@ int stmmac_restore(struct net_device *ndev)
 }
 #endif /* CONFIG_PM */
 
+/* Driver can be configured w/ either or both the PCI and Platf drivers
+ * depending on the configuration selected.
+ */
 static int __init stmmac_init(void)
 {
-	int err = 0;
+	int err_plt = 0;
+	int err_pci = 0;
 
-	err = platform_driver_register(&stmmac_pltfr_driver);
+	err_plt = stmmac_register_platform();
+	err_pci = stmmac_register_pci();
 
-	if (!err) {
-		err = pci_register_driver(&stmmac_pci_driver);
-		if (err)
-			platform_driver_unregister(&stmmac_pltfr_driver);
-	}
+	if ((err_pci) && (err_plt)) {
+		pr_err("stmmac: driver registration failed\n");
+		return -EINVAL;
+	}
 
-	return err;
+	return 0;
 }
 
 static void __exit stmmac_exit(void)
 {
-	pci_unregister_driver(&stmmac_pci_driver);
-	platform_driver_unregister(&stmmac_pltfr_driver);
+	stmmac_unregister_platform();
+	stmmac_unregister_pci();
 }
 
 module_init(stmmac_init);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 703c8cce2a2c..8c726b7004d3 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 {
 	struct netdev_queue *txq;
-	unsigned int tx_bytes;
 	u16 pkt_cnt, tmp;
 	int cons, index;
 	u64 cs;
@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
 		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 
-	tx_bytes = 0;
-	tmp = pkt_cnt;
-	while (tmp--) {
-		tx_bytes += rp->tx_buffs[cons].skb->len;
+	while (pkt_cnt--)
 		cons = release_tx_packet(np, rp, cons);
-	}
 
 	rp->cons = cons;
 	smp_mb();
 
-	netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
-
 out:
 	if (unlikely(netif_tx_queue_stopped(txq) &&
 		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np)
 		struct tx_ring_info *rp = &np->tx_rings[i];
 
 		niu_free_tx_ring_info(np, rp);
-		netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
 	}
 	kfree(np->tx_rings);
 	np->tx_rings = NULL;
@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 		prod = NEXT_TX(rp, prod);
 	}
 
-	netdev_tx_sent_queue(txq, skb->len);
-
 	if (prod < rp->prod)
 		rp->wrap_bit ^= TX_RING_KICK_WRAP;
 	rp->prod = prod;
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 2d9218f86bca..098b1c42b393 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -7,6 +7,8 @@ config TILE_NET
 	depends on TILE
 	default y
 	select CRC32
+	select TILE_GXIO_MPIPE if TILEGX
+	select HIGH_RES_TIMERS if TILEGX
 	---help---
 	  This is a standard Linux network device driver for the
 	  on-chip Tilera Gigabit Ethernet and XAUI interfaces.
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile
index f634f142cab4..0ef9eefd3211 100644
--- a/drivers/net/ethernet/tile/Makefile
+++ b/drivers/net/ethernet/tile/Makefile
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_TILE_NET) += tile_net.o
 ifdef CONFIG_TILEGX
-tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
+tile_net-y := tilegx.o
 else
-tile_net-objs := tilepro.o
+tile_net-y := tilepro.o
 endif
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
new file mode 100644
index 000000000000..83b4b388ad49
--- /dev/null
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -0,0 +1,1898 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>	/* printk() */
+#include <linux/slab.h>		/* kmalloc() */
+#include <linux/errno.h>	/* error codes */
+#include <linux/types.h>	/* size_t */
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/irq.h>
+#include <linux/netdevice.h>	/* struct device, and other headers */
+#include <linux/etherdevice.h>	/* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/hugetlb.h>
+#include <linux/in6.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/io.h>
+#include <linux/ctype.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <asm/checksum.h>
+#include <asm/homecache.h>
+#include <gxio/mpipe.h>
+#include <arch/sim.h>
+
+/* Default transmit lockup timeout period, in jiffies. */
+#define TILE_NET_TIMEOUT (5 * HZ)
+
+/* The maximum number of distinct channels (idesc.channel is 5 bits). */
+#define TILE_NET_CHANNELS 32
+
+/* Maximum number of idescs to handle per "poll". */
+#define TILE_NET_BATCH 128
+
+/* Maximum number of packets to handle per "poll". */
+#define TILE_NET_WEIGHT 64
+
+/* Number of entries in each iqueue. */
+#define IQUEUE_ENTRIES 512
+
+/* Number of entries in each equeue. */
+#define EQUEUE_ENTRIES 2048
+
+/* Total header bytes per equeue slot. Must be big enough for 2 bytes
+ * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
+ * 60 bytes of actual TCP header. We round up to align to cache lines.
+ */
+#define HEADER_BYTES 128
+
+/* Maximum completions per cpu per device (must be a power of two).
+ * ISSUE: What is the right number here? If this is too small, then
+ * egress might block waiting for free space in a completions array.
+ * ISSUE: At the least, allocate these only for initialized echannels.
+ */
+#define TILE_NET_MAX_COMPS 64
+
+#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
+
+/* Size of completions data to allocate.
+ * ISSUE: Probably more than needed since we don't use all the channels.
+ */
+#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
+
+/* Size of NotifRing data to allocate. */
+#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
+
+/* Timeout to wake the per-device TX timer after we stop the queue.
+ * We don't want the timeout too short (adds overhead, and might end
+ * up causing stop/wake/stop/wake cycles) or too long (affects performance).
+ * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
+ */
+#define TX_TIMER_DELAY_USEC 30
+
+/* Timeout to wake the per-cpu egress timer to free completions. */
+#define EGRESS_TIMER_DELAY_USEC 1000
+
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_LICENSE("GPL");
+
+/* A "packet fragment" (a chunk of memory). */
+struct frag {
+	void *buf;
+	size_t length;
+};
+
+/* A single completion. */
+struct tile_net_comp {
+	/* The "complete_count" when the completion will be complete. */
+	s64 when;
+	/* The buffer to be freed when the completion is complete. */
+	struct sk_buff *skb;
+};
+
+/* The completions for a given cpu and echannel. */
+struct tile_net_comps {
+	/* The completions. */
+	struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
+	/* The number of completions used. */
+	unsigned long comp_next;
+	/* The number of completions freed. */
+	unsigned long comp_last;
+};
+
+/* The transmit wake timer for a given cpu and echannel. */
+struct tile_net_tx_wake {
+	struct hrtimer timer;
+	struct net_device *dev;
+};
+
+/* Info for a specific cpu. */
+struct tile_net_info {
+	/* The NAPI struct. */
+	struct napi_struct napi;
+	/* Packet queue. */
+	gxio_mpipe_iqueue_t iqueue;
+	/* Our cpu. */
+	int my_cpu;
+	/* True if iqueue is valid. */
+	bool has_iqueue;
+	/* NAPI flags. */
+	bool napi_added;
+	bool napi_enabled;
+	/* Number of small sk_buffs which must still be provided. */
+	unsigned int num_needed_small_buffers;
+	/* Number of large sk_buffs which must still be provided. */
+	unsigned int num_needed_large_buffers;
+	/* A timer for handling egress completions. */
+	struct hrtimer egress_timer;
+	/* True if "egress_timer" is scheduled. */
+	bool egress_timer_scheduled;
+	/* Comps for each egress channel. */
+	struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+	/* Transmit wake timer for each egress channel. */
+	struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+};
+
+/* Info for egress on a particular egress channel. */
+struct tile_net_egress {
+	/* The "equeue". */
+	gxio_mpipe_equeue_t *equeue;
+	/* The headers for TSO. */
+	unsigned char *headers;
+};
+
+/* Info for a specific device. */
+struct tile_net_priv {
+	/* Our network device. */
+	struct net_device *dev;
+	/* The primary link. */
+	gxio_mpipe_link_t link;
+	/* The primary channel, if open, else -1. */
+	int channel;
+	/* The "loopify" egress link, if needed. */
+	gxio_mpipe_link_t loopify_link;
+	/* The "loopify" egress channel, if open, else -1. */
+	int loopify_channel;
+	/* The egress channel (channel or loopify_channel). */
+	int echannel;
+	/* Total stats. */
+	struct net_device_stats stats;
+};
+
+/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
+static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
+
+/* Devices currently associated with each channel.
+ * NOTE: The array entry can become NULL after ifconfig down, but
+ * we do not free the underlying net_device structures, so it is
+ * safe to use a pointer after reading it from this array.
+ */
+static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+/* A mutex for "tile_net_devs_for_channel". */
+static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
+
+/* The per-cpu info. */
+static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
+
+/* The "context" for all devices. */
+static gxio_mpipe_context_t context;
+
+/* Buffer sizes and mpipe enum codes for buffer stacks.
+ * See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ */
+#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
+#define BUFFER_SIZE_SMALL 128
+#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
+#define BUFFER_SIZE_LARGE 1664
+
+/* The small/large "buffer stacks". */
+static int small_buffer_stack = -1;
+static int large_buffer_stack = -1;
+
+/* Amount of memory allocated for each buffer stack. */
+static size_t buffer_stack_size;
+
+/* The actual memory allocated for the buffer stacks. */
+static void *small_buffer_stack_va;
+static void *large_buffer_stack_va;
+
+/* The buckets. */
+static int first_bucket = -1;
+static int num_buckets = 1;
+
+/* The ingress irq. */
+static int ingress_irq = -1;
+
+/* Text value of tile_net.cpus if passed as a module parameter. */
+static char *network_cpus_string;
+
+/* The actual cpus in "network_cpus". */
+static struct cpumask network_cpus_map;
+
+/* If "loopify=LINK" was specified, this is "LINK". */
+static char *loopify_link_name;
+
+/* If "tile_net.custom" was specified, this is non-NULL. */
+static char *custom_str;
+
+/* The "tile_net.cpus" argument specifies the cpus that are dedicated
+ * to handle ingress packets.
+ *
+ * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
+ * m, n, x, y are integer numbers that represent the cpus that can be
+ * neither a dedicated cpu nor a dataplane cpu.
+ */
+static bool network_cpus_init(void)
+{
+	char buf[1024];
+	int rc;
+
+	if (network_cpus_string == NULL)
+		return false;
+
+	rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
+	if (rc != 0) {
+		pr_warn("tile_net.cpus=%s: malformed cpu list\n",
+			network_cpus_string);
+		return false;
+	}
+
+	/* Remove dedicated cpus. */
+	cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
+
+	if (cpumask_empty(&network_cpus_map)) {
+		pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
+			network_cpus_string);
+		return false;
+	}
+
+	cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
+	pr_info("Linux network CPUs: %s\n", buf);
+	return true;
+}
+
+module_param_named(cpus, network_cpus_string, charp, 0444);
+MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
+
+/* The "tile_net.loopify=LINK" argument causes the named device to
+ * actually use "loop0" for ingress, and "loop1" for egress. This
+ * allows an app to sit between the actual link and linux, passing
+ * (some) packets along to linux, and forwarding (some) packets sent
+ * out by linux.
+ */
+module_param_named(loopify, loopify_link_name, charp, 0444);
+MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
+
+/* The "tile_net.custom" argument causes us to ignore the "conventional"
+ * classifier metadata, in particular, the "l2_offset".
+ */
+module_param_named(custom, custom_str, charp, 0444);
+MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
+
+/* Atomically update a statistics field.
+ * Note that on TILE-Gx, this operation is fire-and-forget on the
+ * issuing core (single-cycle dispatch) and takes only a few cycles
+ * longer than a regular store when the request reaches the home cache.
+ * No expensive bus management overhead is required.
+ */
+static void tile_net_stats_add(unsigned long value, unsigned long *field)
+{
+	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
+	atomic_long_add(value, (atomic_long_t *)field);
+}
+
+/* Allocate and push a buffer. */
+static bool tile_net_provide_buffer(bool small)
+{
+	int stack = small ? small_buffer_stack : large_buffer_stack;
+	const unsigned long buffer_alignment = 128;
+	struct sk_buff *skb;
+	int len;
+
+	len = sizeof(struct sk_buff **) + buffer_alignment;
+	len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+	skb = dev_alloc_skb(len);
+	if (skb == NULL)
+		return false;
+
+	/* Make room for a back-pointer to 'skb' and guarantee alignment. */
+	skb_reserve(skb, sizeof(struct sk_buff **));
+	skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
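+	/* Editorial note: "-(long)skb->data & (buffer_alignment - 1)" is the
+	 * distance from skb->data up to the next 128-byte boundary, so the
+	 * second reserve above rounds the buffer start up to that alignment.
+	 */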
+
+	/* Save a back-pointer to 'skb'. */
+	*(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
+
+	/* Make sure "skb" and the back-pointer have been flushed. */
+	wmb();
+
+	gxio_mpipe_push_buffer(&context, stack,
+			       (void *)va_to_tile_io_addr(skb->data));
+
+	return true;
+}
+
+/* Convert a raw mpipe buffer to its matching skb pointer. */
+static struct sk_buff *mpipe_buf_to_skb(void *va)
+{
+	/* Acquire the associated "skb". */
+	struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+	struct sk_buff *skb = *skb_ptr;
+
+	/* Paranoia. */
+	if (skb->data != va) {
+		/* Panic here since there's a reasonable chance
+		 * that corrupt buffers means generic memory
+		 * corruption, with unpredictable system effects.
+		 */
+		panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
+		      va, skb, skb->data);
+	}
+
+	return skb;
+}
+
+static void tile_net_pop_all_buffers(int stack)
+{
+	for (;;) {
+		tile_io_addr_t addr =
+			(tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+		if (addr == 0)
+			break;
+		dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
+	}
+}
+
+/* Provide linux buffers to mPIPE. */
+static void tile_net_provide_needed_buffers(void)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+	while (info->num_needed_small_buffers != 0) {
+		if (!tile_net_provide_buffer(true))
+			goto oops;
+		info->num_needed_small_buffers--;
+	}
+
+	while (info->num_needed_large_buffers != 0) {
+		if (!tile_net_provide_buffer(false))
+			goto oops;
+		info->num_needed_large_buffers--;
+	}
+
+	return;
+
+oops:
+	/* Add a description to the page allocation failure dump. */
+	pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
+}
+
+static inline bool filter_packet(struct net_device *dev, void *buf)
+{
+	/* Filter packets received before we're up. */
+	if (dev == NULL || !(dev->flags & IFF_UP))
+		return true;
+
+	/* Filter out packets that aren't for us. */
+	if (!(dev->flags & IFF_PROMISC) &&
+	    !is_multicast_ether_addr(buf) &&
+	    compare_ether_addr(dev->dev_addr, buf) != 0)
+		return true;
+
+	return false;
+}
+
+static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
+				 gxio_mpipe_idesc_t *idesc, unsigned long len)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	struct tile_net_priv *priv = netdev_priv(dev);
+
+	/* Encode the actual packet length. */
+	skb_put(skb, len);
+
+	skb->protocol = eth_type_trans(skb, dev);
+
+	/* Acknowledge "good" hardware checksums. */
+	if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	netif_receive_skb(skb);
+
+	/* Update stats. */
+	tile_net_stats_add(1, &priv->stats.rx_packets);
+	tile_net_stats_add(len, &priv->stats.rx_bytes);
+
+	/* Need a new buffer. */
+	if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
+		info->num_needed_small_buffers++;
+	else
+		info->num_needed_large_buffers++;
+}
+
+/* Handle a packet. Return true if "processed", false if "filtered". */
+static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+	uint8_t l2_offset;
+	void *va;
+	void *buf;
+	unsigned long len;
+	bool filter;
+
+	/* Drop packets for which no buffer was available.
+	 * NOTE: This happens under heavy load.
+	 */
+	if (idesc->be) {
+		struct tile_net_priv *priv = netdev_priv(dev);
+		tile_net_stats_add(1, &priv->stats.rx_dropped);
+		gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+		if (net_ratelimit())
+			pr_info("Dropping packet (insufficient buffers).\n");
+		return false;
+	}
+
+	/* Get the "l2_offset", if allowed. */
+	l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+
+	/* Get the raw buffer VA (includes "headroom"). */
+	va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+
+	/* Get the actual packet start/length. */
+	buf = va + l2_offset;
+	len = idesc->l2_size - l2_offset;
+
+	/* Point "va" at the raw buffer. */
+	va -= NET_IP_ALIGN;
+
+	filter = filter_packet(dev, buf);
+	if (filter) {
+		gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+	} else {
+		struct sk_buff *skb = mpipe_buf_to_skb(va);
+
+		/* Skip headroom, and any custom header. */
+		skb_reserve(skb, NET_IP_ALIGN + l2_offset);
+
+		tile_net_receive_skb(dev, skb, idesc, len);
+	}
+
+	gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+	return !filter;
+}
+
+/* Handle some packets for the current CPU.
+ *
+ * This function handles up to TILE_NET_BATCH idescs per call.
+ *
+ * ISSUE: Since we do not provide new buffers until this function is
+ * complete, we must initially provide enough buffers for each network
+ * cpu to fill its iqueue and also its batched idescs.
+ *
+ * ISSUE: The "rotting packet" race condition occurs if a packet
+ * arrives after the queue appears to be empty, and before the
+ * hypervisor interrupt is re-enabled.
+ */
+static int tile_net_poll(struct napi_struct *napi, int budget)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	unsigned int work = 0;
+	gxio_mpipe_idesc_t *idesc;
+	int i, n;
+
+	/* Process packets. */
+	while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
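+		/* Editorial note: as used here, "try_peek" exposes a run of
+		 * "n" idescs in place without consuming them; each one is
+		 * consumed or dropped individually in
+		 * tile_net_handle_packet() below.
+		 */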
+		for (i = 0; i < n; i++) {
+			if (i == TILE_NET_BATCH)
+				goto done;
+			if (tile_net_handle_packet(idesc + i)) {
+				if (++work >= budget)
+					goto done;
+			}
+		}
+	}
+
+	/* There are no packets left. */
+	napi_complete(&info->napi);
+
+	/* Re-enable hypervisor interrupts. */
+	gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+
+	/* HACK: Avoid the "rotting packet" problem. */
+	if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
+		napi_schedule(&info->napi);
+
+	/* ISSUE: Handle completions? */
+
+done:
+	tile_net_provide_needed_buffers();
+
+	return work;
+}
+
+/* Handle an ingress interrupt on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+{
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	napi_schedule(&info->napi);
+	return IRQ_HANDLED;
+}
+
+/* Free some completions. This must be called with interrupts blocked. */
+static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
+			       struct tile_net_comps *comps,
+			       int limit, bool force_update)
+{
+	int n = 0;
+	while (comps->comp_last < comps->comp_next) {
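+		/* Editorial note: "comp_next" and "comp_last" are
+		 * free-running counters; the modulo below maps them onto the
+		 * power-of-two ring of TILE_NET_MAX_COMPS entries.
+		 */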
550 unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
551 struct tile_net_comp *comp = &comps->comp_queue[cid];
552 if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
553 force_update || n == 0))
554 break;
555 dev_kfree_skb_irq(comp->skb);
556 comps->comp_last++;
557 if (++n == limit)
558 break;
559 }
560 return n;
561}
562
563/* Add a completion. This must be called with interrupts blocked.
564 * tile_net_equeue_try_reserve() will have ensured a free completion entry.
565 */
566static void add_comp(gxio_mpipe_equeue_t *equeue,
567 struct tile_net_comps *comps,
568 uint64_t when, struct sk_buff *skb)
569{
570 int cid = comps->comp_next % TILE_NET_MAX_COMPS;
571 comps->comp_queue[cid].when = when;
572 comps->comp_queue[cid].skb = skb;
573 comps->comp_next++;
574}
575
576static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
577{
578 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
579 struct tile_net_priv *priv = netdev_priv(dev);
580
581 hrtimer_start(&info->tx_wake[priv->echannel].timer,
582 ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
583 HRTIMER_MODE_REL_PINNED);
584}
585
586static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
587{
588 struct tile_net_tx_wake *tx_wake =
589 container_of(t, struct tile_net_tx_wake, timer);
590 netif_wake_subqueue(tx_wake->dev, smp_processor_id());
591 return HRTIMER_NORESTART;
592}
593
594/* Make sure the egress timer is scheduled. */
595static void tile_net_schedule_egress_timer(void)
596{
597 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
598
599 if (!info->egress_timer_scheduled) {
600 hrtimer_start(&info->egress_timer,
601 ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
602 HRTIMER_MODE_REL_PINNED);
603 info->egress_timer_scheduled = true;
604 }
605}
606
607/* The "function" for "info->egress_timer".
608 *
609 * This timer will reschedule itself as long as there are any pending
610 * completions expected for this tile.
611 */
612static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
613{
614 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
615 unsigned long irqflags;
616 bool pending = false;
617 int i;
618
619 local_irq_save(irqflags);
620
621 /* The timer is no longer scheduled. */
622 info->egress_timer_scheduled = false;
623
624 /* Free all possible comps for this tile. */
625 for (i = 0; i < TILE_NET_CHANNELS; i++) {
626 struct tile_net_egress *egress = &egress_for_echannel[i];
627 struct tile_net_comps *comps = info->comps_for_echannel[i];
628 if (comps->comp_last >= comps->comp_next)
629 continue;
630 tile_net_free_comps(egress->equeue, comps, -1, true);
631 pending = pending || (comps->comp_last < comps->comp_next);
632 }
633
634 /* Reschedule timer if needed. */
635 if (pending)
636 tile_net_schedule_egress_timer();
637
638 local_irq_restore(irqflags);
639
640 return HRTIMER_NORESTART;
641}
642
643/* Helper function for "tile_net_update()".
644 * "dev" (i.e. arg) is the device being brought up or down,
645 * or NULL if all devices are now down.
646 */
647static void tile_net_update_cpu(void *arg)
648{
649 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
650 struct net_device *dev = arg;
651
652 if (!info->has_iqueue)
653 return;
654
655 if (dev != NULL) {
656 if (!info->napi_added) {
657 netif_napi_add(dev, &info->napi,
658 tile_net_poll, TILE_NET_WEIGHT);
659 info->napi_added = true;
660 }
661 if (!info->napi_enabled) {
662 napi_enable(&info->napi);
663 info->napi_enabled = true;
664 }
665 enable_percpu_irq(ingress_irq, 0);
666 } else {
667 disable_percpu_irq(ingress_irq);
668 if (info->napi_enabled) {
669 napi_disable(&info->napi);
670 info->napi_enabled = false;
671 }
672 /* FIXME: Drain the iqueue. */
673 }
674}
675
676/* Helper function for tile_net_open() and tile_net_stop().
677 * Always called under tile_net_devs_for_channel_mutex.
678 */
679static int tile_net_update(struct net_device *dev)
680{
681 static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
682 bool saw_channel = false;
683 int channel;
684 int rc;
685 int cpu;
686
687 gxio_mpipe_rules_init(&rules, &context);
688
689 for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
690 if (tile_net_devs_for_channel[channel] == NULL)
691 continue;
692 if (!saw_channel) {
693 saw_channel = true;
694 gxio_mpipe_rules_begin(&rules, first_bucket,
695 num_buckets, NULL);
696 gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
697 }
698 gxio_mpipe_rules_add_channel(&rules, channel);
699 }
700
701 /* NOTE: This can fail if there is no classifier.
702 * ISSUE: Can anything else cause it to fail?
703 */
704 rc = gxio_mpipe_rules_commit(&rules);
705 if (rc != 0) {
706 netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
707 return -EIO;
708 }
709
710 /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
711 for_each_online_cpu(cpu)
712 smp_call_function_single(cpu, tile_net_update_cpu,
713 (saw_channel ? dev : NULL), 1);
714
715 /* HACK: Allow packets to flow in the simulator. */
716 if (saw_channel)
717 sim_enable_mpipe_links(0, -1);
718
719 return 0;
720}
721
722/* Allocate and initialize mpipe buffer stacks, and register them in
723 * the mPIPE TLBs, for both small and large packet sizes.
724 * This routine supports tile_net_init_mpipe(), below.
725 */
726static int init_buffer_stacks(struct net_device *dev, int num_buffers)
727{
728 pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
729 int rc;
730
731 /* Compute stack bytes; we round up to 64KB and then use
732 * alloc_pages() so we get the required 64KB alignment as well.
733 */
734 buffer_stack_size =
735 ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
736 64 * 1024);
737
738 /* Allocate two buffer stack indices. */
739 rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
740 if (rc < 0) {
741 netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
742 rc);
743 return rc;
744 }
745 small_buffer_stack = rc;
746 large_buffer_stack = rc + 1;
747
748 /* Allocate the small memory stack. */
749 small_buffer_stack_va =
750 alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
751 if (small_buffer_stack_va == NULL) {
752 netdev_err(dev,
753 "Could not alloc %zd bytes for buffer stacks\n",
754 buffer_stack_size);
755 return -ENOMEM;
756 }
757 rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
758 BUFFER_SIZE_SMALL_ENUM,
759 small_buffer_stack_va,
760 buffer_stack_size, 0);
761 if (rc != 0) {
762 netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
763 return rc;
764 }
765 rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
766 hash_pte, 0);
767 if (rc != 0) {
768 netdev_err(dev,
769 "gxio_mpipe_register_buffer_memory failed: %d\n",
770 rc);
771 return rc;
772 }
773
774 /* Allocate the large buffer stack. */
775 large_buffer_stack_va =
776 alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
777 if (large_buffer_stack_va == NULL) {
778 netdev_err(dev,
779 "Could not alloc %zd bytes for buffer stacks\n",
780 buffer_stack_size);
781 return -ENOMEM;
782 }
783 rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
784 BUFFER_SIZE_LARGE_ENUM,
785 large_buffer_stack_va,
786 buffer_stack_size, 0);
787 if (rc != 0) {
788 netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
789 rc);
790 return rc;
791 }
792 rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
793 hash_pte, 0);
794 if (rc != 0) {
795 netdev_err(dev,
796 "gxio_mpipe_register_buffer_memory failed: %d\n",
797 rc);
798 return rc;
799 }
800
801 return 0;
802}
803
804/* Allocate per-cpu resources (memory for completions and idescs).
805 * This routine supports tile_net_init_mpipe(), below.
806 */
807static int alloc_percpu_mpipe_resources(struct net_device *dev,
808 int cpu, int ring)
809{
810 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
811 int order, i, rc;
812 struct page *page;
813 void *addr;
814
815 /* Allocate the "comps". */
816 order = get_order(COMPS_SIZE);
817 page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
818 if (page == NULL) {
819 netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
820 COMPS_SIZE);
821 return -ENOMEM;
822 }
823 addr = pfn_to_kaddr(page_to_pfn(page));
824 memset(addr, 0, COMPS_SIZE);
825 for (i = 0; i < TILE_NET_CHANNELS; i++)
826 info->comps_for_echannel[i] =
827 addr + i * sizeof(struct tile_net_comps);
828
829 /* If this is a network cpu, create an iqueue. */
830 if (cpu_isset(cpu, network_cpus_map)) {
831 order = get_order(NOTIF_RING_SIZE);
832 page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
833 if (page == NULL) {
834 netdev_err(dev,
835 "Failed to alloc %zd bytes iqueue memory\n",
836 NOTIF_RING_SIZE);
837 return -ENOMEM;
838 }
839 addr = pfn_to_kaddr(page_to_pfn(page));
840 rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
841 addr, NOTIF_RING_SIZE, 0);
842 if (rc < 0) {
843 netdev_err(dev,
844 "gxio_mpipe_iqueue_init failed: %d\n", rc);
845 return rc;
846 }
847 info->has_iqueue = true;
848 }
849
850 return ring;
851}
852
853/* Initialize NotifGroup and buckets.
854 * This routine supports tile_net_init_mpipe(), below.
855 */
856static int init_notif_group_and_buckets(struct net_device *dev,
857 int ring, int network_cpus_count)
858{
859 int group, rc;
860
861 /* Allocate one NotifGroup. */
862 rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
863 if (rc < 0) {
864 netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
865 rc);
866 return rc;
867 }
868 group = rc;
869
870 /* Initialize global num_buckets value. */
871 if (network_cpus_count > 4)
872 num_buckets = 256;
873 else if (network_cpus_count > 1)
874 num_buckets = 16;
875
876 /* Allocate some buckets, and set global first_bucket value. */
877 rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
878 if (rc < 0) {
879 netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
880 return rc;
881 }
882 first_bucket = rc;
883
884 /* Init group and buckets. */
885 rc = gxio_mpipe_init_notif_group_and_buckets(
886 &context, group, ring, network_cpus_count,
887 first_bucket, num_buckets,
888 GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
889 if (rc != 0) {
890 netdev_err(
891 dev,
892 "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
893 rc);
894 return rc;
895 }
896
897 return 0;
898}
899
900/* Create an irq and register it, then activate the irq and request
901 * interrupts on all cores. Note that "ingress_irq" being initialized
902 * is how we know not to call tile_net_init_mpipe() again.
903 * This routine supports tile_net_init_mpipe(), below.
904 */
905static int tile_net_setup_interrupts(struct net_device *dev)
906{
907 int cpu, rc;
908
909 rc = create_irq();
910 if (rc < 0) {
911 netdev_err(dev, "create_irq failed: %d\n", rc);
912 return rc;
913 }
914 ingress_irq = rc;
915 tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
916 rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
917 0, NULL, NULL);
918 if (rc != 0) {
919 netdev_err(dev, "request_irq failed: %d\n", rc);
920 destroy_irq(ingress_irq);
921 ingress_irq = -1;
922 return rc;
923 }
924
925 for_each_online_cpu(cpu) {
926 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
927 if (info->has_iqueue) {
928 gxio_mpipe_request_notif_ring_interrupt(
929 &context, cpu_x(cpu), cpu_y(cpu),
930 1, ingress_irq, info->iqueue.ring);
931 }
932 }
933
934 return 0;
935}
936
937/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
938static void tile_net_init_mpipe_fail(void)
939{
940 int cpu;
941
942 /* Do cleanups that require the mpipe context first. */
943 if (small_buffer_stack >= 0)
944 tile_net_pop_all_buffers(small_buffer_stack);
945 if (large_buffer_stack >= 0)
946 tile_net_pop_all_buffers(large_buffer_stack);
947
948 /* Destroy mpipe context so the hardware no longer owns any memory. */
949 gxio_mpipe_destroy(&context);
950
951 for_each_online_cpu(cpu) {
952 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
953 free_pages((unsigned long)(info->comps_for_echannel[0]),
954 get_order(COMPS_SIZE));
955 info->comps_for_echannel[0] = NULL;
956 free_pages((unsigned long)(info->iqueue.idescs),
957 get_order(NOTIF_RING_SIZE));
958 info->iqueue.idescs = NULL;
959 }
960
961 if (small_buffer_stack_va)
962 free_pages_exact(small_buffer_stack_va, buffer_stack_size);
963 if (large_buffer_stack_va)
964 free_pages_exact(large_buffer_stack_va, buffer_stack_size);
965
966 small_buffer_stack_va = NULL;
967 large_buffer_stack_va = NULL;
968 large_buffer_stack = -1;
969 small_buffer_stack = -1;
970 first_bucket = -1;
971}
972
973/* The first time any tilegx network device is opened, we initialize
974 * the global mpipe state. If this step fails, we fail to open the
975 * device, but if it succeeds, we never need to do it again, and since
976 * tile_net can't be unloaded, we never undo it.
977 *
978 * Note that some resources in this path (buffer stack indices,
979 * bindings from init_buffer_stack, etc.) are hypervisor resources
980 * that are freed implicitly by gxio_mpipe_destroy().
981 */
982static int tile_net_init_mpipe(struct net_device *dev)
983{
984 int i, num_buffers, rc;
985 int cpu;
986 int first_ring, ring;
987 int network_cpus_count = cpus_weight(network_cpus_map);
988
989 if (!hash_default) {
990 netdev_err(dev, "Networking requires hash_default!\n");
991 return -EIO;
992 }
993
994 rc = gxio_mpipe_init(&context, 0);
995 if (rc != 0) {
996 netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
997 return -EIO;
998 }
999
1000 /* Set up the buffer stacks. */
1001 num_buffers =
1002 network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
1003 rc = init_buffer_stacks(dev, num_buffers);
1004 if (rc != 0)
1005 goto fail;
1006
1007 /* Provide initial buffers. */
1008 rc = -ENOMEM;
1009 for (i = 0; i < num_buffers; i++) {
1010 if (!tile_net_provide_buffer(true)) {
1011 netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
1012 goto fail;
1013 }
1014 }
1015 for (i = 0; i < num_buffers; i++) {
1016 if (!tile_net_provide_buffer(false)) {
1017 netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
1018 goto fail;
1019 }
1020 }
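	/* For example, assuming IQUEUE_ENTRIES is 512 and TILE_NET_BATCH
	 * is 128, a machine with 4 network cpus posts 4 * (512 + 128) =
	 * 2560 buffers in each of the two loops above.
	 */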
1021
1022 /* Allocate one NotifRing for each network cpu. */
1023 rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
1024 if (rc < 0) {
1025 netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
1026 rc);
1027 goto fail;
1028 }
1029
1030 /* Init NotifRings per-cpu. */
1031 first_ring = rc;
1032 ring = first_ring;
1033 for_each_online_cpu(cpu) {
1034 rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
1035 if (rc < 0)
1036 goto fail;
1037 ring = rc;
1038 }
1039
1040 /* Initialize NotifGroup and buckets. */
1041 rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
1042 if (rc != 0)
1043 goto fail;
1044
1045 /* Create and enable interrupts. */
1046 rc = tile_net_setup_interrupts(dev);
1047 if (rc != 0)
1048 goto fail;
1049
1050 return 0;
1051
1052fail:
1053 tile_net_init_mpipe_fail();
1054 return rc;
1055}
1056
1057/* Create persistent egress info for a given egress channel.
1058 * Note that this may be shared between, say, "gbe0" and "xgbe0".
1059 * ISSUE: Defer header allocation until TSO is actually needed?
1060 */
1061static int tile_net_init_egress(struct net_device *dev, int echannel)
1062{
1063 struct page *headers_page, *edescs_page, *equeue_page;
1064 gxio_mpipe_edesc_t *edescs;
1065 gxio_mpipe_equeue_t *equeue;
1066 unsigned char *headers;
1067 int headers_order, edescs_order, equeue_order;
1068 size_t edescs_size;
1069 int edma;
1070 int rc = -ENOMEM;
1071
1072 /* Only initialize once. */
1073 if (egress_for_echannel[echannel].equeue != NULL)
1074 return 0;
1075
1076 /* Allocate memory for the "headers". */
1077 headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
1078 headers_page = alloc_pages(GFP_KERNEL, headers_order);
1079 if (headers_page == NULL) {
1080 netdev_warn(dev,
1081 "Could not alloc %zd bytes for TSO headers.\n",
1082 PAGE_SIZE << headers_order);
1083 goto fail;
1084 }
1085 headers = pfn_to_kaddr(page_to_pfn(headers_page));
1086
1087 /* Allocate memory for the "edescs". */
1088 edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
1089 edescs_order = get_order(edescs_size);
1090 edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
1091 if (edescs_page == NULL) {
1092 netdev_warn(dev,
1093 "Could not alloc %zd bytes for eDMA ring.\n",
1094 edescs_size);
1095 goto fail_headers;
1096 }
1097 edescs = pfn_to_kaddr(page_to_pfn(edescs_page));
1098
1099 /* Allocate memory for the "equeue". */
1100 equeue_order = get_order(sizeof(*equeue));
1101 equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
1102 if (equeue_page == NULL) {
1103 netdev_warn(dev,
1104 "Could not alloc %zd bytes for equeue info.\n",
1105 PAGE_SIZE << equeue_order);
1106 goto fail_edescs;
1107 }
1108 equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
1109
1110	/* Allocate an edma ring.  Note that in practice this can't
1111	 * fail, which is good, because an allocated ring cannot be
1112	 * freed, and so would be leaked by the failure paths below. */
1113 rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
1114 if (rc < 0) {
1115 netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
1116 rc);
1117 goto fail_equeue;
1118 }
1119 edma = rc;
1120
1121 /* Initialize the equeue. */
1122 rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
1123 edescs, edescs_size, 0);
1124 if (rc != 0) {
1125 netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
1126 goto fail_equeue;
1127 }
1128
1129 /* Done. */
1130 egress_for_echannel[echannel].equeue = equeue;
1131 egress_for_echannel[echannel].headers = headers;
1132 return 0;
1133
1134fail_equeue:
1135 __free_pages(equeue_page, equeue_order);
1136
1137fail_edescs:
1138 __free_pages(edescs_page, edescs_order);
1139
1140fail_headers:
1141 __free_pages(headers_page, headers_order);
1142
1143fail:
1144 return rc;
1145}
1146
1147/* Return channel number for a newly-opened link. */
1148static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
1149 const char *link_name)
1150{
1151 int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
1152 if (rc < 0) {
1153 netdev_err(dev, "Failed to open '%s'\n", link_name);
1154 return rc;
1155 }
1156 rc = gxio_mpipe_link_channel(link);
1157 if (rc < 0 || rc >= TILE_NET_CHANNELS) {
1158 netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
1159 gxio_mpipe_link_close(link);
1160 return -EINVAL;
1161 }
1162 return rc;
1163}
1164
1165/* Help the kernel activate the given network interface. */
1166static int tile_net_open(struct net_device *dev)
1167{
1168 struct tile_net_priv *priv = netdev_priv(dev);
1169 int cpu, rc;
1170
1171 mutex_lock(&tile_net_devs_for_channel_mutex);
1172
1173 /* Do one-time initialization the first time any device is opened. */
1174 if (ingress_irq < 0) {
1175 rc = tile_net_init_mpipe(dev);
1176 if (rc != 0)
1177 goto fail;
1178 }
1179
1180 /* Determine if this is the "loopify" device. */
1181 if (unlikely((loopify_link_name != NULL) &&
1182 !strcmp(dev->name, loopify_link_name))) {
1183 rc = tile_net_link_open(dev, &priv->link, "loop0");
1184 if (rc < 0)
1185 goto fail;
1186 priv->channel = rc;
1187 rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
1188 if (rc < 0)
1189 goto fail;
1190 priv->loopify_channel = rc;
1191 priv->echannel = rc;
1192 } else {
1193 rc = tile_net_link_open(dev, &priv->link, dev->name);
1194 if (rc < 0)
1195 goto fail;
1196 priv->channel = rc;
1197 priv->echannel = rc;
1198 }
1199
1200 /* Initialize egress info (if needed). Once ever, per echannel. */
1201 rc = tile_net_init_egress(dev, priv->echannel);
1202 if (rc != 0)
1203 goto fail;
1204
1205 tile_net_devs_for_channel[priv->channel] = dev;
1206
1207 rc = tile_net_update(dev);
1208 if (rc != 0)
1209 goto fail;
1210
1211 mutex_unlock(&tile_net_devs_for_channel_mutex);
1212
1213 /* Initialize the transmit wake timer for this device for each cpu. */
1214 for_each_online_cpu(cpu) {
1215 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1216 struct tile_net_tx_wake *tx_wake =
1217 &info->tx_wake[priv->echannel];
1218
1219 hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
1220 HRTIMER_MODE_REL);
1221 tx_wake->timer.function = tile_net_handle_tx_wake_timer;
1222 tx_wake->dev = dev;
1223 }
1224
1225 for_each_online_cpu(cpu)
1226 netif_start_subqueue(dev, cpu);
1227 netif_carrier_on(dev);
1228 return 0;
1229
1230fail:
1231 if (priv->loopify_channel >= 0) {
1232 if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
1233 netdev_warn(dev, "Failed to close loopify link!\n");
1234 priv->loopify_channel = -1;
1235 }
1236	if (priv->channel >= 0) {
1237		if (gxio_mpipe_link_close(&priv->link) != 0)
1238			netdev_warn(dev, "Failed to close link!\n");
1239		tile_net_devs_for_channel[priv->channel] = NULL;
1240		priv->channel = -1;
1241	}
1242	priv->echannel = -1;
1243 mutex_unlock(&tile_net_devs_for_channel_mutex);
1244
1245 /* Don't return raw gxio error codes to generic Linux. */
1246 return (rc > -512) ? rc : -EIO;
1247}
1248
1249/* Help the kernel deactivate the given network interface. */
1250static int tile_net_stop(struct net_device *dev)
1251{
1252 struct tile_net_priv *priv = netdev_priv(dev);
1253 int cpu;
1254
1255 for_each_online_cpu(cpu) {
1256 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1257 struct tile_net_tx_wake *tx_wake =
1258 &info->tx_wake[priv->echannel];
1259
1260 hrtimer_cancel(&tx_wake->timer);
1261 netif_stop_subqueue(dev, cpu);
1262 }
1263
1264 mutex_lock(&tile_net_devs_for_channel_mutex);
1265 tile_net_devs_for_channel[priv->channel] = NULL;
1266 (void)tile_net_update(dev);
1267 if (priv->loopify_channel >= 0) {
1268 if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
1269 netdev_warn(dev, "Failed to close loopify link!\n");
1270 priv->loopify_channel = -1;
1271 }
1272 if (priv->channel >= 0) {
1273 if (gxio_mpipe_link_close(&priv->link) != 0)
1274 netdev_warn(dev, "Failed to close link!\n");
1275 priv->channel = -1;
1276 }
1277 priv->echannel = -1;
1278 mutex_unlock(&tile_net_devs_for_channel_mutex);
1279
1280 return 0;
1281}
1282
1283/* Determine the VA for a fragment. */
1284static inline void *tile_net_frag_buf(skb_frag_t *f)
1285{
1286 unsigned long pfn = page_to_pfn(skb_frag_page(f));
1287 return pfn_to_kaddr(pfn) + f->page_offset;
1288}
1289
1290/* Acquire a completion entry and an egress slot, or if we can't,
1291 * stop the queue and schedule the tx_wake timer.
1292 */
1293static s64 tile_net_equeue_try_reserve(struct net_device *dev,
1294 struct tile_net_comps *comps,
1295 gxio_mpipe_equeue_t *equeue,
1296 int num_edescs)
1297{
1298 /* Try to acquire a completion entry. */
1299 if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
1300 tile_net_free_comps(equeue, comps, 32, false) != 0) {
1301
1302 /* Try to acquire an egress slot. */
1303 s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
1304 if (slot >= 0)
1305 return slot;
1306
1307 /* Freeing some completions gives the equeue time to drain. */
1308 tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);
1309
1310 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
1311 if (slot >= 0)
1312 return slot;
1313 }
1314
1315 /* Still nothing; give up and stop the queue for a short while. */
1316 netif_stop_subqueue(dev, smp_processor_id());
1317 tile_net_schedule_tx_wake_timer(dev);
1318 return -1;
1319}
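/* The slack in the test above matters: we only reserve an egress slot
 * while strictly fewer than TILE_NET_MAX_COMPS - 1 completions are
 * outstanding (or after freeing at least one), so the single entry our
 * caller adds via add_comp() can never overflow the completion ring.
 */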
1320
1321/* Determine how many edescs are needed for TSO.
1322 *
1323 * Sometimes, if "sendfile()" requires copying, we will be called with
1324 * "data" containing the header and payload, with "frags" being empty.
1325 * Sometimes, for example when using NFS over TCP, a single segment can
1326 * span 3 fragments. This requires special care.
1327 */
1328static int tso_count_edescs(struct sk_buff *skb)
1329{
1330 struct skb_shared_info *sh = skb_shinfo(skb);
1331 unsigned int data_len = skb->data_len;
1332 unsigned int p_len = sh->gso_size;
1333 long f_id = -1; /* id of the current fragment */
1334 long f_size = -1; /* size of the current fragment */
1335 long f_used = -1; /* bytes used from the current fragment */
1336 long n; /* size of the current piece of payload */
1337 int num_edescs = 0;
1338 int segment;
1339
1340 for (segment = 0; segment < sh->gso_segs; segment++) {
1341
1342 unsigned int p_used = 0;
1343
1344 /* One edesc for header and for each piece of the payload. */
1345 for (num_edescs++; p_used < p_len; num_edescs++) {
1346
1347 /* Advance as needed. */
1348 while (f_used >= f_size) {
1349 f_id++;
1350 f_size = sh->frags[f_id].size;
1351 f_used = 0;
1352 }
1353
1354 /* Use bytes from the current fragment. */
1355 n = p_len - p_used;
1356 if (n > f_size - f_used)
1357 n = f_size - f_used;
1358 f_used += n;
1359 p_used += n;
1360 }
1361
1362 /* The last segment may be less than gso_size. */
1363 data_len -= p_len;
1364 if (data_len < p_len)
1365 p_len = data_len;
1366 }
1367
1368 return num_edescs;
1369}
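/* For example, a segment with gso_size 1448 whose payload spans three
 * fragments of 700, 700, and 4096 bytes needs one edesc for the header
 * plus three for the payload pieces (700 + 700 + 48 bytes), i.e. four
 * edescs for that segment alone.
 */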
1370
1371/* Prepare modified copies of the skbuff headers.
1372 * FIXME: add support for IPv6.
1373 */
1374static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1375 s64 slot)
1376{
1377 struct skb_shared_info *sh = skb_shinfo(skb);
1378 struct iphdr *ih;
1379 struct tcphdr *th;
1380 unsigned int data_len = skb->data_len;
1381 unsigned char *data = skb->data;
1382 unsigned int ih_off, th_off, sh_len, p_len;
1383 unsigned int isum_seed, tsum_seed, id, seq;
1384 long f_id = -1; /* id of the current fragment */
1385 long f_size = -1; /* size of the current fragment */
1386 long f_used = -1; /* bytes used from the current fragment */
1387 long n; /* size of the current piece of payload */
1388 int segment;
1389
1390 /* Locate original headers and compute various lengths. */
1391 ih = ip_hdr(skb);
1392 th = tcp_hdr(skb);
1393 ih_off = skb_network_offset(skb);
1394 th_off = skb_transport_offset(skb);
1395 sh_len = th_off + tcp_hdrlen(skb);
1396 p_len = sh->gso_size;
1397
1398 /* Set up seed values for IP and TCP csum and initialize id and seq. */
1399 isum_seed = ((0xFFFF - ih->check) +
1400 (0xFFFF - ih->tot_len) +
1401 (0xFFFF - ih->id));
1402 tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
1403 id = ntohs(ih->id);
1404 seq = ntohl(th->seq);
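	/* In ones-complement arithmetic, (0xFFFF - x) is ~x and x + ~x
	 * folds to zero, so isum_seed is the original header sum with the
	 * old tot_len and id cancelled out; each segment below folds its
	 * own tot_len and id back in and inverts the result for ih->check.
	 */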
1405
1406 /* Prepare all the headers. */
1407 for (segment = 0; segment < sh->gso_segs; segment++) {
1408 unsigned char *buf;
1409 unsigned int p_used = 0;
1410
1411 /* Copy to the header memory for this segment. */
1412 buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
1413 NET_IP_ALIGN;
1414 memcpy(buf, data, sh_len);
1415
1416 /* Update copied ip header. */
1417 ih = (struct iphdr *)(buf + ih_off);
1418 ih->tot_len = htons(sh_len + p_len - ih_off);
1419 ih->id = htons(id);
1420 ih->check = csum_long(isum_seed + ih->tot_len +
1421 ih->id) ^ 0xffff;
1422
1423 /* Update copied tcp header. */
1424 th = (struct tcphdr *)(buf + th_off);
1425 th->seq = htonl(seq);
1426 th->check = csum_long(tsum_seed + htons(sh_len + p_len));
1427 if (segment != sh->gso_segs - 1) {
1428 th->fin = 0;
1429 th->psh = 0;
1430 }
1431
1432 /* Skip past the header. */
1433 slot++;
1434
1435 /* Skip past the payload. */
1436 while (p_used < p_len) {
1437
1438 /* Advance as needed. */
1439 while (f_used >= f_size) {
1440 f_id++;
1441 f_size = sh->frags[f_id].size;
1442 f_used = 0;
1443 }
1444
1445 /* Use bytes from the current fragment. */
1446 n = p_len - p_used;
1447 if (n > f_size - f_used)
1448 n = f_size - f_used;
1449 f_used += n;
1450 p_used += n;
1451
1452 slot++;
1453 }
1454
1455 id++;
1456 seq += p_len;
1457
1458 /* The last segment may be less than gso_size. */
1459 data_len -= p_len;
1460 if (data_len < p_len)
1461 p_len = data_len;
1462 }
1463
1464 /* Flush the headers so they are ready for hardware DMA. */
1465 wmb();
1466}
1467
1468/* Pass all the data to mpipe for egress. */
1469static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1470 struct sk_buff *skb, unsigned char *headers, s64 slot)
1471{
1472 struct tile_net_priv *priv = netdev_priv(dev);
1473 struct skb_shared_info *sh = skb_shinfo(skb);
1474 unsigned int data_len = skb->data_len;
1475 unsigned int p_len = sh->gso_size;
1476 gxio_mpipe_edesc_t edesc_head = { { 0 } };
1477 gxio_mpipe_edesc_t edesc_body = { { 0 } };
1478 long f_id = -1; /* id of the current fragment */
1479 long f_size = -1; /* size of the current fragment */
1480 long f_used = -1; /* bytes used from the current fragment */
1481 long n; /* size of the current piece of payload */
1482 unsigned long tx_packets = 0, tx_bytes = 0;
1483 unsigned int csum_start, sh_len;
1484 int segment;
1485
1486 /* Prepare to egress the headers: set up header edesc. */
1487 csum_start = skb_checksum_start_offset(skb);
1488 sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1489 edesc_head.csum = 1;
1490 edesc_head.csum_start = csum_start;
1491 edesc_head.csum_dest = csum_start + skb->csum_offset;
1492 edesc_head.xfer_size = sh_len;
1493
1494 /* This is only used to specify the TLB. */
1495 edesc_head.stack_idx = large_buffer_stack;
1496 edesc_body.stack_idx = large_buffer_stack;
1497
1498 /* Egress all the edescs. */
1499 for (segment = 0; segment < sh->gso_segs; segment++) {
1500 void *va;
1501 unsigned char *buf;
1502 unsigned int p_used = 0;
1503
1504 /* Egress the header. */
1505 buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
1506 NET_IP_ALIGN;
1507 edesc_head.va = va_to_tile_io_addr(buf);
1508 gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
1509 slot++;
1510
1511 /* Egress the payload. */
1512 while (p_used < p_len) {
1513
1514 /* Advance as needed. */
1515 while (f_used >= f_size) {
1516 f_id++;
1517 f_size = sh->frags[f_id].size;
1518 f_used = 0;
1519 }
1520
1521 va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;
1522
1523 /* Use bytes from the current fragment. */
1524 n = p_len - p_used;
1525 if (n > f_size - f_used)
1526 n = f_size - f_used;
1527 f_used += n;
1528 p_used += n;
1529
1530 /* Egress a piece of the payload. */
1531 edesc_body.va = va_to_tile_io_addr(va);
1532 edesc_body.xfer_size = n;
1533 edesc_body.bound = !(p_used < p_len);
1534 gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
1535 slot++;
1536 }
1537
1538 tx_packets++;
1539 tx_bytes += sh_len + p_len;
1540
1541 /* The last segment may be less than gso_size. */
1542 data_len -= p_len;
1543 if (data_len < p_len)
1544 p_len = data_len;
1545 }
1546
1547 /* Update stats. */
1548 tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
1549 tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
1550}
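/* The net effect on the equeue, per segment: one header edesc (xfer_size
 * sh_len, with the checksum offload fields set) followed by one edesc per
 * payload piece, with "bound" set only on the final piece to mark the
 * packet boundary.
 */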
1551
1552/* Do "TSO" handling for egress.
1553 *
1554 * Normally drivers set NETIF_F_TSO only to support hardware TSO;
1555 * otherwise the stack uses scatter-gather to implement GSO in software.
1556 * In our testing, enabling GSO support (via NETIF_F_SG) drops network
1557 * performance to around 7.5 Gbps on the 10G interfaces, although it
1558 * also drops cpu utilization to under 8%.  Implementing "TSO" in the
1559 * driver instead brings performance back up to line rate, while
1560 * dropping cpu usage even further, to less than 4%.  In practice,
1561 * profiling of GSO shows that skb_segment() causes the performance
1562 * overhead; the driver benefits from using preallocated memory to
1563 * duplicate the TCP/IP headers.
1564 */
1565static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1566{
1567 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1568 struct tile_net_priv *priv = netdev_priv(dev);
1569 int channel = priv->echannel;
1570 struct tile_net_egress *egress = &egress_for_echannel[channel];
1571 struct tile_net_comps *comps = info->comps_for_echannel[channel];
1572 gxio_mpipe_equeue_t *equeue = egress->equeue;
1573 unsigned long irqflags;
1574 int num_edescs;
1575 s64 slot;
1576
1577	/* Determine how many mpipe edescs are needed. */
1578 num_edescs = tso_count_edescs(skb);
1579
1580 local_irq_save(irqflags);
1581
1582 /* Try to acquire a completion entry and an egress slot. */
1583 slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
1584 if (slot < 0) {
1585 local_irq_restore(irqflags);
1586 return NETDEV_TX_BUSY;
1587 }
1588
1589 /* Set up copies of header data properly. */
1590 tso_headers_prepare(skb, egress->headers, slot);
1591
1592 /* Actually pass the data to the network hardware. */
1593 tso_egress(dev, equeue, skb, egress->headers, slot);
1594
1595 /* Add a completion record. */
1596 add_comp(equeue, comps, slot + num_edescs - 1, skb);
1597
1598 local_irq_restore(irqflags);
1599
1600 /* Make sure the egress timer is scheduled. */
1601 tile_net_schedule_egress_timer();
1602
1603 return NETDEV_TX_OK;
1604}
1605
1606/* Analyze the body and frags for a transmit request. */
1607static unsigned int tile_net_tx_frags(struct frag *frags,
1608 struct sk_buff *skb,
1609 void *b_data, unsigned int b_len)
1610{
1611 unsigned int i, n = 0;
1612
1613 struct skb_shared_info *sh = skb_shinfo(skb);
1614
1615 if (b_len != 0) {
1616 frags[n].buf = b_data;
1617 frags[n++].length = b_len;
1618 }
1619
1620 for (i = 0; i < sh->nr_frags; i++) {
1621 skb_frag_t *f = &sh->frags[i];
1622 frags[n].buf = tile_net_frag_buf(f);
1623 frags[n++].length = skb_frag_size(f);
1624 }
1625
1626 return n;
1627}
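/* For example, an skb with a 66-byte linear area and two page fragments
 * yields n == 3: frags[0] points at the linear data, and frags[1] and
 * frags[2] at the pages, each with its corresponding length.
 */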
1628
1629/* Help the kernel transmit a packet. */
1630static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1631{
1632 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1633 struct tile_net_priv *priv = netdev_priv(dev);
1634 struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
1635 gxio_mpipe_equeue_t *equeue = egress->equeue;
1636 struct tile_net_comps *comps =
1637 info->comps_for_echannel[priv->echannel];
1638 unsigned int len = skb->len;
1639 unsigned char *data = skb->data;
1640 unsigned int num_edescs;
1641 struct frag frags[MAX_FRAGS];
1642 gxio_mpipe_edesc_t edescs[MAX_FRAGS];
1643 unsigned long irqflags;
1644 gxio_mpipe_edesc_t edesc = { { 0 } };
1645 unsigned int i;
1646 s64 slot;
1647
1648 if (skb_is_gso(skb))
1649 return tile_net_tx_tso(skb, dev);
1650
1651 num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
1652
1653 /* This is only used to specify the TLB. */
1654 edesc.stack_idx = large_buffer_stack;
1655
1656 /* Prepare the edescs. */
1657 for (i = 0; i < num_edescs; i++) {
1658 edesc.xfer_size = frags[i].length;
1659 edesc.va = va_to_tile_io_addr(frags[i].buf);
1660 edescs[i] = edesc;
1661 }
1662
1663 /* Mark the final edesc. */
1664 edescs[num_edescs - 1].bound = 1;
1665
1666 /* Add checksum info to the initial edesc, if needed. */
1667 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1668 unsigned int csum_start = skb_checksum_start_offset(skb);
1669 edescs[0].csum = 1;
1670 edescs[0].csum_start = csum_start;
1671 edescs[0].csum_dest = csum_start + skb->csum_offset;
1672 }
1673
1674 local_irq_save(irqflags);
1675
1676 /* Try to acquire a completion entry and an egress slot. */
1677 slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
1678 if (slot < 0) {
1679 local_irq_restore(irqflags);
1680 return NETDEV_TX_BUSY;
1681 }
1682
1683 for (i = 0; i < num_edescs; i++)
1684 gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
1685
1686 /* Add a completion record. */
1687 add_comp(equeue, comps, slot - 1, skb);
1688
1689	/* NOTE: Short packets (e.g. a 42-byte ARP) are counted as ETH_ZLEN (60) bytes. */
1690 tile_net_stats_add(1, &priv->stats.tx_packets);
1691 tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
1692 &priv->stats.tx_bytes);
1693
1694 local_irq_restore(irqflags);
1695
1696 /* Make sure the egress timer is scheduled. */
1697 tile_net_schedule_egress_timer();
1698
1699 return NETDEV_TX_OK;
1700}
1701
1702/* Return the subqueue id for this core (there is one subqueue per core). */
1703static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
1704{
1705 return smp_processor_id();
1706}
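/* This pairs with the NR_CPUS transmit queues requested from
 * alloc_netdev_mqs() below: each core transmits on its own subqueue,
 * so each core can stop and wake its own queue independently.
 */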
1707
1708/* Deal with a transmit timeout. */
1709static void tile_net_tx_timeout(struct net_device *dev)
1710{
1711 int cpu;
1712
1713 for_each_online_cpu(cpu)
1714 netif_wake_subqueue(dev, cpu);
1715}
1716
1717/* Ioctl commands. */
1718static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1719{
1720 return -EOPNOTSUPP;
1721}
1722
1723/* Get system network statistics for device. */
1724static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
1725{
1726 struct tile_net_priv *priv = netdev_priv(dev);
1727 return &priv->stats;
1728}
1729
1730/* Change the MTU. */
1731static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
1732{
1733 if ((new_mtu < 68) || (new_mtu > 1500))
1734 return -EINVAL;
1735 dev->mtu = new_mtu;
1736 return 0;
1737}
1738
1739/* Change the Ethernet address of the NIC.
1740 *
1741 * The hypervisor driver does not support changing the MAC address. However,
1742 * the hardware does not do anything with the MAC address, so the address
1743 * which gets used on outgoing packets, and which is accepted on incoming
1744 * packets, is completely up to us.
1745 *
1746 * Returns 0 on success, negative on failure.
1747 */
1748static int tile_net_set_mac_address(struct net_device *dev, void *p)
1749{
1750 struct sockaddr *addr = p;
1751
1752 if (!is_valid_ether_addr(addr->sa_data))
1753 return -EINVAL;
1754 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1755 return 0;
1756}
1757
1758#ifdef CONFIG_NET_POLL_CONTROLLER
1759/* Polling 'interrupt' - used by things like netconsole to send skbs
1760 * without having to re-enable interrupts. It's not called while
1761 * the interrupt routine is executing.
1762 */
1763static void tile_net_netpoll(struct net_device *dev)
1764{
1765 disable_percpu_irq(ingress_irq);
1766 tile_net_handle_ingress_irq(ingress_irq, NULL);
1767 enable_percpu_irq(ingress_irq, 0);
1768}
1769#endif
1770
1771static const struct net_device_ops tile_net_ops = {
1772 .ndo_open = tile_net_open,
1773 .ndo_stop = tile_net_stop,
1774 .ndo_start_xmit = tile_net_tx,
1775 .ndo_select_queue = tile_net_select_queue,
1776 .ndo_do_ioctl = tile_net_ioctl,
1777 .ndo_get_stats = tile_net_get_stats,
1778 .ndo_change_mtu = tile_net_change_mtu,
1779 .ndo_tx_timeout = tile_net_tx_timeout,
1780 .ndo_set_mac_address = tile_net_set_mac_address,
1781#ifdef CONFIG_NET_POLL_CONTROLLER
1782 .ndo_poll_controller = tile_net_netpoll,
1783#endif
1784};
1785
1786/* The setup function.
1787 *
1788 * This uses ether_setup() to assign various fields in dev, including
1789 * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
1790 */
1791static void tile_net_setup(struct net_device *dev)
1792{
1793 ether_setup(dev);
1794 dev->netdev_ops = &tile_net_ops;
1795 dev->watchdog_timeo = TILE_NET_TIMEOUT;
1796 dev->features |= NETIF_F_LLTX;
1797 dev->features |= NETIF_F_HW_CSUM;
1798 dev->features |= NETIF_F_SG;
1799 dev->features |= NETIF_F_TSO;
1800 dev->mtu = 1500;
1801}
1802
1803/* Allocate the device structure, register the device, and obtain the
1804 * MAC address from the hypervisor.
1805 */
1806static void tile_net_dev_init(const char *name, const uint8_t *mac)
1807{
1808 int ret;
1809 int i;
1810 int nz_addr = 0;
1811 struct net_device *dev;
1812 struct tile_net_priv *priv;
1813
1814 /* HACK: Ignore "loop" links. */
1815 if (strncmp(name, "loop", 4) == 0)
1816 return;
1817
1818 /* Allocate the device structure. Normally, "name" is a
1819 * template, instantiated by register_netdev(), but not for us.
1820 */
1821 dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
1822 NR_CPUS, 1);
1823 if (!dev) {
1824 pr_err("alloc_netdev_mqs(%s) failed\n", name);
1825 return;
1826 }
1827
1828 /* Initialize "priv". */
1829 priv = netdev_priv(dev);
1830 memset(priv, 0, sizeof(*priv));
1831 priv->dev = dev;
1832 priv->channel = -1;
1833 priv->loopify_channel = -1;
1834 priv->echannel = -1;
1835
1836 /* Get the MAC address and set it in the device struct; this must
1837 * be done before the device is opened. If the MAC is all zeroes,
1838 * we use a random address, since we're probably on the simulator.
1839 */
1840 for (i = 0; i < 6; i++)
1841 nz_addr |= mac[i];
1842
1843 if (nz_addr) {
1844 memcpy(dev->dev_addr, mac, 6);
1845 dev->addr_len = 6;
1846 } else {
1847 random_ether_addr(dev->dev_addr);
1848 }
1849
1850 /* Register the network device. */
1851 ret = register_netdev(dev);
1852 if (ret) {
1853 netdev_err(dev, "register_netdev failed %d\n", ret);
1854 free_netdev(dev);
1855 return;
1856 }
1857}
1858
1859/* Per-cpu module initialization. */
1860static void tile_net_init_module_percpu(void *unused)
1861{
1862 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1863 int my_cpu = smp_processor_id();
1864
1865 info->has_iqueue = false;
1866
1867 info->my_cpu = my_cpu;
1868
1869 /* Initialize the egress timer. */
1870 hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1871 info->egress_timer.function = tile_net_handle_egress_timer;
1872}
1873
1874/* Module initialization. */
1875static int __init tile_net_init_module(void)
1876{
1877 int i;
1878 char name[GXIO_MPIPE_LINK_NAME_LEN];
1879 uint8_t mac[6];
1880
1881 pr_info("Tilera Network Driver\n");
1882
1883 mutex_init(&tile_net_devs_for_channel_mutex);
1884
1885 /* Initialize each CPU. */
1886 on_each_cpu(tile_net_init_module_percpu, NULL, 1);
1887
1888 /* Find out what devices we have, and initialize them. */
1889 for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
1890 tile_net_dev_init(name, mac);
1891
1892 if (!network_cpus_init())
1893 network_cpus_map = *cpu_online_mask;
1894
1895 return 0;
1896}
1897
1898module_init(tile_net_init_module);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index d4a4992b4935..31470b0d0c32 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -99,7 +99,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
99} 99}
100/** 100/**
101 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. 101 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
102 * @mdio_np: Pointer to the mii_bus. 102 * @mdio_bus_np: Pointer to the mii_bus.
103 * 103 *
104 * Returns a pointer to the mii_bus, or NULL if none found. 104 * Returns a pointer to the mii_bus, or NULL if none found.
105 * 105 *
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5214b1eceb95..f18149ae2588 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -42,7 +42,8 @@ module_param(gso, bool, 0444);
42#define VIRTNET_DRIVER_VERSION "1.0.0" 42#define VIRTNET_DRIVER_VERSION "1.0.0"
43 43
44struct virtnet_stats { 44struct virtnet_stats {
45 struct u64_stats_sync syncp; 45 struct u64_stats_sync tx_syncp;
46 struct u64_stats_sync rx_syncp;
46 u64 tx_bytes; 47 u64 tx_bytes;
47 u64 tx_packets; 48 u64 tx_packets;
48 49
@@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
300 301
301 hdr = skb_vnet_hdr(skb); 302 hdr = skb_vnet_hdr(skb);
302 303
303 u64_stats_update_begin(&stats->syncp); 304 u64_stats_update_begin(&stats->rx_syncp);
304 stats->rx_bytes += skb->len; 305 stats->rx_bytes += skb->len;
305 stats->rx_packets++; 306 stats->rx_packets++;
306 u64_stats_update_end(&stats->syncp); 307 u64_stats_update_end(&stats->rx_syncp);
307 308
308 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 309 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
309 pr_debug("Needs csum!\n"); 310 pr_debug("Needs csum!\n");
@@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
565 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { 566 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
566 pr_debug("Sent skb %p\n", skb); 567 pr_debug("Sent skb %p\n", skb);
567 568
568 u64_stats_update_begin(&stats->syncp); 569 u64_stats_update_begin(&stats->tx_syncp);
569 stats->tx_bytes += skb->len; 570 stats->tx_bytes += skb->len;
570 stats->tx_packets++; 571 stats->tx_packets++;
571 u64_stats_update_end(&stats->syncp); 572 u64_stats_update_end(&stats->tx_syncp);
572 573
573 tot_sgs += skb_vnet_hdr(skb)->num_sg; 574 tot_sgs += skb_vnet_hdr(skb)->num_sg;
574 dev_kfree_skb_any(skb); 575 dev_kfree_skb_any(skb);
@@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
703 u64 tpackets, tbytes, rpackets, rbytes; 704 u64 tpackets, tbytes, rpackets, rbytes;
704 705
705 do { 706 do {
706 start = u64_stats_fetch_begin(&stats->syncp); 707 start = u64_stats_fetch_begin(&stats->tx_syncp);
707 tpackets = stats->tx_packets; 708 tpackets = stats->tx_packets;
708 tbytes = stats->tx_bytes; 709 tbytes = stats->tx_bytes;
710 } while (u64_stats_fetch_retry(&stats->tx_syncp, start));
711
712 do {
713 start = u64_stats_fetch_begin(&stats->rx_syncp);
709 rpackets = stats->rx_packets; 714 rpackets = stats->rx_packets;
710 rbytes = stats->rx_bytes; 715 rbytes = stats->rx_bytes;
711 } while (u64_stats_fetch_retry(&stats->syncp, start)); 716 } while (u64_stats_fetch_retry(&stats->rx_syncp, start));
712 717
713 tot->rx_packets += rpackets; 718 tot->rx_packets += rpackets;
714 tot->tx_packets += tpackets; 719 tot->tx_packets += tpackets;
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 67c13af6f206..c06b6cb5c91e 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -877,6 +877,10 @@ struct b43_wl {
877 * from the mac80211 subsystem. */ 877 * from the mac80211 subsystem. */
878 u16 mac80211_initially_registered_queues; 878 u16 mac80211_initially_registered_queues;
879 879
880 /* Set this if we call ieee80211_register_hw() and check if we call
881 * ieee80211_unregister_hw(). */
882 bool hw_registred;
883
880 /* We can only have one operating interface (802.11 core) 884 /* We can only have one operating interface (802.11 core)
881 * at a time. General information about this interface follows. 885 * at a time. General information about this interface follows.
882 */ 886 */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5a39b226b2e3..acd03a4f9730 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2437,6 +2437,7 @@ start_ieee80211:
2437 err = ieee80211_register_hw(wl->hw); 2437 err = ieee80211_register_hw(wl->hw);
2438 if (err) 2438 if (err)
2439 goto err_one_core_detach; 2439 goto err_one_core_detach;
2440 wl->hw_registred = true;
2440 b43_leds_register(wl->current_dev); 2441 b43_leds_register(wl->current_dev);
2441 goto out; 2442 goto out;
2442 2443
@@ -5299,6 +5300,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
5299 5300
5300 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; 5301 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
5301 wl->mac80211_initially_registered_queues = hw->queues; 5302 wl->mac80211_initially_registered_queues = hw->queues;
5303 wl->hw_registred = false;
5302 hw->max_rates = 2; 5304 hw->max_rates = 2;
5303 SET_IEEE80211_DEV(hw, dev->dev); 5305 SET_IEEE80211_DEV(hw, dev->dev);
5304 if (is_valid_ether_addr(sprom->et1mac)) 5306 if (is_valid_ether_addr(sprom->et1mac))
@@ -5370,12 +5372,15 @@ static void b43_bcma_remove(struct bcma_device *core)
5370 * as the ieee80211 unreg will destroy the workqueue. */ 5372 * as the ieee80211 unreg will destroy the workqueue. */
5371 cancel_work_sync(&wldev->restart_work); 5373 cancel_work_sync(&wldev->restart_work);
5372 5374
5373 /* Restore the queues count before unregistering, because firmware detect 5375 B43_WARN_ON(!wl);
5374 * might have modified it. Restoring is important, so the networking 5376 if (wl->current_dev == wldev && wl->hw_registred) {
5375 * stack can properly free resources. */ 5377 /* Restore the queues count before unregistering, because firmware detect
5376 wl->hw->queues = wl->mac80211_initially_registered_queues; 5378 * might have modified it. Restoring is important, so the networking
5377 b43_leds_stop(wldev); 5379 * stack can properly free resources. */
5378 ieee80211_unregister_hw(wl->hw); 5380 wl->hw->queues = wl->mac80211_initially_registered_queues;
5381 b43_leds_stop(wldev);
5382 ieee80211_unregister_hw(wl->hw);
5383 }
5379 5384
5380 b43_one_core_detach(wldev->dev); 5385 b43_one_core_detach(wldev->dev);
5381 5386
@@ -5446,7 +5451,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5446 cancel_work_sync(&wldev->restart_work); 5451 cancel_work_sync(&wldev->restart_work);
5447 5452
5448 B43_WARN_ON(!wl); 5453 B43_WARN_ON(!wl);
5449 if (wl->current_dev == wldev) { 5454 if (wl->current_dev == wldev && wl->hw_registred) {
5450 /* Restore the queues count before unregistering, because firmware detect 5455 /* Restore the queues count before unregistering, because firmware detect
5451 * might have modified it. Restoring is important, so the networking 5456 * might have modified it. Restoring is important, so the networking
5452 * stack can properly free resources. */ 5457 * stack can properly free resources. */
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9cfae0c08707..95aa8e1683ec 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1903,14 +1903,6 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1903 netif_stop_queue(priv->net_dev); 1903 netif_stop_queue(priv->net_dev);
1904} 1904}
1905 1905
1906/* Called by register_netdev() */
1907static int ipw2100_net_init(struct net_device *dev)
1908{
1909 struct ipw2100_priv *priv = libipw_priv(dev);
1910
1911 return ipw2100_up(priv, 1);
1912}
1913
1914static int ipw2100_wdev_init(struct net_device *dev) 1906static int ipw2100_wdev_init(struct net_device *dev)
1915{ 1907{
1916 struct ipw2100_priv *priv = libipw_priv(dev); 1908 struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6087,7 +6079,6 @@ static const struct net_device_ops ipw2100_netdev_ops = {
6087 .ndo_stop = ipw2100_close, 6079 .ndo_stop = ipw2100_close,
6088 .ndo_start_xmit = libipw_xmit, 6080 .ndo_start_xmit = libipw_xmit,
6089 .ndo_change_mtu = libipw_change_mtu, 6081 .ndo_change_mtu = libipw_change_mtu,
6090 .ndo_init = ipw2100_net_init,
6091 .ndo_tx_timeout = ipw2100_tx_timeout, 6082 .ndo_tx_timeout = ipw2100_tx_timeout,
6092 .ndo_set_mac_address = ipw2100_set_address, 6083 .ndo_set_mac_address = ipw2100_set_address,
6093 .ndo_validate_addr = eth_validate_addr, 6084 .ndo_validate_addr = eth_validate_addr,
@@ -6329,6 +6320,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6329 printk(KERN_INFO DRV_NAME 6320 printk(KERN_INFO DRV_NAME
6330 ": Detected Intel PRO/Wireless 2100 Network Connection\n"); 6321 ": Detected Intel PRO/Wireless 2100 Network Connection\n");
6331 6322
6323 err = ipw2100_up(priv, 1);
6324 if (err)
6325 goto fail;
6326
6332 err = ipw2100_wdev_init(dev); 6327 err = ipw2100_wdev_init(dev);
6333 if (err) 6328 if (err)
6334 goto fail; 6329 goto fail;
@@ -6338,12 +6333,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6338 * network device we would call ipw2100_up. This introduced a race 6333 * network device we would call ipw2100_up. This introduced a race
6339 * condition with newer hotplug configurations (network was coming 6334 * condition with newer hotplug configurations (network was coming
6340 * up and making calls before the device was initialized). 6335 * up and making calls before the device was initialized).
6341 * 6336 */
6342 * If we called ipw2100_up before we registered the device, then the
6343 * device name wasn't registered. So, we instead use the net_dev->init
6344 * member to call a function that then just turns and calls ipw2100_up.
6345 * net_dev->init is called after name allocation but before the
6346 * notifier chain is called */
6347 err = register_netdev(dev); 6337 err = register_netdev(dev);
6348 if (err) { 6338 if (err) {
6349 printk(KERN_WARNING DRV_NAME 6339 printk(KERN_WARNING DRV_NAME
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index 286ce4e18068..b29b798f7550 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -1251,7 +1251,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
1251 key_flags |= STA_KEY_MULTICAST_MSK; 1251 key_flags |= STA_KEY_MULTICAST_MSK;
1252 1252
1253 sta_cmd.key.key_flags = key_flags; 1253 sta_cmd.key.key_flags = key_flags;
1254 sta_cmd.key.key_offset = WEP_INVALID_OFFSET; 1254 sta_cmd.key.key_offset = keyconf->hw_key_idx;
1255 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; 1255 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
1256 sta_cmd.mode = STA_CONTROL_MODIFY_MSK; 1256 sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
1257 1257
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index a9f0415916c7..9253ef1dba72 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -224,6 +224,7 @@
224#define SCD_TXFACT (SCD_BASE + 0x10) 224#define SCD_TXFACT (SCD_BASE + 0x10)
225#define SCD_ACTIVE (SCD_BASE + 0x14) 225#define SCD_ACTIVE (SCD_BASE + 0x14)
226#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) 226#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
227#define SCD_CHAINEXT_EN (SCD_BASE + 0x244)
227#define SCD_AGGR_SEL (SCD_BASE + 0x248) 228#define SCD_AGGR_SEL (SCD_BASE + 0x248)
228#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) 229#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
229 230
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
index 8dd8a6fe61e8..cb08ba03aae7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/6000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -35,17 +35,20 @@
35#define IWL6000_UCODE_API_MAX 6 35#define IWL6000_UCODE_API_MAX 6
36#define IWL6050_UCODE_API_MAX 5 36#define IWL6050_UCODE_API_MAX 5
37#define IWL6000G2_UCODE_API_MAX 6 37#define IWL6000G2_UCODE_API_MAX 6
38#define IWL6035_UCODE_API_MAX 6
38 39
39/* Oldest version we won't warn about */ 40/* Oldest version we won't warn about */
40#define IWL6000_UCODE_API_OK 4 41#define IWL6000_UCODE_API_OK 4
41#define IWL6000G2_UCODE_API_OK 5 42#define IWL6000G2_UCODE_API_OK 5
42#define IWL6050_UCODE_API_OK 5 43#define IWL6050_UCODE_API_OK 5
43#define IWL6000G2B_UCODE_API_OK 6 44#define IWL6000G2B_UCODE_API_OK 6
45#define IWL6035_UCODE_API_OK 6
44 46
45/* Lowest firmware API version supported */ 47/* Lowest firmware API version supported */
46#define IWL6000_UCODE_API_MIN 4 48#define IWL6000_UCODE_API_MIN 4
47#define IWL6050_UCODE_API_MIN 4 49#define IWL6050_UCODE_API_MIN 4
48#define IWL6000G2_UCODE_API_MIN 4 50#define IWL6000G2_UCODE_API_MIN 5
51#define IWL6035_UCODE_API_MIN 6
49 52
50/* EEPROM versions */ 53/* EEPROM versions */
51#define EEPROM_6000_TX_POWER_VERSION (4) 54#define EEPROM_6000_TX_POWER_VERSION (4)
@@ -243,9 +246,25 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
243 IWL_DEVICE_6030, 246 IWL_DEVICE_6030,
244}; 247};
245 248
249#define IWL_DEVICE_6035 \
250 .fw_name_pre = IWL6030_FW_PRE, \
251 .ucode_api_max = IWL6035_UCODE_API_MAX, \
252 .ucode_api_ok = IWL6035_UCODE_API_OK, \
253 .ucode_api_min = IWL6035_UCODE_API_MIN, \
254 .device_family = IWL_DEVICE_FAMILY_6030, \
255 .max_inst_size = IWL60_RTC_INST_SIZE, \
256 .max_data_size = IWL60_RTC_DATA_SIZE, \
257 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
258 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
259 .base_params = &iwl6000_g2_base_params, \
260 .bt_params = &iwl6000_bt_params, \
261 .need_temp_offset_calib = true, \
262 .led_mode = IWL_LED_RF_STATE, \
263 .adv_pm = true
264
246const struct iwl_cfg iwl6035_2agn_cfg = { 265const struct iwl_cfg iwl6035_2agn_cfg = {
247 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", 266 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
248 IWL_DEVICE_6030, 267 IWL_DEVICE_6035,
249 .ht_params = &iwl6000_ht_params, 268 .ht_params = &iwl6000_ht_params,
250}; 269};
251 270
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 969f78f421df..7461a6a14338 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1065,6 +1065,12 @@ static void iwl_tx_start(struct iwl_trans *trans)
1065 /* Activate all Tx DMA/FIFO channels */ 1065 /* Activate all Tx DMA/FIFO channels */
1066 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7)); 1066 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
1067 1067
1068 /* The chain extension of the SCD doesn't work well. This feature is
1069 * enabled by default by the HW, so we need to disable it manually.
1070 */
1071 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
1072
1073
1068 /* Enable DMA channel */ 1074 /* Enable DMA channel */
1069 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) 1075 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
1070 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 1076 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4c9336cee817..a0b7cfd34685 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1555,6 +1555,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
1555 hdr = (struct ieee80211_hdr *) skb->data; 1555 hdr = (struct ieee80211_hdr *) skb->data;
1556 mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2); 1556 mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
1557 } 1557 }
1558 txi->flags |= IEEE80211_TX_STAT_ACK;
1558 } 1559 }
1559 ieee80211_tx_status_irqsafe(data2->hw, skb); 1560 ieee80211_tx_status_irqsafe(data2->hw, skb);
1560 return 0; 1561 return 0;
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
index 2e0de2f5f0f9..c2d5b495c179 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
117 radio_on = true; 117 radio_on = true;
118 } else if (radio_on) { 118 } else if (radio_on) {
119 radio_on = false; 119 radio_on = false;
120 cancel_delayed_work_sync(&priv->led_on); 120 cancel_delayed_work(&priv->led_on);
121 ieee80211_queue_delayed_work(hw, &priv->led_off, 0); 121 ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
122 } 122 }
123 } else if (radio_on) { 123 } else if (radio_on) {
diff --git a/include/linux/netfilter/xt_HMARK.h b/include/linux/netfilter/xt_HMARK.h
index abb1650940d2..826fc5807577 100644
--- a/include/linux/netfilter/xt_HMARK.h
+++ b/include/linux/netfilter/xt_HMARK.h
@@ -27,7 +27,12 @@ union hmark_ports {
27 __u16 src; 27 __u16 src;
28 __u16 dst; 28 __u16 dst;
29 } p16; 29 } p16;
30 struct {
31 __be16 src;
32 __be16 dst;
33 } b16;
30 __u32 v32; 34 __u32 v32;
35 __be32 b32;
31}; 36};
32 37
33struct xt_hmark_info { 38struct xt_hmark_info {
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 23e8234f75a5..7d3bcedc062a 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -69,16 +69,16 @@ union tcp_word_hdr {
69#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) 69#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3])
70 70
71enum { 71enum {
72 TCP_FLAG_CWR = __cpu_to_be32(0x00800000), 72 TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000),
73 TCP_FLAG_ECE = __cpu_to_be32(0x00400000), 73 TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000),
74 TCP_FLAG_URG = __cpu_to_be32(0x00200000), 74 TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000),
75 TCP_FLAG_ACK = __cpu_to_be32(0x00100000), 75 TCP_FLAG_ACK = __constant_cpu_to_be32(0x00100000),
76 TCP_FLAG_PSH = __cpu_to_be32(0x00080000), 76 TCP_FLAG_PSH = __constant_cpu_to_be32(0x00080000),
77 TCP_FLAG_RST = __cpu_to_be32(0x00040000), 77 TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000),
78 TCP_FLAG_SYN = __cpu_to_be32(0x00020000), 78 TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000),
79 TCP_FLAG_FIN = __cpu_to_be32(0x00010000), 79 TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000),
80 TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000), 80 TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000),
81 TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000) 81 TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000)
82}; 82};
83 83
84/* 84/*
diff --git a/include/net/route.h b/include/net/route.h
index 2bfbc9329ea9..a36ae429ed5d 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -164,9 +164,9 @@ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
164{ 164{
165 struct flowi4 fl4 = { 165 struct flowi4 fl4 = {
166 .flowi4_oif = oif, 166 .flowi4_oif = oif,
167 .flowi4_tos = tos,
167 .daddr = daddr, 168 .daddr = daddr,
168 .saddr = saddr, 169 .saddr = saddr,
169 .flowi4_tos = tos,
170 }; 170 };
171 return ip_route_output_key(net, &fl4); 171 return ip_route_output_key(net, &fl4);
172} 172}
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 55ce96b53b09..9d7d54a00e63 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -220,13 +220,16 @@ struct tcf_proto {
220 220
221struct qdisc_skb_cb { 221struct qdisc_skb_cb {
222 unsigned int pkt_len; 222 unsigned int pkt_len;
223 unsigned char data[24]; 223 u16 bond_queue_mapping;
224 u16 _pad;
225 unsigned char data[20];
224}; 226};
225 227
226static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) 228static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
227{ 229{
228 struct qdisc_skb_cb *qcb; 230 struct qdisc_skb_cb *qcb;
229 BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz); 231
232 BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
230 BUILD_BUG_ON(sizeof(qcb->data) < sz); 233 BUILD_BUG_ON(sizeof(qcb->data) < sz);
231} 234}
232 235
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 0301b328cf0f..86852963b7f7 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1208,9 +1208,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
1208 if (addr->sat_addr.s_node == ATADDR_BCAST && 1208 if (addr->sat_addr.s_node == ATADDR_BCAST &&
1209 !sock_flag(sk, SOCK_BROADCAST)) { 1209 !sock_flag(sk, SOCK_BROADCAST)) {
1210#if 1 1210#if 1
1211 printk(KERN_WARNING "%s is broken and did not set " 1211 pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n",
1212 "SO_BROADCAST. It will break when 2.2 is "
1213 "released.\n",
1214 current->comm); 1212 current->comm);
1215#else 1213#else
1216 return -EACCES; 1214 return -EACCES;
diff --git a/net/core/filter.c b/net/core/filter.c
index a3eddb515d1b..d4ce2dc712e3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -616,9 +616,9 @@ static int __sk_prepare_filter(struct sk_filter *fp)
616/** 616/**
617 * sk_unattached_filter_create - create an unattached filter 617 * sk_unattached_filter_create - create an unattached filter
618 * @fprog: the filter program 618 * @fprog: the filter program
619 * @sk: the socket to use 619 * @pfp: the unattached filter that is created
620 * 620 *
621 * Create a filter independent ofr any socket. We first run some 621 * Create a filter independent of any socket. We first run some
622 * sanity checks on it to make sure it does not explode on us later. 622 * sanity checks on it to make sure it does not explode on us later.
623 * If an error occurs or there is insufficient memory for the filter 623 * If an error occurs or there is insufficient memory for the filter
624 * a negative errno code is returned. On success the return is zero. 624 * a negative errno code is returned. On success the return is zero.
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index eb09f8bbbf07..d81d026138f0 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2219,9 +2219,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2219 rcu_read_lock_bh(); 2219 rcu_read_lock_bh();
2220 nht = rcu_dereference_bh(tbl->nht); 2220 nht = rcu_dereference_bh(tbl->nht);
2221 2221
2222 for (h = 0; h < (1 << nht->hash_shift); h++) { 2222 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2223 if (h < s_h)
2224 continue;
2225 if (h > s_h) 2223 if (h > s_h)
2226 s_idx = 0; 2224 s_idx = 0;
2227 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; 2225 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
@@ -2260,9 +2258,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2260 2258
2261 read_lock_bh(&tbl->lock); 2259 read_lock_bh(&tbl->lock);
2262 2260
2263 for (h = 0; h <= PNEIGH_HASHMASK; h++) { 2261 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2264 if (h < s_h)
2265 continue;
2266 if (h > s_h) 2262 if (h > s_h)
2267 s_idx = 0; 2263 s_idx = 0;
2268 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { 2264 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
@@ -2297,7 +2293,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2297 struct neigh_table *tbl; 2293 struct neigh_table *tbl;
2298 int t, family, s_t; 2294 int t, family, s_t;
2299 int proxy = 0; 2295 int proxy = 0;
2300 int err = 0; 2296 int err;
2301 2297
2302 read_lock(&neigh_tbl_lock); 2298 read_lock(&neigh_tbl_lock);
2303 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; 2299 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
@@ -2311,7 +2307,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2311 2307
2312 s_t = cb->args[0]; 2308 s_t = cb->args[0];
2313 2309
2314 for (tbl = neigh_tables, t = 0; tbl && (err >= 0); 2310 for (tbl = neigh_tables, t = 0; tbl;
2315 tbl = tbl->next, t++) { 2311 tbl = tbl->next, t++) {
2316 if (t < s_t || (family && tbl->family != family)) 2312 if (t < s_t || (family && tbl->family != family))
2317 continue; 2313 continue;
@@ -2322,6 +2318,8 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2322 err = pneigh_dump_table(tbl, skb, cb); 2318 err = pneigh_dump_table(tbl, skb, cb);
2323 else 2319 else
2324 err = neigh_dump_table(tbl, skb, cb); 2320 err = neigh_dump_table(tbl, skb, cb);
2321 if (err < 0)
2322 break;
2325 } 2323 }
2326 read_unlock(&neigh_tbl_lock); 2324 read_unlock(&neigh_tbl_lock);
2327 2325
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1d74cea22aaa..5b21522ed0e1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3361,7 +3361,7 @@ EXPORT_SYMBOL(kfree_skb_partial);
3361 * @to: prior buffer 3361 * @to: prior buffer
3362 * @from: buffer to add 3362 * @from: buffer to add
3363 * @fragstolen: pointer to boolean 3363 * @fragstolen: pointer to boolean
3364 * 3364 * @delta_truesize: how much more was allocated than was requested
3365 */ 3365 */
3366bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 3366bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
3367 bool *fragstolen, int *delta_truesize) 3367 bool *fragstolen, int *delta_truesize)
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e5c44fc586ab..ab09b126423c 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -44,6 +44,7 @@ static int ip_forward_finish(struct sk_buff *skb)
44 struct ip_options *opt = &(IPCB(skb)->opt); 44 struct ip_options *opt = &(IPCB(skb)->opt);
45 45
46 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 46 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
47 IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
47 48
48 if (unlikely(opt->optlen)) 49 if (unlikely(opt->optlen))
49 ip_forward_options(skb); 50 ip_forward_options(skb);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a9e519ad6db5..c94bbc6f2ba3 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1574,6 +1574,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
1574 struct ip_options *opt = &(IPCB(skb)->opt); 1574 struct ip_options *opt = &(IPCB(skb)->opt);
1575 1575
1576 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 1576 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
1577 IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
1577 1578
1578 if (unlikely(opt->optlen)) 1579 if (unlikely(opt->optlen))
1579 ip_forward_options(skb); 1580 ip_forward_options(skb);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 7ef0743f06f0..215afc74d8aa 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1562,7 +1562,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
1562 neigh_flags = neigh->flags; 1562 neigh_flags = neigh->flags;
1563 neigh_release(neigh); 1563 neigh_release(neigh);
1564 } 1564 }
1565 if (neigh_flags & NTF_ROUTER) { 1565 if (!(neigh_flags & NTF_ROUTER)) {
1566 RT6_TRACE("purging route %p via non-router but gateway\n", 1566 RT6_TRACE("purging route %p via non-router but gateway\n",
1567 rt); 1567 rt);
1568 return -1; 1568 return -1;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ee1bb450bfe4..a233a7ccbc3a 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -513,6 +513,7 @@ int ip6_forward(struct sk_buff *skb)
513 hdr->hop_limit--; 513 hdr->hop_limit--;
514 514
515 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); 515 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
516 IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
516 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, 517 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
517 ip6_forward_finish); 518 ip6_forward_finish);
518 519
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index b15dc08643a4..461e47c8e956 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1886,6 +1886,8 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1886{ 1886{
1887 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), 1887 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1888 IPSTATS_MIB_OUTFORWDATAGRAMS); 1888 IPSTATS_MIB_OUTFORWDATAGRAMS);
1889 IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1890 IPSTATS_MIB_OUTOCTETS, skb->len);
1889 return dst_output(skb); 1891 return dst_output(skb);
1890} 1892}
1891 1893
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 443591d629ca..185f12f4a5fa 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -162,6 +162,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
 		if (dev) {
 			unregister_netdev(dev);
 			spriv->dev = NULL;
+			module_put(THIS_MODULE);
 		}
 	}
 }
@@ -249,6 +250,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
 	if (rc < 0)
 		goto out_del_dev;
 
+	__module_get(THIS_MODULE);
 	/* Must be done after register_netdev() */
 	strlcpy(session->ifname, dev->name, IFNAMSIZ);
 
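
The l2tp_eth change pins the module for the lifetime of the session's net_device, so the teardown code cannot outlive its own module text. A minimal sketch of that pinning pattern, with a hypothetical object and create/destroy pair standing in for the l2tp session device:

/* Sketch of the module-pinning pattern; struct my_obj and its helpers
 * are hypothetical stand-ins. */
#include <linux/module.h>
#include <linux/slab.h>

struct my_obj {
	int placeholder;
};

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	/* Object outlives the creating syscall, so hold the module that
	 * owns the callbacks it will invoke on teardown. */
	__module_get(THIS_MODULE);
	return obj;
}

static void my_obj_destroy(struct my_obj *obj)
{
	kfree(obj);
	module_put(THIS_MODULE);	/* balance the get in create */
}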
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 70614e7affab..61d8b75d2686 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -464,10 +464,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 					   sk->sk_bound_dev_if);
 		if (IS_ERR(rt))
 			goto no_route;
-		if (connected)
+		if (connected) {
 			sk_setup_caps(sk, &rt->dst);
-		else
-			dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
+		} else {
+			skb_dst_set(skb, &rt->dst);
+			goto xmit;
+		}
 	}
 
 	/* We dont need to clone dst here, it is guaranteed to not disappear.
@@ -475,6 +477,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 	 */
 	skb_dst_set_noref(skb, &rt->dst);
 
+xmit:
 	/* Queue the packet to IP for output */
 	rc = ip_queue_xmit(skb, &inet->cork.fl);
 	rcu_read_unlock();
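
The race fixed here came from releasing the route before the packet had been transmitted; the fix attaches the route to the skb instead. Two attachment helpers are involved: skb_dst_set() hands the lookup's reference to the skb, while skb_dst_set_noref() lets the skb borrow a reference-free pointer and is only valid under rcu_read_lock(). A compile-oriented sketch of the distinction, with names mirroring the hunk above:

#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>

/* Sketch: attach a route to an skb the way the fixed sendmsg path does.
 * Caller holds rcu_read_lock(); "connected" mirrors the socket state. */
static void attach_route(struct sk_buff *skb, struct rtable *rt,
			 bool connected)
{
	if (connected)
		/* The socket's cached dst keeps rt alive; skb borrows it. */
		skb_dst_set_noref(skb, &rt->dst);
	else
		/* skb consumes the reference returned by the lookup and
		 * releases it when the skb is freed. */
		skb_dst_set(skb, &rt->dst);
}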
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0f45d02e0ba7..d7134c170336 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3100,7 +3100,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 	}
 
 	local->oper_channel = cbss->channel;
-	ieee80211_hw_config(local, 0);
+	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
 	if (!have_sta) {
 		u32 rates = 0, basic_rates = 0;
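
ieee80211_hw_config() passes the changed bitmask straight through to the driver's .config callback, so calling it with 0 told drivers that nothing had changed and a driver could legitimately skip retuning; the fix names the channel change explicitly. A hedged sketch of how a driver consumes the flag, with hypothetical mydrv_* helpers and field names per the mac80211 API of this period:

#include <net/mac80211.h>

static void mydrv_set_channel(void *priv, struct ieee80211_channel *chan);
static void mydrv_set_txpower(void *priv, int dbm);

/* Sketch: a driver only reacts to the bits set in "changed", which is
 * why ieee80211_hw_config(local, 0) was effectively a no-op here. */
static int mydrv_config(struct ieee80211_hw *hw, u32 changed)
{
	if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
		mydrv_set_channel(hw->priv, hw->conf.channel);
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		mydrv_set_txpower(hw->priv, hw->conf.power_level);
	return 0;
}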
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 46d69d7f1bb4..31f50bc3a312 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -270,9 +270,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
 		return 0;
 
 	/* RTP port is even */
-	port &= htons(~1);
-	rtp_port = port;
-	rtcp_port = htons(ntohs(port) + 1);
+	rtp_port = port & ~htons(1);
+	rtcp_port = port | htons(1);
 
 	/* Create expect for RTP */
 	if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
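
RTP uses the even port of a pair and RTCP the adjacent odd one. The rewritten code derives both directly in network byte order: masking with ~htons(1) clears, and OR-ing with htons(1) sets, the host-order low bit wherever endianness puts it, with no ntohs/htons round trip. A small userspace demonstration (port 5005 is just a sample value):

/* Demo: derive an RTP/RTCP port pair in network byte order, as the
 * rewritten conntrack helper does. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t port	   = htons(5005);	/* sample port, network order */
	uint16_t rtp_port  = port & ~htons(1);	/* round down to even */
	uint16_t rtcp_port = port | htons(1);	/* adjacent odd port */

	/* Prints "rtp=5004 rtcp=5005" on big- and little-endian hosts. */
	printf("rtp=%u rtcp=%u\n", ntohs(rtp_port), ntohs(rtcp_port));
	return 0;
}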
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
index 0a96a43108ed..1686ca1b53a1 100644
--- a/net/netfilter/xt_HMARK.c
+++ b/net/netfilter/xt_HMARK.c
@@ -32,13 +32,13 @@ MODULE_ALIAS("ipt_HMARK");
 MODULE_ALIAS("ip6t_HMARK");
 
 struct hmark_tuple {
-	u32			src;
-	u32			dst;
+	__be32			src;
+	__be32			dst;
 	union hmark_ports	uports;
-	uint8_t			proto;
+	u8			proto;
 };
 
-static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
+static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask)
 {
 	return (addr32[0] & mask[0]) ^
 	       (addr32[1] & mask[1]) ^
@@ -46,8 +46,8 @@ static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
 	       (addr32[3] & mask[3]);
 }
 
-static inline u32
-hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
+static inline __be32
+hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask)
 {
 	switch (l3num) {
 	case AF_INET:
@@ -58,6 +58,22 @@ hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
 	return 0;
 }
 
+static inline void hmark_swap_ports(union hmark_ports *uports,
+				    const struct xt_hmark_info *info)
+{
+	union hmark_ports hp;
+	u16 src, dst;
+
+	hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32;
+	src = ntohs(hp.b16.src);
+	dst = ntohs(hp.b16.dst);
+
+	if (dst > src)
+		uports->v32 = (dst << 16) | src;
+	else
+		uports->v32 = (src << 16) | dst;
+}
+
 static int
 hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
 		    const struct xt_hmark_info *info)
@@ -74,22 +90,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
 	otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
 	rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
 
-	t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all,
-				 info->src_mask.all);
-	t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all,
-				 info->dst_mask.all);
+	t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6,
+				 info->src_mask.ip6);
+	t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6,
+				 info->dst_mask.ip6);
 
 	if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
 		return 0;
 
 	t->proto = nf_ct_protonum(ct);
 	if (t->proto != IPPROTO_ICMP) {
-		t->uports.p16.src = otuple->src.u.all;
-		t->uports.p16.dst = rtuple->src.u.all;
-		t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
-				info->port_set.v32;
-		if (t->uports.p16.dst < t->uports.p16.src)
-			swap(t->uports.p16.dst, t->uports.p16.src);
+		t->uports.b16.src = otuple->src.u.all;
+		t->uports.b16.dst = rtuple->src.u.all;
+		hmark_swap_ports(&t->uports, info);
 	}
 
 	return 0;
@@ -98,15 +111,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
 #endif
 }
 
+/* This hash function is endian independent, to ensure consistent hashing if
+ * the cluster is composed of big and little endian systems. */
 static inline u32
 hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
 {
 	u32 hash;
+	u32 src = ntohl(t->src);
+	u32 dst = ntohl(t->dst);
 
-	if (t->dst < t->src)
-		swap(t->src, t->dst);
+	if (dst < src)
+		swap(src, dst);
 
-	hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd);
+	hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd);
 	hash = hash ^ (t->proto & info->proto_mask);
 
 	return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
@@ -126,11 +143,7 @@ hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
 	if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
 		return;
 
-	t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
-			info->port_set.v32;
-
-	if (t->uports.p16.dst < t->uports.p16.src)
-		swap(t->uports.p16.dst, t->uports.p16.src);
+	hmark_swap_ports(&t->uports, info);
 }
 
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
@@ -178,8 +191,8 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
 		return -1;
 	}
 noicmp:
-	t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all);
-	t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all);
+	t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6);
+	t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6);
 
 	if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
 		return 0;
@@ -255,11 +268,8 @@ hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
 		}
 	}
 
-	t->src = (__force u32) ip->saddr;
-	t->dst = (__force u32) ip->daddr;
-
-	t->src &= info->src_mask.ip;
-	t->dst &= info->dst_mask.ip;
+	t->src = ip->saddr & info->src_mask.ip;
+	t->dst = ip->daddr & info->dst_mask.ip;
 
 	if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
 		return 0;
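
Two details of the reworked hash are worth noting: converting the masked addresses with ntohl() before jhash_3words() gives every node in a mixed-endian cluster the same hash input, and the final ((u64)hash * hmodulus) >> 32 maps the 32-bit hash onto [hoffset, hoffset + hmodulus) without a division. A userspace check of that multiply-shift reduction, with sample values only:

/* Demo: the multiply-shift reduction hmark_hash() uses to scale a
 * 32-bit hash into [hoffset, hoffset + hmodulus) without a modulo. */
#include <stdio.h>
#include <stdint.h>

static uint32_t hmark_scale(uint32_t hash, uint32_t hmodulus,
			    uint32_t hoffset)
{
	return (uint32_t)(((uint64_t)hash * hmodulus) >> 32) + hoffset;
}

int main(void)
{
	/* Sample values: spread marks across 8 buckets starting at 100. */
	uint32_t hashes[] = { 0x00000000, 0x7fffffff, 0xdeadbeef, 0xffffffff };
	int i;

	for (i = 0; i < 4; i++)
		printf("hash=%08x -> mark=%u\n",
		       hashes[i], hmark_scale(hashes[i], 8, 100));
	return 0;
}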
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 30e3cc71be7a..2c0b317344b7 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -292,6 +292,9 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
 
 	pr_debug("%p\n", sk);
 
+	if (llcp_sock == NULL)
+		return -EBADFD;
+
 	addr->sa_family = AF_NFC;
 	*len = sizeof(struct sockaddr_nfc_llcp);
 