-rw-r--r--  Documentation/networking/stmmac.txt  |   48
-rw-r--r--  drivers/block/aoe/aoecmd.c            |    6
-rw-r--r--  drivers/infiniband/core/addr.c        |    8
-rw-r--r--  drivers/net/3c507.c                   |    2
-rw-r--r--  drivers/net/3c515.c                   |    2
-rw-r--r--  drivers/net/82596.c                   |    2
-rw-r--r--  drivers/net/Kconfig                   |  243
-rw-r--r--  drivers/net/arm/w90p910_ether.c       |    2
-rw-r--r--  drivers/net/at1700.c                  |    2
-rw-r--r--  drivers/net/atarilance.c              |    2
-rw-r--r--  drivers/net/bonding/bond_main.c       |    8
-rw-r--r--  drivers/net/can/mscan/mscan.c         |    2
-rw-r--r--  drivers/net/eepro.c                   |    2
-rw-r--r--  drivers/net/ks8851.c                  |   33
-rw-r--r--  drivers/net/lance.c                   |    2
-rw-r--r--  drivers/net/lib82596.c                |    2
-rw-r--r--  drivers/net/qlge/qlge.h               |    4
-rw-r--r--  drivers/net/qlge/qlge_dbg.c           |   21
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c       |   19
-rw-r--r--  drivers/net/qlge/qlge_mpi.c           |    2
-rw-r--r--  drivers/net/usb/ipheth.c              |    2
-rw-r--r--  drivers/net/vxge/vxge-config.c        | 1219
-rw-r--r--  drivers/net/vxge/vxge-config.h        |  135
-rw-r--r--  drivers/net/vxge/vxge-ethtool.c       |   90
-rw-r--r--  drivers/net/vxge/vxge-main.c          |  535
-rw-r--r--  drivers/net/vxge/vxge-main.h          |   81
-rw-r--r--  drivers/net/vxge/vxge-reg.h           |   33
-rw-r--r--  drivers/net/vxge/vxge-traffic.h       |   28
-rw-r--r--  drivers/net/vxge/vxge-version.h       |   31
-rw-r--r--  drivers/net/znet.c                    |    2
-rw-r--r--  include/linux/igmp.h                  |   12
-rw-r--r--  include/linux/inetdevice.h            |    5
-rw-r--r--  include/linux/netdevice.h             |    2
-rw-r--r--  include/net/dn_dev.h                  |   27
-rw-r--r--  include/net/dn_route.h                |   10
-rw-r--r--  include/net/dst.h                     |    8
-rw-r--r--  include/net/inet_sock.h               |    2
-rw-r--r--  include/net/neighbour.h               |   10
-rw-r--r--  include/net/route.h                   |   12
-rw-r--r--  net/core/dev.c                        |    3
-rw-r--r--  net/dccp/ackvec.c                     |  251
-rw-r--r--  net/dccp/ackvec.h                     |  115
-rw-r--r--  net/dccp/ccids/ccid2.c                |   13
-rw-r--r--  net/dccp/dccp.h                       |   11
-rw-r--r--  net/dccp/input.c                      |    6
-rw-r--r--  net/dccp/options.c                    |   65
-rw-r--r--  net/decnet/af_decnet.c                |    2
-rw-r--r--  net/decnet/dn_dev.c                   |  100
-rw-r--r--  net/decnet/dn_fib.c                   |    6
-rw-r--r--  net/decnet/dn_neigh.c                 |    2
-rw-r--r--  net/decnet/dn_route.c                 |   72
-rw-r--r--  net/ipv4/icmp.c                       |    4
-rw-r--r--  net/ipv4/igmp.c                       |  225
-rw-r--r--  net/ipv4/ip_gre.c                     |    2
-rw-r--r--  net/ipv4/ipmr.c                       |    2
-rw-r--r--  net/ipv4/route.c                      |   57
-rw-r--r--  net/ipv4/tcp.c                        |   16
-rw-r--r--  net/ipv4/xfrm4_policy.c               |   24
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c       |    8
-rw-r--r--  net/socket.c                          |   11
-rw-r--r--  net/unix/af_unix.c                    |   34
61 files changed, 2134 insertions, 1551 deletions
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 7ee770b5ef5f..80a7a3454902 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -7,7 +7,7 @@ This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
 (Synopsys IP blocks); it has been fully tested on STLinux platforms.
 
 Currently this network device driver is for all STM embedded MAC/GMAC
-(7xxx SoCs).
+(7xxx SoCs). Other platforms are starting to use it as well, e.g. ARM SPEAr.
 
 DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
 Universal version 4.0 have been used for developing the first code
@@ -95,9 +95,14 @@ Several information came from the platform; please refer to the
 driver's Header file in include/linux directory.
 
 struct plat_stmmacenet_data {
 	int bus_id;
 	int pbl;
-	int has_gmac;
+	int clk_csr;
+	int has_gmac;
+	int enh_desc;
+	int tx_coe;
+	int bugged_jumbo;
+	int pmt;
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	void (*bus_setup)(unsigned long ioaddr);
 #ifdef CONFIG_STM_DRIVERS
@@ -114,6 +119,12 @@ Where:
 	registers (on STM platforms);
 - has_gmac: GMAC core is on board (get it at run-time in the next step);
 - bus_id: bus identifier.
+- tx_coe: core is able to perform the tx csum in HW.
+- enh_desc: if set, the MAC will use the enhanced descriptor structure.
+- clk_csr: CSR Clock range selection.
+- bugged_jumbo: some HWs are not able to perform the csum in HW for
+  over-sized frames due to limited buffer sizes. With this flag set,
+  the csum will be done in SW on jumbo frames.
 
 struct plat_stmmacphy_data {
 	int bus_id;
@@ -131,13 +142,28 @@ Where:
 - interface: physical MII interface mode;
 - phy_reset: hook to reset HW function.
 
+SOURCES:
+- Kconfig
+- Makefile
+- stmmac_main.c: main network device driver;
+- stmmac_mdio.c: mdio functions;
+- stmmac_ethtool.c: ethtool support;
+- stmmac_timer.[ch]: timer code used for mitigating the driver dma interrupts
+  (only tested on ST40-based platforms);
+- stmmac.h: private driver structure;
+- common.h: common definitions and VFTs;
+- descs.h: descriptor structure definitions;
+- dwmac1000_core.c: GMAC core functions;
+- dwmac1000_dma.c: dma functions for the GMAC chip;
+- dwmac1000.h: specific header file for the GMAC;
+- dwmac100_core.c: MAC 100 core and dma code;
+- dwmac100_dma.c: dma functions for the MAC chip;
+- dwmac100.h: specific header file for the MAC;
+- dwmac_lib.c: generic DMA functions shared among chips;
+- enh_desc.c: functions for handling enhanced descriptors;
+- norm_desc.c: functions for handling normal descriptors.
+
 TODO:
-- Continue to make the driver more generic and suitable for other Synopsys
-  Ethernet controllers used on other architectures (i.e. ARM).
-- 10G controllers are not supported.
-- MAC uses Normal descriptors and GMAC uses enhanced ones.
-  This is a limit that should be reviewed. MAC could want to
-  use the enhanced structure.
-- Checksumming: Rx/Tx csum is done in HW in case of GMAC only.
+- XGMAC controller is not supported.
 - Review the timer optimisation code to use an embedded device that seems to be
   available in new chip generations.
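
For context, a hedged sketch of how a board file might populate the new
plat_stmmacenet_data fields documented above. The field names follow the
struct in this documentation; the values and the my_board_ prefix are
purely illustrative and not taken from the patch:

	static struct plat_stmmacenet_data my_board_stmmac_data = {
		.bus_id       = 0,
		.pbl          = 32,	/* DMA programmable burst length */
		.clk_csr      = 0,	/* CSR clock range selection */
		.has_gmac     = 1,	/* GMAC core present on board */
		.enh_desc     = 1,	/* use enhanced descriptors */
		.tx_coe       = 1,	/* tx checksum offload in HW */
		.bugged_jumbo = 0,	/* HW can csum jumbo frames */
		.pmt          = 1,	/* power management support */
	};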
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 5674bd01d96d..de0435e63b02 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -297,8 +297,8 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
 	struct sk_buff *skb;
 	struct net_device *ifp;
 
-	read_lock(&dev_base_lock);
-	for_each_netdev(&init_net, ifp) {
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, ifp) {
 		dev_hold(ifp);
 		if (!is_aoe_netif(ifp))
 			goto cont;
@@ -325,7 +325,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
 cont:
 		dev_put(ifp);
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 }
 
 static void
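
The aoecmd.c hunk above is one instance of a pattern repeated throughout
this series: read-side walks of the net_device list move from the
dev_base_lock rwlock to RCU. A minimal sketch of the pattern, where
my_use_dev() is a placeholder for the per-device work:

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		dev_hold(dev);		/* pin dev while we use it */
		my_use_dev(dev);	/* must not sleep inside the RCU section */
		dev_put(dev);
	}
	rcu_read_unlock();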
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a5ea1bce9689..c15fd2ea56c1 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -200,7 +200,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 	src_in->sin_family = AF_INET;
 	src_in->sin_addr.s_addr = rt->rt_src;
 
-	if (rt->idev->dev->flags & IFF_LOOPBACK) {
+	if (rt->dst.dev->flags & IFF_LOOPBACK) {
 		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
 		if (!ret)
 			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
@@ -208,12 +208,12 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 	}
 
 	/* If the device does ARP internally, return 'done' */
-	if (rt->idev->dev->flags & IFF_NOARP) {
-		rdma_copy_addr(addr, rt->idev->dev, NULL);
+	if (rt->dst.dev->flags & IFF_NOARP) {
+		rdma_copy_addr(addr, rt->dst.dev, NULL);
 		goto put;
 	}
 
-	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
+	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
 	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
 		neigh_event_send(rt->dst.neighbour, NULL);
 		ret = -ENODATA;
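
The addr.c hunk follows the route-cache cleanup elsewhere in this series:
fields shared by all dst types are now read through the embedded generic
struct dst_entry rather than the IPv4-specific inet_device. Sketch,
assuming struct rtable embeds its dst_entry as "dst":

	/* old: rt->idev->dev      new: rt->dst.dev */
	static inline struct net_device *route_dev(const struct rtable *rt)
	{
		return rt->dst.dev;
	}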
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index ea9b7a098c9b..475a66d95b34 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -201,7 +201,7 @@ struct net_local {
 #define RX_BUF_SIZE	(1518+14+18)	/* packet+header+RBD */
 #define RX_BUF_END	(dev->mem_end - dev->mem_start)
 
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
 
 /*
 	That's it: only 86 bytes to set up the beast, including every extra
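
This hunk and the similar ones below fix watchdog timeouts that were
written as raw tick counts: dev->watchdog_timeo is in jiffies, so a bare
"5" means 5 ms at HZ=1000 but 50 ms at HZ=100. Expressing the constant in
terms of HZ pins it to wall-clock time. A hedged sketch, with my_setup()
as an illustrative placeholder:

	#define TX_TIMEOUT	(HZ/20)		/* 50 ms regardless of CONFIG_HZ */

	static void my_setup(struct net_device *dev)
	{
		dev->watchdog_timeo = TX_TIMEOUT;	/* units: jiffies */
	}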
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index cdf7226a7c43..d2bb4b254c57 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -98,7 +98,7 @@ static int rx_nocopy, rx_copy, queued_packet;
 #define WAIT_TX_AVAIL 200
 
 /* Operational parameter that usually are not changed. */
-#define TX_TIMEOUT  40		/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT  ((4*HZ)/10)	/* Time in jiffies before concluding Tx hung */
 
 /* The size here is somewhat misleading: the Corkscrew also uses the ISA
    aliased registers at <base>+0x400.
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index e2c9c5b949f9..be1f1970c842 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -191,7 +191,7 @@ enum commands {
 #define RX_SUSPEND	0x0030
 #define RX_ABORT	0x0040
 
-#define TX_TIMEOUT	5
+#define TX_TIMEOUT	(HZ/20)
 
 
 struct i596_reg {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f6668cdaac85..0a7e6cea0082 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1533,7 +1533,7 @@ config E100
 
 	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
 	  to identify the adapter.
 
 	  For the latest Intel PRO/100 network driver for Linux, see:
 
@@ -1786,17 +1786,17 @@ config KS8842
 	tristate "Micrel KSZ8841/42 with generic bus interface"
 	depends on HAS_IOMEM && DMA_ENGINE
 	help
 	  This platform driver is for KSZ8841(1-port) / KS8842(2-port)
 	  ethernet switch chip (managed, VLAN, QoS) from Micrel or
 	  Timberdale(FPGA).
 
 config KS8851
 	tristate "Micrel KS8851 SPI"
 	depends on SPI
 	select MII
 	select CRC32
 	help
 	  SPI driver for Micrel KS8851 SPI attached network chip.
 
 config KS8851_MLL
 	tristate "Micrel KS8851 MLL"
@@ -2133,25 +2133,25 @@ config IP1000
 	  will be called ipg.  This is recommended.
 
 config IGB
 	tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
 	depends on PCI
 	---help---
 	  This driver supports Intel(R) 82575/82576 gigabit ethernet family of
 	  adapters.  For more information on how to identify your adapter, go
 	  to the Adapter & Driver ID Guide at:
 
 	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
 	  For general information and support, go to the Intel support
 	  website at:
 
 	  <http://support.intel.com>
 
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/e1000.txt>.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called igb.
 
 config IGB_DCA
 	bool "Direct Cache Access (DCA) Support"
@@ -2163,25 +2163,25 @@ config IGB_DCA
 	  is used, with the intent of lessening the impact of cache misses.
 
 config IGBVF
 	tristate "Intel(R) 82576 Virtual Function Ethernet support"
 	depends on PCI
 	---help---
 	  This driver supports Intel(R) 82576 virtual functions.  For more
 	  information on how to identify your adapter, go to the Adapter &
 	  Driver ID Guide at:
 
 	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
 	  For general information and support, go to the Intel support
 	  website at:
 
 	  <http://support.intel.com>
 
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/e1000.txt>.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called igbvf.
 
 source "drivers/net/ixp2000/Kconfig"
 
@@ -2300,14 +2300,14 @@ config SKGE
 	  will be called skge.  This is recommended.
 
 config SKGE_DEBUG
 	bool "Debugging interface"
 	depends on SKGE && DEBUG_FS
 	help
 	  This option adds the ability to dump driver state for debugging.
 	  The file /sys/kernel/debug/skge/ethX displays the state of the internal
 	  transmit and receive rings.
 
 	  If unsure, say N.
 
 config SKY2
 	tristate "SysKonnect Yukon2 support"
@@ -2326,14 +2326,14 @@ config SKY2
 	  will be called sky2.  This is recommended.
 
 config SKY2_DEBUG
 	bool "Debugging interface"
 	depends on SKY2 && DEBUG_FS
 	help
 	  This option adds the ability to dump driver state for debugging.
 	  The file /sys/kernel/debug/sky2/ethX displays the state of the internal
 	  transmit and receive rings.
 
 	  If unsure, say N.
 
 config VIA_VELOCITY
 	tristate "VIA Velocity support"
@@ -2573,32 +2573,32 @@ config MDIO
 	tristate
 
 config CHELSIO_T1
 	tristate "Chelsio 10Gb Ethernet support"
 	depends on PCI
 	select CRC32
 	select MDIO
 	help
 	  This driver supports Chelsio gigabit and 10-gigabit
 	  Ethernet cards. More information about adapter features and
 	  performance tuning is in <file:Documentation/networking/cxgb.txt>.
 
 	  For general information about Chelsio and our products, visit
 	  our website at <http://www.chelsio.com>.
 
 	  For customer support, please visit our customer support page at
 	  <http://www.chelsio.com/support.html>.
 
 	  Please send feedback to <linux-bugs@chelsio.com>.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called cxgb.
 
 config CHELSIO_T1_1G
 	bool "Chelsio gigabit Ethernet support"
 	depends on CHELSIO_T1
 	help
 	  Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
 	  are using only 10G cards say 'N' here.
 
 config CHELSIO_T3_DEPENDS
 	tristate
@@ -2728,26 +2728,26 @@ config IXGBE_DCB
 	  If unsure, say N.
 
 config IXGBEVF
 	tristate "Intel(R) 82599 Virtual Function Ethernet support"
 	depends on PCI_MSI
 	---help---
 	  This driver supports Intel(R) 82599 virtual functions.  For more
 	  information on how to identify your adapter, go to the Adapter &
 	  Driver ID Guide at:
 
 	  <http://support.intel.com/support/network/sb/CS-008441.htm>
 
 	  For general information and support, go to the Intel support
 	  website at:
 
 	  <http://support.intel.com>
 
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/ixgbevf.txt>.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called ixgbevf.  MSI-X interrupt support is required
 	  for this driver to work correctly.
 
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
@@ -2772,29 +2772,38 @@ config IXGB
 	  will be called ixgb.
 
 config S2IO
-	tristate "S2IO 10Gbe XFrame NIC"
+	tristate "Exar Xframe 10Gb Ethernet Adapter"
 	depends on PCI
 	---help---
-	  This driver supports the 10Gbe XFrame NIC of S2IO.
+	  This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/s2io.txt>.
 
+	  To compile this driver as a module, choose M here. The module
+	  will be called s2io.
+
 config VXGE
-	tristate "Neterion X3100 Series 10GbE PCIe Server Adapter"
+	tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
 	depends on PCI && INET
 	---help---
-	  This driver supports Neterion Inc's X3100 Series 10 GbE PCIe
+	  This driver supports Exar Corp's X3100 Series 10 GbE PCIe
 	  I/O Virtualized Server Adapter.
+
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/vxge.txt>.
 
+	  To compile this driver as a module, choose M here. The module
+	  will be called vxge.
+
 config VXGE_DEBUG_TRACE_ALL
 	bool "Enabling All Debug trace statments in driver"
 	default n
 	depends on VXGE
 	---help---
 	  Say Y here if you want to enabling all the debug trace statements in
-	  driver. By default only few debug trace statements are enabled.
+	  the vxge driver. By default only few debug trace statements are
+	  enabled.
 
 config MYRI10GE
 	tristate "Myricom Myri-10G Ethernet support"
@@ -2906,18 +2915,18 @@ config QLGE
 	  will be called qlge.
 
 config BNA
 	tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
 	depends on PCI
 	---help---
 	  This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
 	  cards.
 	  To compile this driver as a module, choose M here: the module
 	  will be called bna.
 
 	  For general information and support, go to the Brocade support
 	  website at:
 
 	  <http://support.brocade.com>
 
 source "drivers/net/sfc/Kconfig"
 
@@ -3227,18 +3236,18 @@ config PPP_BSDCOMP
 	  modules once you have said "make modules". If unsure, say N.
 
 config PPP_MPPE
 	tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
 	depends on PPP && EXPERIMENTAL
 	select CRYPTO
 	select CRYPTO_SHA1
 	select CRYPTO_ARC4
 	select CRYPTO_ECB
 	---help---
 	  Support for the MPPE Encryption protocol, as employed by the
 	  Microsoft Point-to-Point Tunneling Protocol.
 
 	  See http://pptpclient.sourceforge.net/ for information on
 	  configuring PPTP clients and servers to utilize this method.
 
 config PPPOE
 	tristate "PPP over Ethernet (EXPERIMENTAL)"
@@ -3397,14 +3406,14 @@ config VIRTIO_NET
 	depends on EXPERIMENTAL && VIRTIO
 	---help---
 	  This is the virtual network driver for virtio.  It can be used with
 	  lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
 
 config VMXNET3
 	tristate "VMware VMXNET3 ethernet driver"
 	depends on PCI && INET
 	help
 	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmxnet3.
 
 endif # NETDEVICES
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 4545d5a06c24..bfea499a3513 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -117,7 +117,7 @@
 #define TX_DESC_SIZE	10
 #define MAX_RBUFF_SZ	0x600
 #define MAX_TBUFF_SZ	0x600
-#define TX_TIMEOUT	50
+#define TX_TIMEOUT	(HZ/2)
 #define DELAY		1000
 #define CAM0		0x0
 
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 89876897a6fe..871b1633f543 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -150,7 +150,7 @@ struct net_local {
 #define PORT_OFFSET(o) (o)
 
 
-#define TX_TIMEOUT  10
+#define TX_TIMEOUT  (HZ/10)
 
 
 /* Index to functions, as function prototypes. */
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 8cb27cb7bca1..ce0091eb06f5 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -116,7 +116,7 @@ MODULE_LICENSE("GPL");
 #define RX_RING_LEN_BITS	(RX_LOG_RING_SIZE << 5)
 #define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)
 
-#define TX_TIMEOUT	20
+#define TX_TIMEOUT	(HZ/5)
 
 /* The LANCE Rx and Tx ring descriptors. */
 struct lance_rx_head {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bdb68a600382..518844852f06 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3209,7 +3209,7 @@ out:
 #ifdef CONFIG_PROC_FS
 
 static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(&dev_base_lock)
+	__acquires(RCU)
 	__acquires(&bond->lock)
 {
 	struct bonding *bond = seq->private;
@@ -3218,7 +3218,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 	int i;
 
 	/* make sure the bond won't be taken away */
-	read_lock(&dev_base_lock);
+	rcu_read_lock();
 	read_lock(&bond->lock);
 
 	if (*pos == 0)
@@ -3248,12 +3248,12 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 static void bond_info_seq_stop(struct seq_file *seq, void *v)
 	__releases(&bond->lock)
-	__releases(&dev_base_lock)
+	__releases(RCU)
 {
 	struct bonding *bond = seq->private;
 
 	read_unlock(&bond->lock);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 }
 
 static void bond_info_show_master(struct seq_file *seq)
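
__acquires()/__releases() are sparse annotations with no runtime effect;
updating them keeps "sparse -Wcontext" lock-balance checking accurate
after the dev_base_lock to RCU conversion. A sketch of a balanced pair,
with hypothetical names:

	static void *my_seq_start(struct seq_file *seq, loff_t *pos)
		__acquires(RCU)
	{
		rcu_read_lock();	/* pairs with __acquires(RCU) */
		return NULL;
	}

	static void my_seq_stop(struct seq_file *seq, void *v)
		__releases(RCU)
	{
		rcu_read_unlock();	/* pairs with __releases(RCU) */
	}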
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 64c378cd0c34..74cd880c7e06 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -182,7 +182,7 @@ static int mscan_restart(struct net_device *dev)
 
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
 	WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
-	     "bus-off state expected");
+	     "bus-off state expected\n");
 	out_8(&regs->canmisc, MSCAN_BOHOLD);
 	/* Re-enable receive interrupts. */
 	out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 7c826319ee5a..9e19fbc2f176 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -302,7 +302,7 @@ struct eepro_local {
 #define ee_id_eepro10p0 0x10	/* ID for eepro/10+ */
 #define ee_id_eepro10p1 0x31
 
-#define TX_TIMEOUT 40
+#define TX_TIMEOUT ((4*HZ)/10)
 
 /* Index to functions, as function prototypes. */
 
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 51919fcd50c2..0fa4a9887ba2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -1545,6 +1545,37 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
 
 /* driver bus management functions */
 
+#ifdef CONFIG_PM
+static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
+{
+	struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+	struct net_device *dev = ks->netdev;
+
+	if (netif_running(dev)) {
+		netif_device_detach(dev);
+		ks8851_net_stop(dev);
+	}
+
+	return 0;
+}
+
+static int ks8851_resume(struct spi_device *spi)
+{
+	struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+	struct net_device *dev = ks->netdev;
+
+	if (netif_running(dev)) {
+		ks8851_net_open(dev);
+		netif_device_attach(dev);
+	}
+
+	return 0;
+}
+#else
+#define ks8851_suspend NULL
+#define ks8851_resume NULL
+#endif
+
 static int __devinit ks8851_probe(struct spi_device *spi)
 {
 	struct net_device *ndev;
@@ -1679,6 +1710,8 @@ static struct spi_driver ks8851_driver = {
 	},
 	.probe = ks8851_probe,
 	.remove = __devexit_p(ks8851_remove),
+	.suspend = ks8851_suspend,
+	.resume = ks8851_resume,
 };
 
 static int __init ks8851_init(void)
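
An aside, not part of the patch: the hooks above use the legacy
spi_driver .suspend/.resume entry points, which was the norm at the time.
On later kernels the same detach/stop and open/attach logic would
typically be wired up through dev_pm_ops instead. Hypothetical sketch:

	static int ks8851_pm_suspend(struct device *dev)
	{
		/* same netif_device_detach() + ks8851_net_stop() logic */
		return 0;
	}

	static int ks8851_pm_resume(struct device *dev)
	{
		/* same ks8851_net_open() + netif_device_attach() logic */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(ks8851_pm_ops, ks8851_pm_suspend, ks8851_pm_resume);
	/* hooked up via .driver = { .pm = &ks8851_pm_ops } in the spi_driver */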
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index f06296bfe293..02336edce748 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -207,7 +207,7 @@ tx_full and tbusy flags.
 #define LANCE_BUS_IF 0x16
 #define LANCE_TOTAL_SIZE 0x18
 
-#define TX_TIMEOUT	20
+#define TX_TIMEOUT	(HZ/5)
 
 /* The LANCE Rx and Tx ring descriptors. */
 struct lance_rx_head {
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index c27f4291b350..9e042894479b 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -161,7 +161,7 @@ enum commands {
 #define RX_SUSPEND	0x0030
 #define RX_ABORT	0x0040
 
-#define TX_TIMEOUT	5
+#define TX_TIMEOUT	(HZ/20)
 
 
 struct i596_reg {
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 22821398fc63..bdb8fe868539 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
  */
 #define DRV_NAME  	"qlge"
 #define DRV_STRING 	"QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION	"v1.00.00.25.00.00-01"
+#define DRV_VERSION	"v1.00.00.27.00.00-01"
 
 #define WQ_ADDR_ALIGN	0x3	/* 4 byte alignment */
 
@@ -2221,6 +2221,7 @@ int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
 int ql_unpause_mpi_risc(struct ql_adapter *qdev);
 int ql_pause_mpi_risc(struct ql_adapter *qdev);
 int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
 int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
 		u32 ram_addr, int word_count);
 int ql_core_dump(struct ql_adapter *qdev,
@@ -2236,6 +2237,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
 int ql_mb_get_port_cfg(struct ql_adapter *qdev);
 int ql_mb_set_port_cfg(struct ql_adapter *qdev);
 int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_get_dump(struct ql_adapter *qdev, void *buff);
 void ql_gen_reg_dump(struct ql_adapter *qdev,
 			struct ql_reg_dump *mpi_coredump);
 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 4747492935ef..fca804f36d61 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1317,9 +1317,28 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
 	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
 	if (status)
 		return;
+}
+
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+	/*
+	 * If the dump has already been taken and is stored in our
+	 * internal buffer, and if force dump is set, then just start
+	 * the spool to dump it to the log file and also take a
+	 * snapshot of the general regs to the user's buffer.
+	 * Otherwise, take a complete dump to the user's buffer.
+	 */
 
-	if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
+		if (!ql_core_dump(qdev, buff))
+			ql_soft_reset_mpi_risc(qdev);
+		else
+			netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
+	} else {
+		ql_gen_reg_dump(qdev, buff);
 		ql_get_core_dump(qdev);
+	}
 }
 
 /* Coredump to messages log file using separate worker thread */
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 4892d64f4e05..8149cc9de4ca 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -375,7 +375,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
 	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
 	drvinfo->n_stats = 0;
 	drvinfo->testinfo_len = 0;
-	drvinfo->regdump_len = 0;
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+		drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
+	else
+		drvinfo->regdump_len = sizeof(struct ql_reg_dump);
 	drvinfo->eedump_len = 0;
 }
 
@@ -547,7 +550,12 @@ static void ql_self_test(struct net_device *ndev,
 
 static int ql_get_regs_len(struct net_device *ndev)
 {
-	return sizeof(struct ql_reg_dump);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+		return sizeof(struct ql_mpi_coredump);
+	else
+		return sizeof(struct ql_reg_dump);
 }
 
 static void ql_get_regs(struct net_device *ndev,
@@ -555,7 +563,12 @@ static void ql_get_regs(struct net_device *ndev,
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 
-	ql_gen_reg_dump(qdev, p);
+	ql_get_dump(qdev, p);
+	qdev->core_is_dumped = 0;
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+		regs->len = sizeof(struct ql_mpi_coredump);
+	else
+		regs->len = sizeof(struct ql_reg_dump);
 }
 
 static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
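
For reference, a simplified sketch of how the ethtool core consumes these
two callbacks (paraphrased from the ETHTOOL_GREGS path, not verbatim
kernel code): the core sizes a buffer with get_regs_len() and lets the
driver fill it and shrink regs.len, which is why ql_get_regs() above sets
regs->len explicitly:

	/* simplified ETHTOOL_GREGS flow, paraphrased */
	len = ops->get_regs_len(dev);		/* driver's worst-case size */
	regbuf = vzalloc(len);
	regs.len = len;
	ops->get_regs(dev, &regs, regbuf);	/* driver fills buffer, may lower regs.len */
	/* regs + regbuf are then copied to userspace */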
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 0e7c7c7ee164..100a462cc916 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@ exit:
 	return status;
 }
 
-static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
 {
 	int status;
 	status = ql_write_mpi_reg(qdev, 0x00001010, 1);
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index b2bcf99e6f08..7d42f9a2c068 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -363,7 +363,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
 
 	/* Paranoid */
 	if (skb->len > IPHETH_BUF_SIZE) {
-		WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
+		WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
 		dev->net->stats.tx_dropped++;
 		dev_kfree_skb_irq(skb);
 		return NETDEV_TX_OK;
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 906a3ca3676b..409c2e6053d0 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -19,19 +19,7 @@
 
 #include "vxge-traffic.h"
 #include "vxge-config.h"
-
-static enum vxge_hw_status
-__vxge_hw_fifo_create(
-	struct __vxge_hw_vpath_handle *vpath_handle,
-	struct vxge_hw_fifo_attr *attr);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_abort(
-	struct __vxge_hw_fifo *fifoh);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_reset(
-	struct __vxge_hw_fifo *ringh);
+#include "vxge-main.h"
 
 static enum vxge_hw_status
 __vxge_hw_fifo_delete(
@@ -71,53 +59,15 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
 		u32 size,
 		struct vxge_hw_mempool_dma *dma_object);
 
-
-static struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
-			enum __vxge_hw_channel_type type, u32 length,
-			u32 per_dtr_space, void *userdata);
-
 static void
 __vxge_hw_channel_free(
 	struct __vxge_hw_channel *channel);
 
-static enum vxge_hw_status
-__vxge_hw_channel_initialize(
-	struct __vxge_hw_channel *channel);
-
-static enum vxge_hw_status
-__vxge_hw_channel_reset(
-	struct __vxge_hw_channel *channel);
-
 static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
 
 static enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
-
-static enum vxge_hw_status
 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
 
-static void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
-
-static void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
-	u32 vp_id,
-	struct vxge_hw_vpath_reg __iomem *vpath_reg,
-	struct vxge_hw_device_hw_info *hw_info);
-
-static enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
-
-static void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
-
-static enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
-
 static enum vxge_hw_status
 __vxge_hw_device_register_poll(
 	void __iomem *reg,
@@ -138,9 +88,10 @@ __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
 
 static struct vxge_hw_mempool*
 __vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
 			u32 item_size, u32 private_size, u32 items_initial,
 			u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
 			void *userdata);
+
 static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
 
 static enum vxge_hw_status
@@ -153,52 +104,353 @@ vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
 static enum vxge_hw_status
 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
 
-static u64
-__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
-	struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+static void
+__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
 
 static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
-	u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+	struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
 
 static enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+	struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
 
+static void
+vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
+{
+	u64 val64;
 
-static enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
+	val64 = readq(&vp_reg->rxmac_vcfg0);
+	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+	writeq(val64, &vp_reg->rxmac_vcfg0);
+	val64 = readq(&vp_reg->rxmac_vcfg0);
 
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
-	struct vxge_hw_device_hw_info *hw_info);
+	return;
+}
 
-static enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
+/*
+ * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
+ */
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+	struct __vxge_hw_virtualpath *vpath;
+	u64 val64, rxd_count, rxd_spat;
+	int count = 0, total_count = 0;
 
-static void
-__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
+	vpath = &hldev->virtual_paths[vp_id];
+	vp_reg = vpath->vp_reg;
 
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
-	u32 operation, u32 offset, u64 *stat);
+	vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
 
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
-	struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+	/* Check that the ring controller for this vpath has enough free RxDs
+	 * to send frames to the host. This is done by reading the
+	 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
+	 * RXD_SPAT value for the vpath.
+	 */
+	val64 = readq(&vp_reg->prc_cfg6);
+	rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
+	/* Use a factor of 2 when comparing rxd_count against rxd_spat for some
+	 * leg room.
+	 */
+	rxd_spat *= 2;
+
+	do {
+		mdelay(1);
+
+		rxd_count = readq(&vp_reg->prc_rxd_doorbell);
+
+		/* Check that the ring controller for this vpath does
+		 * not have any frame in its pipeline.
+		 */
+		val64 = readq(&vp_reg->frm_in_progress_cnt);
+		if ((rxd_count <= rxd_spat) || (val64 > 0))
+			count = 0;
+		else
+			count++;
+		total_count++;
+	} while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
+			(total_count < VXGE_HW_MAX_POLLING_COUNT));
+
+	if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+		printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
+			__func__);
+
+	return total_count;
+}
+
+/* vxge_hw_device_wait_receive_idle - This function waits until all frames
+ * stored in the frame buffer for each vpath assigned to the given
+ * function (hldev) have been sent to the host.
+ */
+void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
+{
+	int i, total_count = 0;
+
+	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+			continue;
+
+		total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
+		if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+			break;
+	}
+}
 
 static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
-	struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
+		     u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
+		     u64 *steer_ctrl)
+{
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+	enum vxge_hw_status status;
+	u64 val64;
+	u32 retry = 0, max_retry = 100;
+
+	vp_reg = vpath->vp_reg;
+
+	if (vpath->vp_open) {
+		max_retry = 3;
+		spin_lock(&vpath->lock);
+	}
+
+	writeq(*data0, &vp_reg->rts_access_steer_data0);
+	writeq(*data1, &vp_reg->rts_access_steer_data1);
+	wmb();
+
+	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
+		*steer_ctrl;
+
+	status = __vxge_hw_pio_mem_write64(val64,
+					   &vp_reg->rts_access_steer_ctrl,
+					   VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+					   VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+
+	/* The __vxge_hw_device_register_poll can udelay for a significant
+	 * amount of time, blocking other processes from the CPU. If it
+	 * delays for ~5 secs, an NMI error can occur. A way around this is
+	 * to give up the processor via msleep, but this is not allowed
+	 * while under lock. So, only allow it to sleep for ~4 secs if open.
+	 * Otherwise, delay for 1 sec and sleep for 10 ms until the firmware
+	 * operation has completed or timed out.
+	 */
+	while ((status != VXGE_HW_OK) && retry++ < max_retry) {
+		if (!vpath->vp_open)
+			msleep(20);
+		status = __vxge_hw_device_register_poll(
+				&vp_reg->rts_access_steer_ctrl,
+				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+	}
+
+	if (status != VXGE_HW_OK)
+		goto out;
+
+	val64 = readq(&vp_reg->rts_access_steer_ctrl);
+	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+		*data0 = readq(&vp_reg->rts_access_steer_data0);
+		*data1 = readq(&vp_reg->rts_access_steer_data1);
+		*steer_ctrl = val64;
+	} else
+		status = VXGE_HW_FAIL;
+
+out:
+	if (vpath->vp_open)
+		spin_unlock(&vpath->lock);
+	return status;
+}
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+			     u32 *minor, u32 *build)
+{
+	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status;
+
+	vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+	status = vxge_hw_vpath_fw_api(vpath,
+				      VXGE_HW_FW_UPGRADE_ACTION,
+				      VXGE_HW_FW_UPGRADE_MEMO,
+				      VXGE_HW_FW_UPGRADE_OFFSET_READ,
+				      &data0, &data1, &steer_ctrl);
+	if (status != VXGE_HW_OK)
+		return status;
+
+	*major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+	*minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+	*build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+	return status;
+}
+
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
+{
+	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status;
+	u32 ret;
+
+	vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+	status = vxge_hw_vpath_fw_api(vpath,
+				      VXGE_HW_FW_UPGRADE_ACTION,
+				      VXGE_HW_FW_UPGRADE_MEMO,
+				      VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
+				      &data0, &data1, &steer_ctrl);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
+		goto exit;
+	}
+
+	ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
+	if (ret != 1) {
+		vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
+				__func__, ret);
+		status = VXGE_HW_FAIL;
+	}
+
+exit:
+	return status;
+}
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
+{
+	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status;
+	int ret_code, sec_code;
+
+	vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+	/* send upgrade start command */
+	status = vxge_hw_vpath_fw_api(vpath,
+				      VXGE_HW_FW_UPGRADE_ACTION,
+				      VXGE_HW_FW_UPGRADE_MEMO,
+				      VXGE_HW_FW_UPGRADE_OFFSET_START,
+				      &data0, &data1, &steer_ctrl);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
+				__func__);
+		return status;
+	}
+
+	/* Transfer fw image to adapter 16 bytes at a time */
+	for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
+		steer_ctrl = 0;
+
+		/* The next 128 bits of fwdata to be loaded onto the adapter */
+		data0 = *((u64 *)fwdata);
+		data1 = *((u64 *)fwdata + 1);
+
+		status = vxge_hw_vpath_fw_api(vpath,
+					      VXGE_HW_FW_UPGRADE_ACTION,
+					      VXGE_HW_FW_UPGRADE_MEMO,
+					      VXGE_HW_FW_UPGRADE_OFFSET_SEND,
+					      &data0, &data1, &steer_ctrl);
+		if (status != VXGE_HW_OK) {
+			vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
+					__func__);
+			goto out;
+		}
+
+		ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
+		switch (ret_code) {
+		case VXGE_HW_FW_UPGRADE_OK:
+			/* All OK, send next 16 bytes. */
+			break;
+		case VXGE_FW_UPGRADE_BYTES2SKIP:
+			/* skip bytes in the stream */
+			fwdata += (data0 >> 8) & 0xFFFFFFFF;
+			break;
+		case VXGE_HW_FW_UPGRADE_DONE:
+			goto out;
+		case VXGE_HW_FW_UPGRADE_ERR:
+			sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
+			switch (sec_code) {
+			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
+			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
+				printk(KERN_ERR
+				       "corrupted data from .ncf file\n");
+				break;
+			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
+			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
+			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
+			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
+			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
+				printk(KERN_ERR "invalid .ncf file\n");
+				break;
+			case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
+				printk(KERN_ERR "buffer overflow\n");
+				break;
+			case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
+				printk(KERN_ERR "failed to flash the image\n");
+				break;
+			case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
+				printk(KERN_ERR
+				       "generic error. Unknown error type\n");
+				break;
+			default:
+				printk(KERN_ERR "Unknown error of type %d\n",
+				       sec_code);
+				break;
+			}
+			status = VXGE_HW_FAIL;
+			goto out;
+		default:
+			printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
+			status = VXGE_HW_FAIL;
+			goto out;
+		}
+		/* point to next 16 bytes */
+		fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
+	}
+out:
+	return status;
+}
+
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+				struct eprom_image *img)
+{
+	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status;
+	int i;
+
+	vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+	for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+		data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
+		data1 = steer_ctrl = 0;
+
+		status = vxge_hw_vpath_fw_api(vpath,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+			VXGE_HW_FW_API_GET_EPROM_REV,
+			0, &data0, &data1, &steer_ctrl);
+		if (status != VXGE_HW_OK)
+			break;
+
+		img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
+		img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
+		img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
+		img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
+	}
+
+	return status;
+}
 
 /*
  * __vxge_hw_channel_allocate - Allocate memory for channel
  * This function allocates required memory for the channel and various arrays
  * in the channel
  */
-struct __vxge_hw_channel*
+static struct __vxge_hw_channel *
 __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
 			enum __vxge_hw_channel_type type,
 			u32 length, u32 per_dtr_space, void *userdata)
@@ -269,7 +521,7 @@ exit0:
269 * This function deallocates memory from the channel and various arrays 521 * This function deallocates memory from the channel and various arrays
270 * in the channel 522 * in the channel
271 */ 523 */
272void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) 524static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
273{ 525{
274 kfree(channel->work_arr); 526 kfree(channel->work_arr);
275 kfree(channel->free_arr); 527 kfree(channel->free_arr);
@@ -283,7 +535,7 @@ void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
283 * This function initializes a channel by properly setting the 535 * This function initializes a channel by properly setting the
284 * various references 536 * various references
285 */ 537 */
286enum vxge_hw_status 538static enum vxge_hw_status
287__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) 539__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
288{ 540{
289 u32 i; 541 u32 i;
@@ -318,7 +570,7 @@ __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
318 * __vxge_hw_channel_reset - Resets a channel 570 * __vxge_hw_channel_reset - Resets a channel
319 * This function resets a channel by properly setting the various references 571 * This function resets a channel by properly setting the various references
320 */ 572 */
321enum vxge_hw_status 573static enum vxge_hw_status
322__vxge_hw_channel_reset(struct __vxge_hw_channel *channel) 574__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
323{ 575{
324 u32 i; 576 u32 i;
@@ -345,8 +597,7 @@ __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
345 * Initialize certain PCI/PCI-X configuration registers 597 * Initialize certain PCI/PCI-X configuration registers
346 * with recommended values. Save config space for future hw resets. 598 * with recommended values. Save config space for future hw resets.
347 */ 599 */
348void 600static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
349__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
350{ 601{
351 u16 cmd = 0; 602 u16 cmd = 0;
352 603
@@ -390,7 +641,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
390 return ret; 641 return ret;
391} 642}
392 643
393 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset 644/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
394 * in progress 645 * in progress
395 * This routine checks the vpath reset in progress register is turned zero 646 * This routine checks the vpath reset in progress register is turned zero
396 */ 647 */
@@ -435,7 +686,7 @@ exit:
435 * register location pointers in the device object. It waits until the ric 686
436 * has completed initializing registers. 687
437 */ 688 */
438enum vxge_hw_status 689static enum vxge_hw_status
439__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) 690__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
440{ 691{
441 u64 val64; 692 u64 val64;
@@ -496,26 +747,6 @@ exit:
496} 747}
497 748
498/* 749/*
499 * __vxge_hw_device_id_get
500 * This routine sets the device id and revision numbers into the device
501 * structure
502 */
503void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
504{
505 u64 val64;
506
507 val64 = readq(&hldev->common_reg->titan_asic_id);
508 hldev->device_id =
509 (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
510
511 hldev->major_revision =
512 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
513
514 hldev->minor_revision =
515 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
516}
517
518/*
519 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver 750 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
520 * This routine returns the Access Rights of the driver 751 * This routine returns the Access Rights of the driver
521 */ 752 */
@@ -568,10 +799,25 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
568} 799}
569 800
570/* 801/*
802 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
803 * Returns the function number of the vpath.
804 */
805static u32
806__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
807{
808 u64 val64;
809
810 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
811
812 return
813 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
814}
815
816/*
571 * __vxge_hw_device_host_info_get 817 * __vxge_hw_device_host_info_get
572 * This routine returns the host type assignments 818 * This routine returns the host type assignments
573 */ 819 */
574void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) 820static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
575{ 821{
576 u64 val64; 822 u64 val64;
577 u32 i; 823 u32 i;
@@ -584,16 +830,18 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
584 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); 830 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
585 831
586 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 832 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
587
588 if (!(hldev->vpath_assignments & vxge_mBIT(i))) 833 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
589 continue; 834 continue;
590 835
591 hldev->func_id = 836 hldev->func_id =
592 __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]); 837 __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
593 838
594 hldev->access_rights = __vxge_hw_device_access_rights_get( 839 hldev->access_rights = __vxge_hw_device_access_rights_get(
595 hldev->host_type, hldev->func_id); 840 hldev->host_type, hldev->func_id);
596 841
842 hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
843 hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
844
597 hldev->first_vp_id = i; 845 hldev->first_vp_id = i;
598 break; 846 break;
599 } 847 }
@@ -634,7 +882,8 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
634 * __vxge_hw_device_initialize 882 * __vxge_hw_device_initialize
635 * Initialize Titan-V hardware. 883 * Initialize Titan-V hardware.
636 */ 884 */
637enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) 885static enum vxge_hw_status
886__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
638{ 887{
639 enum vxge_hw_status status = VXGE_HW_OK; 888 enum vxge_hw_status status = VXGE_HW_OK;
640 889
@@ -650,6 +899,196 @@ exit:
650 return status; 899 return status;
651} 900}
652 901
902/*
903 * __vxge_hw_vpath_fw_ver_get - Get the fw version
904 * Returns FW Version
905 */
906static enum vxge_hw_status
907__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
908 struct vxge_hw_device_hw_info *hw_info)
909{
910 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
911 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
912 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
913 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
914 u64 data0, data1 = 0, steer_ctrl = 0;
915 enum vxge_hw_status status;
916
917 status = vxge_hw_vpath_fw_api(vpath,
918 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
919 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
920 0, &data0, &data1, &steer_ctrl);
921 if (status != VXGE_HW_OK)
922 goto exit;
923
924 fw_date->day =
925 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
926 fw_date->month =
927 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
928 fw_date->year =
929 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
930
931 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
932 fw_date->month, fw_date->day, fw_date->year);
933
934 fw_version->major =
935 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
936 fw_version->minor =
937 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
938 fw_version->build =
939 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
940
941 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
942 fw_version->major, fw_version->minor, fw_version->build);
943
944 flash_date->day =
945 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
946 flash_date->month =
947 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
948 flash_date->year =
949 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
950
951 snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
952 flash_date->month, flash_date->day, flash_date->year);
953
954 flash_version->major =
955 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
956 flash_version->minor =
957 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
958 flash_version->build =
959 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
960
961 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
962 flash_version->major, flash_version->minor,
963 flash_version->build);
964
965exit:
966 return status;
967}
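
The fixed-width conversions above are easy to misread: "%2.2d" zero-pads to two digits and "%4.4d" to four. A standalone check of the exact format strings:

#include <stdio.h>

int main(void)
{
	char date[16], version[16];

	snprintf(date, sizeof(date), "%2.2d/%2.2d/%4.4d", 4, 7, 2010);
	snprintf(version, sizeof(version), "%d.%d.%d", 1, 8, 1);
	printf("%s %s\n", date, version);	/* prints: 04/07/2010 1.8.1 */
	return 0;
}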
968
969/*
970 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
971 * part number and product description.
972 */
973static enum vxge_hw_status
974__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
975 struct vxge_hw_device_hw_info *hw_info)
976{
977 enum vxge_hw_status status;
978 u64 data0, data1 = 0, steer_ctrl = 0;
979 u8 *serial_number = hw_info->serial_number;
980 u8 *part_number = hw_info->part_number;
981 u8 *product_desc = hw_info->product_desc;
982 u32 i, j = 0;
983
984 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
985
986 status = vxge_hw_vpath_fw_api(vpath,
987 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
988 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
989 0, &data0, &data1, &steer_ctrl);
990 if (status != VXGE_HW_OK)
991 return status;
992
993 ((u64 *)serial_number)[0] = be64_to_cpu(data0);
994 ((u64 *)serial_number)[1] = be64_to_cpu(data1);
995
996 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
997 data1 = steer_ctrl = 0;
998
999 status = vxge_hw_vpath_fw_api(vpath,
1000 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
1001 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
1002 0, &data0, &data1, &steer_ctrl);
1003 if (status != VXGE_HW_OK)
1004 return status;
1005
1006 ((u64 *)part_number)[0] = be64_to_cpu(data0);
1007 ((u64 *)part_number)[1] = be64_to_cpu(data1);
1008
1009 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
1010 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
1011 data0 = i;
1012 data1 = steer_ctrl = 0;
1013
1014 status = vxge_hw_vpath_fw_api(vpath,
1015 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
1016 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
1017 0, &data0, &data1, &steer_ctrl);
1018 if (status != VXGE_HW_OK)
1019 return status;
1020
1021 ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
1022 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
1023 }
1024
1025 return status;
1026}
1027
1028/*
1029 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
1030 * Returns pci function mode
1031 */
1032static enum vxge_hw_status
1033__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
1034 struct vxge_hw_device_hw_info *hw_info)
1035{
1036 u64 data0, data1 = 0, steer_ctrl = 0;
1037 enum vxge_hw_status status;
1038
1039 data0 = 0;
1040
1041 status = vxge_hw_vpath_fw_api(vpath,
1042 VXGE_HW_FW_API_GET_FUNC_MODE,
1043 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
1044 0, &data0, &data1, &steer_ctrl);
1045 if (status != VXGE_HW_OK)
1046 return status;
1047
1048 hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
1049 return status;
1050}
1051
1052/*
1053 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
1054 * from MAC address table.
1055 */
1056static enum vxge_hw_status
1057__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
1058 u8 *macaddr, u8 *macaddr_mask)
1059{
1060 u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1061 data0 = 0, data1 = 0, steer_ctrl = 0;
1062 enum vxge_hw_status status;
1063 int i;
1064
1065 do {
1066 status = vxge_hw_vpath_fw_api(vpath, action,
1067 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1068 0, &data0, &data1, &steer_ctrl);
1069 if (status != VXGE_HW_OK)
1070 goto exit;
1071
1072 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
1073 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
1074 data1);
1075
1076 for (i = ETH_ALEN; i > 0; i--) {
1077 macaddr[i - 1] = (u8) (data0 & 0xFF);
1078 data0 >>= 8;
1079
1080 macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
1081 data1 >>= 8;
1082 }
1083
1084 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
1085 data0 = 0, data1 = 0, steer_ctrl = 0;
1086
1087 } while (!is_valid_ether_addr(macaddr));
1088exit:
1089 return status;
1090}
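
The unpacking loop rewrites the firmware's 64-bit reply as a byte array: peeling the least-significant byte off first while filling the array from the end leaves the address in network order. A standalone illustration (the example value is made up):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
	uint64_t data0 = 0x0000001b21a0b4c6ULL;	/* made-up MAC in a u64 */
	uint8_t macaddr[ETH_ALEN];
	int i;

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i - 1] = (uint8_t)(data0 & 0xFF);
		data0 >>= 8;
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       macaddr[0], macaddr[1], macaddr[2],
	       macaddr[3], macaddr[4], macaddr[5]);	/* 00:1b:21:a0:b4:c6 */
	return 0;
}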
1091
653/** 1092/**
654 * vxge_hw_device_hw_info_get - Get the hw information 1093 * vxge_hw_device_hw_info_get - Get the hw information
655 * Returns the vpath mask that has the bits set for each vpath allocated 1094 * Returns the vpath mask that has the bits set for each vpath allocated
@@ -665,9 +1104,9 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
665 struct vxge_hw_toc_reg __iomem *toc; 1104 struct vxge_hw_toc_reg __iomem *toc;
666 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; 1105 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
667 struct vxge_hw_common_reg __iomem *common_reg; 1106 struct vxge_hw_common_reg __iomem *common_reg;
668 struct vxge_hw_vpath_reg __iomem *vpath_reg;
669 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; 1107 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
670 enum vxge_hw_status status; 1108 enum vxge_hw_status status;
1109 struct __vxge_hw_virtualpath vpath;
671 1110
672 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); 1111 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
673 1112
@@ -702,7 +1141,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
702 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *) 1141 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
703 (bar0 + val64); 1142 (bar0 + val64);
704 1143
705 hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg); 1144 hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
706 if (__vxge_hw_device_access_rights_get(hw_info->host_type, 1145 if (__vxge_hw_device_access_rights_get(hw_info->host_type,
707 hw_info->func_id) & 1146 hw_info->func_id) &
708 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { 1147 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
@@ -718,16 +1157,19 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
718 1157
719 val64 = readq(&toc->toc_vpath_pointer[i]); 1158 val64 = readq(&toc->toc_vpath_pointer[i]);
720 1159
721 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); 1160 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1161 (bar0 + val64);
1162 vpath.vp_open = 0;
722 1163
723 hw_info->function_mode = 1164 status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
724 __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg); 1165 if (status != VXGE_HW_OK)
1166 goto exit;
725 1167
726 status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info); 1168 status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
727 if (status != VXGE_HW_OK) 1169 if (status != VXGE_HW_OK)
728 goto exit; 1170 goto exit;
729 1171
730 status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info); 1172 status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
731 if (status != VXGE_HW_OK) 1173 if (status != VXGE_HW_OK)
732 goto exit; 1174 goto exit;
733 1175
@@ -735,14 +1177,15 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
735 } 1177 }
736 1178
737 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1179 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
738
739 if (!((hw_info->vpath_mask) & vxge_mBIT(i))) 1180 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
740 continue; 1181 continue;
741 1182
742 val64 = readq(&toc->toc_vpath_pointer[i]); 1183 val64 = readq(&toc->toc_vpath_pointer[i]);
743 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); 1184 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1185 (bar0 + val64);
1186 vpath.vp_open = 0;
744 1187
745 status = __vxge_hw_vpath_addr_get(i, vpath_reg, 1188 status = __vxge_hw_vpath_addr_get(&vpath,
746 hw_info->mac_addrs[i], 1189 hw_info->mac_addrs[i],
747 hw_info->mac_addr_masks[i]); 1190 hw_info->mac_addr_masks[i]);
748 if (status != VXGE_HW_OK) 1191 if (status != VXGE_HW_OK)
@@ -806,7 +1249,6 @@ vxge_hw_device_initialize(
806 vfree(hldev); 1249 vfree(hldev);
807 goto exit; 1250 goto exit;
808 } 1251 }
809 __vxge_hw_device_id_get(hldev);
810 1252
811 __vxge_hw_device_host_info_get(hldev); 1253 __vxge_hw_device_host_info_get(hldev);
812 1254
@@ -814,7 +1256,6 @@ vxge_hw_device_initialize(
814 nblocks++; 1256 nblocks++;
815 1257
816 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1258 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
817
818 if (!(hldev->vpath_assignments & vxge_mBIT(i))) 1259 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
819 continue; 1260 continue;
820 1261
@@ -839,7 +1280,6 @@ vxge_hw_device_initialize(
839 } 1280 }
840 1281
841 status = __vxge_hw_device_initialize(hldev); 1282 status = __vxge_hw_device_initialize(hldev);
842
843 if (status != VXGE_HW_OK) { 1283 if (status != VXGE_HW_OK) {
844 vxge_hw_device_terminate(hldev); 1284 vxge_hw_device_terminate(hldev);
845 goto exit; 1285 goto exit;
@@ -876,7 +1316,6 @@ vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
876 enum vxge_hw_status status = VXGE_HW_OK; 1316 enum vxge_hw_status status = VXGE_HW_OK;
877 1317
878 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1318 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
879
880 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || 1319 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
881 (hldev->virtual_paths[i].vp_open == 1320 (hldev->virtual_paths[i].vp_open ==
882 VXGE_HW_VP_NOT_OPEN)) 1321 VXGE_HW_VP_NOT_OPEN))
@@ -1165,7 +1604,6 @@ exit:
1165 * It can be used to set or reset Pause frame generation or reception 1604 * It can be used to set or reset Pause frame generation or reception
1166 * support of the NIC. 1605 * support of the NIC.
1167 */ 1606 */
1168
1169enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, 1607enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1170 u32 port, u32 tx, u32 rx) 1608 u32 port, u32 tx, u32 rx)
1171{ 1609{
@@ -1409,7 +1847,6 @@ exit:
1409/* 1847/*
1410 * __vxge_hw_ring_create - Create a Ring 1848 * __vxge_hw_ring_create - Create a Ring
1411 * This function creates Ring and initializes it. 1849 * This function creates Ring and initializes it.
1412 *
1413 */ 1850 */
1414static enum vxge_hw_status 1851static enum vxge_hw_status
1415__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, 1852__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
@@ -1845,7 +2282,7 @@ static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1845 * __vxge_hw_device_fifo_config_check - Check fifo configuration. 2282 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1846 * Check the fifo configuration 2283 * Check the fifo configuration
1847 */ 2284 */
1848enum vxge_hw_status 2285static enum vxge_hw_status
1849__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) 2286__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1850{ 2287{
1851 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || 2288 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
@@ -1893,7 +2330,7 @@ __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1893 * __vxge_hw_device_config_check - Check device configuration. 2330 * __vxge_hw_device_config_check - Check device configuration.
1894 * Check the device configuration 2331 * Check the device configuration
1895 */ 2332 */
1896enum vxge_hw_status 2333static enum vxge_hw_status
1897__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) 2334__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1898{ 2335{
1899 u32 i; 2336 u32 i;
@@ -2453,7 +2890,7 @@ __vxge_hw_fifo_mempool_item_alloc(
2453 * __vxge_hw_fifo_create - Create a FIFO 2890 * __vxge_hw_fifo_create - Create a FIFO
2454 * This function creates FIFO and initializes it. 2891 * This function creates FIFO and initializes it.
2455 */ 2892 */
2456enum vxge_hw_status 2893static enum vxge_hw_status
2457__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, 2894__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2458 struct vxge_hw_fifo_attr *attr) 2895 struct vxge_hw_fifo_attr *attr)
2459{ 2896{
@@ -2617,7 +3054,8 @@ static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2617 * __vxge_hw_fifo_delete - Removes the FIFO 3054 * __vxge_hw_fifo_delete - Removes the FIFO
2618 * This function frees up the memory pool and removes the FIFO 3055
2619 */ 3056 */
2620enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) 3057static enum vxge_hw_status
3058__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2621{ 3059{
2622 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; 3060 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2623 3061
@@ -2675,297 +3113,6 @@ exit:
2675 return status; 3113 return status;
2676} 3114}
2677 3115
2678/*
2679 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2680 * Returns the function number of the vpath.
2681 */
2682static u32
2683__vxge_hw_vpath_func_id_get(u32 vp_id,
2684 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2685{
2686 u64 val64;
2687
2688 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2689
2690 return
2691 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2692}
2693
2694/*
2695 * __vxge_hw_read_rts_ds - Program RTS steering criteria
2696 */
2697static inline void
2698__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2699 u64 dta_struct_sel)
2700{
2701 writeq(0, &vpath_reg->rts_access_steer_ctrl);
2702 wmb();
2703 writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2704 writeq(0, &vpath_reg->rts_access_steer_data1);
2705 wmb();
2706}
2707
2708
2709/*
2710 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2711 * part number and product description.
2712 */
2713static enum vxge_hw_status
2714__vxge_hw_vpath_card_info_get(
2715 u32 vp_id,
2716 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2717 struct vxge_hw_device_hw_info *hw_info)
2718{
2719 u32 i, j;
2720 u64 val64;
2721 u64 data1 = 0ULL;
2722 u64 data2 = 0ULL;
2723 enum vxge_hw_status status = VXGE_HW_OK;
2724 u8 *serial_number = hw_info->serial_number;
2725 u8 *part_number = hw_info->part_number;
2726 u8 *product_desc = hw_info->product_desc;
2727
2728 __vxge_hw_read_rts_ds(vpath_reg,
2729 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2730
2731 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2732 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2733 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2734 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2735 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2736 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2737
2738 status = __vxge_hw_pio_mem_write64(val64,
2739 &vpath_reg->rts_access_steer_ctrl,
2740 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2741 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2742
2743 if (status != VXGE_HW_OK)
2744 return status;
2745
2746 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2747
2748 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2749 data1 = readq(&vpath_reg->rts_access_steer_data0);
2750 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2751
2752 data2 = readq(&vpath_reg->rts_access_steer_data1);
2753 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2754 status = VXGE_HW_OK;
2755 } else
2756 *serial_number = 0;
2757
2758 __vxge_hw_read_rts_ds(vpath_reg,
2759 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2760
2761 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2762 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2763 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2764 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2765 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2766 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2767
2768 status = __vxge_hw_pio_mem_write64(val64,
2769 &vpath_reg->rts_access_steer_ctrl,
2770 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2771 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2772
2773 if (status != VXGE_HW_OK)
2774 return status;
2775
2776 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2777
2778 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2779
2780 data1 = readq(&vpath_reg->rts_access_steer_data0);
2781 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2782
2783 data2 = readq(&vpath_reg->rts_access_steer_data1);
2784 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2785
2786 status = VXGE_HW_OK;
2787
2788 } else
2789 *part_number = 0;
2790
2791 j = 0;
2792
2793 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2794 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2795
2796 __vxge_hw_read_rts_ds(vpath_reg, i);
2797
2798 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2799 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2800 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2801 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2802 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2803 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2804
2805 status = __vxge_hw_pio_mem_write64(val64,
2806 &vpath_reg->rts_access_steer_ctrl,
2807 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2808 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2809
2810 if (status != VXGE_HW_OK)
2811 return status;
2812
2813 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2814
2815 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2816
2817 data1 = readq(&vpath_reg->rts_access_steer_data0);
2818 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2819
2820 data2 = readq(&vpath_reg->rts_access_steer_data1);
2821 ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2822
2823 status = VXGE_HW_OK;
2824 } else
2825 *product_desc = 0;
2826 }
2827
2828 return status;
2829}
2830
2831/*
2832 * __vxge_hw_vpath_fw_ver_get - Get the fw version
2833 * Returns FW Version
2834 */
2835static enum vxge_hw_status
2836__vxge_hw_vpath_fw_ver_get(
2837 u32 vp_id,
2838 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2839 struct vxge_hw_device_hw_info *hw_info)
2840{
2841 u64 val64;
2842 u64 data1 = 0ULL;
2843 u64 data2 = 0ULL;
2844 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2845 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2846 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2847 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2848 enum vxge_hw_status status = VXGE_HW_OK;
2849
2850 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2851 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2852 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2853 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2854 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2855 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2856
2857 status = __vxge_hw_pio_mem_write64(val64,
2858 &vpath_reg->rts_access_steer_ctrl,
2859 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2860 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2861
2862 if (status != VXGE_HW_OK)
2863 goto exit;
2864
2865 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2866
2867 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2868
2869 data1 = readq(&vpath_reg->rts_access_steer_data0);
2870 data2 = readq(&vpath_reg->rts_access_steer_data1);
2871
2872 fw_date->day =
2873 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2874 data1);
2875 fw_date->month =
2876 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2877 data1);
2878 fw_date->year =
2879 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2880 data1);
2881
2882 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2883 fw_date->month, fw_date->day, fw_date->year);
2884
2885 fw_version->major =
2886 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2887 fw_version->minor =
2888 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2889 fw_version->build =
2890 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2891
2892 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2893 fw_version->major, fw_version->minor, fw_version->build);
2894
2895 flash_date->day =
2896 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2897 flash_date->month =
2898 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2899 flash_date->year =
2900 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2901
2902 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2903 "%2.2d/%2.2d/%4.4d",
2904 flash_date->month, flash_date->day, flash_date->year);
2905
2906 flash_version->major =
2907 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2908 flash_version->minor =
2909 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2910 flash_version->build =
2911 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2912
2913 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2914 flash_version->major, flash_version->minor,
2915 flash_version->build);
2916
2917 status = VXGE_HW_OK;
2918
2919 } else
2920 status = VXGE_HW_FAIL;
2921exit:
2922 return status;
2923}
2924
2925/*
2926 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
2927 * Returns pci function mode
2928 */
2929static u64
2930__vxge_hw_vpath_pci_func_mode_get(
2931 u32 vp_id,
2932 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2933{
2934 u64 val64;
2935 u64 data1 = 0ULL;
2936 enum vxge_hw_status status = VXGE_HW_OK;
2937
2938 __vxge_hw_read_rts_ds(vpath_reg,
2939 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
2940
2941 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2942 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2943 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2944 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2945 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2946 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2947
2948 status = __vxge_hw_pio_mem_write64(val64,
2949 &vpath_reg->rts_access_steer_ctrl,
2950 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2951 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2952
2953 if (status != VXGE_HW_OK)
2954 goto exit;
2955
2956 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2957
2958 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2959 data1 = readq(&vpath_reg->rts_access_steer_data0);
2960 status = VXGE_HW_OK;
2961 } else {
2962 data1 = 0;
2963 status = VXGE_HW_FAIL;
2964 }
2965exit:
2966 return data1;
2967}
2968
2969/** 3116/**
2970 * vxge_hw_device_flick_link_led - Flick (blink) link LED. 3117 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
2971 * @hldev: HW device. 3118 * @hldev: HW device.
@@ -2974,37 +3121,24 @@ exit:
2974 * Flicker the link LED. 3121 * Flicker the link LED.
2975 */ 3122 */
2976enum vxge_hw_status 3123enum vxge_hw_status
2977vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, 3124vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
2978 u64 on_off)
2979{ 3125{
2980 u64 val64; 3126 struct __vxge_hw_virtualpath *vpath;
2981 enum vxge_hw_status status = VXGE_HW_OK; 3127 u64 data0, data1 = 0, steer_ctrl = 0;
2982 struct vxge_hw_vpath_reg __iomem *vp_reg; 3128 enum vxge_hw_status status;
2983 3129
2984 if (hldev == NULL) { 3130 if (hldev == NULL) {
2985 status = VXGE_HW_ERR_INVALID_DEVICE; 3131 status = VXGE_HW_ERR_INVALID_DEVICE;
2986 goto exit; 3132 goto exit;
2987 } 3133 }
2988 3134
2989 vp_reg = hldev->vpath_reg[hldev->first_vp_id]; 3135 vpath = &hldev->virtual_paths[hldev->first_vp_id];
2990
2991 writeq(0, &vp_reg->rts_access_steer_ctrl);
2992 wmb();
2993 writeq(on_off, &vp_reg->rts_access_steer_data0);
2994 writeq(0, &vp_reg->rts_access_steer_data1);
2995 wmb();
2996 3136
2997 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( 3137 data0 = on_off;
2998 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) | 3138 status = vxge_hw_vpath_fw_api(vpath,
2999 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( 3139 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
3000 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | 3140 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
3001 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | 3141 0, &data0, &data1, &steer_ctrl);
3002 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3003
3004 status = __vxge_hw_pio_mem_write64(val64,
3005 &vp_reg->rts_access_steer_ctrl,
3006 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3007 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3008exit: 3142exit:
3009 return status; 3143 return status;
3010} 3144}
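
One plausible caller, sketched for context: an identify-style helper that blinks the LED once. The on/off encodings (1/0) and the msleep() interval are assumptions for illustration, not taken from the driver:

	static void vxge_blink_once(struct __vxge_hw_device *hldev)
	{
		vxge_hw_device_flick_link_led(hldev, 1);	/* assumed: LED on */
		msleep(500);					/* linux/delay.h */
		vxge_hw_device_flick_link_led(hldev, 0);	/* assumed: LED off */
	}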
@@ -3013,63 +3147,38 @@ exit:
3013 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables 3147 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3014 */ 3148 */
3015enum vxge_hw_status 3149enum vxge_hw_status
3016__vxge_hw_vpath_rts_table_get( 3150__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3017 struct __vxge_hw_vpath_handle *vp, 3151 u32 action, u32 rts_table, u32 offset,
3018 u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2) 3152 u64 *data0, u64 *data1)
3019{ 3153{
3020 u64 val64; 3154 enum vxge_hw_status status;
3021 struct __vxge_hw_virtualpath *vpath; 3155 u64 steer_ctrl = 0;
3022 struct vxge_hw_vpath_reg __iomem *vp_reg;
3023
3024 enum vxge_hw_status status = VXGE_HW_OK;
3025 3156
3026 if (vp == NULL) { 3157 if (vp == NULL) {
3027 status = VXGE_HW_ERR_INVALID_HANDLE; 3158 status = VXGE_HW_ERR_INVALID_HANDLE;
3028 goto exit; 3159 goto exit;
3029 } 3160 }
3030 3161
3031 vpath = vp->vpath;
3032 vp_reg = vpath->vp_reg;
3033
3034 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3035 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3036 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3037 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3038
3039 if ((rts_table == 3162 if ((rts_table ==
3040 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || 3163 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3041 (rts_table == 3164 (rts_table ==
3042 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || 3165 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3043 (rts_table == 3166 (rts_table ==
3044 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || 3167 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3045 (rts_table == 3168 (rts_table ==
3046 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { 3169 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3047 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; 3170 steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3048 } 3171 }
3049 3172
3050 status = __vxge_hw_pio_mem_write64(val64, 3173 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3051 &vp_reg->rts_access_steer_ctrl, 3174 data0, data1, &steer_ctrl);
3052 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3053 vpath->hldev->config.device_poll_millis);
3054
3055 if (status != VXGE_HW_OK) 3175 if (status != VXGE_HW_OK)
3056 goto exit; 3176 goto exit;
3057 3177
3058 val64 = readq(&vp_reg->rts_access_steer_ctrl); 3178 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3059 3179 (rts_table !=
3060 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { 3180 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3061 3181 *data1 = 0;
3062 *data1 = readq(&vp_reg->rts_access_steer_data0);
3063
3064 if ((rts_table ==
3065 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3066 (rts_table ==
3067 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3068 *data2 = readq(&vp_reg->rts_access_steer_data1);
3069 }
3070 status = VXGE_HW_OK;
3071 } else
3072 status = VXGE_HW_FAIL;
3073exit: 3182exit:
3074 return status; 3183 return status;
3075} 3184}
@@ -3078,107 +3187,27 @@ exit:
3078 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables 3187 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3079 */ 3188 */
3080enum vxge_hw_status 3189enum vxge_hw_status
3081__vxge_hw_vpath_rts_table_set( 3190__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
3082 struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table, 3191 u32 rts_table, u32 offset, u64 steer_data0,
3083 u32 offset, u64 data1, u64 data2) 3192 u64 steer_data1)
3084{ 3193{
3085 u64 val64; 3194 u64 data0, data1 = 0, steer_ctrl = 0;
3086 struct __vxge_hw_virtualpath *vpath; 3195 enum vxge_hw_status status;
3087 enum vxge_hw_status status = VXGE_HW_OK;
3088 struct vxge_hw_vpath_reg __iomem *vp_reg;
3089 3196
3090 if (vp == NULL) { 3197 if (vp == NULL) {
3091 status = VXGE_HW_ERR_INVALID_HANDLE; 3198 status = VXGE_HW_ERR_INVALID_HANDLE;
3092 goto exit; 3199 goto exit;
3093 } 3200 }
3094 3201
3095 vpath = vp->vpath; 3202 data0 = steer_data0;
3096 vp_reg = vpath->vp_reg;
3097
3098 writeq(data1, &vp_reg->rts_access_steer_data0);
3099 wmb();
3100 3203
3101 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || 3204 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3102 (rts_table == 3205 (rts_table ==
3103 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { 3206 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3104 writeq(data2, &vp_reg->rts_access_steer_data1); 3207 data1 = steer_data1;
3105 wmb();
3106 }
3107
3108 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3109 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3110 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3111 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3112
3113 status = __vxge_hw_pio_mem_write64(val64,
3114 &vp_reg->rts_access_steer_ctrl,
3115 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3116 vpath->hldev->config.device_poll_millis);
3117
3118 if (status != VXGE_HW_OK)
3119 goto exit;
3120
3121 val64 = readq(&vp_reg->rts_access_steer_ctrl);
3122
3123 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
3124 status = VXGE_HW_OK;
3125 else
3126 status = VXGE_HW_FAIL;
3127exit:
3128 return status;
3129}
3130
3131/*
3132 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3133 * from MAC address table.
3134 */
3135static enum vxge_hw_status
3136__vxge_hw_vpath_addr_get(
3137 u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3138 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3139{
3140 u32 i;
3141 u64 val64;
3142 u64 data1 = 0ULL;
3143 u64 data2 = 0ULL;
3144 enum vxge_hw_status status = VXGE_HW_OK;
3145
3146 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3147 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3148 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3149 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3150 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3151 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3152
3153 status = __vxge_hw_pio_mem_write64(val64,
3154 &vpath_reg->rts_access_steer_ctrl,
3155 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3156 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3157
3158 if (status != VXGE_HW_OK)
3159 goto exit;
3160
3161 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3162
3163 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3164 3208
3165 data1 = readq(&vpath_reg->rts_access_steer_data0); 3209 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3166 data2 = readq(&vpath_reg->rts_access_steer_data1); 3210 &data0, &data1, &steer_ctrl);
3167
3168 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3169 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3170 data2);
3171
3172 for (i = ETH_ALEN; i > 0; i--) {
3173 macaddr[i-1] = (u8)(data1 & 0xFF);
3174 data1 >>= 8;
3175
3176 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3177 data2 >>= 8;
3178 }
3179 status = VXGE_HW_OK;
3180 } else
3181 status = VXGE_HW_FAIL;
3182exit: 3211exit:
3183 return status; 3212 return status;
3184} 3213}
@@ -3204,6 +3233,8 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3204 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, 3233 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3205 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, 3234 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3206 0, &data0, &data1); 3235 0, &data0, &data1);
3236 if (status != VXGE_HW_OK)
3237 goto exit;
3207 3238
3208 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | 3239 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3209 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); 3240 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
@@ -4117,6 +4148,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4117 4148
4118 vpath = &hldev->virtual_paths[vp_id]; 4149 vpath = &hldev->virtual_paths[vp_id];
4119 4150
4151 spin_lock_init(&hldev->virtual_paths[vp_id].lock);
4120 vpath->vp_id = vp_id; 4152 vpath->vp_id = vp_id;
4121 vpath->vp_open = VXGE_HW_VP_OPEN; 4153 vpath->vp_open = VXGE_HW_VP_OPEN;
4122 vpath->hldev = hldev; 4154 vpath->hldev = hldev;
@@ -4127,14 +4159,12 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4127 __vxge_hw_vpath_reset(hldev, vp_id); 4159 __vxge_hw_vpath_reset(hldev, vp_id);
4128 4160
4129 status = __vxge_hw_vpath_reset_check(vpath); 4161 status = __vxge_hw_vpath_reset_check(vpath);
4130
4131 if (status != VXGE_HW_OK) { 4162 if (status != VXGE_HW_OK) {
4132 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); 4163 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4133 goto exit; 4164 goto exit;
4134 } 4165 }
4135 4166
4136 status = __vxge_hw_vpath_mgmt_read(hldev, vpath); 4167 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4137
4138 if (status != VXGE_HW_OK) { 4168 if (status != VXGE_HW_OK) {
4139 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); 4169 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4140 goto exit; 4170 goto exit;
@@ -4148,7 +4178,6 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4148 hldev->tim_int_mask1, vp_id); 4178 hldev->tim_int_mask1, vp_id);
4149 4179
4150 status = __vxge_hw_vpath_initialize(hldev, vp_id); 4180 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4151
4152 if (status != VXGE_HW_OK) 4181 if (status != VXGE_HW_OK)
4153 __vxge_hw_vp_terminate(hldev, vp_id); 4182 __vxge_hw_vp_terminate(hldev, vp_id);
4154exit: 4183exit:
@@ -4335,16 +4364,18 @@ vpath_open_exit1:
4335void 4364void
4336vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) 4365vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4337{ 4366{
4338 struct __vxge_hw_virtualpath *vpath = NULL; 4367 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4368 struct __vxge_hw_ring *ring = vpath->ringh;
4369 struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
4339 u64 new_count, val64, val164; 4370 u64 new_count, val64, val164;
4340 struct __vxge_hw_ring *ring;
4341 4371
4342 vpath = vp->vpath; 4372 if (vdev->titan1) {
4343 ring = vpath->ringh; 4373 new_count = readq(&vpath->vp_reg->rxdmem_size);
4374 new_count &= 0x1fff;
4375 } else
4376 new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
4344 4377
4345 new_count = readq(&vpath->vp_reg->rxdmem_size); 4378 val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
4346 new_count &= 0x1fff;
4347 val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4348 4379
4349 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), 4380 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4350 &vpath->vp_reg->prc_rxd_doorbell); 4381 &vpath->vp_reg->prc_rxd_doorbell);
@@ -4414,7 +4445,9 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4414 4445
4415 __vxge_hw_vp_terminate(devh, vp_id); 4446 __vxge_hw_vp_terminate(devh, vp_id);
4416 4447
4448 spin_lock(&vpath->lock);
4417 vpath->vp_open = VXGE_HW_VP_NOT_OPEN; 4449 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4450 spin_unlock(&vpath->lock);
4418 4451
4419vpath_close_exit: 4452vpath_close_exit:
4420 return status; 4453 return status;
@@ -4810,7 +4843,7 @@ static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
4810 * __vxge_hw_blockpool_create - Create block pool 4843 * __vxge_hw_blockpool_create - Create block pool
4811 */ 4844 */
4812 4845
4813enum vxge_hw_status 4846static enum vxge_hw_status
4814__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, 4847__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4815 struct __vxge_hw_blockpool *blockpool, 4848 struct __vxge_hw_blockpool *blockpool,
4816 u32 pool_size, 4849 u32 pool_size,
@@ -4910,7 +4943,7 @@ blockpool_create_exit:
4910 * __vxge_hw_blockpool_destroy - Deallocates the block pool 4943 * __vxge_hw_blockpool_destroy - Deallocates the block pool
4911 */ 4944 */
4912 4945
4913void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) 4946static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4914{ 4947{
4915 4948
4916 struct __vxge_hw_device *hldev; 4949 struct __vxge_hw_device *hldev;
@@ -5076,7 +5109,7 @@ exit:
5076 * Allocates a block of memory of given size, either from block pool 5109 * Allocates a block of memory of given size, either from block pool
5077 * or by calling vxge_os_dma_malloc() 5110 * or by calling vxge_os_dma_malloc()
5078 */ 5111 */
5079void * 5112static void *
5080__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, 5113__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5081 struct vxge_hw_mempool_dma *dma_object) 5114 struct vxge_hw_mempool_dma *dma_object)
5082{ 5115{
@@ -5140,7 +5173,7 @@ exit:
5140 * __vxge_hw_blockpool_free - Frees the memory allocated with 5173
5141 __vxge_hw_blockpool_malloc 5174 __vxge_hw_blockpool_malloc
5142 */ 5175 */
5143void 5176static void
5144__vxge_hw_blockpool_free(struct __vxge_hw_device *devh, 5177__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5145 void *memblock, u32 size, 5178 void *memblock, u32 size,
5146 struct vxge_hw_mempool_dma *dma_object) 5179 struct vxge_hw_mempool_dma *dma_object)
@@ -5192,7 +5225,7 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5192 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool 5225 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5193 * This function allocates a block from block pool or from the system 5226 * This function allocates a block from block pool or from the system
5194 */ 5227 */
5195struct __vxge_hw_blockpool_entry * 5228static struct __vxge_hw_blockpool_entry *
5196__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) 5229__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5197{ 5230{
5198 struct __vxge_hw_blockpool_entry *entry = NULL; 5231 struct __vxge_hw_blockpool_entry *entry = NULL;
@@ -5227,7 +5260,7 @@ __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5227 * 5260 *
5228 * This function frees a block from block pool 5261 * This function frees a block from block pool
5229 */ 5262 */
5230void 5263static void
5231__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, 5264__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5232 struct __vxge_hw_blockpool_entry *entry) 5265 struct __vxge_hw_blockpool_entry *entry)
5233{ 5266{
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 5c00861b6c2c..5b2c8313426d 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -20,13 +20,6 @@
20#define VXGE_CACHE_LINE_SIZE 128 20#define VXGE_CACHE_LINE_SIZE 128
21#endif 21#endif
22 22
23#define vxge_os_vaprintf(level, mask, fmt, ...) { \
24 char buff[255]; \
25 snprintf(buff, 255, fmt, __VA_ARGS__); \
26 printk(buff); \
27 printk("\n"); \
28}
29
30#ifndef VXGE_ALIGN 23#ifndef VXGE_ALIGN
31#define VXGE_ALIGN(adrs, size) \ 24#define VXGE_ALIGN(adrs, size) \
32 (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1)) 25 (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
@@ -36,8 +29,16 @@
36#define VXGE_HW_MAX_MTU 9600 29#define VXGE_HW_MAX_MTU 9600
37#define VXGE_HW_DEFAULT_MTU 1500 30#define VXGE_HW_DEFAULT_MTU 1500
38 31
39#ifdef VXGE_DEBUG_ASSERT 32#define VXGE_HW_MAX_ROM_IMAGES 8
33
34struct eprom_image {
35 u8 is_valid:1;
36 u8 index;
37 u8 type;
38 u16 version;
39};
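
A standalone look at the layout just declared, with u8/u16 mapped onto stdint types. The single-bit is_valid field still claims a full byte slot, so the structure typically packs into six bytes on common ABIs:

#include <stdint.h>
#include <stdio.h>

struct eprom_image {
	uint8_t  is_valid:1;
	uint8_t  index;
	uint8_t  type;
	uint16_t version;
};

int main(void)
{
	struct eprom_image img = {
		.is_valid = 1, .index = 2, .type = 3, .version = 0x0104,
	};

	printf("sizeof = %zu, valid = %u\n", sizeof(img), img.is_valid);
	return 0;
}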
40 40
41#ifdef VXGE_DEBUG_ASSERT
41/** 42/**
42 * vxge_assert 43 * vxge_assert
43 * @test: C-condition to check 44 * @test: C-condition to check
@@ -48,16 +49,13 @@
48 * compilation 49 * compilation
49 * time. 50 * time.
50 */ 51 */
51#define vxge_assert(test) { \ 52#define vxge_assert(test) BUG_ON(!(test))
52 if (!(test)) \
53 vxge_os_bug("bad cond: "#test" at %s:%d\n", \
54 __FILE__, __LINE__); }
55#else 53#else
56#define vxge_assert(test) 54#define vxge_assert(test)
57#endif /* end of VXGE_DEBUG_ASSERT */ 55#endif /* end of VXGE_DEBUG_ASSERT */
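
Usage is unchanged by the switch to BUG_ON(), but the condition must stay free of side effects, since the whole test compiles away when VXGE_DEBUG_ASSERT is not defined:

	vxge_assert(vpath != NULL);	/* fine: a pure test */
	vxge_assert(count-- > 0);	/* wrong: the decrement is lost in
					   non-debug builds */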
58 56
59/** 57/**
60 * enum enum vxge_debug_level 58 * enum vxge_debug_level
61 * @VXGE_NONE: debug disabled 59 * @VXGE_NONE: debug disabled
62 * @VXGE_ERR: all errors going to be logged out 60 * @VXGE_ERR: all errors going to be logged out
63 * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs 61 * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs
@@ -159,6 +157,47 @@ enum vxge_hw_device_link_state {
159}; 157};
160 158
161/** 159/**
160 * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
161 * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes
162 * @VXGE_HW_FW_UPGRADE_DONE: upload completed
163 * @VXGE_HW_FW_UPGRADE_ERR: upload error
164 * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
165 *
166 */
167enum vxge_hw_fw_upgrade_code {
168 VXGE_HW_FW_UPGRADE_OK = 0,
169 VXGE_HW_FW_UPGRADE_DONE = 1,
170 VXGE_HW_FW_UPGRADE_ERR = 2,
171 VXGE_FW_UPGRADE_BYTES2SKIP = 3
172};
173
174/**
175 * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
176 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
177 * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
178 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
179 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
180 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
181 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
182 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
183 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
184 * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error unknown type
185 * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash image check failed
186 */
187enum vxge_hw_fw_upgrade_err_code {
188 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
189 VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
190 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
191 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
192 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
193 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
194 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
195 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
196 VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
197 VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
198};
199
200/**
162 * struct vxge_hw_device_date - Date Format 201 * struct vxge_hw_device_date - Date Format
163 * @day: Day 202 * @day: Day
164 * @month: Month 203 * @month: Month
@@ -465,7 +504,6 @@ struct vxge_hw_device_config {
465 * See also: vxge_hw_driver_initialize(). 504 * See also: vxge_hw_driver_initialize().
466 */ 505 */
467struct vxge_hw_uld_cbs { 506struct vxge_hw_uld_cbs {
468
469 void (*link_up)(struct __vxge_hw_device *devh); 507 void (*link_up)(struct __vxge_hw_device *devh);
470 void (*link_down)(struct __vxge_hw_device *devh); 508 void (*link_down)(struct __vxge_hw_device *devh);
471 void (*crit_err)(struct __vxge_hw_device *devh, 509 void (*crit_err)(struct __vxge_hw_device *devh,
@@ -652,6 +690,7 @@ struct __vxge_hw_virtualpath {
652 struct vxge_hw_vpath_stats_hw_info *hw_stats; 690 struct vxge_hw_vpath_stats_hw_info *hw_stats;
653 struct vxge_hw_vpath_stats_hw_info *hw_stats_sav; 691 struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
654 struct vxge_hw_vpath_stats_sw_info *sw_stats; 692 struct vxge_hw_vpath_stats_sw_info *sw_stats;
693 spinlock_t lock;
655}; 694};
656 695
657/* 696/*
@@ -674,9 +713,6 @@ struct __vxge_hw_vpath_handle{
674/** 713/**
675 * struct __vxge_hw_device - Hal device object 714 * struct __vxge_hw_device - Hal device object
676 * @magic: Magic Number 715 * @magic: Magic Number
677 * @device_id: PCI Device Id of the adapter
678 * @major_revision: PCI Device major revision
679 * @minor_revision: PCI Device minor revision
680 * @bar0: BAR0 virtual address. 716 * @bar0: BAR0 virtual address.
681 * @pdev: Physical device handle 717 * @pdev: Physical device handle
682 * @config: Configuration passed by the LL driver at initialization 718
@@ -688,9 +724,6 @@ struct __vxge_hw_device {
688 u32 magic; 724 u32 magic;
689#define VXGE_HW_DEVICE_MAGIC 0x12345678 725#define VXGE_HW_DEVICE_MAGIC 0x12345678
690#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD 726#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
691 u16 device_id;
692 u8 major_revision;
693 u8 minor_revision;
694 void __iomem *bar0; 727 void __iomem *bar0;
695 struct pci_dev *pdev; 728 struct pci_dev *pdev;
696 struct net_device *ndev; 729 struct net_device *ndev;
@@ -731,6 +764,7 @@ struct __vxge_hw_device {
731 u32 debug_level; 764 u32 debug_level;
732 u32 level_err; 765 u32 level_err;
733 u32 level_trace; 766 u32 level_trace;
767 u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
734}; 768};
735 769
736#define VXGE_HW_INFO_LEN 64 770#define VXGE_HW_INFO_LEN 64
@@ -1413,12 +1447,12 @@ enum vxge_hw_rth_algoritms {
1413 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get(). 1447 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
1414 */ 1448 */
1415struct vxge_hw_rth_hash_types { 1449struct vxge_hw_rth_hash_types {
1416 u8 hash_type_tcpipv4_en; 1450 u8 hash_type_tcpipv4_en:1,
1417 u8 hash_type_ipv4_en; 1451 hash_type_ipv4_en:1,
1418 u8 hash_type_tcpipv6_en; 1452 hash_type_tcpipv6_en:1,
1419 u8 hash_type_ipv6_en; 1453 hash_type_ipv6_en:1,
1420 u8 hash_type_tcpipv6ex_en; 1454 hash_type_tcpipv6ex_en:1,
1421 u8 hash_type_ipv6ex_en; 1455 hash_type_ipv6ex_en:1;
1422}; 1456};
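
Packed into single bits, the flags now initialize like any other bit-field. A hedged configuration sketch (which hash types to enable is purely illustrative):

	struct vxge_hw_rth_hash_types hash_types = {
		.hash_type_tcpipv4_en = 1,	/* hash TCP/IPv4 flows */
		.hash_type_ipv4_en    = 1,	/* and other IPv4 frames */
		/* remaining types stay disabled */
	};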
1423 1457
1424void vxge_hw_device_debug_set( 1458void vxge_hw_device_debug_set(
@@ -2000,7 +2034,7 @@ enum vxge_hw_status
2000vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); 2034vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2001 2035
2002/** 2036/**
2003 * vxge_debug 2037 * vxge_debug_ll
2004 * @level: level of debug verbosity. 2038 * @level: level of debug verbosity.
2005 * @mask: mask for the debug 2039 * @mask: mask for the debug
2006 * @buf: Circular buffer for tracing 2040 * @buf: Circular buffer for tracing
@@ -2012,26 +2046,13 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2012 * may be compiled out if DEBUG macro was never defined. 2046 * may be compiled out if DEBUG macro was never defined.
2013 * See also: enum vxge_debug_level{}. 2047 * See also: enum vxge_debug_level{}.
2014 */ 2048 */
2015
2016#define vxge_trace_aux(level, mask, fmt, ...) \
2017{\
2018 vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
2019}
2020
2021#define vxge_debug(module, level, mask, fmt, ...) { \
2022if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
2023 (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
2024 if ((mask & VXGE_DEBUG_MASK) == mask)\
2025 vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
2026} \
2027}
2028
2029#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK) 2049#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
2030#define vxge_debug_ll(level, mask, fmt, ...) \ 2050#define vxge_debug_ll(level, mask, fmt, ...) do { \
2031{\ 2051 if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
2032 vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\ 2052 (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
2033} 2053 if ((mask & VXGE_DEBUG_MASK) == mask) \
2034 2054 printk(fmt "\n", __VA_ARGS__); \
2055} while (0)
2035#else 2056#else
2036#define vxge_debug_ll(level, mask, fmt, ...) 2057#define vxge_debug_ll(level, mask, fmt, ...)
2037#endif 2058#endif
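The rewritten vxge_debug_ll() collapses the old vxge_debug()/vxge_trace_aux() double indirection into a single printk() guarded by the compile-time component and level masks. A minimal usage sketch (the mask and arguments are illustrative; VXGE_DEBUG_INIT is one of the per-subsystem masks documented in vxge-main.h):

	/* Printed only when VXGE_COMPONENT_LL is enabled in the ERR/TRACE
	 * module masks and VXGE_DEBUG_INIT is set in VXGE_DEBUG_MASK;
	 * otherwise the statement compiles away entirely. */
	vxge_debug_ll(VXGE_ERR, VXGE_DEBUG_INIT,
		      "%s: vpath %d initialization failed",
		      VXGE_DRIVER_NAME, vp_id);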
@@ -2051,4 +2072,26 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
2051 2072
2052enum vxge_hw_status 2073enum vxge_hw_status
2053__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id); 2074__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
2075
2076#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
2077#define VXGE_HW_MAX_POLLING_COUNT 100
2078
2079void
2080vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
2081
2082enum vxge_hw_status
2083vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
2084 u32 *minor, u32 *build);
2085
2086enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
2087
2088enum vxge_hw_status
2089vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
2090 int size);
2091
2092enum vxge_hw_status
2093vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
2094 struct eprom_image *eprom_image_data);
2095
2096int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
2054#endif 2097#endif
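Taken together, the declarations added above sketch the firmware-upgrade sequence the LL driver is expected to drive. A hedged outline with error handling elided (fw here is an already-loaded struct firmware *; the complete version is vxge_fw_upgrade() in vxge-main.c further down):

	u32 major, minor, build;
	enum vxge_hw_status status;

	/* 1) stream the image to the adapter, 2) read back the version it
	 * reports, 3) commit the image to flash. */
	status = vxge_update_fw_image(hldev, fw->data, fw->size);
	if (status == VXGE_HW_OK)
		status = vxge_hw_upgrade_read_version(hldev, &major, &minor,
						      &build);
	if (status == VXGE_HW_OK)
		status = vxge_hw_flash_fw(hldev);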
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index b67746eef923..09f721e10517 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -11,7 +11,7 @@
11 * Virtualized Server Adapter. 11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#include<linux/ethtool.h> 14#include <linux/ethtool.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/pci.h> 16#include <linux/pci.h>
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
@@ -29,7 +29,6 @@
29 * Return value: 29 * Return value:
30 * 0 on success. 30 * 0 on success.
31 */ 31 */
32
33static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info) 32static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
34{ 33{
35 /* We currently only support 10Gb/FULL */ 34 /* We currently only support 10Gb/FULL */
@@ -79,10 +78,9 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
79 * Returns driver specific information like name, version etc. to ethtool. 78 * Returns driver specific information like name, version etc. to ethtool.
80 */ 79 */
81static void vxge_ethtool_gdrvinfo(struct net_device *dev, 80static void vxge_ethtool_gdrvinfo(struct net_device *dev,
82 struct ethtool_drvinfo *info) 81 struct ethtool_drvinfo *info)
83{ 82{
84 struct vxgedev *vdev; 83 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
85 vdev = (struct vxgedev *)netdev_priv(dev);
86 strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME)); 84 strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
87 strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION)); 85 strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
88 strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN); 86 strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
@@ -104,15 +102,14 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
104 * buffer area. 102 * buffer area.
105 */ 103 */
106static void vxge_ethtool_gregs(struct net_device *dev, 104static void vxge_ethtool_gregs(struct net_device *dev,
107 struct ethtool_regs *regs, void *space) 105 struct ethtool_regs *regs, void *space)
108{ 106{
109 int index, offset; 107 int index, offset;
110 enum vxge_hw_status status; 108 enum vxge_hw_status status;
111 u64 reg; 109 u64 reg;
112 u64 *reg_space = (u64 *) space; 110 u64 *reg_space = (u64 *)space;
113 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 111 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
114 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 112 struct __vxge_hw_device *hldev = vdev->devh;
115 pci_get_drvdata(vdev->pdev);
116 113
117 regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; 114 regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
118 regs->version = vdev->pdev->subsystem_device; 115 regs->version = vdev->pdev->subsystem_device;
@@ -148,8 +145,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
148static int vxge_ethtool_idnic(struct net_device *dev, u32 data) 145static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
149{ 146{
150 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 147 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
151 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 148 struct __vxge_hw_device *hldev = vdev->devh;
152 pci_get_drvdata(vdev->pdev);
153 149
154 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON); 150 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
155 msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME); 151 msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
@@ -168,11 +164,10 @@ static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
168 * void 164 * void
169 */ 165 */
170static void vxge_ethtool_getpause_data(struct net_device *dev, 166static void vxge_ethtool_getpause_data(struct net_device *dev,
171 struct ethtool_pauseparam *ep) 167 struct ethtool_pauseparam *ep)
172{ 168{
173 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 169 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
174 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 170 struct __vxge_hw_device *hldev = vdev->devh;
175 pci_get_drvdata(vdev->pdev);
176 171
177 vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause); 172 vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
178} 173}
@@ -188,11 +183,10 @@ static void vxge_ethtool_getpause_data(struct net_device *dev,
188 * int, returns 0 on Success 183 * int, returns 0 on Success
189 */ 184 */
190static int vxge_ethtool_setpause_data(struct net_device *dev, 185static int vxge_ethtool_setpause_data(struct net_device *dev,
191 struct ethtool_pauseparam *ep) 186 struct ethtool_pauseparam *ep)
192{ 187{
193 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 188 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
194 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 189 struct __vxge_hw_device *hldev = vdev->devh;
195 pci_get_drvdata(vdev->pdev);
196 190
197 vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause); 191 vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
198 192
@@ -209,9 +203,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
209 enum vxge_hw_status status; 203 enum vxge_hw_status status;
210 enum vxge_hw_status swstatus; 204 enum vxge_hw_status swstatus;
211 struct vxge_vpath *vpath = NULL; 205 struct vxge_vpath *vpath = NULL;
212
213 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 206 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
214 struct __vxge_hw_device *hldev = vdev->devh; 207 struct __vxge_hw_device *hldev = vdev->devh;
215 struct vxge_hw_xmac_stats *xmac_stats; 208 struct vxge_hw_xmac_stats *xmac_stats;
216 struct vxge_hw_device_stats_sw_info *sw_stats; 209 struct vxge_hw_device_stats_sw_info *sw_stats;
217 struct vxge_hw_device_stats_hw_info *hw_stats; 210 struct vxge_hw_device_stats_hw_info *hw_stats;
@@ -574,8 +567,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
574 kfree(hw_stats); 567 kfree(hw_stats);
575} 568}
576 569
577static void vxge_ethtool_get_strings(struct net_device *dev, 570static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
578 u32 stringset, u8 *data) 571 u8 *data)
579{ 572{
580 int stat_size = 0; 573 int stat_size = 0;
581 int i, j; 574 int i, j;
@@ -1119,6 +1112,59 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1119 } 1112 }
1120} 1113}
1121 1114
1115static int vxge_set_flags(struct net_device *dev, u32 data)
1116{
1117 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
1118 enum vxge_hw_status status;
1119
1120 if (data & ~ETH_FLAG_RXHASH)
1121 return -EOPNOTSUPP;
1122
1123 if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
1124 return 0;
1125
1126 if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
1127 return -EINVAL;
1128
1129 vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
1130
1131 /* Enabling RTH requires some of the logic in vxge_device_register and a
1132 * vpath reset. Due to these restrictions, only allow modification
1133 * while the interface is down.
1134 */
1135 status = vxge_reset_all_vpaths(vdev);
1136 if (status != VXGE_HW_OK) {
1137 vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
1138 return -EFAULT;
1139 }
1140
1141 if (vdev->devh->config.rth_en)
1142 dev->features |= NETIF_F_RXHASH;
1143 else
1144 dev->features &= ~NETIF_F_RXHASH;
1145
1146 return 0;
1147}
1148
1149static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
1150{
1151 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
1152
1153 if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
1154 printk(KERN_INFO "Single Function Mode is required to flash the"
1155 " firmware\n");
1156 return -EINVAL;
1157 }
1158
1159 if (netif_running(dev)) {
1160 printk(KERN_INFO "Interface %s must be down to flash the "
1161 "firmware\n", dev->name);
1162 return -EBUSY;
1163 }
1164
1165 return vxge_fw_upgrade(vdev, parms->data, 1);
1166}
1167
1122static const struct ethtool_ops vxge_ethtool_ops = { 1168static const struct ethtool_ops vxge_ethtool_ops = {
1123 .get_settings = vxge_ethtool_gset, 1169 .get_settings = vxge_ethtool_gset,
1124 .set_settings = vxge_ethtool_sset, 1170 .set_settings = vxge_ethtool_sset,
@@ -1140,6 +1186,8 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1140 .phys_id = vxge_ethtool_idnic, 1186 .phys_id = vxge_ethtool_idnic,
1141 .get_sset_count = vxge_ethtool_get_sset_count, 1187 .get_sset_count = vxge_ethtool_get_sset_count,
1142 .get_ethtool_stats = vxge_get_ethtool_stats, 1188 .get_ethtool_stats = vxge_get_ethtool_stats,
1189 .set_flags = vxge_set_flags,
1190 .flash_device = vxge_fw_flash,
1143}; 1191};
1144 1192
1145void vxge_initialize_ethtool_ops(struct net_device *ndev) 1193void vxge_initialize_ethtool_ops(struct net_device *ndev)
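For reference, the new .set_flags hook is reached from userspace through the ETHTOOL_SFLAGS ioctl (recent ethtool binaries expose it as the rxhash flag), and .flash_device is what "ethtool -f <ifname> <image>" invokes via ETHTOOL_FLASHDEV. A hedged userspace sketch of the former; the helper and interface name are illustrative, and note that vxge_set_flags() above refuses the change while the interface is up or when RTH steering is disabled:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* Toggle receive hashing; returns the ioctl result (0 on success). */
	static int set_rxhash(const char *ifname, int on)
	{
		struct ethtool_value ev = { .cmd = ETHTOOL_SFLAGS,
					    .data = on ? ETH_FLAG_RXHASH : 0 };
		struct ifreq ifr;
		int fd, ret;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&ev;
		ret = ioctl(fd, SIOCETHTOOL, &ifr);
		close(fd);
		return ret;
	}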
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 813829f3d024..3f2d6ed13d3e 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -50,6 +50,8 @@
50#include <net/ip.h> 50#include <net/ip.h>
51#include <linux/netdevice.h> 51#include <linux/netdevice.h>
52#include <linux/etherdevice.h> 52#include <linux/etherdevice.h>
53#include <linux/firmware.h>
54#include <linux/net_tstamp.h>
53#include "vxge-main.h" 55#include "vxge-main.h"
54#include "vxge-reg.h" 56#include "vxge-reg.h"
55 57
@@ -90,7 +92,6 @@ static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
90static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac); 92static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
91static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath); 93static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
92static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath); 94static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
93static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
94 95
95static inline int is_vxge_card_up(struct vxgedev *vdev) 96static inline int is_vxge_card_up(struct vxgedev *vdev)
96{ 97{
@@ -369,7 +370,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
369 u8 t_code, void *userdata) 370 u8 t_code, void *userdata)
370{ 371{
371 struct vxge_ring *ring = (struct vxge_ring *)userdata; 372 struct vxge_ring *ring = (struct vxge_ring *)userdata;
372 struct net_device *dev = ring->ndev; 373 struct net_device *dev = ring->ndev;
373 unsigned int dma_sizes; 374 unsigned int dma_sizes;
374 void *first_dtr = NULL; 375 void *first_dtr = NULL;
375 int dtr_cnt = 0; 376 int dtr_cnt = 0;
@@ -513,6 +514,23 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
513 else 514 else
514 skb_checksum_none_assert(skb); 515 skb_checksum_none_assert(skb);
515 516
517
518 if (ring->rx_hwts) {
519 struct skb_shared_hwtstamps *skb_hwts;
520 u32 ns = *(u32 *)(skb->head + pkt_length);
521
522 skb_hwts = skb_hwtstamps(skb);
523 skb_hwts->hwtstamp = ns_to_ktime(ns);
524 skb_hwts->syststamp.tv64 = 0;
525 }
526
527 /* rth_hash_type and rth_it_hit are non-zero regardless of
528 * whether rss is enabled. Only the rth_value is zero/non-zero
529 * if rss is disabled/enabled, so key off of that.
530 */
531 if (ext_info.rth_value)
532 skb->rxhash = ext_info.rth_value;
533
516 vxge_rx_complete(ring, skb, ext_info.vlan, 534 vxge_rx_complete(ring, skb, ext_info.vlan,
517 pkt_length, &ext_info); 535 pkt_length, &ext_info);
518 536
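Two notes on the additions above. The 4-byte nanosecond value sits just past the packet data because the adapter delivers its receive timestamp in place of the FCS, which this driver already requires to be left unstripped (see vxge_timestamp_config() further down). Once skb_hwtstamps() is filled in, the stack hands the stamp to any socket that asked for raw hardware receive timestamps through the SIOCSHWTSTAMP handler added below plus the usual socket option — a minimal sketch, with sock assumed to be an open socket and the net_tstamp.h constants included:

	int flags = SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	/* Stamps then arrive as SCM_TIMESTAMPING control messages. */
	setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));

The rth_value copied into skb->rxhash likewise lets the stack (RPS flow steering, for instance) reuse the adapter's RTH hash instead of computing one in software.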
@@ -670,7 +688,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
670 struct vxge_vpath *vpath = NULL; 688 struct vxge_vpath *vpath = NULL;
671 struct __vxge_hw_device *hldev; 689 struct __vxge_hw_device *hldev;
672 690
673 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 691 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
674 692
675 mac_address = (u8 *)&mac_addr; 693 mac_address = (u8 *)&mac_addr;
676 memcpy(mac_address, mac_header, ETH_ALEN); 694 memcpy(mac_address, mac_header, ETH_ALEN);
@@ -1094,7 +1112,7 @@ static void vxge_set_multicast(struct net_device *dev)
1094 /* Delete previous MC's */ 1112 /* Delete previous MC's */
1095 for (i = 0; i < mcast_cnt; i++) { 1113 for (i = 0; i < mcast_cnt; i++) {
1096 list_for_each_safe(entry, next, list_head) { 1114 list_for_each_safe(entry, next, list_head) {
1097 mac_entry = (struct vxge_mac_addrs *) entry; 1115 mac_entry = (struct vxge_mac_addrs *)entry;
1098 /* Copy the mac address to delete */ 1116 /* Copy the mac address to delete */
1099 mac_address = (u8 *)&mac_entry->macaddr; 1117 mac_address = (u8 *)&mac_entry->macaddr;
1100 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 1118 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1137,7 +1155,7 @@ _set_all_mcast:
1137 /* Delete previous MC's */ 1155 /* Delete previous MC's */
1138 for (i = 0; i < mcast_cnt; i++) { 1156 for (i = 0; i < mcast_cnt; i++) {
1139 list_for_each_safe(entry, next, list_head) { 1157 list_for_each_safe(entry, next, list_head) {
1140 mac_entry = (struct vxge_mac_addrs *) entry; 1158 mac_entry = (struct vxge_mac_addrs *)entry;
1141 /* Copy the mac address to delete */ 1159 /* Copy the mac address to delete */
1142 mac_address = (u8 *)&mac_entry->macaddr; 1160 mac_address = (u8 *)&mac_entry->macaddr;
1143 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 1161 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1184,7 +1202,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
1184{ 1202{
1185 struct sockaddr *addr = p; 1203 struct sockaddr *addr = p;
1186 struct vxgedev *vdev; 1204 struct vxgedev *vdev;
1187 struct __vxge_hw_device *hldev; 1205 struct __vxge_hw_device *hldev;
1188 enum vxge_hw_status status = VXGE_HW_OK; 1206 enum vxge_hw_status status = VXGE_HW_OK;
1189 struct macInfo mac_info_new, mac_info_old; 1207 struct macInfo mac_info_new, mac_info_old;
1190 int vpath_idx = 0; 1208 int vpath_idx = 0;
@@ -1292,8 +1310,13 @@ static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1292static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) 1310static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1293{ 1311{
1294 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1312 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1313 struct __vxge_hw_device *hldev;
1295 int msix_id; 1314 int msix_id;
1296 1315
1316 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
1317
1318 vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
1319
1297 vxge_hw_vpath_intr_disable(vpath->handle); 1320 vxge_hw_vpath_intr_disable(vpath->handle);
1298 1321
1299 if (vdev->config.intr_type == INTA) 1322 if (vdev->config.intr_type == INTA)
@@ -1423,6 +1446,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1423 } 1446 }
1424 1447
1425 if (event == VXGE_LL_FULL_RESET) { 1448 if (event == VXGE_LL_FULL_RESET) {
1449 vxge_hw_device_wait_receive_idle(vdev->devh);
1426 vxge_hw_device_intr_disable(vdev->devh); 1450 vxge_hw_device_intr_disable(vdev->devh);
1427 1451
1428 switch (vdev->cric_err_event) { 1452 switch (vdev->cric_err_event) {
@@ -1608,7 +1632,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1608 int budget_org = budget; 1632 int budget_org = budget;
1609 struct vxge_ring *ring; 1633 struct vxge_ring *ring;
1610 1634
1611 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 1635 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
1612 pci_get_drvdata(vdev->pdev); 1636 pci_get_drvdata(vdev->pdev);
1613 1637
1614 for (i = 0; i < vdev->no_of_vpath; i++) { 1638 for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -1645,7 +1669,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1645 */ 1669 */
1646static void vxge_netpoll(struct net_device *dev) 1670static void vxge_netpoll(struct net_device *dev)
1647{ 1671{
1648 struct __vxge_hw_device *hldev; 1672 struct __vxge_hw_device *hldev;
1649 struct vxgedev *vdev; 1673 struct vxgedev *vdev;
1650 1674
1651 vdev = (struct vxgedev *)netdev_priv(dev); 1675 vdev = (struct vxgedev *)netdev_priv(dev);
@@ -1689,15 +1713,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1689 mtable[index] = index % vdev->no_of_vpath; 1713 mtable[index] = index % vdev->no_of_vpath;
1690 } 1714 }
1691 1715
1692 /* Fill RTH hash types */
1693 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1694 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1695 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1696 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1697 hash_types.hash_type_tcpipv6ex_en =
1698 vdev->config.rth_hash_type_tcpipv6ex;
1699 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1700
1701 /* set indirection table, bucket-to-vpath mapping */ 1716 /* set indirection table, bucket-to-vpath mapping */
1702 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles, 1717 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1703 vdev->no_of_vpath, 1718 vdev->no_of_vpath,
@@ -1710,12 +1725,21 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1710 return status; 1725 return status;
1711 } 1726 }
1712 1727
1728 /* Fill RTH hash types */
1729 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1730 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1731 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1732 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1733 hash_types.hash_type_tcpipv6ex_en =
1734 vdev->config.rth_hash_type_tcpipv6ex;
1735 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1736
1713 /* 1737 /*
1714 * Because the itable_set() method uses the active_table field 1738 * Because the itable_set() method uses the active_table field
1715 * for the target virtual path the RTH config should be updated 1739 * for the target virtual path the RTH config should be updated
1716 * for all VPATHs. The h/w only uses the lowest numbered VPATH 1740 * for all VPATHs. The h/w only uses the lowest numbered VPATH
1717 * when steering frames. 1741 * when steering frames.
1718 */ 1742 */
1719 for (index = 0; index < vdev->no_of_vpath; index++) { 1743 for (index = 0; index < vdev->no_of_vpath; index++) {
1720 status = vxge_hw_vpath_rts_rth_set( 1744 status = vxge_hw_vpath_rts_rth_set(
1721 vdev->vpaths[index].handle, 1745 vdev->vpaths[index].handle,
@@ -1797,7 +1821,7 @@ static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1797{ 1821{
1798 struct list_head *entry, *next; 1822 struct list_head *entry, *next;
1799 u64 del_mac = 0; 1823 u64 del_mac = 0;
1800 u8 *mac_address = (u8 *) (&del_mac); 1824 u8 *mac_address = (u8 *)(&del_mac);
1801 1825
1802 /* Copy the mac address to delete from the list */ 1826 /* Copy the mac address to delete from the list */
1803 memcpy(mac_address, mac->macaddr, ETH_ALEN); 1827 memcpy(mac_address, mac->macaddr, ETH_ALEN);
@@ -1928,7 +1952,7 @@ static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1928} 1952}
1929 1953
1930/* reset vpaths */ 1954/* reset vpaths */
1931static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) 1955enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1932{ 1956{
1933 enum vxge_hw_status status = VXGE_HW_OK; 1957 enum vxge_hw_status status = VXGE_HW_OK;
1934 struct vxge_vpath *vpath; 1958 struct vxge_vpath *vpath;
@@ -1988,8 +2012,23 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
1988 2012
1989 for (i = 0; i < vdev->no_of_vpath; i++) { 2013 for (i = 0; i < vdev->no_of_vpath; i++) {
1990 vpath = &vdev->vpaths[i]; 2014 vpath = &vdev->vpaths[i];
1991
1992 vxge_assert(vpath->is_configured); 2015 vxge_assert(vpath->is_configured);
2016
2017 if (!vdev->titan1) {
2018 struct vxge_hw_vp_config *vcfg;
2019 vcfg = &vdev->devh->config.vp_config[vpath->device_id];
2020
2021 vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
2022 vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
2023 vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
2024 vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
2025 vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
2026 vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
2027 vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
2028 vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
2029 vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
2030 }
2031
1993 attr.vp_id = vpath->device_id; 2032 attr.vp_id = vpath->device_id;
1994 attr.fifo_attr.callback = vxge_xmit_compl; 2033 attr.fifo_attr.callback = vxge_xmit_compl;
1995 attr.fifo_attr.txdl_term = vxge_tx_term; 2034 attr.fifo_attr.txdl_term = vxge_tx_term;
@@ -2024,6 +2063,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2024 vdev->config.fifo_indicate_max_pkts; 2063 vdev->config.fifo_indicate_max_pkts;
2025 vpath->ring.rx_vector_no = 0; 2064 vpath->ring.rx_vector_no = 0;
2026 vpath->ring.rx_csum = vdev->rx_csum; 2065 vpath->ring.rx_csum = vdev->rx_csum;
2066 vpath->ring.rx_hwts = vdev->rx_hwts;
2027 vpath->is_open = 1; 2067 vpath->is_open = 1;
2028 vdev->vp_handles[i] = vpath->handle; 2068 vdev->vp_handles[i] = vpath->handle;
2029 vpath->ring.gro_enable = vdev->config.gro_enable; 2069 vpath->ring.gro_enable = vdev->config.gro_enable;
@@ -2062,7 +2102,7 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2062 struct __vxge_hw_device *hldev; 2102 struct __vxge_hw_device *hldev;
2063 u64 reason; 2103 u64 reason;
2064 enum vxge_hw_status status; 2104 enum vxge_hw_status status;
2065 struct vxgedev *vdev = (struct vxgedev *) dev_id;; 2105 struct vxgedev *vdev = (struct vxgedev *)dev_id;
2066 2106
2067 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); 2107 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2068 2108
@@ -2073,7 +2113,7 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2073 return IRQ_NONE; 2113 return IRQ_NONE;
2074 2114
2075 if (unlikely(!is_vxge_card_up(vdev))) 2115 if (unlikely(!is_vxge_card_up(vdev)))
2076 return IRQ_NONE; 2116 return IRQ_HANDLED;
2077 2117
2078 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, 2118 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
2079 &reason); 2119 &reason);
@@ -2301,8 +2341,8 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
2301 2341
2302static void vxge_rem_isr(struct vxgedev *vdev) 2342static void vxge_rem_isr(struct vxgedev *vdev)
2303{ 2343{
2304 struct __vxge_hw_device *hldev; 2344 struct __vxge_hw_device *hldev;
2305 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2345 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
2306 2346
2307#ifdef CONFIG_PCI_MSI 2347#ifdef CONFIG_PCI_MSI
2308 if (vdev->config.intr_type == MSI_X) { 2348 if (vdev->config.intr_type == MSI_X) {
@@ -2543,7 +2583,7 @@ vxge_open(struct net_device *dev)
2543 "%s: %s:%d", dev->name, __func__, __LINE__); 2583 "%s: %s:%d", dev->name, __func__, __LINE__);
2544 2584
2545 vdev = (struct vxgedev *)netdev_priv(dev); 2585 vdev = (struct vxgedev *)netdev_priv(dev);
2546 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2586 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
2547 function_mode = vdev->config.device_hw_info.function_mode; 2587 function_mode = vdev->config.device_hw_info.function_mode;
2548 2588
2549 /* make sure you have link off by default every time Nic is 2589 /* make sure you have link off by default every time Nic is
@@ -2598,6 +2638,8 @@ vxge_open(struct net_device *dev)
2598 goto out2; 2638 goto out2;
2599 } 2639 }
2600 } 2640 }
2641 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2642 hldev->config.rth_en ? "enabled" : "disabled");
2601 2643
2602 for (i = 0; i < vdev->no_of_vpath; i++) { 2644 for (i = 0; i < vdev->no_of_vpath; i++) {
2603 vpath = &vdev->vpaths[i]; 2645 vpath = &vdev->vpaths[i];
@@ -2683,9 +2725,10 @@ vxge_open(struct net_device *dev)
2683 vxge_os_timer(vdev->vp_reset_timer, 2725 vxge_os_timer(vdev->vp_reset_timer,
2684 vxge_poll_vp_reset, vdev, (HZ/2)); 2726 vxge_poll_vp_reset, vdev, (HZ/2));
2685 2727
2686 if (vdev->vp_lockup_timer.function == NULL) 2728 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2687 vxge_os_timer(vdev->vp_lockup_timer, 2729 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2688 vxge_poll_vp_lockup, vdev, (HZ/2)); 2730 vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2731 HZ / 2);
2689 2732
2690 set_bit(__VXGE_STATE_CARD_UP, &vdev->state); 2733 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2691 2734
@@ -2768,7 +2811,7 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2768 dev->name, __func__, __LINE__); 2811 dev->name, __func__, __LINE__);
2769 2812
2770 vdev = (struct vxgedev *)netdev_priv(dev); 2813 vdev = (struct vxgedev *)netdev_priv(dev);
2771 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2814 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
2772 2815
2773 if (unlikely(!is_vxge_card_up(vdev))) 2816 if (unlikely(!is_vxge_card_up(vdev)))
2774 return 0; 2817 return 0;
@@ -2778,7 +2821,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2778 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) 2821 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2779 msleep(50); 2822 msleep(50);
2780 2823
2781 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2782 if (do_io) { 2824 if (do_io) {
2783 /* Put the vpath back in normal mode */ 2825 /* Put the vpath back in normal mode */
2784 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id); 2826 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
@@ -2818,10 +2860,17 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2818 2860
2819 smp_wmb(); 2861 smp_wmb();
2820 } 2862 }
2821 del_timer_sync(&vdev->vp_lockup_timer); 2863
2864 if (vdev->titan1)
2865 del_timer_sync(&vdev->vp_lockup_timer);
2822 2866
2823 del_timer_sync(&vdev->vp_reset_timer); 2867 del_timer_sync(&vdev->vp_reset_timer);
2824 2868
2869 if (do_io)
2870 vxge_hw_device_wait_receive_idle(hldev);
2871
2872 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2873
2825 /* Disable napi */ 2874 /* Disable napi */
2826 if (vdev->config.intr_type != MSI_X) 2875 if (vdev->config.intr_type != MSI_X)
2827 napi_disable(&vdev->napi); 2876 napi_disable(&vdev->napi);
@@ -2838,8 +2887,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2838 if (do_io) 2887 if (do_io)
2839 vxge_hw_device_intr_disable(vdev->devh); 2888 vxge_hw_device_intr_disable(vdev->devh);
2840 2889
2841 mdelay(1000);
2842
2843 vxge_rem_isr(vdev); 2890 vxge_rem_isr(vdev);
2844 2891
2845 vxge_napi_del_all(vdev); 2892 vxge_napi_del_all(vdev);
@@ -2954,6 +3001,101 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2954 return net_stats; 3001 return net_stats;
2955} 3002}
2956 3003
3004static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
3005 int enable)
3006{
3007 enum vxge_hw_status status;
3008 u64 val64;
3009
3010 /* Timestamp is passed to the driver via the FCS, therefore we
3011 * must disable the FCS stripping by the adapter. Since this is
3012 * required for the driver to load (due to a hardware bug),
3013 * there is no need to do anything special here.
3014 */
3015 if (enable)
3016 val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3017 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3018 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3019 else
3020 val64 = 0;
3021
3022 status = vxge_hw_mgmt_reg_write(vdev->devh,
3023 vxge_hw_mgmt_reg_type_mrpcim,
3024 0,
3025 offsetof(struct vxge_hw_mrpcim_reg,
3026 xmac_timestamp),
3027 val64);
3028 vxge_hw_device_flush_io(vdev->devh);
3029 return status;
3030}
3031
3032static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
3033{
3034 struct hwtstamp_config config;
3035 enum vxge_hw_status status;
3036 int i;
3037
3038 if (copy_from_user(&config, data, sizeof(config)))
3039 return -EFAULT;
3040
3041 /* reserved for future extensions */
3042 if (config.flags)
3043 return -EINVAL;
3044
3045 /* Transmit HW Timestamp not supported */
3046 switch (config.tx_type) {
3047 case HWTSTAMP_TX_OFF:
3048 break;
3049 case HWTSTAMP_TX_ON:
3050 default:
3051 return -ERANGE;
3052 }
3053
3054 switch (config.rx_filter) {
3055 case HWTSTAMP_FILTER_NONE:
3056 status = vxge_timestamp_config(vdev, 0);
3057 if (status != VXGE_HW_OK)
3058 return -EFAULT;
3059
3060 vdev->rx_hwts = 0;
3061 config.rx_filter = HWTSTAMP_FILTER_NONE;
3062 break;
3063
3064 case HWTSTAMP_FILTER_ALL:
3065 case HWTSTAMP_FILTER_SOME:
3066 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3067 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3068 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3069 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3070 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3071 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3072 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3073 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3074 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3075 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3076 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3077 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3078 status = vxge_timestamp_config(vdev, 1);
3079 if (status != VXGE_HW_OK)
3080 return -EFAULT;
3081
3082 vdev->rx_hwts = 1;
3083 config.rx_filter = HWTSTAMP_FILTER_ALL;
3084 break;
3085
3086 default:
3087 return -ERANGE;
3088 }
3089
3090 for (i = 0; i < vdev->no_of_vpath; i++)
3091 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3092
3093 if (copy_to_user(data, &config, sizeof(config)))
3094 return -EFAULT;
3095
3096 return 0;
3097}
3098
2957/** 3099/**
2958 * vxge_ioctl 3100 * vxge_ioctl
2959 * @dev: Device pointer. 3101 * @dev: Device pointer.
@@ -2966,7 +3108,20 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2966 */ 3108 */
2967static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3109static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2968{ 3110{
2969 return -EOPNOTSUPP; 3111 struct vxgedev *vdev = netdev_priv(dev);
3112 int ret;
3113
3114 switch (cmd) {
3115 case SIOCSHWTSTAMP:
3116 ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
3117 if (ret)
3118 return ret;
3119 break;
3120 default:
3121 return -EOPNOTSUPP;
3122 }
3123
3124 return 0;
2970} 3125}
2971 3126
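vxge_hwtstamp_ioctl() follows the standard SIOCSHWTSTAMP contract: requests it cannot honor exactly are either rejected (any transmit stamping) or widened (every PTP receive filter becomes HWTSTAMP_FILTER_ALL), and the possibly-adjusted config is copied back so userspace sees what it actually got. A hedged userspace sketch, with the helper and interface name purely illustrative:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	/* Ask vxge to stamp all received frames; sock is an open socket fd. */
	static int enable_rx_hwts(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_OFF;	/* HWTSTAMP_TX_ON gets -ERANGE */
		cfg.rx_filter = HWTSTAMP_FILTER_ALL;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;
		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}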
2972/** 3127/**
@@ -3125,6 +3280,19 @@ static const struct net_device_ops vxge_netdev_ops = {
3125#endif 3280#endif
3126}; 3281};
3127 3282
3283static int __devinit vxge_device_revision(struct vxgedev *vdev)
3284{
3285 int ret;
3286 u8 revision;
3287
3288 ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
3289 if (ret)
3290 return -EIO;
3291
3292 vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
3293 return 0;
3294}
3295
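One aside on the helper above: since 2.6.23 the PCI core caches the revision byte in struct pci_dev, so an equivalent (hypothetical) form could skip the explicit config-space read:

	/* pdev->revision is filled in by the PCI core during enumeration. */
	vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);

Either way, the titan1 flag is what later selects the classic interrupt-moderation values in vxge_open_vpaths() and keeps the RxD-lockup poll timer, neither of which applies to the Titan 1A (revision 2).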
3128static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3296static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3129 struct vxge_config *config, 3297 struct vxge_config *config,
3130 int high_dma, int no_of_vpath, 3298 int high_dma, int no_of_vpath,
@@ -3163,6 +3331,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3163 vdev->pdev = hldev->pdev; 3331 vdev->pdev = hldev->pdev;
3164 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3332 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3165 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */ 3333 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3334 vdev->rx_hwts = 0;
3335
3336 ret = vxge_device_revision(vdev);
3337 if (ret < 0)
3338 goto _out1;
3166 3339
3167 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3340 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3168 3341
@@ -3178,6 +3351,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3178 3351
3179 vxge_initialize_ethtool_ops(ndev); 3352 vxge_initialize_ethtool_ops(ndev);
3180 3353
3354 if (vdev->config.rth_steering != NO_STEERING) {
3355 ndev->features |= NETIF_F_RXHASH;
3356 hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
3357 }
3358
3181 /* Allocate memory for vpath */ 3359 /* Allocate memory for vpath */
3182 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * 3360 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3183 no_of_vpath, GFP_KERNEL); 3361 no_of_vpath, GFP_KERNEL);
@@ -3227,6 +3405,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3227 "%s: Ethernet device registered", 3405 "%s: Ethernet device registered",
3228 ndev->name); 3406 ndev->name);
3229 3407
3408 hldev->ndev = ndev;
3230 *vdev_out = vdev; 3409 *vdev_out = vdev;
3231 3410
3232 /* Resetting the Device stats */ 3411 /* Resetting the Device stats */
@@ -3261,36 +3440,29 @@ _out0:
3261 * 3440 *
3262 * This function will unregister and free network device 3441 * This function will unregister and free network device
3263 */ 3442 */
3264static void 3443static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3265vxge_device_unregister(struct __vxge_hw_device *hldev)
3266{ 3444{
3267 struct vxgedev *vdev; 3445 struct vxgedev *vdev;
3268 struct net_device *dev; 3446 struct net_device *dev;
3269 char buf[IFNAMSIZ]; 3447 char buf[IFNAMSIZ];
3270#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3271 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3272 u32 level_trace;
3273#endif
3274 3448
3275 dev = hldev->ndev; 3449 dev = hldev->ndev;
3276 vdev = netdev_priv(dev); 3450 vdev = netdev_priv(dev);
3277#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3278 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3279 level_trace = vdev->level_trace;
3280#endif
3281 vxge_debug_entryexit(level_trace,
3282 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3283 3451
3284 memcpy(buf, vdev->ndev->name, IFNAMSIZ); 3452 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3453 __func__, __LINE__);
3454
3455 memcpy(buf, dev->name, IFNAMSIZ);
3285 3456
3286 /* in 2.6 will call stop() if device is up */ 3457 /* in 2.6 will call stop() if device is up */
3287 unregister_netdev(dev); 3458 unregister_netdev(dev);
3288 3459
3289 flush_scheduled_work(); 3460 flush_scheduled_work();
3290 3461
3291 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf); 3462 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3292 vxge_debug_entryexit(level_trace, 3463 buf);
3293 "%s: %s:%d Exiting...", buf, __func__, __LINE__); 3464 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
3465 __func__, __LINE__);
3294} 3466}
3295 3467
3296/* 3468/*
@@ -3813,8 +3985,8 @@ static int vxge_pm_resume(struct pci_dev *pdev)
3813static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, 3985static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3814 pci_channel_state_t state) 3986 pci_channel_state_t state)
3815{ 3987{
3816 struct __vxge_hw_device *hldev = 3988 struct __vxge_hw_device *hldev =
3817 (struct __vxge_hw_device *) pci_get_drvdata(pdev); 3989 (struct __vxge_hw_device *)pci_get_drvdata(pdev);
3818 struct net_device *netdev = hldev->ndev; 3990 struct net_device *netdev = hldev->ndev;
3819 3991
3820 netif_device_detach(netdev); 3992 netif_device_detach(netdev);
@@ -3843,8 +4015,8 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3843 */ 4015 */
3844static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) 4016static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3845{ 4017{
3846 struct __vxge_hw_device *hldev = 4018 struct __vxge_hw_device *hldev =
3847 (struct __vxge_hw_device *) pci_get_drvdata(pdev); 4019 (struct __vxge_hw_device *)pci_get_drvdata(pdev);
3848 struct net_device *netdev = hldev->ndev; 4020 struct net_device *netdev = hldev->ndev;
3849 4021
3850 struct vxgedev *vdev = netdev_priv(netdev); 4022 struct vxgedev *vdev = netdev_priv(netdev);
@@ -3869,8 +4041,8 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3869 */ 4041 */
3870static void vxge_io_resume(struct pci_dev *pdev) 4042static void vxge_io_resume(struct pci_dev *pdev)
3871{ 4043{
3872 struct __vxge_hw_device *hldev = 4044 struct __vxge_hw_device *hldev =
3873 (struct __vxge_hw_device *) pci_get_drvdata(pdev); 4045 (struct __vxge_hw_device *)pci_get_drvdata(pdev);
3874 struct net_device *netdev = hldev->ndev; 4046 struct net_device *netdev = hldev->ndev;
3875 4047
3876 if (netif_running(netdev)) { 4048 if (netif_running(netdev)) {
@@ -3914,6 +4086,142 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
3914 return num_functions; 4086 return num_functions;
3915} 4087}
3916 4088
4089int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4090{
4091 struct __vxge_hw_device *hldev = vdev->devh;
4092 u32 maj, min, bld, cmaj, cmin, cbld;
4093 enum vxge_hw_status status;
4094 const struct firmware *fw;
4095 int ret;
4096
4097 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4098 if (ret) {
4099 vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
4100 VXGE_DRIVER_NAME, fw_name);
4101 goto out;
4102 }
4103
4104 /* Load the new firmware onto the adapter */
4105 status = vxge_update_fw_image(hldev, fw->data, fw->size);
4106 if (status != VXGE_HW_OK) {
4107 vxge_debug_init(VXGE_ERR,
4108 "%s: FW image download to adapter failed '%s'.",
4109 VXGE_DRIVER_NAME, fw_name);
4110 ret = -EIO;
4111 goto out;
4112 }
4113
4114 /* Read the version of the new firmware */
4115 status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
4116 if (status != VXGE_HW_OK) {
4117 vxge_debug_init(VXGE_ERR,
4118 "%s: Upgrade read version failed '%s'.",
4119 VXGE_DRIVER_NAME, fw_name);
4120 ret = -EIO;
4121 goto out;
4122 }
4123
4124 cmaj = vdev->config.device_hw_info.fw_version.major;
4125 cmin = vdev->config.device_hw_info.fw_version.minor;
4126 cbld = vdev->config.device_hw_info.fw_version.build;
4127 /* It's possible the version in /lib/firmware is not the latest version.
4128 * If so, we could get into a loop of trying to upgrade to the latest
4129 * and flashing the older version.
4130 */
4131 if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
4132 !override) {
4133 ret = -EINVAL;
4134 goto out;
4135 }
4136
4137 printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
4138 maj, min, bld);
4139
4140 /* Flash the adapter with the new firmware */
4141 status = vxge_hw_flash_fw(hldev);
4142 if (status != VXGE_HW_OK) {
4143 vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
4144 VXGE_DRIVER_NAME, fw_name);
4145 ret = -EIO;
4146 goto out;
4147 }
4148
4149 printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
4150 "hard reset before using, thus requiring a system reboot or a "
4151 "hotplug event.\n");
4152
4153out:
4154 return ret;
4155}
4156
4157static int vxge_probe_fw_update(struct vxgedev *vdev)
4158{
4159 u32 maj, min, bld;
4160 int ret, gpxe = 0;
4161 char *fw_name;
4162
4163 maj = vdev->config.device_hw_info.fw_version.major;
4164 min = vdev->config.device_hw_info.fw_version.minor;
4165 bld = vdev->config.device_hw_info.fw_version.build;
4166
4167 if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4168 return 0;
4169
4170 /* Ignore the build number when determining if the current firmware is
4171 * "too new" to load the driver
4172 */
4173 if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4174 vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4175 "version, unable to load driver\n",
4176 VXGE_DRIVER_NAME);
4177 return -EINVAL;
4178 }
4179
4180 /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
4181 * work with this driver.
4182 */
4183 if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4184 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4185 "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4186 return -EINVAL;
4187 }
4188
4189 /* No file was specified, so decide between the gPXE and non-gPXE images */
4190 if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4191 int i;
4192 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4193 if (vdev->devh->eprom_versions[i]) {
4194 gpxe = 1;
4195 break;
4196 }
4197 }
4198 if (gpxe)
4199 fw_name = "vxge/X3fw-pxe.ncf";
4200 else
4201 fw_name = "vxge/X3fw.ncf";
4202
4203 ret = vxge_fw_upgrade(vdev, fw_name, 0);
4204 /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
4205 * probe, so ignore them
4206 */
4207 if (ret != -EINVAL && ret != -ENOENT)
4208 return -EIO;
4209 else
4210 ret = 0;
4211
4212 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4213 VXGE_FW_VER(maj, min, 0)) {
4214 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4215 " be used with this driver.\n"
4216 "Please get the latest version from "
4217 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4218 VXGE_DRIVER_NAME, maj, min, bld);
4219 return -EINVAL;
4220 }
4221
4222 return ret;
4223}
4224
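In practice there are two ways into vxge_fw_upgrade(): vxge_probe_fw_update() runs automatically at probe time with override=0 and looks for X3fw.ncf or X3fw-pxe.ncf under the firmware search path (typically /lib/firmware/vxge/), while an administrator can force a reflash of an idle, single-function adapter with the stock ethtool flash command, which lands in vxge_fw_flash() with override=1 (interface name illustrative):

	ethtool -f eth2 vxge/X3fw.ncf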
3917/** 4225/**
3918 * vxge_probe 4226 * vxge_probe
3919 * @pdev : structure containing the PCI related information of the device. 4227 * @pdev : structure containing the PCI related information of the device.
@@ -3928,7 +4236,7 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
3928static int __devinit 4236static int __devinit
3929vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) 4237vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3930{ 4238{
3931 struct __vxge_hw_device *hldev; 4239 struct __vxge_hw_device *hldev;
3932 enum vxge_hw_status status; 4240 enum vxge_hw_status status;
3933 int ret; 4241 int ret;
3934 int high_dma = 0; 4242 int high_dma = 0;
@@ -4072,16 +4380,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4072 goto _exit3; 4380 goto _exit3;
4073 } 4381 }
4074 4382
4075 if (ll_config->device_hw_info.fw_version.major !=
4076 VXGE_DRIVER_FW_VERSION_MAJOR) {
4077 vxge_debug_init(VXGE_ERR,
4078 "%s: Incorrect firmware version."
4079 "Please upgrade the firmware to version 1.x.x",
4080 VXGE_DRIVER_NAME);
4081 ret = -EINVAL;
4082 goto _exit3;
4083 }
4084
4085 vpath_mask = ll_config->device_hw_info.vpath_mask; 4383 vpath_mask = ll_config->device_hw_info.vpath_mask;
4086 if (vpath_mask == 0) { 4384 if (vpath_mask == 0) {
4087 vxge_debug_ll_config(VXGE_TRACE, 4385 vxge_debug_ll_config(VXGE_TRACE,
@@ -4145,11 +4443,37 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4145 goto _exit3; 4443 goto _exit3;
4146 } 4444 }
4147 4445
4446 if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
4447 ll_config->device_hw_info.fw_version.minor,
4448 ll_config->device_hw_info.fw_version.build) >=
4449 VXGE_EPROM_FW_VER) {
4450 struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
4451
4452 status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
4453 if (status != VXGE_HW_OK) {
4454 vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
4455 VXGE_DRIVER_NAME);
4456 /* This is a non-fatal error, continue */
4457 }
4458
4459 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
4460 hldev->eprom_versions[i] = img[i].version;
4461 if (!img[i].is_valid)
4462 break;
4463 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4464 "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
4465 VXGE_EPROM_IMG_MAJOR(img[i].version),
4466 VXGE_EPROM_IMG_MINOR(img[i].version),
4467 VXGE_EPROM_IMG_FIX(img[i].version),
4468 VXGE_EPROM_IMG_BUILD(img[i].version));
4469 }
4470 }
4471
4148 /* if FCS stripping is not disabled in MAC fail driver load */ 4472 /* if FCS stripping is not disabled in MAC fail driver load */
4149 if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) { 4473 status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
4150 vxge_debug_init(VXGE_ERR, 4474 if (status != VXGE_HW_OK) {
4151 "%s: FCS stripping is not disabled in MAC" 4475 vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
4152 " failing driver load", VXGE_DRIVER_NAME); 4476 ", failing driver load", VXGE_DRIVER_NAME);
4153 ret = -EINVAL; 4477 ret = -EINVAL;
4154 goto _exit4; 4478 goto _exit4;
4155 } 4479 }
@@ -4163,28 +4487,32 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4163 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; 4487 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4164 ll_config->addr_learn_en = addr_learn_en; 4488 ll_config->addr_learn_en = addr_learn_en;
4165 ll_config->rth_algorithm = RTH_ALG_JENKINS; 4489 ll_config->rth_algorithm = RTH_ALG_JENKINS;
4166 ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; 4490 ll_config->rth_hash_type_tcpipv4 = 1;
4167 ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; 4491 ll_config->rth_hash_type_ipv4 = 0;
4168 ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4492 ll_config->rth_hash_type_tcpipv6 = 0;
4169 ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4493 ll_config->rth_hash_type_ipv6 = 0;
4170 ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4494 ll_config->rth_hash_type_tcpipv6ex = 0;
4171 ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4495 ll_config->rth_hash_type_ipv6ex = 0;
4172 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; 4496 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4173 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4497 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4174 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4498 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4175 4499
4176 if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, 4500 ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4177 &vdev)) { 4501 &vdev);
4502 if (ret) {
4178 ret = -EINVAL; 4503 ret = -EINVAL;
4179 goto _exit4; 4504 goto _exit4;
4180 } 4505 }
4181 4506
4507 ret = vxge_probe_fw_update(vdev);
4508 if (ret)
4509 goto _exit5;
4510
4182 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); 4511 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4183 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), 4512 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4184 vxge_hw_device_trace_level_get(hldev)); 4513 vxge_hw_device_trace_level_get(hldev));
4185 4514
4186 /* set private HW device info */ 4515 /* set private HW device info */
4187 hldev->ndev = vdev->ndev;
4188 vdev->mtu = VXGE_HW_DEFAULT_MTU; 4516 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4189 vdev->bar0 = attr.bar0; 4517 vdev->bar0 = attr.bar0;
4190 vdev->max_vpath_supported = max_vpath_supported; 4518 vdev->max_vpath_supported = max_vpath_supported;
@@ -4286,7 +4614,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4286 "%s: mac_addr_list : memory allocation failed", 4614 "%s: mac_addr_list : memory allocation failed",
4287 vdev->ndev->name); 4615 vdev->ndev->name);
4288 ret = -EPERM; 4616 ret = -EPERM;
4289 goto _exit5; 4617 goto _exit6;
4290 } 4618 }
4291 macaddr = (u8 *)&entry->macaddr; 4619 macaddr = (u8 *)&entry->macaddr;
4292 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN); 4620 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4326,10 +4654,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4326 kfree(ll_config); 4654 kfree(ll_config);
4327 return 0; 4655 return 0;
4328 4656
4329_exit5: 4657_exit6:
4330 for (i = 0; i < vdev->no_of_vpath; i++) 4658 for (i = 0; i < vdev->no_of_vpath; i++)
4331 vxge_free_mac_add_list(&vdev->vpaths[i]); 4659 vxge_free_mac_add_list(&vdev->vpaths[i]);
4332 4660_exit5:
4333 vxge_device_unregister(hldev); 4661 vxge_device_unregister(hldev);
4334_exit4: 4662_exit4:
4335 pci_disable_sriov(pdev); 4663 pci_disable_sriov(pdev);
@@ -4354,34 +4682,25 @@ _exit0:
4354 * Description: This function is called by the Pci subsystem to release a 4682 * Description: This function is called by the Pci subsystem to release a
4355 * PCI device and free up all resource held up by the device. 4683 * PCI device and free up all resource held up by the device.
4356 */ 4684 */
4357static void __devexit 4685static void __devexit vxge_remove(struct pci_dev *pdev)
4358vxge_remove(struct pci_dev *pdev)
4359{ 4686{
4360 struct __vxge_hw_device *hldev; 4687 struct __vxge_hw_device *hldev;
4361 struct vxgedev *vdev = NULL; 4688 struct vxgedev *vdev = NULL;
4362 struct net_device *dev; 4689 struct net_device *dev;
4363 int i = 0; 4690 int i = 0;
4364#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4365 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4366 u32 level_trace;
4367#endif
4368 4691
4369 hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev); 4692 hldev = (struct __vxge_hw_device *)pci_get_drvdata(pdev);
4370 4693
4371 if (hldev == NULL) 4694 if (hldev == NULL)
4372 return; 4695 return;
4696
4373 dev = hldev->ndev; 4697 dev = hldev->ndev;
4374 vdev = netdev_priv(dev); 4698 vdev = netdev_priv(dev);
4375 4699
4376#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ 4700 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4377 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4378 level_trace = vdev->level_trace;
4379#endif
4380 vxge_debug_entryexit(level_trace,
4381 "%s:%d", __func__, __LINE__);
4382 4701
4383 vxge_debug_init(level_trace, 4702 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4384 "%s : removing PCI device...", __func__); 4703 __func__);
4385 vxge_device_unregister(hldev); 4704 vxge_device_unregister(hldev);
4386 4705
4387 for (i = 0; i < vdev->no_of_vpath; i++) { 4706 for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -4399,16 +4718,16 @@ vxge_remove(struct pci_dev *pdev)
4399 /* we are safe to free it now */ 4718 /* we are safe to free it now */
4400 free_netdev(dev); 4719 free_netdev(dev);
4401 4720
4402 vxge_debug_init(level_trace, 4721 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4403 "%s:%d Device unregistered", __func__, __LINE__); 4722 __func__, __LINE__);
4404 4723
4405 vxge_hw_device_terminate(hldev); 4724 vxge_hw_device_terminate(hldev);
4406 4725
4407 pci_disable_device(pdev); 4726 pci_disable_device(pdev);
4408 pci_release_regions(pdev); 4727 pci_release_regions(pdev);
4409 pci_set_drvdata(pdev, NULL); 4728 pci_set_drvdata(pdev, NULL);
4410 vxge_debug_entryexit(level_trace, 4729 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4411 "%s:%d Exiting...", __func__, __LINE__); 4730 __LINE__);
4412} 4731}
4413 4732
4414static struct pci_error_handlers vxge_err_handler = { 4733static struct pci_error_handlers vxge_err_handler = {
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index de64536cb7d0..953cb0ded3e1 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -29,6 +29,9 @@
29 29
30#define PCI_DEVICE_ID_TITAN_WIN 0x5733 30#define PCI_DEVICE_ID_TITAN_WIN 0x5733
31#define PCI_DEVICE_ID_TITAN_UNI 0x5833 31#define PCI_DEVICE_ID_TITAN_UNI 0x5833
32#define VXGE_HW_TITAN1_PCI_REVISION 1
33#define VXGE_HW_TITAN1A_PCI_REVISION 2
34
32#define VXGE_USE_DEFAULT 0xffffffff 35#define VXGE_USE_DEFAULT 0xffffffff
33#define VXGE_HW_VPATH_MSIX_ACTIVE 4 36#define VXGE_HW_VPATH_MSIX_ACTIVE 4
34#define VXGE_ALARM_MSIX_ID 2 37#define VXGE_ALARM_MSIX_ID 2
@@ -53,11 +56,13 @@
53 56
54#define VXGE_TTI_BTIMER_VAL 250000 57#define VXGE_TTI_BTIMER_VAL 250000
55 58
56#define VXGE_TTI_LTIMER_VAL 1000 59#define VXGE_TTI_LTIMER_VAL 1000
57#define VXGE_TTI_RTIMER_VAL 0 60#define VXGE_T1A_TTI_LTIMER_VAL 80
58#define VXGE_RTI_BTIMER_VAL 250 61#define VXGE_TTI_RTIMER_VAL 0
59#define VXGE_RTI_LTIMER_VAL 100 62#define VXGE_T1A_TTI_RTIMER_VAL 400
60#define VXGE_RTI_RTIMER_VAL 0 63#define VXGE_RTI_BTIMER_VAL 250
64#define VXGE_RTI_LTIMER_VAL 100
65#define VXGE_RTI_RTIMER_VAL 0
61#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH 66#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
62#define VXGE_ISR_POLLING_CNT 8 67#define VXGE_ISR_POLLING_CNT 8
63#define VXGE_MAX_CONFIG_DEV 0xFF 68#define VXGE_MAX_CONFIG_DEV 0xFF
@@ -76,14 +81,32 @@
76#define TTI_TX_UFC_B 40 81#define TTI_TX_UFC_B 40
77#define TTI_TX_UFC_C 60 82#define TTI_TX_UFC_C 60
78#define TTI_TX_UFC_D 100 83#define TTI_TX_UFC_D 100
84#define TTI_T1A_TX_UFC_A 30
85#define TTI_T1A_TX_UFC_B 80
86/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */
87/* Slope - 93 */
88/* 60 - 9k Mtu, 140 - 1.5k mtu */
89#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
90
91/* Slope - 37 */
92/* 100 - 9k Mtu, 300 - 1.5k mtu */
93#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
94
95
96#define RTI_RX_URANGE_A 5
97#define RTI_RX_URANGE_B 15
98#define RTI_RX_URANGE_C 40
99#define RTI_T1A_RX_URANGE_A 1
100#define RTI_T1A_RX_URANGE_B 20
101#define RTI_T1A_RX_URANGE_C 50
102#define RTI_RX_UFC_A 1
103#define RTI_RX_UFC_B 5
104#define RTI_RX_UFC_C 10
105#define RTI_RX_UFC_D 15
106#define RTI_T1A_RX_UFC_B 20
107#define RTI_T1A_RX_UFC_C 50
108#define RTI_T1A_RX_UFC_D 60
79 109
80#define RTI_RX_URANGE_A 5
81#define RTI_RX_URANGE_B 15
82#define RTI_RX_URANGE_C 40
83#define RTI_RX_UFC_A 1
84#define RTI_RX_UFC_B 5
85#define RTI_RX_UFC_C 10
86#define RTI_RX_UFC_D 15
87 110
88/* Milli secs timer period */ 111/* Milli secs timer period */
89#define VXGE_TIMER_DELAY 10000 112#define VXGE_TIMER_DELAY 10000
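As a sanity check on the new slope constants: the comments pin UFC_C at 60 for a 9k MTU and 140 for a 1.5k MTU, giving a slope of (9000 - 1500) / (140 - 60) = 93.75, truncated to the 93 used in TTI_T1A_TX_UFC_C(); UFC_D spans 100 to 300 over the same MTU range, so (9000 - 1500) / (300 - 100) = 37.5, truncated to 37. Assuming VXGE_HW_MAX_MTU (defined elsewhere) is around 9k, TTI_T1A_TX_UFC_C(1500) = 60 + (VXGE_HW_MAX_MTU - 1500) / 93 then evaluates close to the intended 140.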
@@ -145,15 +168,15 @@ struct vxge_config {
145 168
146 int addr_learn_en; 169 int addr_learn_en;
147 170
148 int rth_steering; 171 u32 rth_steering:2,
149 int rth_algorithm; 172 rth_algorithm:2,
150 int rth_hash_type_tcpipv4; 173 rth_hash_type_tcpipv4:1,
151 int rth_hash_type_ipv4; 174 rth_hash_type_ipv4:1,
152 int rth_hash_type_tcpipv6; 175 rth_hash_type_tcpipv6:1,
153 int rth_hash_type_ipv6; 176 rth_hash_type_ipv6:1,
154 int rth_hash_type_tcpipv6ex; 177 rth_hash_type_tcpipv6ex:1,
155 int rth_hash_type_ipv6ex; 178 rth_hash_type_ipv6ex:1,
156 int rth_bkt_sz; 179 rth_bkt_sz:8;
157 int rth_jhash_golden_ratio; 180 int rth_jhash_golden_ratio;
158 int tx_steering_type; 181 int tx_steering_type;
159 int fifo_indicate_max_pkts; 182 int fifo_indicate_max_pkts;
@@ -248,8 +271,9 @@ struct vxge_ring {
248 */ 271 */
249 int driver_id; 272 int driver_id;
250 273
251 /* copy of the flag indicating whether rx_csum is to be used */ 274 /* copy of the flag indicating whether rx_csum is to be used */
252 u32 rx_csum; 275 u32 rx_csum:1,
276 rx_hwts:1;
253 277
254 int pkts_processed; 278 int pkts_processed;
255 int budget; 279 int budget;
@@ -327,7 +351,9 @@ struct vxgedev {
327 u16 all_multi_flg; 351 u16 all_multi_flg;
328 352
329 /* A flag indicating whether rx_csum is to be used or not. */ 353 /* A flag indicating whether rx_csum is to be used or not. */
330 u32 rx_csum; 354 u32 rx_csum:1,
355 rx_hwts:1,
356 titan1:1;
331 357
332 struct vxge_msix_entry *vxge_entries; 358 struct vxge_msix_entry *vxge_entries;
333 struct msix_entry *entries; 359 struct msix_entry *entries;
@@ -387,8 +413,6 @@ struct vxge_tx_priv {
387 static int p = val; \ 413 static int p = val; \
388 module_param(p, int, 0) 414 module_param(p, int, 0)
389 415
390#define vxge_os_bug(fmt...) { printk(fmt); BUG(); }
391
392#define vxge_os_timer(timer, handle, arg, exp) do { \ 416#define vxge_os_timer(timer, handle, arg, exp) do { \
393 init_timer(&timer); \ 417 init_timer(&timer); \
394 timer.function = handle; \ 418 timer.function = handle; \
@@ -397,6 +421,11 @@ struct vxge_tx_priv {
397 } while (0); 421 } while (0);
398 422
399extern void vxge_initialize_ethtool_ops(struct net_device *ndev); 423extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
424
425enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
426
427int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
428
400/** 429/**
401 * #define VXGE_DEBUG_INIT: debug for initialization functions 430 * #define VXGE_DEBUG_INIT: debug for initialization functions
402 * #define VXGE_DEBUG_TX : debug transmit related functions 431 * #define VXGE_DEBUG_TX : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 3dd5c9615ef9..3e658b175947 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -49,6 +49,33 @@
49#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17 49#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
50#define VXGE_HW_TITAN_VPATH_REG_SPACES 17 50#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
51 51
52#define VXGE_HW_FW_API_GET_EPROM_REV 31
53
54#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
55#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
56#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
57#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
58
59#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
60#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
61#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
62#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
63#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
64
65#define VXGE_HW_FW_API_GET_FUNC_MODE 29
66#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
67
68#define VXGE_HW_FW_UPGRADE_MEMO 13
69#define VXGE_HW_FW_UPGRADE_ACTION 16
70#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
71#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
72#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
73#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
74
75#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
76#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
77#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
78
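These accessors slice fields out of a 64-bit register word using the device's MSB-first bit numbering (bit 0 is the most significant bit). A hedged sketch of what vxge_bVALn() amounts to; the authoritative definition lives in the driver's headers:

#include <stdio.h>
#include <stdint.h>

/* Sketch: extract n bits starting at MSB-first offset loc. */
#define vxge_bVALn(bits, loc, n) \
        (((uint64_t)(bits) >> (64 - ((loc) + (n)))) & ((1ULL << (n)) - 1))

#define VXGE_EPROM_IMG_MAJOR(val)       (uint32_t)vxge_bVALn(val, 48, 4)
#define VXGE_EPROM_IMG_MINOR(val)       (uint32_t)vxge_bVALn(val, 52, 4)

int main(void)
{
        /* Hypothetical register value carrying version 1.6 in bits 48..55. */
        uint64_t data = (uint64_t)0x16 << 8;

        printf("major=%u minor=%u\n",
               VXGE_EPROM_IMG_MAJOR(data), VXGE_EPROM_IMG_MINOR(data));
        return 0;
}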
52#define VXGE_HW_ASIC_MODE_RESERVED 0 79#define VXGE_HW_ASIC_MODE_RESERVED 0
53#define VXGE_HW_ASIC_MODE_NO_IOV 1 80#define VXGE_HW_ASIC_MODE_NO_IOV 1
54#define VXGE_HW_ASIC_MODE_SR_IOV 2 81#define VXGE_HW_ASIC_MODE_SR_IOV 2
@@ -165,13 +192,13 @@
165#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2 192#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
166#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3 193#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
167#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5 194#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
168#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6 195#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
169#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7 196#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
170#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8 197#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
171#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9 198#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
172#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10 199#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
173#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11 200#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
174#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12 201#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
175#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13 202#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
176 203
177#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \ 204#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
@@ -437,6 +464,7 @@
437#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \ 464#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
438 vxge_bVALn(bits, 48, 16) 465 vxge_bVALn(bits, 48, 16)
439#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16) 466#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16)
467#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
440 468
441#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\ 469#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
442 vxge_bVALn(bits, 0, 18) 470 vxge_bVALn(bits, 0, 18)
@@ -3998,6 +4026,7 @@ struct vxge_hw_vpath_reg {
3998#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9) 4026#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
3999#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9) 4027#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
4000#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9) 4028#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
4029#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
4001/*0x00a78*/ u64 prc_cfg7; 4030/*0x00a78*/ u64 prc_cfg7;
4002#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2) 4031#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
4003#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11) 4032#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 9890d4d596d0..1fceee876228 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1904,34 +1904,6 @@ enum vxge_hw_ring_tcode {
1904 VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF 1904 VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
1905}; 1905};
1906 1906
1907/**
1908 * enum enum vxge_hw_ring_hash_type - RTH hash types
1909 * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
1910 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
1911 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
1912 * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
1913 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
1914 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
1915 * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
1916 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
1917 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
1918 * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
1919 *
1920 * RTH hash types
1921 */
1922enum vxge_hw_ring_hash_type {
1923 VXGE_HW_RING_HASH_TYPE_NONE = 0x0,
1924 VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1,
1925 VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2,
1926 VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3,
1927 VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4,
1928 VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5,
1929 VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6,
1930 VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7,
1931 VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8,
1932 VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9
1933};
1934
1935enum vxge_hw_status vxge_hw_ring_rxd_reserve( 1907enum vxge_hw_status vxge_hw_ring_rxd_reserve(
1936 struct __vxge_hw_ring *ring_handle, 1908 struct __vxge_hw_ring *ring_handle,
1937 void **rxdh); 1909 void **rxdh);
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 53fefe137368..f05bb2f55e73 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,7 +16,34 @@
16 16
17#define VXGE_VERSION_MAJOR "2" 17#define VXGE_VERSION_MAJOR "2"
18#define VXGE_VERSION_MINOR "0" 18#define VXGE_VERSION_MINOR "0"
19#define VXGE_VERSION_FIX "9" 19#define VXGE_VERSION_FIX "10"
20#define VXGE_VERSION_BUILD "20840" 20#define VXGE_VERSION_BUILD "21808"
21#define VXGE_VERSION_FOR "k" 21#define VXGE_VERSION_FOR "k"
22
23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
24
25#define VXGE_DEAD_FW_VER_MAJOR 1
26#define VXGE_DEAD_FW_VER_MINOR 4
27#define VXGE_DEAD_FW_VER_BUILD 4
28
29#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
30 VXGE_DEAD_FW_VER_MINOR, \
31 VXGE_DEAD_FW_VER_BUILD)
32
33#define VXGE_EPROM_FW_VER_MAJOR 1
34#define VXGE_EPROM_FW_VER_MINOR 6
35#define VXGE_EPROM_FW_VER_BUILD 1
36
37#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
38 VXGE_EPROM_FW_VER_MINOR, \
39 VXGE_EPROM_FW_VER_BUILD)
40
41#define VXGE_CERT_FW_VER_MAJOR 1
42#define VXGE_CERT_FW_VER_MINOR 8
43#define VXGE_CERT_FW_VER_BUILD 1
44
45#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
46 VXGE_CERT_FW_VER_MINOR, \
47 VXGE_CERT_FW_VER_BUILD)
48
22#endif 49#endif
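Packing major/minor/build into one integer, as VXGE_FW_VER() above does, lets the driver compare firmware revisions with ordinary integer compares. A small usage sketch (the upgrade decision itself is only illustrative):

#include <stdio.h>

#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))

int main(void)
{
        int running = VXGE_FW_VER(1, 6, 1);     /* e.g. the EPROM image */
        int cert    = VXGE_FW_VER(1, 8, 1);     /* certified version */

        /* Field-wise ordering falls out of the packing for free. */
        if (running < cert)
                printf("0x%06x predates certified 0x%06x\n", running, cert);
        return 0;
}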
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index c3a329204511..ae07b3dfbcc1 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -124,7 +124,7 @@ MODULE_LICENSE("GPL");
124#define TX_BUF_SIZE 8192 124#define TX_BUF_SIZE 8192
125#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */ 125#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */
126 126
127#define TX_TIMEOUT 10 127#define TX_TIMEOUT (HZ/10)
128 128
129struct znet_private { 129struct znet_private {
130 int rx_dma, tx_dma; 130 int rx_dma, tx_dma;
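The znet change above is a units fix: watchdog timeouts are measured in jiffies, so a bare 10 means 10 timer ticks, which is 100 ms only when HZ=100 and shrinks to 10 ms at HZ=1000. HZ/10 pins the timeout at 100 ms regardless of the tick rate, as this standalone sketch illustrates:

#include <stdio.h>

/* Translate both constants into milliseconds for a given tick rate. */
static void show(int hz)
{
        printf("HZ=%4d: 10 jiffies = %3d ms, HZ/10 jiffies = %3d ms\n",
               hz, 10 * 1000 / hz, (hz / 10) * 1000 / hz);
}

int main(void)
{
        show(100);      /* the old value happens to be right here */
        show(250);
        show(1000);     /* ...but is 10x too short here */
        return 0;
}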
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 93fc2449af10..7d164670f264 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -167,10 +167,10 @@ struct ip_sf_socklist {
167 */ 167 */
168 168
169struct ip_mc_socklist { 169struct ip_mc_socklist {
170 struct ip_mc_socklist *next; 170 struct ip_mc_socklist __rcu *next_rcu;
171 struct ip_mreqn multi; 171 struct ip_mreqn multi;
172 unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ 172 unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
173 struct ip_sf_socklist *sflist; 173 struct ip_sf_socklist __rcu *sflist;
174 struct rcu_head rcu; 174 struct rcu_head rcu;
175}; 175};
176 176
@@ -186,11 +186,14 @@ struct ip_sf_list {
186struct ip_mc_list { 186struct ip_mc_list {
187 struct in_device *interface; 187 struct in_device *interface;
188 __be32 multiaddr; 188 __be32 multiaddr;
189 unsigned int sfmode;
189 struct ip_sf_list *sources; 190 struct ip_sf_list *sources;
190 struct ip_sf_list *tomb; 191 struct ip_sf_list *tomb;
191 unsigned int sfmode;
192 unsigned long sfcount[2]; 192 unsigned long sfcount[2];
193 struct ip_mc_list *next; 193 union {
194 struct ip_mc_list *next;
195 struct ip_mc_list __rcu *next_rcu;
196 };
194 struct timer_list timer; 197 struct timer_list timer;
195 int users; 198 int users;
196 atomic_t refcnt; 199 atomic_t refcnt;
@@ -201,6 +204,7 @@ struct ip_mc_list {
201 char loaded; 204 char loaded;
202 unsigned char gsquery; /* check source marks? */ 205 unsigned char gsquery; /* check source marks? */
203 unsigned char crcount; 206 unsigned char crcount;
207 struct rcu_head rcu;
204}; 208};
205 209
206/* V3 exponential field decoding */ 210/* V3 exponential field decoding */
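The __rcu annotations above mark pointers that readers may chase with no lock held while a writer swaps links underneath them; sparse then enforces that every crossing goes through rcu_dereference()/rcu_assign_pointer(). As a rough userspace analogue (not the kernel API), the same publish/traverse discipline can be sketched with C11 atomics, where publication is a release store and each link crossing is an acquire load:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int addr;
        struct node *_Atomic next;
};

static struct node *_Atomic head;

/* Writer side: initialise the node fully, then publish it with a
 * release store -- the moral equivalent of rcu_assign_pointer(). */
static void publish(int addr)
{
        struct node *n = malloc(sizeof(*n));

        n->addr = addr;
        atomic_store_explicit(&n->next,
                              atomic_load_explicit(&head, memory_order_relaxed),
                              memory_order_relaxed);
        atomic_store_explicit(&head, n, memory_order_release);
}

int main(void)
{
        publish(1);
        publish(2);

        /* Reader side: every link crossing is an acquire load -- the
         * moral equivalent of rcu_dereference(). */
        for (struct node *n = atomic_load_explicit(&head, memory_order_acquire);
             n != NULL;
             n = atomic_load_explicit(&n->next, memory_order_acquire))
                printf("addr=%d\n", n->addr);
        return 0;
}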
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ccd5b07d678d..380ba6bc5db1 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -52,9 +52,8 @@ struct in_device {
52 atomic_t refcnt; 52 atomic_t refcnt;
53 int dead; 53 int dead;
54 struct in_ifaddr *ifa_list; /* IP ifaddr chain */ 54 struct in_ifaddr *ifa_list; /* IP ifaddr chain */
55 rwlock_t mc_list_lock; 55 struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */
56 struct ip_mc_list *mc_list; /* IP multicast filter chain */ 56 int mc_count; /* Number of installed mcasts */
57 int mc_count; /* Number of installed mcasts */
58 spinlock_t mc_tomb_lock; 57 spinlock_t mc_tomb_lock;
59 struct ip_mc_list *mc_tomb; 58 struct ip_mc_list *mc_tomb;
60 unsigned long mr_v1_seen; 59 unsigned long mr_v1_seen;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d8fd2c23a1b9..578debb801f4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -951,7 +951,7 @@ struct net_device {
951#endif 951#endif
952 void *atalk_ptr; /* AppleTalk link */ 952 void *atalk_ptr; /* AppleTalk link */
953 struct in_device __rcu *ip_ptr; /* IPv4 specific data */ 953 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
954 void *dn_ptr; /* DECnet specific data */ 954 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
955 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */ 955 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
956 void *ec_ptr; /* Econet specific data */ 956 void *ec_ptr; /* Econet specific data */
957 void *ax25_ptr; /* AX.25 specific data */ 957 void *ax25_ptr; /* AX.25 specific data */
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index 0916bbf3bdff..b9e32db03f20 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -5,13 +5,14 @@
5struct dn_dev; 5struct dn_dev;
6 6
7struct dn_ifaddr { 7struct dn_ifaddr {
8 struct dn_ifaddr *ifa_next; 8 struct dn_ifaddr __rcu *ifa_next;
9 struct dn_dev *ifa_dev; 9 struct dn_dev *ifa_dev;
10 __le16 ifa_local; 10 __le16 ifa_local;
11 __le16 ifa_address; 11 __le16 ifa_address;
12 __u8 ifa_flags; 12 __u8 ifa_flags;
13 __u8 ifa_scope; 13 __u8 ifa_scope;
14 char ifa_label[IFNAMSIZ]; 14 char ifa_label[IFNAMSIZ];
15 struct rcu_head rcu;
15}; 16};
16 17
17#define DN_DEV_S_RU 0 /* Run - working normally */ 18#define DN_DEV_S_RU 0 /* Run - working normally */
@@ -83,7 +84,7 @@ struct dn_dev_parms {
83 84
84 85
85struct dn_dev { 86struct dn_dev {
86 struct dn_ifaddr *ifa_list; 87 struct dn_ifaddr __rcu *ifa_list;
87 struct net_device *dev; 88 struct net_device *dev;
88 struct dn_dev_parms parms; 89 struct dn_dev_parms parms;
89 char use_long; 90 char use_long;
@@ -171,19 +172,27 @@ extern int unregister_dnaddr_notifier(struct notifier_block *nb);
171 172
172static inline int dn_dev_islocal(struct net_device *dev, __le16 addr) 173static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
173{ 174{
174 struct dn_dev *dn_db = dev->dn_ptr; 175 struct dn_dev *dn_db;
175 struct dn_ifaddr *ifa; 176 struct dn_ifaddr *ifa;
177 int res = 0;
176 178
179 rcu_read_lock();
180 dn_db = rcu_dereference(dev->dn_ptr);
177 if (dn_db == NULL) { 181 if (dn_db == NULL) {
178 printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n"); 182 printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n");
179 return 0; 183 goto out;
180 } 184 }
181 185
182 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) 186 for (ifa = rcu_dereference(dn_db->ifa_list);
183 if ((addr ^ ifa->ifa_local) == 0) 187 ifa != NULL;
184 return 1; 188 ifa = rcu_dereference(ifa->ifa_next))
185 189 if ((addr ^ ifa->ifa_local) == 0) {
186 return 0; 190 res = 1;
191 break;
192 }
193out:
194 rcu_read_unlock();
195 return res;
187} 196}
188 197
189#endif /* _NET_DN_DEV_H */ 198#endif /* _NET_DN_DEV_H */
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index ccadab3aa3f6..9b185df265fb 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -80,6 +80,16 @@ struct dn_route {
80 unsigned rt_type; 80 unsigned rt_type;
81}; 81};
82 82
83static inline bool dn_is_input_route(struct dn_route *rt)
84{
85 return rt->fl.iif != 0;
86}
87
88static inline bool dn_is_output_route(struct dn_route *rt)
89{
90 return rt->fl.iif == 0;
91}
92
83extern void dn_route_init(void); 93extern void dn_route_init(void);
84extern void dn_route_cleanup(void); 94extern void dn_route_cleanup(void);
85 95
diff --git a/include/net/dst.h b/include/net/dst.h
index ffe9cb719c0e..a5bd72646d65 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -94,10 +94,10 @@ struct dst_entry {
94 int __use; 94 int __use;
95 unsigned long lastuse; 95 unsigned long lastuse;
96 union { 96 union {
97 struct dst_entry *next; 97 struct dst_entry *next;
98 struct rtable __rcu *rt_next; 98 struct rtable __rcu *rt_next;
99 struct rt6_info *rt6_next; 99 struct rt6_info *rt6_next;
100 struct dn_route *dn_next; 100 struct dn_route __rcu *dn_next;
101 }; 101 };
102}; 102};
103 103
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 1989cfd7405f..8945f9fb192a 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -141,7 +141,7 @@ struct inet_sock {
141 nodefrag:1; 141 nodefrag:1;
142 int mc_index; 142 int mc_index;
143 __be32 mc_addr; 143 __be32 mc_addr;
144 struct ip_mc_socklist *mc_list; 144 struct ip_mc_socklist __rcu *mc_list;
145 struct { 145 struct {
146 unsigned int flags; 146 unsigned int flags;
147 unsigned int fragsize; 147 unsigned int fragsize;
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 55590ab16b3e..815b2ce9f4a4 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -96,16 +96,16 @@ struct neighbour {
96 struct neigh_parms *parms; 96 struct neigh_parms *parms;
97 unsigned long confirmed; 97 unsigned long confirmed;
98 unsigned long updated; 98 unsigned long updated;
99 __u8 flags; 99 rwlock_t lock;
100 __u8 nud_state;
101 __u8 type;
102 __u8 dead;
103 atomic_t refcnt; 100 atomic_t refcnt;
104 struct sk_buff_head arp_queue; 101 struct sk_buff_head arp_queue;
105 struct timer_list timer; 102 struct timer_list timer;
106 unsigned long used; 103 unsigned long used;
107 atomic_t probes; 104 atomic_t probes;
108 rwlock_t lock; 105 __u8 flags;
106 __u8 nud_state;
107 __u8 type;
108 __u8 dead;
109 seqlock_t ha_lock; 109 seqlock_t ha_lock;
110 unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))]; 110 unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
111 struct hh_cache *hh; 111 struct hh_cache *hh;
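One visible effect of grouping the four __u8 fields of struct neighbour, as the hunk above does, is that they now share a single word instead of each sitting beside a wider member and dragging padding with it (field layout and cache-line placement are the usual motivations for such reorders). A standalone sketch of the padding effect on an LP64 target:

#include <stdio.h>

/* Stand-ins: 'long' plays the role of the wider lock/refcnt members. */
struct scattered {
        long lock;
        unsigned char flags;
        long refcnt;
        unsigned char nud_state;
        long timer;
        unsigned char type;
        unsigned char dead;
};

struct grouped {
        long lock;
        long refcnt;
        long timer;
        unsigned char flags;    /* the four __u8s now share one word */
        unsigned char nud_state;
        unsigned char type;
        unsigned char dead;
};

int main(void)
{
        printf("scattered: %zu bytes\n", sizeof(struct scattered)); /* 48 on LP64 */
        printf("grouped:   %zu bytes\n", sizeof(struct grouped));   /* 32 on LP64 */
        return 0;
}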
diff --git a/include/net/route.h b/include/net/route.h
index 7e5e73bfa4de..5cd46d1c0e14 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -55,8 +55,6 @@ struct rtable {
55 /* Cache lookup keys */ 55 /* Cache lookup keys */
56 struct flowi fl; 56 struct flowi fl;
57 57
58 struct in_device *idev;
59
60 int rt_genid; 58 int rt_genid;
61 unsigned rt_flags; 59 unsigned rt_flags;
62 __u16 rt_type; 60 __u16 rt_type;
@@ -73,6 +71,16 @@ struct rtable {
73 struct inet_peer *peer; /* long-living peer info */ 71 struct inet_peer *peer; /* long-living peer info */
74}; 72};
75 73
74static inline bool rt_is_input_route(struct rtable *rt)
75{
76 return rt->fl.iif != 0;
77}
78
79static inline bool rt_is_output_route(struct rtable *rt)
80{
81 return rt->fl.iif == 0;
82}
83
76struct ip_rt_acct { 84struct ip_rt_acct {
77 __u32 o_bytes; 85 __u32 o_bytes;
78 __u32 o_packets; 86 __u32 o_packets;
diff --git a/net/core/dev.c b/net/core/dev.c
index 0dd54a69dace..5968c822c999 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1817,8 +1817,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1817 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) 1817 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1818 dev->ethtool_ops->get_drvinfo(dev, &info); 1818 dev->ethtool_ops->get_drvinfo(dev, &info);
1819 1819
1820 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d " 1820 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
1821 "ip_summed=%d",
1822 info.driver, dev ? dev->features : 0L, 1821 info.driver, dev ? dev->features : 0L,
1823 skb->sk ? skb->sk->sk_route_caps : 0L, 1822 skb->sk ? skb->sk->sk_route_caps : 0L,
1824 skb->len, skb->data_len, skb->ip_summed); 1823 skb->len, skb->data_len, skb->ip_summed);
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 92a6fcb40d7d..abaf241c7353 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * net/dccp/ackvec.c 2 * net/dccp/ackvec.c
3 * 3 *
4 * An implementation of the DCCP protocol 4 * An implementation of Ack Vectors for the DCCP protocol
5 * Copyright (c) 2007 University of Aberdeen, Scotland, UK
5 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
@@ -23,173 +24,93 @@
23static struct kmem_cache *dccp_ackvec_slab; 24static struct kmem_cache *dccp_ackvec_slab;
24static struct kmem_cache *dccp_ackvec_record_slab; 25static struct kmem_cache *dccp_ackvec_record_slab;
25 26
26static struct dccp_ackvec_record *dccp_ackvec_record_new(void) 27struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
27{ 28{
28 struct dccp_ackvec_record *avr = 29 struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);
29 kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
30
31 if (avr != NULL)
32 INIT_LIST_HEAD(&avr->avr_node);
33 30
34 return avr; 31 if (av != NULL) {
32 av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
33 INIT_LIST_HEAD(&av->av_records);
34 }
35 return av;
35} 36}
36 37
37static void dccp_ackvec_record_delete(struct dccp_ackvec_record *avr) 38static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
38{ 39{
39 if (unlikely(avr == NULL)) 40 struct dccp_ackvec_record *cur, *next;
40 return; 41
41 /* Check if deleting a linked record */ 42 list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
42 WARN_ON(!list_empty(&avr->avr_node)); 43 kmem_cache_free(dccp_ackvec_record_slab, cur);
43 kmem_cache_free(dccp_ackvec_record_slab, avr); 44 INIT_LIST_HEAD(&av->av_records);
44} 45}
45 46
46static void dccp_ackvec_insert_avr(struct dccp_ackvec *av, 47void dccp_ackvec_free(struct dccp_ackvec *av)
47 struct dccp_ackvec_record *avr)
48{ 48{
49 /* 49 if (likely(av != NULL)) {
50 * AVRs are sorted by seqno. Since we are sending them in order, we 50 dccp_ackvec_purge_records(av);
51 * just add the AVR at the head of the list. 51 kmem_cache_free(dccp_ackvec_slab, av);
52 * -sorbo.
53 */
54 if (!list_empty(&av->av_records)) {
55 const struct dccp_ackvec_record *head =
56 list_entry(av->av_records.next,
57 struct dccp_ackvec_record,
58 avr_node);
59 BUG_ON(before48(avr->avr_ack_seqno, head->avr_ack_seqno));
60 } 52 }
61
62 list_add(&avr->avr_node, &av->av_records);
63} 53}
64 54
65int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) 55/**
56 * dccp_ackvec_update_records - Record information about sent Ack Vectors
57 * @av: Ack Vector records to update
58 * @seqno: Sequence number of the packet carrying the Ack Vector just sent
59 * @nonce_sum: The sum of all buffer nonces contained in the Ack Vector
60 */
61int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
66{ 62{
67 struct dccp_sock *dp = dccp_sk(sk);
68 struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
69 /* Figure out how many options do we need to represent the ackvec */
70 const u8 nr_opts = DIV_ROUND_UP(av->av_vec_len, DCCP_SINGLE_OPT_MAXLEN);
71 u16 len = av->av_vec_len + 2 * nr_opts, i;
72 u32 elapsed_time;
73 const unsigned char *tail, *from;
74 unsigned char *to;
75 struct dccp_ackvec_record *avr; 63 struct dccp_ackvec_record *avr;
76 suseconds_t delta;
77
78 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
79 return -1;
80
81 delta = ktime_us_delta(ktime_get_real(), av->av_time);
82 elapsed_time = delta / 10;
83 64
84 if (elapsed_time != 0 && 65 avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
85 dccp_insert_option_elapsed_time(skb, elapsed_time))
86 return -1;
87
88 avr = dccp_ackvec_record_new();
89 if (avr == NULL) 66 if (avr == NULL)
90 return -1; 67 return -ENOBUFS;
91
92 DCCP_SKB_CB(skb)->dccpd_opt_len += len;
93
94 to = skb_push(skb, len);
95 len = av->av_vec_len;
96 from = av->av_buf + av->av_buf_head;
97 tail = av->av_buf + DCCP_MAX_ACKVEC_LEN;
98
99 for (i = 0; i < nr_opts; ++i) {
100 int copylen = len;
101
102 if (len > DCCP_SINGLE_OPT_MAXLEN)
103 copylen = DCCP_SINGLE_OPT_MAXLEN;
104
105 *to++ = DCCPO_ACK_VECTOR_0;
106 *to++ = copylen + 2;
107
108 /* Check if buf_head wraps */
109 if (from + copylen > tail) {
110 const u16 tailsize = tail - from;
111
112 memcpy(to, from, tailsize);
113 to += tailsize;
114 len -= tailsize;
115 copylen -= tailsize;
116 from = av->av_buf;
117 }
118
119 memcpy(to, from, copylen);
120 from += copylen;
121 to += copylen;
122 len -= copylen;
123 }
124 68
69 avr->avr_ack_seqno = seqno;
70 avr->avr_ack_ptr = av->av_buf_head;
71 avr->avr_ack_ackno = av->av_buf_ackno;
72 avr->avr_ack_nonce = nonce_sum;
73 avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
125 /* 74 /*
126 * From RFC 4340, A.2: 75 * When the buffer overflows, we keep no more than one record. This is
127 * 76 * the simplest way of disambiguating sender-Acks dating from before the
128 * For each acknowledgement it sends, the HC-Receiver will add an 77 * overflow from sender-Acks which refer to after the overflow; a simple
129 * acknowledgement record. ack_seqno will equal the HC-Receiver 78 * solution is preferable here since we are handling an exception.
130 * sequence number it used for the ack packet; ack_ptr will equal
131 * buf_head; ack_ackno will equal buf_ackno; and ack_nonce will
132 * equal buf_nonce.
133 */ 79 */
134 avr->avr_ack_seqno = DCCP_SKB_CB(skb)->dccpd_seq; 80 if (av->av_overflow)
135 avr->avr_ack_ptr = av->av_buf_head; 81 dccp_ackvec_purge_records(av);
136 avr->avr_ack_ackno = av->av_buf_ackno; 82 /*
137 avr->avr_ack_nonce = av->av_buf_nonce; 83 * Since GSS is incremented for each packet, the list is automatically
138 avr->avr_sent_len = av->av_vec_len; 84 * arranged in descending order of @ack_seqno.
139 85 */
140 dccp_ackvec_insert_avr(av, avr); 86 list_add(&avr->avr_node, &av->av_records);
141 87
142 dccp_pr_debug("%s ACK Vector 0, len=%d, ack_seqno=%llu, " 88 dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
143 "ack_ackno=%llu\n",
144 dccp_role(sk), avr->avr_sent_len,
145 (unsigned long long)avr->avr_ack_seqno, 89 (unsigned long long)avr->avr_ack_seqno,
146 (unsigned long long)avr->avr_ack_ackno); 90 (unsigned long long)avr->avr_ack_ackno,
91 avr->avr_ack_runlen);
147 return 0; 92 return 0;
148} 93}
149 94
150struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority) 95/*
151{ 96 * Buffer index and length computation using modulo-buffersize arithmetic.
152 struct dccp_ackvec *av = kmem_cache_alloc(dccp_ackvec_slab, priority); 97 * Note that, as pointers move from right to left, head is `before' tail.
153 98 */
154 if (av != NULL) { 99static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
155 av->av_buf_head = DCCP_MAX_ACKVEC_LEN - 1;
156 av->av_buf_ackno = UINT48_MAX + 1;
157 av->av_buf_nonce = 0;
158 av->av_time = ktime_set(0, 0);
159 av->av_vec_len = 0;
160 INIT_LIST_HEAD(&av->av_records);
161 }
162
163 return av;
164}
165
166void dccp_ackvec_free(struct dccp_ackvec *av)
167{ 100{
168 if (unlikely(av == NULL)) 101 return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
169 return;
170
171 if (!list_empty(&av->av_records)) {
172 struct dccp_ackvec_record *avr, *next;
173
174 list_for_each_entry_safe(avr, next, &av->av_records, avr_node) {
175 list_del_init(&avr->avr_node);
176 dccp_ackvec_record_delete(avr);
177 }
178 }
179
180 kmem_cache_free(dccp_ackvec_slab, av);
181} 102}
182 103
183static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av, 104static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
184 const u32 index)
185{ 105{
186 return av->av_buf[index] & DCCP_ACKVEC_STATE_MASK; 106 return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
187} 107}
188 108
189static inline u8 dccp_ackvec_len(const struct dccp_ackvec *av, 109u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
190 const u32 index)
191{ 110{
192 return av->av_buf[index] & DCCP_ACKVEC_LEN_MASK; 111 if (unlikely(av->av_overflow))
112 return DCCPAV_MAX_ACKVEC_LEN;
113 return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
193} 114}
194 115
195/* 116/*
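The head index of the reworked Ack Vector buffer moves leftwards (toward lower indices) and wraps at zero, so both helpers above reduce to addition modulo the buffer size. A standalone sketch, including why dccp_ackvec_buflen() must special-case av_overflow: once head laps tail, the plain modulo distance becomes meaningless.

#include <stdio.h>

#define LEN 8   /* stands in for DCCPAV_MAX_ACKVEC_LEN */

static unsigned idx_add(unsigned a, unsigned b) { return (a + b) % LEN; }
static unsigned idx_sub(unsigned a, unsigned b) { return idx_add(a, LEN - b); }

int main(void)
{
        unsigned tail = 7, head;

        head = idx_sub(tail, 3);                        /* reserve 3 cells */
        printf("head=%u buflen=%u\n", head, idx_sub(tail, head)); /* 4, 3 */

        head = idx_sub(head, 6);                        /* 6 more: wraps past 0 */
        printf("head=%u buflen=%u\n", head, idx_sub(tail, head)); /* 6, 1 (!) */

        /* 9 cells were reserved in an 8-cell buffer, yet the modulo
         * distance reads 1 -- exactly the case the av_overflow flag
         * pins to the full buffer length. */
        return 0;
}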
@@ -204,7 +125,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
204 long gap; 125 long gap;
205 long new_head; 126 long new_head;
206 127
207 if (av->av_vec_len + packets > DCCP_MAX_ACKVEC_LEN) 128 if (av->av_vec_len + packets > DCCPAV_MAX_ACKVEC_LEN)
208 return -ENOBUFS; 129 return -ENOBUFS;
209 130
210 gap = packets - 1; 131 gap = packets - 1;
@@ -212,18 +133,18 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
212 133
213 if (new_head < 0) { 134 if (new_head < 0) {
214 if (gap > 0) { 135 if (gap > 0) {
215 memset(av->av_buf, DCCP_ACKVEC_STATE_NOT_RECEIVED, 136 memset(av->av_buf, DCCPAV_NOT_RECEIVED,
216 gap + new_head + 1); 137 gap + new_head + 1);
217 gap = -new_head; 138 gap = -new_head;
218 } 139 }
219 new_head += DCCP_MAX_ACKVEC_LEN; 140 new_head += DCCPAV_MAX_ACKVEC_LEN;
220 } 141 }
221 142
222 av->av_buf_head = new_head; 143 av->av_buf_head = new_head;
223 144
224 if (gap > 0) 145 if (gap > 0)
225 memset(av->av_buf + av->av_buf_head + 1, 146 memset(av->av_buf + av->av_buf_head + 1,
226 DCCP_ACKVEC_STATE_NOT_RECEIVED, gap); 147 DCCPAV_NOT_RECEIVED, gap);
227 148
228 av->av_buf[av->av_buf_head] = state; 149 av->av_buf[av->av_buf_head] = state;
229 av->av_vec_len += packets; 150 av->av_vec_len += packets;
@@ -236,6 +157,8 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
236int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, 157int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
237 const u64 ackno, const u8 state) 158 const u64 ackno, const u8 state)
238{ 159{
160 u8 *cur_head = av->av_buf + av->av_buf_head,
161 *buf_end = av->av_buf + DCCPAV_MAX_ACKVEC_LEN;
239 /* 162 /*
240 * Check at the right places if the buffer is full, if it is, tell the 163 * Check at the right places if the buffer is full, if it is, tell the
241 * caller to start dropping packets till the HC-Sender acks our ACK 164 * caller to start dropping packets till the HC-Sender acks our ACK
@@ -260,7 +183,7 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
260 183
261 /* See if this is the first ackno being inserted */ 184 /* See if this is the first ackno being inserted */
262 if (av->av_vec_len == 0) { 185 if (av->av_vec_len == 0) {
263 av->av_buf[av->av_buf_head] = state; 186 *cur_head = state;
264 av->av_vec_len = 1; 187 av->av_vec_len = 1;
265 } else if (after48(ackno, av->av_buf_ackno)) { 188 } else if (after48(ackno, av->av_buf_ackno)) {
266 const u64 delta = dccp_delta_seqno(av->av_buf_ackno, ackno); 189 const u64 delta = dccp_delta_seqno(av->av_buf_ackno, ackno);
@@ -269,10 +192,9 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
269 * Look if the state of this packet is the same as the 192 * Look if the state of this packet is the same as the
270 * previous ackno and if so if we can bump the head len. 193 * previous ackno and if so if we can bump the head len.
271 */ 194 */
272 if (delta == 1 && 195 if (delta == 1 && dccp_ackvec_state(cur_head) == state &&
273 dccp_ackvec_state(av, av->av_buf_head) == state && 196 dccp_ackvec_runlen(cur_head) < DCCPAV_MAX_RUNLEN)
274 dccp_ackvec_len(av, av->av_buf_head) < DCCP_ACKVEC_LEN_MASK) 197 *cur_head += 1;
275 av->av_buf[av->av_buf_head]++;
276 else if (dccp_ackvec_set_buf_head_state(av, delta, state)) 198 else if (dccp_ackvec_set_buf_head_state(av, delta, state))
277 return -ENOBUFS; 199 return -ENOBUFS;
278 } else { 200 } else {
@@ -285,21 +207,17 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
285 * could reduce the complexity of this scan.) 207 * could reduce the complexity of this scan.)
286 */ 208 */
287 u64 delta = dccp_delta_seqno(ackno, av->av_buf_ackno); 209 u64 delta = dccp_delta_seqno(ackno, av->av_buf_ackno);
288 u32 index = av->av_buf_head;
289 210
290 while (1) { 211 while (1) {
291 const u8 len = dccp_ackvec_len(av, index); 212 const u8 len = dccp_ackvec_runlen(cur_head);
292 const u8 av_state = dccp_ackvec_state(av, index);
293 /* 213 /*
294 * valid packets not yet in av_buf have a reserved 214 * valid packets not yet in av_buf have a reserved
295 * entry, with a len equal to 0. 215 * entry, with a len equal to 0.
296 */ 216 */
297 if (av_state == DCCP_ACKVEC_STATE_NOT_RECEIVED && 217 if (*cur_head == DCCPAV_NOT_RECEIVED && delta == 0) {
298 len == 0 && delta == 0) { /* Found our
299 reserved seat! */
300 dccp_pr_debug("Found %llu reserved seat!\n", 218 dccp_pr_debug("Found %llu reserved seat!\n",
301 (unsigned long long)ackno); 219 (unsigned long long)ackno);
302 av->av_buf[index] = state; 220 *cur_head = state;
303 goto out; 221 goto out;
304 } 222 }
305 /* len == 0 means one packet */ 223 /* len == 0 means one packet */
@@ -307,13 +225,12 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
307 goto out_duplicate; 225 goto out_duplicate;
308 226
309 delta -= len + 1; 227 delta -= len + 1;
310 if (++index == DCCP_MAX_ACKVEC_LEN) 228 if (++cur_head == buf_end)
311 index = 0; 229 cur_head = av->av_buf;
312 } 230 }
313 } 231 }
314 232
315 av->av_buf_ackno = ackno; 233 av->av_buf_ackno = ackno;
316 av->av_time = ktime_get_real();
317out: 234out:
318 return 0; 235 return 0;
319 236
@@ -333,13 +250,13 @@ static void dccp_ackvec_throw_record(struct dccp_ackvec *av,
333 if (av->av_buf_head <= avr->avr_ack_ptr) 250 if (av->av_buf_head <= avr->avr_ack_ptr)
334 av->av_vec_len = avr->avr_ack_ptr - av->av_buf_head; 251 av->av_vec_len = avr->avr_ack_ptr - av->av_buf_head;
335 else 252 else
336 av->av_vec_len = DCCP_MAX_ACKVEC_LEN - 1 - 253 av->av_vec_len = DCCPAV_MAX_ACKVEC_LEN - 1 -
337 av->av_buf_head + avr->avr_ack_ptr; 254 av->av_buf_head + avr->avr_ack_ptr;
338 255
339 /* free records */ 256 /* free records */
340 list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) { 257 list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
341 list_del_init(&avr->avr_node); 258 list_del(&avr->avr_node);
342 dccp_ackvec_record_delete(avr); 259 kmem_cache_free(dccp_ackvec_record_slab, avr);
343 } 260 }
344} 261}
345 262
@@ -357,7 +274,7 @@ void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, struct sock *sk,
357 if (ackno == avr->avr_ack_seqno) { 274 if (ackno == avr->avr_ack_seqno) {
358 dccp_pr_debug("%s ACK packet 0, len=%d, ack_seqno=%llu, " 275 dccp_pr_debug("%s ACK packet 0, len=%d, ack_seqno=%llu, "
359 "ack_ackno=%llu, ACKED!\n", 276 "ack_ackno=%llu, ACKED!\n",
360 dccp_role(sk), 1, 277 dccp_role(sk), avr->avr_ack_runlen,
361 (unsigned long long)avr->avr_ack_seqno, 278 (unsigned long long)avr->avr_ack_seqno,
362 (unsigned long long)avr->avr_ack_ackno); 279 (unsigned long long)avr->avr_ack_ackno);
363 dccp_ackvec_throw_record(av, avr); 280 dccp_ackvec_throw_record(av, avr);
@@ -387,7 +304,7 @@ static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av,
387 */ 304 */
388 avr = list_entry(av->av_records.next, struct dccp_ackvec_record, avr_node); 305 avr = list_entry(av->av_records.next, struct dccp_ackvec_record, avr_node);
389 while (i--) { 306 while (i--) {
390 const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; 307 const u8 rl = dccp_ackvec_runlen(vector);
391 u64 ackno_end_rl; 308 u64 ackno_end_rl;
392 309
393 dccp_set_seqno(&ackno_end_rl, *ackno - rl); 310 dccp_set_seqno(&ackno_end_rl, *ackno - rl);
@@ -404,8 +321,7 @@ static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av,
404 break; 321 break;
405found: 322found:
406 if (between48(avr->avr_ack_seqno, ackno_end_rl, *ackno)) { 323 if (between48(avr->avr_ack_seqno, ackno_end_rl, *ackno)) {
407 const u8 state = *vector & DCCP_ACKVEC_STATE_MASK; 324 if (dccp_ackvec_state(vector) != DCCPAV_NOT_RECEIVED) {
408 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) {
409 dccp_pr_debug("%s ACK vector 0, len=%d, " 325 dccp_pr_debug("%s ACK vector 0, len=%d, "
410 "ack_seqno=%llu, ack_ackno=%llu, " 326 "ack_seqno=%llu, ack_ackno=%llu, "
411 "ACKED!\n", 327 "ACKED!\n",
@@ -448,10 +364,9 @@ int __init dccp_ackvec_init(void)
448 if (dccp_ackvec_slab == NULL) 364 if (dccp_ackvec_slab == NULL)
449 goto out_err; 365 goto out_err;
450 366
451 dccp_ackvec_record_slab = 367 dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
452 kmem_cache_create("dccp_ackvec_record", 368 sizeof(struct dccp_ackvec_record),
453 sizeof(struct dccp_ackvec_record), 369 0, SLAB_HWCACHE_ALIGN, NULL);
454 0, SLAB_HWCACHE_ALIGN, NULL);
455 if (dccp_ackvec_record_slab == NULL) 370 if (dccp_ackvec_record_slab == NULL)
456 goto out_destroy_slab; 371 goto out_destroy_slab;
457 372
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index 7ea557b7c6b1..23880be8fc29 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -3,9 +3,9 @@
3/* 3/*
4 * net/dccp/ackvec.h 4 * net/dccp/ackvec.h
5 * 5 *
6 * An implementation of the DCCP protocol 6 * An implementation of Ack Vectors for the DCCP protocol
7 * Copyright (c) 2007 University of Aberdeen, Scotland, UK
7 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@mandriva.com> 8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@mandriva.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as 10 * under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
@@ -13,75 +13,89 @@
13 13
14#include <linux/dccp.h> 14#include <linux/dccp.h>
15#include <linux/compiler.h> 15#include <linux/compiler.h>
16#include <linux/ktime.h>
17#include <linux/list.h> 16#include <linux/list.h>
18#include <linux/types.h> 17#include <linux/types.h>
19 18
20/* We can spread an ack vector across multiple options */ 19/*
21#define DCCP_MAX_ACKVEC_LEN (DCCP_SINGLE_OPT_MAXLEN * 2) 20 * Ack Vector buffer space is static, in multiples of %DCCP_SINGLE_OPT_MAXLEN,
21 * the maximum size of a single Ack Vector. Setting %DCCPAV_NUM_ACKVECS to 1
22 * will be sufficient for most cases of low Ack Ratios, using a value of 2 gives
23 * more headroom if Ack Ratio is higher or when the sender acknowledges slowly.
24 * The maximum value is bounded by the u16 types for indices and functions.
25 */
26#define DCCPAV_NUM_ACKVECS 2
27#define DCCPAV_MAX_ACKVEC_LEN (DCCP_SINGLE_OPT_MAXLEN * DCCPAV_NUM_ACKVECS)
22 28
23/* Estimated minimum average Ack Vector length - used for updating MPS */ 29/* Estimated minimum average Ack Vector length - used for updating MPS */
24#define DCCPAV_MIN_OPTLEN 16 30#define DCCPAV_MIN_OPTLEN 16
25 31
26#define DCCP_ACKVEC_STATE_RECEIVED 0 32enum dccp_ackvec_states {
27#define DCCP_ACKVEC_STATE_ECN_MARKED (1 << 6) 33 DCCPAV_RECEIVED = 0x00,
28#define DCCP_ACKVEC_STATE_NOT_RECEIVED (3 << 6) 34 DCCPAV_ECN_MARKED = 0x40,
35 DCCPAV_RESERVED = 0x80,
36 DCCPAV_NOT_RECEIVED = 0xC0
37};
38#define DCCPAV_MAX_RUNLEN 0x3F
29 39
30#define DCCP_ACKVEC_STATE_MASK 0xC0 /* 11000000 */ 40static inline u8 dccp_ackvec_runlen(const u8 *cell)
31#define DCCP_ACKVEC_LEN_MASK 0x3F /* 00111111 */ 41{
42 return *cell & DCCPAV_MAX_RUNLEN;
43}
32 44
33/** struct dccp_ackvec - ack vector 45static inline u8 dccp_ackvec_state(const u8 *cell)
34 * 46{
35 * This data structure is the one defined in RFC 4340, Appendix A. 47 return *cell & ~DCCPAV_MAX_RUNLEN;
36 * 48}
37 * @av_buf_head - circular buffer head 49
38 * @av_buf_tail - circular buffer tail 50/** struct dccp_ackvec - Ack Vector main data structure
39 * @av_buf_ackno - ack # of the most recent packet acknowledgeable in the
40 * buffer (i.e. %av_buf_head)
41 * @av_buf_nonce - the one-bit sum of the ECN Nonces on all packets acked
42 * by the buffer with State 0
43 *
44 * Additionally, the HC-Receiver must keep some information about the
45 * Ack Vectors it has recently sent. For each packet sent carrying an
46 * Ack Vector, it remembers four variables:
47 * 51 *
48 * @av_records - list of dccp_ackvec_record 52 * This implements a fixed-size circular buffer within an array and is largely
49 * @av_ack_nonce - the one-bit sum of the ECN Nonces for all State 0. 53 * based on Appendix A of RFC 4340.
50 * 54 *
51 * @av_time - the time in usecs 55 * @av_buf: circular buffer storage area
52 * @av_buf - circular buffer of acknowledgeable packets 56 * @av_buf_head: head index; begin of live portion in @av_buf
57 * @av_buf_tail: tail index; first index _after_ the live portion in @av_buf
58 * @av_buf_ackno: highest seqno of acknowledgeable packet recorded in @av_buf
59 * @av_tail_ackno: lowest seqno of acknowledgeable packet recorded in @av_buf
60 * @av_buf_nonce: ECN nonce sums, each covering subsequent segments of up to
61 * %DCCP_SINGLE_OPT_MAXLEN cells in the live portion of @av_buf
62 * @av_overflow: if 1 then buf_head == buf_tail indicates buffer wraparound
63 * @av_records: list of %dccp_ackvec_record (Ack Vectors sent previously)
 64 * @av_vec_len: length of the live portion of @av_buf
53 */ 65 */
54struct dccp_ackvec { 66struct dccp_ackvec {
55 u64 av_buf_ackno; 67 u8 av_buf[DCCPAV_MAX_ACKVEC_LEN];
56 struct list_head av_records;
57 ktime_t av_time;
58 u16 av_buf_head; 68 u16 av_buf_head;
69 u16 av_buf_tail;
70 u64 av_buf_ackno:48;
71 u64 av_tail_ackno:48;
72 bool av_buf_nonce[DCCPAV_NUM_ACKVECS];
73 u8 av_overflow:1;
74 struct list_head av_records;
59 u16 av_vec_len; 75 u16 av_vec_len;
60 u8 av_buf_nonce;
61 u8 av_ack_nonce;
62 u8 av_buf[DCCP_MAX_ACKVEC_LEN];
63}; 76};
64 77
65/** struct dccp_ackvec_record - ack vector record 78/** struct dccp_ackvec_record - Records information about sent Ack Vectors
66 * 79 *
67 * ACK vector record as defined in Appendix A of spec. 80 * These list entries define the additional information which the HC-Receiver
81 * keeps about recently-sent Ack Vectors; again refer to RFC 4340, Appendix A.
68 * 82 *
69 * The list is sorted by avr_ack_seqno 83 * @avr_node: the list node in @av_records
84 * @avr_ack_seqno: sequence number of the packet the Ack Vector was sent on
85 * @avr_ack_ackno: the Ack number that this record/Ack Vector refers to
86 * @avr_ack_ptr: pointer into @av_buf where this record starts
87 * @avr_ack_runlen: run length of @avr_ack_ptr at the time of sending
88 * @avr_ack_nonce: the sum of @av_buf_nonce's at the time this record was sent
70 * 89 *
71 * @avr_node - node in av_records 90 * The list as a whole is sorted in descending order by @avr_ack_seqno.
72 * @avr_ack_seqno - sequence number of the packet this record was sent on
73 * @avr_ack_ackno - sequence number being acknowledged
74 * @avr_ack_ptr - pointer into av_buf where this record starts
75 * @avr_ack_nonce - av_ack_nonce at the time this record was sent
 76 * @avr_sent_len - length of the record in av_buf
77 */ 91 */
78struct dccp_ackvec_record { 92struct dccp_ackvec_record {
79 struct list_head avr_node; 93 struct list_head avr_node;
80 u64 avr_ack_seqno; 94 u64 avr_ack_seqno:48;
81 u64 avr_ack_ackno; 95 u64 avr_ack_ackno:48;
82 u16 avr_ack_ptr; 96 u16 avr_ack_ptr;
83 u16 avr_sent_len; 97 u8 avr_ack_runlen;
84 u8 avr_ack_nonce; 98 u8 avr_ack_nonce:1;
85}; 99};
86 100
87struct sock; 101struct sock;
@@ -102,10 +116,11 @@ extern int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
102 u64 *ackno, const u8 opt, 116 u64 *ackno, const u8 opt,
103 const u8 *value, const u8 len); 117 const u8 *value, const u8 len);
104 118
105extern int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb); 119extern int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
120extern u16 dccp_ackvec_buflen(const struct dccp_ackvec *av);
106 121
107static inline int dccp_ackvec_pending(const struct dccp_ackvec *av) 122static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
108{ 123{
109 return av->av_vec_len; 124 return av->av_overflow == 0 && av->av_buf_head == av->av_buf_tail;
110} 125}
111#endif /* _ACKVEC_H */ 126#endif /* _ACKVEC_H */
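Each Ack Vector cell now packs a 2-bit state in its top bits and a 6-bit run length below it, which is what the dccp_ackvec_state()/dccp_ackvec_runlen() helpers above split apart; a run length of 0 stands for a single packet. A standalone sketch of the encoding, including the bump-the-run-length shortcut used when consecutive packets share a state:

#include <stdio.h>

/* Cell layout: 2-bit state in the top bits, 6-bit run length below. */
enum { AV_RECEIVED = 0x00, AV_ECN_MARKED = 0x40,
       AV_RESERVED = 0x80, AV_NOT_RECEIVED = 0xC0 };
#define MAX_RUNLEN 0x3F

static unsigned runlen(unsigned char cell) { return cell & MAX_RUNLEN; }
static unsigned state(unsigned char cell)  { return cell & ~MAX_RUNLEN; }

int main(void)
{
        unsigned char cell = AV_ECN_MARKED | 5; /* run of 6 marked packets */

        printf("state=0x%02x runlen=%u\n", state(cell), runlen(cell));

        /* Adjacent packet with the same state: just extend the run. */
        if (runlen(cell) < MAX_RUNLEN)
                cell++;
        printf("state=0x%02x runlen=%u\n", state(cell), runlen(cell));
        return 0;
}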
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 6576eae9e779..cb1b4a0d1877 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -513,8 +513,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
513 &vector, &veclen)) != -1) { 513 &vector, &veclen)) != -1) {
514 /* go through this ack vector */ 514 /* go through this ack vector */
515 while (veclen--) { 515 while (veclen--) {
516 const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; 516 u64 ackno_end_rl = SUB48(ackno, dccp_ackvec_runlen(vector));
517 u64 ackno_end_rl = SUB48(ackno, rl);
518 517
519 ccid2_pr_debug("ackvec start:%llu end:%llu\n", 518 ccid2_pr_debug("ackvec start:%llu end:%llu\n",
520 (unsigned long long)ackno, 519 (unsigned long long)ackno,
@@ -537,17 +536,15 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
537 * run length 536 * run length
538 */ 537 */
539 while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) { 538 while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) {
540 const u8 state = *vector & 539 const u8 state = dccp_ackvec_state(vector);
541 DCCP_ACKVEC_STATE_MASK;
542 540
543 /* new packet received or marked */ 541 /* new packet received or marked */
544 if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED && 542 if (state != DCCPAV_NOT_RECEIVED &&
545 !seqp->ccid2s_acked) { 543 !seqp->ccid2s_acked) {
546 if (state == 544 if (state == DCCPAV_ECN_MARKED)
547 DCCP_ACKVEC_STATE_ECN_MARKED) {
548 ccid2_congestion_event(sk, 545 ccid2_congestion_event(sk,
549 seqp); 546 seqp);
550 } else 547 else
551 ccid2_new_ack(sk, seqp, 548 ccid2_new_ack(sk, seqp,
552 &maxincr); 549 &maxincr);
553 550
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index a8ed459508b2..19fafd597465 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -457,12 +457,15 @@ static inline void dccp_update_gss(struct sock *sk, u64 seq)
457 dp->dccps_awh = dp->dccps_gss; 457 dp->dccps_awh = dp->dccps_gss;
458} 458}
459 459
460static inline int dccp_ackvec_pending(const struct sock *sk)
461{
462 return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL &&
463 !dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec);
464}
465
460static inline int dccp_ack_pending(const struct sock *sk) 466static inline int dccp_ack_pending(const struct sock *sk)
461{ 467{
462 const struct dccp_sock *dp = dccp_sk(sk); 468 return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
463 return (dp->dccps_hc_rx_ackvec != NULL &&
464 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) ||
465 inet_csk_ack_scheduled(sk);
466} 469}
467 470
468extern int dccp_feat_finalise_settings(struct dccp_sock *dp); 471extern int dccp_feat_finalise_settings(struct dccp_sock *dp);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 265985370fa1..c7aeeba859d4 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -378,8 +378,7 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
378 378
379 if (dp->dccps_hc_rx_ackvec != NULL && 379 if (dp->dccps_hc_rx_ackvec != NULL &&
380 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk, 380 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
381 DCCP_SKB_CB(skb)->dccpd_seq, 381 DCCP_SKB_CB(skb)->dccpd_seq, DCCPAV_RECEIVED))
382 DCCP_ACKVEC_STATE_RECEIVED))
383 goto discard; 382 goto discard;
384 dccp_deliver_input_to_ccids(sk, skb); 383 dccp_deliver_input_to_ccids(sk, skb);
385 384
@@ -637,8 +636,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
637 636
638 if (dp->dccps_hc_rx_ackvec != NULL && 637 if (dp->dccps_hc_rx_ackvec != NULL &&
639 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk, 638 dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
640 DCCP_SKB_CB(skb)->dccpd_seq, 639 DCCP_SKB_CB(skb)->dccpd_seq, DCCPAV_RECEIVED))
641 DCCP_ACKVEC_STATE_RECEIVED))
642 goto discard; 640 goto discard;
643 641
644 dccp_deliver_input_to_ccids(sk, skb); 642 dccp_deliver_input_to_ccids(sk, skb);
diff --git a/net/dccp/options.c b/net/dccp/options.c
index cd3061813009..5adeeed5e0d2 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -340,6 +340,7 @@ static inline int dccp_elapsed_time_len(const u32 elapsed_time)
340 return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4; 340 return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
341} 341}
342 342
343/* FIXME: This function is currently not used anywhere */
343int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time) 344int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time)
344{ 345{
345 const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time); 346 const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
@@ -424,6 +425,67 @@ static int dccp_insert_option_timestamp_echo(struct dccp_sock *dp,
424 return 0; 425 return 0;
425} 426}
426 427
428static int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
429{
430 struct dccp_sock *dp = dccp_sk(sk);
431 struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
432 const u16 buflen = dccp_ackvec_buflen(av);
433 /* Figure out how many options do we need to represent the ackvec */
434 const u8 nr_opts = DIV_ROUND_UP(buflen, DCCP_SINGLE_OPT_MAXLEN);
435 u16 len = buflen + 2 * nr_opts;
436 u8 i, nonce = 0;
437 const unsigned char *tail, *from;
438 unsigned char *to;
439
440 if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
441 return -1;
442
443 DCCP_SKB_CB(skb)->dccpd_opt_len += len;
444
445 to = skb_push(skb, len);
446 len = buflen;
447 from = av->av_buf + av->av_buf_head;
448 tail = av->av_buf + DCCPAV_MAX_ACKVEC_LEN;
449
450 for (i = 0; i < nr_opts; ++i) {
451 int copylen = len;
452
453 if (len > DCCP_SINGLE_OPT_MAXLEN)
454 copylen = DCCP_SINGLE_OPT_MAXLEN;
455
456 /*
457 * RFC 4340, 12.2: Encode the Nonce Echo for this Ack Vector via
458 * its type; ack_nonce is the sum of all individual buf_nonce's.
459 */
460 nonce ^= av->av_buf_nonce[i];
461
462 *to++ = DCCPO_ACK_VECTOR_0 + av->av_buf_nonce[i];
463 *to++ = copylen + 2;
464
465 /* Check if buf_head wraps */
466 if (from + copylen > tail) {
467 const u16 tailsize = tail - from;
468
469 memcpy(to, from, tailsize);
470 to += tailsize;
471 len -= tailsize;
472 copylen -= tailsize;
473 from = av->av_buf;
474 }
475
476 memcpy(to, from, copylen);
477 from += copylen;
478 to += copylen;
479 len -= copylen;
480 }
481 /*
482 * Each sent Ack Vector is recorded in the list, as per A.2 of RFC 4340.
483 */
484 if (dccp_ackvec_update_records(av, DCCP_SKB_CB(skb)->dccpd_seq, nonce))
485 return -ENOBUFS;
486 return 0;
487}
488
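The copy loop above has to cope with the live region wrapping past the end of the circular buffer, which is what the "buf_head wraps" branch handles: one memcpy up to the physical end, then a second one restarting at the base. The same shape in isolation (a minimal sketch, not the kernel function itself):

#include <stdio.h>
#include <string.h>

#define BUFLEN 8

/* Copy len live bytes starting at head, splitting the memcpy when the
 * region wraps past the physical end of the circular buffer. */
static void copy_wrapped(unsigned char *dst, const unsigned char *buf,
                         unsigned head, unsigned len)
{
        const unsigned char *from = buf + head, *tail = buf + BUFLEN;

        if (from + len > tail) {                /* live region wraps */
                unsigned tailsize = tail - from;

                memcpy(dst, from, tailsize);
                dst += tailsize;
                len -= tailsize;
                from = buf;
        }
        memcpy(dst, from, len);
}

int main(void)
{
        unsigned char buf[BUFLEN + 1] = "ABCDEFGH", out[BUFLEN + 1] = "";

        copy_wrapped(out, buf, 6, 5);
        printf("%s\n", out);    /* GHABC */
        return 0;
}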
427/** 489/**
428 * dccp_insert_option_mandatory - Mandatory option (5.8.2) 490 * dccp_insert_option_mandatory - Mandatory option (5.8.2)
429 * Note that since we are using skb_push, this function needs to be called 491 * Note that since we are using skb_push, this function needs to be called
@@ -519,8 +581,7 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
519 if (dccp_insert_option_timestamp(skb)) 581 if (dccp_insert_option_timestamp(skb))
520 return -1; 582 return -1;
521 583
522 } else if (dp->dccps_hc_rx_ackvec != NULL && 584 } else if (dccp_ackvec_pending(sk) &&
523 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec) &&
524 dccp_insert_option_ackvec(sk, skb)) { 585 dccp_insert_option_ackvec(sk, skb)) {
525 return -1; 586 return -1;
526 } 587 }
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index a76b78de679f..9ecef9968c39 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1848,7 +1848,7 @@ unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
1848{ 1848{
1849 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER; 1849 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
1850 if (dev) { 1850 if (dev) {
1851 struct dn_dev *dn_db = dev->dn_ptr; 1851 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1852 mtu -= LL_RESERVED_SPACE(dev); 1852 mtu -= LL_RESERVED_SPACE(dev);
1853 if (dn_db->use_long) 1853 if (dn_db->use_long)
1854 mtu -= 21; 1854 mtu -= 21;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 4c409b46aa35..0ba15633c418 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -267,7 +267,7 @@ static int dn_forwarding_proc(ctl_table *table, int write,
267 if (table->extra1 == NULL) 267 if (table->extra1 == NULL)
268 return -EINVAL; 268 return -EINVAL;
269 269
270 dn_db = dev->dn_ptr; 270 dn_db = rcu_dereference_raw(dev->dn_ptr);
271 old = dn_db->parms.forwarding; 271 old = dn_db->parms.forwarding;
272 272
273 err = proc_dointvec(table, write, buffer, lenp, ppos); 273 err = proc_dointvec(table, write, buffer, lenp, ppos);
@@ -332,14 +332,19 @@ static struct dn_ifaddr *dn_dev_alloc_ifa(void)
332 return ifa; 332 return ifa;
333} 333}
334 334
335static __inline__ void dn_dev_free_ifa(struct dn_ifaddr *ifa) 335static void dn_dev_free_ifa_rcu(struct rcu_head *head)
336{ 336{
337 kfree(ifa); 337 kfree(container_of(head, struct dn_ifaddr, rcu));
338} 338}
339 339
340static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int destroy) 340static void dn_dev_free_ifa(struct dn_ifaddr *ifa)
341{ 341{
342 struct dn_ifaddr *ifa1 = *ifap; 342 call_rcu(&ifa->rcu, dn_dev_free_ifa_rcu);
343}
344
345static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy)
346{
347 struct dn_ifaddr *ifa1 = rtnl_dereference(*ifap);
343 unsigned char mac_addr[6]; 348 unsigned char mac_addr[6];
344 struct net_device *dev = dn_db->dev; 349 struct net_device *dev = dn_db->dev;
345 350
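dn_dev_free_ifa_rcu() above shows the standard call_rcu() idiom: the callback receives only the embedded rcu_head, and container_of() walks back to the enclosing structure before freeing it. A userspace sketch of that pointer arithmetic (the types here are stand-ins, not the DECnet ones):

#include <stdio.h>
#include <stddef.h>

/* Recover the enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_demo { void *next; };

struct ifaddr_demo {
        int ifa_local;
        struct rcu_head_demo rcu;
};

int main(void)
{
        struct ifaddr_demo ifa = { .ifa_local = 42 };
        struct rcu_head_demo *head = &ifa.rcu;  /* what the callback gets */

        printf("ifa_local=%d\n",
               container_of(head, struct ifaddr_demo, rcu)->ifa_local);
        return 0;
}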
@@ -373,7 +378,9 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
373 ASSERT_RTNL(); 378 ASSERT_RTNL();
374 379
375 /* Check for duplicates */ 380 /* Check for duplicates */
376 for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) { 381 for (ifa1 = rtnl_dereference(dn_db->ifa_list);
382 ifa1 != NULL;
383 ifa1 = rtnl_dereference(ifa1->ifa_next)) {
377 if (ifa1->ifa_local == ifa->ifa_local) 384 if (ifa1->ifa_local == ifa->ifa_local)
378 return -EEXIST; 385 return -EEXIST;
379 } 386 }
@@ -386,7 +393,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
386 } 393 }
387 394
388 ifa->ifa_next = dn_db->ifa_list; 395 ifa->ifa_next = dn_db->ifa_list;
389 dn_db->ifa_list = ifa; 396 rcu_assign_pointer(dn_db->ifa_list, ifa);
390 397
391 dn_ifaddr_notify(RTM_NEWADDR, ifa); 398 dn_ifaddr_notify(RTM_NEWADDR, ifa);
392 blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); 399 blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
@@ -396,7 +403,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
396 403
397static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa) 404static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa)
398{ 405{
399 struct dn_dev *dn_db = dev->dn_ptr; 406 struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
400 int rv; 407 int rv;
401 408
402 if (dn_db == NULL) { 409 if (dn_db == NULL) {
@@ -425,7 +432,8 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
425 struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr; 432 struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
426 struct dn_dev *dn_db; 433 struct dn_dev *dn_db;
427 struct net_device *dev; 434 struct net_device *dev;
428 struct dn_ifaddr *ifa = NULL, **ifap = NULL; 435 struct dn_ifaddr *ifa = NULL;
436 struct dn_ifaddr __rcu **ifap = NULL;
429 int ret = 0; 437 int ret = 0;
430 438
431 if (copy_from_user(ifr, arg, DN_IFREQ_SIZE)) 439 if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
@@ -454,8 +462,10 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
454 goto done; 462 goto done;
455 } 463 }
456 464
457 if ((dn_db = dev->dn_ptr) != NULL) { 465 if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) {
458 for (ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next) 466 for (ifap = &dn_db->ifa_list;
467 (ifa = rtnl_dereference(*ifap)) != NULL;
468 ifap = &ifa->ifa_next)
459 if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0) 469 if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
460 break; 470 break;
461 } 471 }
@@ -558,7 +568,7 @@ static struct dn_dev *dn_dev_by_index(int ifindex)
558 568
559 dev = __dev_get_by_index(&init_net, ifindex); 569 dev = __dev_get_by_index(&init_net, ifindex);
560 if (dev) 570 if (dev)
561 dn_dev = dev->dn_ptr; 571 dn_dev = rtnl_dereference(dev->dn_ptr);
562 572
563 return dn_dev; 573 return dn_dev;
564} 574}
@@ -576,7 +586,8 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
576 struct nlattr *tb[IFA_MAX+1]; 586 struct nlattr *tb[IFA_MAX+1];
577 struct dn_dev *dn_db; 587 struct dn_dev *dn_db;
578 struct ifaddrmsg *ifm; 588 struct ifaddrmsg *ifm;
579 struct dn_ifaddr *ifa, **ifap; 589 struct dn_ifaddr *ifa;
590 struct dn_ifaddr __rcu **ifap;
580 int err = -EINVAL; 591 int err = -EINVAL;
581 592
582 if (!net_eq(net, &init_net)) 593 if (!net_eq(net, &init_net))
@@ -592,7 +603,9 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
592 goto errout; 603 goto errout;
593 604
594 err = -EADDRNOTAVAIL; 605 err = -EADDRNOTAVAIL;
595 for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) { 606 for (ifap = &dn_db->ifa_list;
607 (ifa = rtnl_dereference(*ifap)) != NULL;
608 ifap = &ifa->ifa_next) {
596 if (tb[IFA_LOCAL] && 609 if (tb[IFA_LOCAL] &&
597 nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) 610 nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
598 continue; 611 continue;
@@ -632,7 +645,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
632 if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL) 645 if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL)
633 return -ENODEV; 646 return -ENODEV;
634 647
635 if ((dn_db = dev->dn_ptr) == NULL) { 648 if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) {
636 dn_db = dn_dev_create(dev, &err); 649 dn_db = dn_dev_create(dev, &err);
637 if (!dn_db) 650 if (!dn_db)
638 return err; 651 return err;
@@ -748,11 +761,11 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
748 skip_naddr = 0; 761 skip_naddr = 0;
749 } 762 }
750 763
751 if ((dn_db = dev->dn_ptr) == NULL) 764 if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL)
752 goto cont; 765 goto cont;
753 766
754 for (ifa = dn_db->ifa_list, dn_idx = 0; ifa; 767 for (ifa = rtnl_dereference(dn_db->ifa_list), dn_idx = 0; ifa;
755 ifa = ifa->ifa_next, dn_idx++) { 768 ifa = rtnl_dereference(ifa->ifa_next), dn_idx++) {
756 if (dn_idx < skip_naddr) 769 if (dn_idx < skip_naddr)
757 continue; 770 continue;
758 771
@@ -773,21 +786,22 @@ done:
773 786
774static int dn_dev_get_first(struct net_device *dev, __le16 *addr) 787static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
775{ 788{
776 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 789 struct dn_dev *dn_db;
777 struct dn_ifaddr *ifa; 790 struct dn_ifaddr *ifa;
778 int rv = -ENODEV; 791 int rv = -ENODEV;
779 792
793 rcu_read_lock();
794 dn_db = rcu_dereference(dev->dn_ptr);
780 if (dn_db == NULL) 795 if (dn_db == NULL)
781 goto out; 796 goto out;
782 797
783 rtnl_lock(); 798 ifa = rcu_dereference(dn_db->ifa_list);
784 ifa = dn_db->ifa_list;
785 if (ifa != NULL) { 799 if (ifa != NULL) {
786 *addr = ifa->ifa_local; 800 *addr = ifa->ifa_local;
787 rv = 0; 801 rv = 0;
788 } 802 }
789 rtnl_unlock();
790out: 803out:
804 rcu_read_unlock();
791 return rv; 805 return rv;
792} 806}
793 807
@@ -823,7 +837,7 @@ static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
823 struct endnode_hello_message *msg; 837 struct endnode_hello_message *msg;
824 struct sk_buff *skb = NULL; 838 struct sk_buff *skb = NULL;
825 __le16 *pktlen; 839 __le16 *pktlen;
826 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 840 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
827 841
828 if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL) 842 if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
829 return; 843 return;
@@ -889,7 +903,7 @@ static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn
889static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa) 903static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
890{ 904{
891 int n; 905 int n;
892 struct dn_dev *dn_db = dev->dn_ptr; 906 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
893 struct dn_neigh *dn = (struct dn_neigh *)dn_db->router; 907 struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
894 struct sk_buff *skb; 908 struct sk_buff *skb;
895 size_t size; 909 size_t size;
@@ -960,7 +974,7 @@ static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
960 974
961static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa) 975static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
962{ 976{
963 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 977 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
964 978
965 if (dn_db->parms.forwarding == 0) 979 if (dn_db->parms.forwarding == 0)
966 dn_send_endnode_hello(dev, ifa); 980 dn_send_endnode_hello(dev, ifa);
@@ -998,7 +1012,7 @@ static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa)
998 1012
999static int dn_eth_up(struct net_device *dev) 1013static int dn_eth_up(struct net_device *dev)
1000{ 1014{
1001 struct dn_dev *dn_db = dev->dn_ptr; 1015 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1002 1016
1003 if (dn_db->parms.forwarding == 0) 1017 if (dn_db->parms.forwarding == 0)
1004 dev_mc_add(dev, dn_rt_all_end_mcast); 1018 dev_mc_add(dev, dn_rt_all_end_mcast);
@@ -1012,7 +1026,7 @@ static int dn_eth_up(struct net_device *dev)
1012 1026
1013static void dn_eth_down(struct net_device *dev) 1027static void dn_eth_down(struct net_device *dev)
1014{ 1028{
1015 struct dn_dev *dn_db = dev->dn_ptr; 1029 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1016 1030
1017 if (dn_db->parms.forwarding == 0) 1031 if (dn_db->parms.forwarding == 0)
1018 dev_mc_del(dev, dn_rt_all_end_mcast); 1032 dev_mc_del(dev, dn_rt_all_end_mcast);
@@ -1025,12 +1039,16 @@ static void dn_dev_set_timer(struct net_device *dev);
1025static void dn_dev_timer_func(unsigned long arg) 1039static void dn_dev_timer_func(unsigned long arg)
1026{ 1040{
1027 struct net_device *dev = (struct net_device *)arg; 1041 struct net_device *dev = (struct net_device *)arg;
1028 struct dn_dev *dn_db = dev->dn_ptr; 1042 struct dn_dev *dn_db;
1029 struct dn_ifaddr *ifa; 1043 struct dn_ifaddr *ifa;
1030 1044
1045 rcu_read_lock();
1046 dn_db = rcu_dereference(dev->dn_ptr);
1031 if (dn_db->t3 <= dn_db->parms.t2) { 1047 if (dn_db->t3 <= dn_db->parms.t2) {
1032 if (dn_db->parms.timer3) { 1048 if (dn_db->parms.timer3) {
1033 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) { 1049 for (ifa = rcu_dereference(dn_db->ifa_list);
1050 ifa;
1051 ifa = rcu_dereference(ifa->ifa_next)) {
1034 if (!(ifa->ifa_flags & IFA_F_SECONDARY)) 1052 if (!(ifa->ifa_flags & IFA_F_SECONDARY))
1035 dn_db->parms.timer3(dev, ifa); 1053 dn_db->parms.timer3(dev, ifa);
1036 } 1054 }
@@ -1039,13 +1057,13 @@ static void dn_dev_timer_func(unsigned long arg)
1039 } else { 1057 } else {
1040 dn_db->t3 -= dn_db->parms.t2; 1058 dn_db->t3 -= dn_db->parms.t2;
1041 } 1059 }
1042 1060 rcu_read_unlock();
1043 dn_dev_set_timer(dev); 1061 dn_dev_set_timer(dev);
1044} 1062}
1045 1063
1046static void dn_dev_set_timer(struct net_device *dev) 1064static void dn_dev_set_timer(struct net_device *dev)
1047{ 1065{
1048 struct dn_dev *dn_db = dev->dn_ptr; 1066 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1049 1067
1050 if (dn_db->parms.t2 > dn_db->parms.t3) 1068 if (dn_db->parms.t2 > dn_db->parms.t3)
1051 dn_db->parms.t2 = dn_db->parms.t3; 1069 dn_db->parms.t2 = dn_db->parms.t3;
@@ -1077,8 +1095,8 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
1077 return NULL; 1095 return NULL;
1078 1096
1079 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); 1097 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
1080 smp_wmb(); 1098
1081 dev->dn_ptr = dn_db; 1099 rcu_assign_pointer(dev->dn_ptr, dn_db);
1082 dn_db->dev = dev; 1100 dn_db->dev = dev;
1083 init_timer(&dn_db->timer); 1101 init_timer(&dn_db->timer);
1084 1102
@@ -1086,7 +1104,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
1086 1104
1087 dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table); 1105 dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
1088 if (!dn_db->neigh_parms) { 1106 if (!dn_db->neigh_parms) {
1089 dev->dn_ptr = NULL; 1107 rcu_assign_pointer(dev->dn_ptr, NULL);
1090 kfree(dn_db); 1108 kfree(dn_db);
1091 return NULL; 1109 return NULL;
1092 } 1110 }
@@ -1125,7 +1143,7 @@ void dn_dev_up(struct net_device *dev)
1125 struct dn_ifaddr *ifa; 1143 struct dn_ifaddr *ifa;
1126 __le16 addr = decnet_address; 1144 __le16 addr = decnet_address;
1127 int maybe_default = 0; 1145 int maybe_default = 0;
1128 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 1146 struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
1129 1147
1130 if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) 1148 if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
1131 return; 1149 return;
@@ -1176,7 +1194,7 @@ void dn_dev_up(struct net_device *dev)
1176 1194
1177static void dn_dev_delete(struct net_device *dev) 1195static void dn_dev_delete(struct net_device *dev)
1178{ 1196{
1179 struct dn_dev *dn_db = dev->dn_ptr; 1197 struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
1180 1198
1181 if (dn_db == NULL) 1199 if (dn_db == NULL)
1182 return; 1200 return;
@@ -1204,13 +1222,13 @@ static void dn_dev_delete(struct net_device *dev)
1204 1222
1205void dn_dev_down(struct net_device *dev) 1223void dn_dev_down(struct net_device *dev)
1206{ 1224{
1207 struct dn_dev *dn_db = dev->dn_ptr; 1225 struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
1208 struct dn_ifaddr *ifa; 1226 struct dn_ifaddr *ifa;
1209 1227
1210 if (dn_db == NULL) 1228 if (dn_db == NULL)
1211 return; 1229 return;
1212 1230
1213 while((ifa = dn_db->ifa_list) != NULL) { 1231 while ((ifa = rtnl_dereference(dn_db->ifa_list)) != NULL) {
1214 dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0); 1232 dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0);
1215 dn_dev_free_ifa(ifa); 1233 dn_dev_free_ifa(ifa);
1216 } 1234 }
@@ -1270,7 +1288,7 @@ static inline int is_dn_dev(struct net_device *dev)
1270} 1288}
1271 1289
1272static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) 1290static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
1273 __acquires(rcu) 1291 __acquires(RCU)
1274{ 1292{
1275 int i; 1293 int i;
1276 struct net_device *dev; 1294 struct net_device *dev;
@@ -1313,7 +1331,7 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1313} 1331}
1314 1332
1315static void dn_dev_seq_stop(struct seq_file *seq, void *v) 1333static void dn_dev_seq_stop(struct seq_file *seq, void *v)
1316 __releases(rcu) 1334 __releases(RCU)
1317{ 1335{
1318 rcu_read_unlock(); 1336 rcu_read_unlock();
1319} 1337}
@@ -1340,7 +1358,7 @@ static int dn_dev_seq_show(struct seq_file *seq, void *v)
1340 struct net_device *dev = v; 1358 struct net_device *dev = v;
1341 char peer_buf[DN_ASCBUF_LEN]; 1359 char peer_buf[DN_ASCBUF_LEN];
1342 char router_buf[DN_ASCBUF_LEN]; 1360 char router_buf[DN_ASCBUF_LEN];
1343 struct dn_dev *dn_db = dev->dn_ptr; 1361 struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr);
1344 1362
1345 seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu" 1363 seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu"
1346 " %04hu %03d %02x %-10s %-7s %-7s\n", 1364 " %04hu %03d %02x %-10s %-7s %-7s\n",
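
The dn_dev.c conversion above exercises every flavor of RCU pointer access. As a quick reference (the primitives below are the real RCU/RTNL API; the surrounding struct, lock, and function are illustrative only), the flavors differ solely in which protection they assert:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/spinlock.h>

struct item { int val; };

static struct item __rcu *shared;          /* stands in for dev->dn_ptr */
static DEFINE_SPINLOCK(shared_lock);       /* one possible writer lock  */

static void flavors(struct item *newp)
{
	struct item *p;

	rcu_read_lock();                   /* plain lockless reader */
	p = rcu_dereference(shared);
	rcu_read_unlock();

	/* update side under RTNL, as in dn_dev_set_ifa() and friends */
	ASSERT_RTNL();
	p = rtnl_dereference(shared);

	/* update side under an explicit lock, stated for lockdep */
	spin_lock(&shared_lock);
	p = rcu_dereference_protected(shared,
				      lockdep_is_held(&shared_lock));
	rcu_assign_pointer(shared, newp);  /* publish with full ordering */
	spin_unlock(&shared_lock);

	/* unchecked escape hatch, used above only where the pointer
	 * provably cannot change (timer and dn_send_*_hello() paths) */
	p = rcu_dereference_raw(shared);
	(void)p;
}
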
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 4ab96c15166d..0ef0a81bcd72 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -610,10 +610,12 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
610 /* Scan device list */ 610 /* Scan device list */
611 rcu_read_lock(); 611 rcu_read_lock();
612 for_each_netdev_rcu(&init_net, dev) { 612 for_each_netdev_rcu(&init_net, dev) {
613 dn_db = dev->dn_ptr; 613 dn_db = rcu_dereference(dev->dn_ptr);
614 if (dn_db == NULL) 614 if (dn_db == NULL)
615 continue; 615 continue;
616 for(ifa2 = dn_db->ifa_list; ifa2; ifa2 = ifa2->ifa_next) { 616 for (ifa2 = rcu_dereference(dn_db->ifa_list);
617 ifa2 != NULL;
618 ifa2 = rcu_dereference(ifa2->ifa_next)) {
617 if (ifa2->ifa_local == ifa->ifa_local) { 619 if (ifa2->ifa_local == ifa->ifa_local) {
618 found_it = 1; 620 found_it = 1;
619 break; 621 break;
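
One detail worth calling out in the dn_fib.c hunk: every hop through ifa_next gets its own rcu_dereference(), not just the list head. A minimal standalone illustration, with hypothetical node/key names:

struct node {
	int key;
	struct node __rcu *next;
};

/* Caller must hold rcu_read_lock(); each ->next load is a fresh
 * RCU-protected fetch and therefore needs its own accessor. */
static struct node *find_rcu(struct node __rcu **headp, int key)
{
	struct node *n;

	for (n = rcu_dereference(*headp); n;
	     n = rcu_dereference(n->next))
		if (n->key == key)
			return n;
	return NULL;
}
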
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index a085dbcf5c7f..602dade7e9a3 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -391,7 +391,7 @@ int dn_neigh_router_hello(struct sk_buff *skb)
391 write_lock(&neigh->lock); 391 write_lock(&neigh->lock);
392 392
393 neigh->used = jiffies; 393 neigh->used = jiffies;
394 dn_db = (struct dn_dev *)neigh->dev->dn_ptr; 394 dn_db = rcu_dereference(neigh->dev->dn_ptr);
395 395
396 if (!(neigh->nud_state & NUD_PERMANENT)) { 396 if (!(neigh->nud_state & NUD_PERMANENT)) {
397 neigh->updated = jiffies; 397 neigh->updated = jiffies;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index df0f3e54ff8a..474d54dd08c2 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -93,7 +93,7 @@
93 93
94struct dn_rt_hash_bucket 94struct dn_rt_hash_bucket
95{ 95{
96 struct dn_route *chain; 96 struct dn_route __rcu *chain;
97 spinlock_t lock; 97 spinlock_t lock;
98}; 98};
99 99
@@ -157,15 +157,17 @@ static inline void dnrt_drop(struct dn_route *rt)
157static void dn_dst_check_expire(unsigned long dummy) 157static void dn_dst_check_expire(unsigned long dummy)
158{ 158{
159 int i; 159 int i;
160 struct dn_route *rt, **rtp; 160 struct dn_route *rt;
161 struct dn_route __rcu **rtp;
161 unsigned long now = jiffies; 162 unsigned long now = jiffies;
162 unsigned long expire = 120 * HZ; 163 unsigned long expire = 120 * HZ;
163 164
164 for(i = 0; i <= dn_rt_hash_mask; i++) { 165 for (i = 0; i <= dn_rt_hash_mask; i++) {
165 rtp = &dn_rt_hash_table[i].chain; 166 rtp = &dn_rt_hash_table[i].chain;
166 167
167 spin_lock(&dn_rt_hash_table[i].lock); 168 spin_lock(&dn_rt_hash_table[i].lock);
168 while((rt=*rtp) != NULL) { 169 while ((rt = rcu_dereference_protected(*rtp,
170 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
169 if (atomic_read(&rt->dst.__refcnt) || 171 if (atomic_read(&rt->dst.__refcnt) ||
170 (now - rt->dst.lastuse) < expire) { 172 (now - rt->dst.lastuse) < expire) {
171 rtp = &rt->dst.dn_next; 173 rtp = &rt->dst.dn_next;
@@ -186,17 +188,19 @@ static void dn_dst_check_expire(unsigned long dummy)
186 188
187static int dn_dst_gc(struct dst_ops *ops) 189static int dn_dst_gc(struct dst_ops *ops)
188{ 190{
189 struct dn_route *rt, **rtp; 191 struct dn_route *rt;
192 struct dn_route __rcu **rtp;
190 int i; 193 int i;
191 unsigned long now = jiffies; 194 unsigned long now = jiffies;
192 unsigned long expire = 10 * HZ; 195 unsigned long expire = 10 * HZ;
193 196
194 for(i = 0; i <= dn_rt_hash_mask; i++) { 197 for (i = 0; i <= dn_rt_hash_mask; i++) {
195 198
196 spin_lock_bh(&dn_rt_hash_table[i].lock); 199 spin_lock_bh(&dn_rt_hash_table[i].lock);
197 rtp = &dn_rt_hash_table[i].chain; 200 rtp = &dn_rt_hash_table[i].chain;
198 201
199 while((rt=*rtp) != NULL) { 202 while ((rt = rcu_dereference_protected(*rtp,
203 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
200 if (atomic_read(&rt->dst.__refcnt) || 204 if (atomic_read(&rt->dst.__refcnt) ||
201 (now - rt->dst.lastuse) < expire) { 205 (now - rt->dst.lastuse) < expire) {
202 rtp = &rt->dst.dn_next; 206 rtp = &rt->dst.dn_next;
@@ -227,7 +231,7 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
227{ 231{
228 u32 min_mtu = 230; 232 u32 min_mtu = 230;
229 struct dn_dev *dn = dst->neighbour ? 233 struct dn_dev *dn = dst->neighbour ?
230 (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL; 234 rcu_dereference_raw(dst->neighbour->dev->dn_ptr) : NULL;
231 235
232 if (dn && dn->use_long == 0) 236 if (dn && dn->use_long == 0)
233 min_mtu -= 6; 237 min_mtu -= 6;
@@ -277,13 +281,15 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
277 281
278static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp) 282static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
279{ 283{
280 struct dn_route *rth, **rthp; 284 struct dn_route *rth;
285 struct dn_route __rcu **rthp;
281 unsigned long now = jiffies; 286 unsigned long now = jiffies;
282 287
283 rthp = &dn_rt_hash_table[hash].chain; 288 rthp = &dn_rt_hash_table[hash].chain;
284 289
285 spin_lock_bh(&dn_rt_hash_table[hash].lock); 290 spin_lock_bh(&dn_rt_hash_table[hash].lock);
286 while((rth = *rthp) != NULL) { 291 while ((rth = rcu_dereference_protected(*rthp,
292 lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
287 if (compare_keys(&rth->fl, &rt->fl)) { 293 if (compare_keys(&rth->fl, &rt->fl)) {
288 /* Put it first */ 294 /* Put it first */
289 *rthp = rth->dst.dn_next; 295 *rthp = rth->dst.dn_next;
@@ -315,15 +321,15 @@ static void dn_run_flush(unsigned long dummy)
315 int i; 321 int i;
316 struct dn_route *rt, *next; 322 struct dn_route *rt, *next;
317 323
318 for(i = 0; i < dn_rt_hash_mask; i++) { 324 for (i = 0; i < dn_rt_hash_mask; i++) {
319 spin_lock_bh(&dn_rt_hash_table[i].lock); 325 spin_lock_bh(&dn_rt_hash_table[i].lock);
320 326
321 if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL) 327 if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
322 goto nothing_to_declare; 328 goto nothing_to_declare;
323 329
324 for(; rt; rt=next) { 330 for(; rt; rt = next) {
325 next = rt->dst.dn_next; 331 next = rcu_dereference_raw(rt->dst.dn_next);
326 rt->dst.dn_next = NULL; 332 RCU_INIT_POINTER(rt->dst.dn_next, NULL);
327 dst_free((struct dst_entry *)rt); 333 dst_free((struct dst_entry *)rt);
328 } 334 }
329 335
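
The dn_run_flush() hunk is a different idiom again: the whole chain is detached atomically with xchg(), after which the list is private and can be torn down with the unchecked accessors. Roughly as follows (bucket layout as in the struct at the top of this file; the unlock is hoisted here for brevity, where the original holds the bucket lock across the walk):

static void flush_bucket(struct dn_rt_hash_bucket *bucket)
{
	struct dn_route *rt, *next;

	/* steal the entire chain; readers still walking the old nodes
	 * remain safe because dst_free() defers the actual release */
	spin_lock_bh(&bucket->lock);
	rt = xchg((struct dn_route **)&bucket->chain, NULL);
	spin_unlock_bh(&bucket->lock);

	for (; rt; rt = next) {
		next = rcu_dereference_raw(rt->dst.dn_next);
		RCU_INIT_POINTER(rt->dst.dn_next, NULL);
		dst_free(&rt->dst);
	}
}
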
@@ -458,15 +464,16 @@ static int dn_return_long(struct sk_buff *skb)
458 */ 464 */
459static int dn_route_rx_packet(struct sk_buff *skb) 465static int dn_route_rx_packet(struct sk_buff *skb)
460{ 466{
461 struct dn_skb_cb *cb = DN_SKB_CB(skb); 467 struct dn_skb_cb *cb;
462 int err; 468 int err;
463 469
464 if ((err = dn_route_input(skb)) == 0) 470 if ((err = dn_route_input(skb)) == 0)
465 return dst_input(skb); 471 return dst_input(skb);
466 472
473 cb = DN_SKB_CB(skb);
467 if (decnet_debug_level & 4) { 474 if (decnet_debug_level & 4) {
468 char *devname = skb->dev ? skb->dev->name : "???"; 475 char *devname = skb->dev ? skb->dev->name : "???";
469 struct dn_skb_cb *cb = DN_SKB_CB(skb); 476
470 printk(KERN_DEBUG 477 printk(KERN_DEBUG
471 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n", 478 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
472 (int)cb->rt_flags, devname, skb->len, 479 (int)cb->rt_flags, devname, skb->len,
@@ -573,7 +580,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
573 struct dn_skb_cb *cb; 580 struct dn_skb_cb *cb;
574 unsigned char flags = 0; 581 unsigned char flags = 0;
575 __u16 len = le16_to_cpu(*(__le16 *)skb->data); 582 __u16 len = le16_to_cpu(*(__le16 *)skb->data);
576 struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr; 583 struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
577 unsigned char padlen = 0; 584 unsigned char padlen = 0;
578 585
579 if (!net_eq(dev_net(dev), &init_net)) 586 if (!net_eq(dev_net(dev), &init_net))
@@ -728,7 +735,7 @@ static int dn_forward(struct sk_buff *skb)
728{ 735{
729 struct dn_skb_cb *cb = DN_SKB_CB(skb); 736 struct dn_skb_cb *cb = DN_SKB_CB(skb);
730 struct dst_entry *dst = skb_dst(skb); 737 struct dst_entry *dst = skb_dst(skb);
731 struct dn_dev *dn_db = dst->dev->dn_ptr; 738 struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
732 struct dn_route *rt; 739 struct dn_route *rt;
733 struct neighbour *neigh = dst->neighbour; 740 struct neighbour *neigh = dst->neighbour;
734 int header_len; 741 int header_len;
@@ -835,13 +842,16 @@ static inline int dn_match_addr(__le16 addr1, __le16 addr2)
835static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope) 842static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
836{ 843{
837 __le16 saddr = 0; 844 __le16 saddr = 0;
838 struct dn_dev *dn_db = dev->dn_ptr; 845 struct dn_dev *dn_db;
839 struct dn_ifaddr *ifa; 846 struct dn_ifaddr *ifa;
840 int best_match = 0; 847 int best_match = 0;
841 int ret; 848 int ret;
842 849
843 read_lock(&dev_base_lock); 850 rcu_read_lock();
844 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) { 851 dn_db = rcu_dereference(dev->dn_ptr);
852 for (ifa = rcu_dereference(dn_db->ifa_list);
853 ifa != NULL;
854 ifa = rcu_dereference(ifa->ifa_next)) {
845 if (ifa->ifa_scope > scope) 855 if (ifa->ifa_scope > scope)
846 continue; 856 continue;
847 if (!daddr) { 857 if (!daddr) {
@@ -854,7 +864,7 @@ static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int
854 if (best_match == 0) 864 if (best_match == 0)
855 saddr = ifa->ifa_local; 865 saddr = ifa->ifa_local;
856 } 866 }
857 read_unlock(&dev_base_lock); 867 rcu_read_unlock();
858 868
859 return saddr; 869 return saddr;
860} 870}
@@ -1020,7 +1030,7 @@ source_ok:
1020 err = -ENODEV; 1030 err = -ENODEV;
1021 if (dev_out == NULL) 1031 if (dev_out == NULL)
1022 goto out; 1032 goto out;
1023 dn_db = dev_out->dn_ptr; 1033 dn_db = rcu_dereference_raw(dev_out->dn_ptr);
1024 /* Possible improvement - check all devices for local addr */ 1034 /* Possible improvement - check all devices for local addr */
1025 if (dn_dev_islocal(dev_out, fl.fld_dst)) { 1035 if (dn_dev_islocal(dev_out, fl.fld_dst)) {
1026 dev_put(dev_out); 1036 dev_put(dev_out);
@@ -1171,7 +1181,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
1171 if ((flp->fld_dst == rt->fl.fld_dst) && 1181 if ((flp->fld_dst == rt->fl.fld_dst) &&
1172 (flp->fld_src == rt->fl.fld_src) && 1182 (flp->fld_src == rt->fl.fld_src) &&
1173 (flp->mark == rt->fl.mark) && 1183 (flp->mark == rt->fl.mark) &&
1174 (rt->fl.iif == 0) && 1184 dn_is_output_route(rt) &&
1175 (rt->fl.oif == flp->oif)) { 1185 (rt->fl.oif == flp->oif)) {
1176 dst_use(&rt->dst, jiffies); 1186 dst_use(&rt->dst, jiffies);
1177 rcu_read_unlock_bh(); 1187 rcu_read_unlock_bh();
@@ -1233,7 +1243,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
1233 1243
1234 dev_hold(in_dev); 1244 dev_hold(in_dev);
1235 1245
1236 if ((dn_db = in_dev->dn_ptr) == NULL) 1246 if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
1237 goto out; 1247 goto out;
1238 1248
1239 /* Zero source addresses are not allowed */ 1249 /* Zero source addresses are not allowed */
@@ -1502,7 +1512,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1502 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires, 1512 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
1503 rt->dst.error) < 0) 1513 rt->dst.error) < 0)
1504 goto rtattr_failure; 1514 goto rtattr_failure;
1505 if (rt->fl.iif) 1515 if (dn_is_input_route(rt))
1506 RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif); 1516 RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
1507 1517
1508 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1518 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
@@ -1677,15 +1687,15 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
1677{ 1687{
1678 struct dn_rt_cache_iter_state *s = seq->private; 1688 struct dn_rt_cache_iter_state *s = seq->private;
1679 1689
1680 rt = rt->dst.dn_next; 1690 rt = rcu_dereference_bh(rt->dst.dn_next);
1681 while(!rt) { 1691 while (!rt) {
1682 rcu_read_unlock_bh(); 1692 rcu_read_unlock_bh();
1683 if (--s->bucket < 0) 1693 if (--s->bucket < 0)
1684 break; 1694 break;
1685 rcu_read_lock_bh(); 1695 rcu_read_lock_bh();
1686 rt = dn_rt_hash_table[s->bucket].chain; 1696 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
1687 } 1697 }
1688 return rcu_dereference_bh(rt); 1698 return rt;
1689} 1699}
1690 1700
1691static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos) 1701static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
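
Across dn_dst_check_expire(), dn_dst_gc() and dn_insert_route(), the same update-side walk recurs: the per-bucket spinlock is the real protection, and rcu_dereference_protected(..., lockdep_is_held(...)) merely documents that fact for sparse and lockdep. A condensed sketch, with still_in_use() standing in for the refcount/age test:

static bool still_in_use(struct dn_route *rt);	/* refcount/age test */

static void prune_bucket(struct dn_rt_hash_bucket *bucket)
{
	struct dn_route *rt;
	struct dn_route __rcu **rtp = &bucket->chain;

	spin_lock_bh(&bucket->lock);
	while ((rt = rcu_dereference_protected(*rtp,
			lockdep_is_held(&bucket->lock))) != NULL) {
		if (still_in_use(rt)) {
			rtp = &rt->dst.dn_next;	/* keep entry, advance */
			continue;
		}
		*rtp = rt->dst.dn_next;		/* unlink under the lock */
		dst_free(&rt->dst);		/* RCU-deferred release */
	}
	spin_unlock_bh(&bucket->lock);
}
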
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 96bc7f9475a3..c6e2affafbd3 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -506,8 +506,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
506 struct net_device *dev = NULL; 506 struct net_device *dev = NULL;
507 507
508 rcu_read_lock(); 508 rcu_read_lock();
509 if (rt->fl.iif && 509 if (rt_is_input_route(rt) &&
510 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) 510 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
511 dev = dev_get_by_index_rcu(net, rt->fl.iif); 511 dev = dev_get_by_index_rcu(net, rt->fl.iif);
512 512
513 if (dev) 513 if (dev)
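
The rt_is_input_route()/rt_is_output_route() helpers that this and the later ip_gre.c/ipmr.c/route.c hunks switch to are not shown in this section; given the open-coded tests they replace, they are presumably one-liners in include/net/route.h along these lines (dn_is_input_route()/dn_is_output_route() in dn_route.h being the DECnet twins):

/* Presumed shape, inferred from the fl.iif tests they replace: */
static inline bool rt_is_input_route(struct rtable *rt)
{
	return rt->fl.iif != 0;		/* arrived on an interface */
}

static inline bool rt_is_output_route(struct rtable *rt)
{
	return rt->fl.iif == 0;		/* locally generated */
}
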
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 3c53c2d89e3b..0f0e0f0279b8 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -149,11 +149,17 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc);
149static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, 149static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
150 int sfcount, __be32 *psfsrc, int delta); 150 int sfcount, __be32 *psfsrc, int delta);
151 151
152
153static void ip_mc_list_reclaim(struct rcu_head *head)
154{
155 kfree(container_of(head, struct ip_mc_list, rcu));
156}
157
152static void ip_ma_put(struct ip_mc_list *im) 158static void ip_ma_put(struct ip_mc_list *im)
153{ 159{
154 if (atomic_dec_and_test(&im->refcnt)) { 160 if (atomic_dec_and_test(&im->refcnt)) {
155 in_dev_put(im->interface); 161 in_dev_put(im->interface);
156 kfree(im); 162 call_rcu(&im->rcu, ip_mc_list_reclaim);
157 } 163 }
158} 164}
159 165
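
The ip_ma_put() change is the reclaim half of the igmp.c conversion: once mc_list readers go lockless, the final kfree() must be deferred by a grace period. The pattern, reduced to its essentials (names as in the hunk):

static void ip_mc_list_reclaim(struct rcu_head *head)
{
	/* runs after every pre-existing RCU reader has finished */
	kfree(container_of(head, struct ip_mc_list, rcu));
}

static void ip_ma_put(struct ip_mc_list *im)
{
	if (atomic_dec_and_test(&im->refcnt)) {
		in_dev_put(im->interface);
		call_rcu(&im->rcu, ip_mc_list_reclaim);
	}
}
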
@@ -163,7 +169,7 @@ static void ip_ma_put(struct ip_mc_list *im)
163 * Timer management 169 * Timer management
164 */ 170 */
165 171
166static __inline__ void igmp_stop_timer(struct ip_mc_list *im) 172static void igmp_stop_timer(struct ip_mc_list *im)
167{ 173{
168 spin_lock_bh(&im->lock); 174 spin_lock_bh(&im->lock);
169 if (del_timer(&im->timer)) 175 if (del_timer(&im->timer))
@@ -496,14 +502,24 @@ empty_source:
496 return skb; 502 return skb;
497} 503}
498 504
505#define for_each_pmc_rcu(in_dev, pmc) \
506 for (pmc = rcu_dereference(in_dev->mc_list); \
507 pmc != NULL; \
508 pmc = rcu_dereference(pmc->next_rcu))
509
510#define for_each_pmc_rtnl(in_dev, pmc) \
511 for (pmc = rtnl_dereference(in_dev->mc_list); \
512 pmc != NULL; \
513 pmc = rtnl_dereference(pmc->next_rcu))
514
499static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc) 515static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
500{ 516{
501 struct sk_buff *skb = NULL; 517 struct sk_buff *skb = NULL;
502 int type; 518 int type;
503 519
504 if (!pmc) { 520 if (!pmc) {
505 read_lock(&in_dev->mc_list_lock); 521 rcu_read_lock();
506 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 522 for_each_pmc_rcu(in_dev, pmc) {
507 if (pmc->multiaddr == IGMP_ALL_HOSTS) 523 if (pmc->multiaddr == IGMP_ALL_HOSTS)
508 continue; 524 continue;
509 spin_lock_bh(&pmc->lock); 525 spin_lock_bh(&pmc->lock);
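
The two iterator macros just introduced encode the caller's locking context in their names. A usage sketch (both functions hypothetical; the locking shown is the calling convention each macro asserts, not extra protection added here):

static int count_groups_reader(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;
	int n = 0;

	rcu_read_lock();			/* datapath, lockless */
	for_each_pmc_rcu(in_dev, pmc)
		n++;
	rcu_read_unlock();
	return n;
}

static int count_groups_config(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;
	int n = 0;

	ASSERT_RTNL();				/* setsockopt/ioctl paths */
	for_each_pmc_rtnl(in_dev, pmc)
		n++;
	return n;
}
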
@@ -514,7 +530,7 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
514 skb = add_grec(skb, pmc, type, 0, 0); 530 skb = add_grec(skb, pmc, type, 0, 0);
515 spin_unlock_bh(&pmc->lock); 531 spin_unlock_bh(&pmc->lock);
516 } 532 }
517 read_unlock(&in_dev->mc_list_lock); 533 rcu_read_unlock();
518 } else { 534 } else {
519 spin_lock_bh(&pmc->lock); 535 spin_lock_bh(&pmc->lock);
520 if (pmc->sfcount[MCAST_EXCLUDE]) 536 if (pmc->sfcount[MCAST_EXCLUDE])
@@ -556,7 +572,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
556 struct sk_buff *skb = NULL; 572 struct sk_buff *skb = NULL;
557 int type, dtype; 573 int type, dtype;
558 574
559 read_lock(&in_dev->mc_list_lock); 575 rcu_read_lock();
560 spin_lock_bh(&in_dev->mc_tomb_lock); 576 spin_lock_bh(&in_dev->mc_tomb_lock);
561 577
562 /* deleted MCA's */ 578 /* deleted MCA's */
@@ -593,7 +609,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
593 spin_unlock_bh(&in_dev->mc_tomb_lock); 609 spin_unlock_bh(&in_dev->mc_tomb_lock);
594 610
595 /* change recs */ 611 /* change recs */
596 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 612 for_each_pmc_rcu(in_dev, pmc) {
597 spin_lock_bh(&pmc->lock); 613 spin_lock_bh(&pmc->lock);
598 if (pmc->sfcount[MCAST_EXCLUDE]) { 614 if (pmc->sfcount[MCAST_EXCLUDE]) {
599 type = IGMPV3_BLOCK_OLD_SOURCES; 615 type = IGMPV3_BLOCK_OLD_SOURCES;
@@ -616,7 +632,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
616 } 632 }
617 spin_unlock_bh(&pmc->lock); 633 spin_unlock_bh(&pmc->lock);
618 } 634 }
619 read_unlock(&in_dev->mc_list_lock); 635 rcu_read_unlock();
620 636
621 if (!skb) 637 if (!skb)
622 return; 638 return;
@@ -813,14 +829,14 @@ static void igmp_heard_report(struct in_device *in_dev, __be32 group)
813 if (group == IGMP_ALL_HOSTS) 829 if (group == IGMP_ALL_HOSTS)
814 return; 830 return;
815 831
816 read_lock(&in_dev->mc_list_lock); 832 rcu_read_lock();
817 for (im=in_dev->mc_list; im!=NULL; im=im->next) { 833 for_each_pmc_rcu(in_dev, im) {
818 if (im->multiaddr == group) { 834 if (im->multiaddr == group) {
819 igmp_stop_timer(im); 835 igmp_stop_timer(im);
820 break; 836 break;
821 } 837 }
822 } 838 }
823 read_unlock(&in_dev->mc_list_lock); 839 rcu_read_unlock();
824} 840}
825 841
826static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, 842static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
@@ -906,8 +922,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
906 * - Use the igmp->igmp_code field as the maximum 922 * - Use the igmp->igmp_code field as the maximum
907 * delay possible 923 * delay possible
908 */ 924 */
909 read_lock(&in_dev->mc_list_lock); 925 rcu_read_lock();
910 for (im=in_dev->mc_list; im!=NULL; im=im->next) { 926 for_each_pmc_rcu(in_dev, im) {
911 int changed; 927 int changed;
912 928
913 if (group && group != im->multiaddr) 929 if (group && group != im->multiaddr)
@@ -925,7 +941,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
925 if (changed) 941 if (changed)
926 igmp_mod_timer(im, max_delay); 942 igmp_mod_timer(im, max_delay);
927 } 943 }
928 read_unlock(&in_dev->mc_list_lock); 944 rcu_read_unlock();
929} 945}
930 946
931/* called in rcu_read_lock() section */ 947/* called in rcu_read_lock() section */
@@ -961,7 +977,7 @@ int igmp_rcv(struct sk_buff *skb)
961 case IGMP_HOST_MEMBERSHIP_REPORT: 977 case IGMP_HOST_MEMBERSHIP_REPORT:
962 case IGMPV2_HOST_MEMBERSHIP_REPORT: 978 case IGMPV2_HOST_MEMBERSHIP_REPORT:
963 /* Is it our report looped back? */ 979 /* Is it our report looped back? */
964 if (skb_rtable(skb)->fl.iif == 0) 980 if (rt_is_output_route(skb_rtable(skb)))
965 break; 981 break;
966 /* don't rely on MC router hearing unicast reports */ 982 /* don't rely on MC router hearing unicast reports */
967 if (skb->pkt_type == PACKET_MULTICAST || 983 if (skb->pkt_type == PACKET_MULTICAST ||
@@ -1110,8 +1126,8 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
1110 kfree(pmc); 1126 kfree(pmc);
1111 } 1127 }
1112 /* clear dead sources, too */ 1128 /* clear dead sources, too */
1113 read_lock(&in_dev->mc_list_lock); 1129 rcu_read_lock();
1114 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 1130 for_each_pmc_rcu(in_dev, pmc) {
1115 struct ip_sf_list *psf, *psf_next; 1131 struct ip_sf_list *psf, *psf_next;
1116 1132
1117 spin_lock_bh(&pmc->lock); 1133 spin_lock_bh(&pmc->lock);
@@ -1123,7 +1139,7 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
1123 kfree(psf); 1139 kfree(psf);
1124 } 1140 }
1125 } 1141 }
1126 read_unlock(&in_dev->mc_list_lock); 1142 rcu_read_unlock();
1127} 1143}
1128#endif 1144#endif
1129 1145
@@ -1209,7 +1225,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1209 1225
1210 ASSERT_RTNL(); 1226 ASSERT_RTNL();
1211 1227
1212 for (im=in_dev->mc_list; im; im=im->next) { 1228 for_each_pmc_rtnl(in_dev, im) {
1213 if (im->multiaddr == addr) { 1229 if (im->multiaddr == addr) {
1214 im->users++; 1230 im->users++;
1215 ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0); 1231 ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
@@ -1217,7 +1233,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1217 } 1233 }
1218 } 1234 }
1219 1235
1220 im = kmalloc(sizeof(*im), GFP_KERNEL); 1236 im = kzalloc(sizeof(*im), GFP_KERNEL);
1221 if (!im) 1237 if (!im)
1222 goto out; 1238 goto out;
1223 1239
@@ -1227,26 +1243,18 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1227 im->multiaddr = addr; 1243 im->multiaddr = addr;
1228 /* initial mode is (EX, empty) */ 1244 /* initial mode is (EX, empty) */
1229 im->sfmode = MCAST_EXCLUDE; 1245 im->sfmode = MCAST_EXCLUDE;
1230 im->sfcount[MCAST_INCLUDE] = 0;
1231 im->sfcount[MCAST_EXCLUDE] = 1; 1246 im->sfcount[MCAST_EXCLUDE] = 1;
1232 im->sources = NULL;
1233 im->tomb = NULL;
1234 im->crcount = 0;
1235 atomic_set(&im->refcnt, 1); 1247 atomic_set(&im->refcnt, 1);
1236 spin_lock_init(&im->lock); 1248 spin_lock_init(&im->lock);
1237#ifdef CONFIG_IP_MULTICAST 1249#ifdef CONFIG_IP_MULTICAST
1238 im->tm_running = 0;
1239 setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im); 1250 setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
1240 im->unsolicit_count = IGMP_Unsolicited_Report_Count; 1251 im->unsolicit_count = IGMP_Unsolicited_Report_Count;
1241 im->reporter = 0;
1242 im->gsquery = 0;
1243#endif 1252#endif
1244 im->loaded = 0; 1253
1245 write_lock_bh(&in_dev->mc_list_lock); 1254 im->next_rcu = in_dev->mc_list;
1246 im->next = in_dev->mc_list;
1247 in_dev->mc_list = im;
1248 in_dev->mc_count++; 1255 in_dev->mc_count++;
1249 write_unlock_bh(&in_dev->mc_list_lock); 1256 rcu_assign_pointer(in_dev->mc_list, im);
1257
1250#ifdef CONFIG_IP_MULTICAST 1258#ifdef CONFIG_IP_MULTICAST
1251 igmpv3_del_delrec(in_dev, im->multiaddr); 1259 igmpv3_del_delrec(in_dev, im->multiaddr);
1252#endif 1260#endif
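
Two quiet but important details in this ip_mc_inc_group() hunk: kzalloc() replaces kmalloc() plus the hand-rolled zeroing of each field, and the store that makes the node reachable moves to rcu_assign_pointer() as the final step. A condensed sketch of the publication order (RTNL held by the caller, as in the original):

static void publish_pmc(struct in_device *in_dev, struct ip_mc_list *im)
{
	ASSERT_RTNL();

	/* 1. finish initializing while the node is still private */
	im->next_rcu = in_dev->mc_list;
	in_dev->mc_count++;

	/* 2. publish last: rcu_assign_pointer() orders all the stores
	 * above before the pointer update, so a lockless reader can
	 * never observe a half-initialized entry */
	rcu_assign_pointer(in_dev->mc_list, im);
}
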
@@ -1287,17 +1295,18 @@ EXPORT_SYMBOL(ip_mc_rejoin_group);
1287 1295
1288void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) 1296void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
1289{ 1297{
1290 struct ip_mc_list *i, **ip; 1298 struct ip_mc_list *i;
1299 struct ip_mc_list __rcu **ip;
1291 1300
1292 ASSERT_RTNL(); 1301 ASSERT_RTNL();
1293 1302
1294 for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { 1303 for (ip = &in_dev->mc_list;
1304 (i = rtnl_dereference(*ip)) != NULL;
1305 ip = &i->next_rcu) {
1295 if (i->multiaddr == addr) { 1306 if (i->multiaddr == addr) {
1296 if (--i->users == 0) { 1307 if (--i->users == 0) {
1297 write_lock_bh(&in_dev->mc_list_lock); 1308 *ip = i->next_rcu;
1298 *ip = i->next;
1299 in_dev->mc_count--; 1309 in_dev->mc_count--;
1300 write_unlock_bh(&in_dev->mc_list_lock);
1301 igmp_group_dropped(i); 1310 igmp_group_dropped(i);
1302 1311
1303 if (!in_dev->dead) 1312 if (!in_dev->dead)
@@ -1316,34 +1325,34 @@ EXPORT_SYMBOL(ip_mc_dec_group);
1316 1325
1317void ip_mc_unmap(struct in_device *in_dev) 1326void ip_mc_unmap(struct in_device *in_dev)
1318{ 1327{
1319 struct ip_mc_list *i; 1328 struct ip_mc_list *pmc;
1320 1329
1321 ASSERT_RTNL(); 1330 ASSERT_RTNL();
1322 1331
1323 for (i = in_dev->mc_list; i; i = i->next) 1332 for_each_pmc_rtnl(in_dev, pmc)
1324 igmp_group_dropped(i); 1333 igmp_group_dropped(pmc);
1325} 1334}
1326 1335
1327void ip_mc_remap(struct in_device *in_dev) 1336void ip_mc_remap(struct in_device *in_dev)
1328{ 1337{
1329 struct ip_mc_list *i; 1338 struct ip_mc_list *pmc;
1330 1339
1331 ASSERT_RTNL(); 1340 ASSERT_RTNL();
1332 1341
1333 for (i = in_dev->mc_list; i; i = i->next) 1342 for_each_pmc_rtnl(in_dev, pmc)
1334 igmp_group_added(i); 1343 igmp_group_added(pmc);
1335} 1344}
1336 1345
1337/* Device going down */ 1346/* Device going down */
1338 1347
1339void ip_mc_down(struct in_device *in_dev) 1348void ip_mc_down(struct in_device *in_dev)
1340{ 1349{
1341 struct ip_mc_list *i; 1350 struct ip_mc_list *pmc;
1342 1351
1343 ASSERT_RTNL(); 1352 ASSERT_RTNL();
1344 1353
1345 for (i=in_dev->mc_list; i; i=i->next) 1354 for_each_pmc_rtnl(in_dev, pmc)
1346 igmp_group_dropped(i); 1355 igmp_group_dropped(pmc);
1347 1356
1348#ifdef CONFIG_IP_MULTICAST 1357#ifdef CONFIG_IP_MULTICAST
1349 in_dev->mr_ifc_count = 0; 1358 in_dev->mr_ifc_count = 0;
@@ -1374,7 +1383,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
1374 in_dev->mr_qrv = IGMP_Unsolicited_Report_Count; 1383 in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
1375#endif 1384#endif
1376 1385
1377 rwlock_init(&in_dev->mc_list_lock);
1378 spin_lock_init(&in_dev->mc_tomb_lock); 1386 spin_lock_init(&in_dev->mc_tomb_lock);
1379} 1387}
1380 1388
@@ -1382,14 +1390,14 @@ void ip_mc_init_dev(struct in_device *in_dev)
1382 1390
1383void ip_mc_up(struct in_device *in_dev) 1391void ip_mc_up(struct in_device *in_dev)
1384{ 1392{
1385 struct ip_mc_list *i; 1393 struct ip_mc_list *pmc;
1386 1394
1387 ASSERT_RTNL(); 1395 ASSERT_RTNL();
1388 1396
1389 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); 1397 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
1390 1398
1391 for (i=in_dev->mc_list; i; i=i->next) 1399 for_each_pmc_rtnl(in_dev, pmc)
1392 igmp_group_added(i); 1400 igmp_group_added(pmc);
1393} 1401}
1394 1402
1395/* 1403/*
@@ -1405,17 +1413,13 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1405 /* Deactivate timers */ 1413 /* Deactivate timers */
1406 ip_mc_down(in_dev); 1414 ip_mc_down(in_dev);
1407 1415
1408 write_lock_bh(&in_dev->mc_list_lock); 1416 while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
1409 while ((i = in_dev->mc_list) != NULL) { 1417 in_dev->mc_list = i->next_rcu;
1410 in_dev->mc_list = i->next;
1411 in_dev->mc_count--; 1418 in_dev->mc_count--;
1412 write_unlock_bh(&in_dev->mc_list_lock); 1419
1413 igmp_group_dropped(i); 1420 igmp_group_dropped(i);
1414 ip_ma_put(i); 1421 ip_ma_put(i);
1415
1416 write_lock_bh(&in_dev->mc_list_lock);
1417 } 1422 }
1418 write_unlock_bh(&in_dev->mc_list_lock);
1419} 1423}
1420 1424
1421/* RTNL is locked */ 1425/* RTNL is locked */
@@ -1513,18 +1517,18 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1513 1517
1514 if (!in_dev) 1518 if (!in_dev)
1515 return -ENODEV; 1519 return -ENODEV;
1516 read_lock(&in_dev->mc_list_lock); 1520 rcu_read_lock();
1517 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 1521 for_each_pmc_rcu(in_dev, pmc) {
1518 if (*pmca == pmc->multiaddr) 1522 if (*pmca == pmc->multiaddr)
1519 break; 1523 break;
1520 } 1524 }
1521 if (!pmc) { 1525 if (!pmc) {
1522 /* MCA not found?? bug */ 1526 /* MCA not found?? bug */
1523 read_unlock(&in_dev->mc_list_lock); 1527 rcu_read_unlock();
1524 return -ESRCH; 1528 return -ESRCH;
1525 } 1529 }
1526 spin_lock_bh(&pmc->lock); 1530 spin_lock_bh(&pmc->lock);
1527 read_unlock(&in_dev->mc_list_lock); 1531 rcu_read_unlock();
1528#ifdef CONFIG_IP_MULTICAST 1532#ifdef CONFIG_IP_MULTICAST
1529 sf_markstate(pmc); 1533 sf_markstate(pmc);
1530#endif 1534#endif
@@ -1685,18 +1689,18 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1685 1689
1686 if (!in_dev) 1690 if (!in_dev)
1687 return -ENODEV; 1691 return -ENODEV;
1688 read_lock(&in_dev->mc_list_lock); 1692 rcu_read_lock();
1689 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) { 1693 for_each_pmc_rcu(in_dev, pmc) {
1690 if (*pmca == pmc->multiaddr) 1694 if (*pmca == pmc->multiaddr)
1691 break; 1695 break;
1692 } 1696 }
1693 if (!pmc) { 1697 if (!pmc) {
1694 /* MCA not found?? bug */ 1698 /* MCA not found?? bug */
1695 read_unlock(&in_dev->mc_list_lock); 1699 rcu_read_unlock();
1696 return -ESRCH; 1700 return -ESRCH;
1697 } 1701 }
1698 spin_lock_bh(&pmc->lock); 1702 spin_lock_bh(&pmc->lock);
1699 read_unlock(&in_dev->mc_list_lock); 1703 rcu_read_unlock();
1700 1704
1701#ifdef CONFIG_IP_MULTICAST 1705#ifdef CONFIG_IP_MULTICAST
1702 sf_markstate(pmc); 1706 sf_markstate(pmc);
@@ -1793,7 +1797,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1793 1797
1794 err = -EADDRINUSE; 1798 err = -EADDRINUSE;
1795 ifindex = imr->imr_ifindex; 1799 ifindex = imr->imr_ifindex;
1796 for (i = inet->mc_list; i; i = i->next) { 1800 for_each_pmc_rtnl(inet, i) {
1797 if (i->multi.imr_multiaddr.s_addr == addr && 1801 if (i->multi.imr_multiaddr.s_addr == addr &&
1798 i->multi.imr_ifindex == ifindex) 1802 i->multi.imr_ifindex == ifindex)
1799 goto done; 1803 goto done;
@@ -1807,7 +1811,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1807 goto done; 1811 goto done;
1808 1812
1809 memcpy(&iml->multi, imr, sizeof(*imr)); 1813 memcpy(&iml->multi, imr, sizeof(*imr));
1810 iml->next = inet->mc_list; 1814 iml->next_rcu = inet->mc_list;
1811 iml->sflist = NULL; 1815 iml->sflist = NULL;
1812 iml->sfmode = MCAST_EXCLUDE; 1816 iml->sfmode = MCAST_EXCLUDE;
1813 rcu_assign_pointer(inet->mc_list, iml); 1817 rcu_assign_pointer(inet->mc_list, iml);
@@ -1821,17 +1825,14 @@ EXPORT_SYMBOL(ip_mc_join_group);
1821 1825
1822static void ip_sf_socklist_reclaim(struct rcu_head *rp) 1826static void ip_sf_socklist_reclaim(struct rcu_head *rp)
1823{ 1827{
1824 struct ip_sf_socklist *psf; 1828 kfree(container_of(rp, struct ip_sf_socklist, rcu));
1825
1826 psf = container_of(rp, struct ip_sf_socklist, rcu);
1827 /* sk_omem_alloc should have been decreased by the caller*/ 1829 /* sk_omem_alloc should have been decreased by the caller*/
1828 kfree(psf);
1829} 1830}
1830 1831
1831static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, 1832static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1832 struct in_device *in_dev) 1833 struct in_device *in_dev)
1833{ 1834{
1834 struct ip_sf_socklist *psf = iml->sflist; 1835 struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
1835 int err; 1836 int err;
1836 1837
1837 if (psf == NULL) { 1838 if (psf == NULL) {
@@ -1851,11 +1852,8 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1851 1852
1852static void ip_mc_socklist_reclaim(struct rcu_head *rp) 1853static void ip_mc_socklist_reclaim(struct rcu_head *rp)
1853{ 1854{
1854 struct ip_mc_socklist *iml; 1855 kfree(container_of(rp, struct ip_mc_socklist, rcu));
1855
1856 iml = container_of(rp, struct ip_mc_socklist, rcu);
1857 /* sk_omem_alloc should have been decreased by the caller*/ 1856 /* sk_omem_alloc should have been decreased by the caller*/
1858 kfree(iml);
1859} 1857}
1860 1858
1861 1859
@@ -1866,7 +1864,8 @@ static void ip_mc_socklist_reclaim(struct rcu_head *rp)
1866int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) 1864int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1867{ 1865{
1868 struct inet_sock *inet = inet_sk(sk); 1866 struct inet_sock *inet = inet_sk(sk);
1869 struct ip_mc_socklist *iml, **imlp; 1867 struct ip_mc_socklist *iml;
1868 struct ip_mc_socklist __rcu **imlp;
1870 struct in_device *in_dev; 1869 struct in_device *in_dev;
1871 struct net *net = sock_net(sk); 1870 struct net *net = sock_net(sk);
1872 __be32 group = imr->imr_multiaddr.s_addr; 1871 __be32 group = imr->imr_multiaddr.s_addr;
@@ -1876,7 +1875,9 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1876 rtnl_lock(); 1875 rtnl_lock();
1877 in_dev = ip_mc_find_dev(net, imr); 1876 in_dev = ip_mc_find_dev(net, imr);
1878 ifindex = imr->imr_ifindex; 1877 ifindex = imr->imr_ifindex;
1879 for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) { 1878 for (imlp = &inet->mc_list;
1879 (iml = rtnl_dereference(*imlp)) != NULL;
1880 imlp = &iml->next_rcu) {
1880 if (iml->multi.imr_multiaddr.s_addr != group) 1881 if (iml->multi.imr_multiaddr.s_addr != group)
1881 continue; 1882 continue;
1882 if (ifindex) { 1883 if (ifindex) {
@@ -1888,7 +1889,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1888 1889
1889 (void) ip_mc_leave_src(sk, iml, in_dev); 1890 (void) ip_mc_leave_src(sk, iml, in_dev);
1890 1891
1891 rcu_assign_pointer(*imlp, iml->next); 1892 *imlp = iml->next_rcu;
1892 1893
1893 if (in_dev) 1894 if (in_dev)
1894 ip_mc_dec_group(in_dev, group); 1895 ip_mc_dec_group(in_dev, group);
@@ -1934,7 +1935,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1934 } 1935 }
1935 err = -EADDRNOTAVAIL; 1936 err = -EADDRNOTAVAIL;
1936 1937
1937 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 1938 for_each_pmc_rtnl(inet, pmc) {
1938 if ((pmc->multi.imr_multiaddr.s_addr == 1939 if ((pmc->multi.imr_multiaddr.s_addr ==
1939 imr.imr_multiaddr.s_addr) && 1940 imr.imr_multiaddr.s_addr) &&
1940 (pmc->multi.imr_ifindex == imr.imr_ifindex)) 1941 (pmc->multi.imr_ifindex == imr.imr_ifindex))
@@ -1958,7 +1959,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1958 pmc->sfmode = omode; 1959 pmc->sfmode = omode;
1959 } 1960 }
1960 1961
1961 psl = pmc->sflist; 1962 psl = rtnl_dereference(pmc->sflist);
1962 if (!add) { 1963 if (!add) {
1963 if (!psl) 1964 if (!psl)
1964 goto done; /* err = -EADDRNOTAVAIL */ 1965 goto done; /* err = -EADDRNOTAVAIL */
@@ -2077,7 +2078,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2077 goto done; 2078 goto done;
2078 } 2079 }
2079 2080
2080 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2081 for_each_pmc_rtnl(inet, pmc) {
2081 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr && 2082 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
2082 pmc->multi.imr_ifindex == imr.imr_ifindex) 2083 pmc->multi.imr_ifindex == imr.imr_ifindex)
2083 break; 2084 break;
@@ -2107,7 +2108,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2107 (void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr, 2108 (void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
2108 msf->imsf_fmode, 0, NULL, 0); 2109 msf->imsf_fmode, 0, NULL, 0);
2109 } 2110 }
2110 psl = pmc->sflist; 2111 psl = rtnl_dereference(pmc->sflist);
2111 if (psl) { 2112 if (psl) {
2112 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2113 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2113 psl->sl_count, psl->sl_addr, 0); 2114 psl->sl_count, psl->sl_addr, 0);
@@ -2155,7 +2156,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
2155 } 2156 }
2156 err = -EADDRNOTAVAIL; 2157 err = -EADDRNOTAVAIL;
2157 2158
2158 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2159 for_each_pmc_rtnl(inet, pmc) {
2159 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr && 2160 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
2160 pmc->multi.imr_ifindex == imr.imr_ifindex) 2161 pmc->multi.imr_ifindex == imr.imr_ifindex)
2161 break; 2162 break;
@@ -2163,7 +2164,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
2163 if (!pmc) /* must have a prior join */ 2164 if (!pmc) /* must have a prior join */
2164 goto done; 2165 goto done;
2165 msf->imsf_fmode = pmc->sfmode; 2166 msf->imsf_fmode = pmc->sfmode;
2166 psl = pmc->sflist; 2167 psl = rtnl_dereference(pmc->sflist);
2167 rtnl_unlock(); 2168 rtnl_unlock();
2168 if (!psl) { 2169 if (!psl) {
2169 len = 0; 2170 len = 0;
@@ -2208,7 +2209,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
2208 2209
2209 err = -EADDRNOTAVAIL; 2210 err = -EADDRNOTAVAIL;
2210 2211
2211 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2212 for_each_pmc_rtnl(inet, pmc) {
2212 if (pmc->multi.imr_multiaddr.s_addr == addr && 2213 if (pmc->multi.imr_multiaddr.s_addr == addr &&
2213 pmc->multi.imr_ifindex == gsf->gf_interface) 2214 pmc->multi.imr_ifindex == gsf->gf_interface)
2214 break; 2215 break;
@@ -2216,7 +2217,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
2216 if (!pmc) /* must have a prior join */ 2217 if (!pmc) /* must have a prior join */
2217 goto done; 2218 goto done;
2218 gsf->gf_fmode = pmc->sfmode; 2219 gsf->gf_fmode = pmc->sfmode;
2219 psl = pmc->sflist; 2220 psl = rtnl_dereference(pmc->sflist);
2220 rtnl_unlock(); 2221 rtnl_unlock();
2221 count = psl ? psl->sl_count : 0; 2222 count = psl ? psl->sl_count : 0;
2222 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc; 2223 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
@@ -2257,7 +2258,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2257 goto out; 2258 goto out;
2258 2259
2259 rcu_read_lock(); 2260 rcu_read_lock();
2260 for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) { 2261 for_each_pmc_rcu(inet, pmc) {
2261 if (pmc->multi.imr_multiaddr.s_addr == loc_addr && 2262 if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
2262 pmc->multi.imr_ifindex == dif) 2263 pmc->multi.imr_ifindex == dif)
2263 break; 2264 break;
@@ -2265,7 +2266,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2265 ret = inet->mc_all; 2266 ret = inet->mc_all;
2266 if (!pmc) 2267 if (!pmc)
2267 goto unlock; 2268 goto unlock;
2268 psl = pmc->sflist; 2269 psl = rcu_dereference(pmc->sflist);
2269 ret = (pmc->sfmode == MCAST_EXCLUDE); 2270 ret = (pmc->sfmode == MCAST_EXCLUDE);
2270 if (!psl) 2271 if (!psl)
2271 goto unlock; 2272 goto unlock;
@@ -2300,10 +2301,10 @@ void ip_mc_drop_socket(struct sock *sk)
2300 return; 2301 return;
2301 2302
2302 rtnl_lock(); 2303 rtnl_lock();
2303 while ((iml = inet->mc_list) != NULL) { 2304 while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
2304 struct in_device *in_dev; 2305 struct in_device *in_dev;
2305 rcu_assign_pointer(inet->mc_list, iml->next);
2306 2306
2307 inet->mc_list = iml->next_rcu;
2307 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); 2308 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2308 (void) ip_mc_leave_src(sk, iml, in_dev); 2309 (void) ip_mc_leave_src(sk, iml, in_dev);
2309 if (in_dev != NULL) 2310 if (in_dev != NULL)
@@ -2321,8 +2322,8 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
2321 struct ip_sf_list *psf; 2322 struct ip_sf_list *psf;
2322 int rv = 0; 2323 int rv = 0;
2323 2324
2324 read_lock(&in_dev->mc_list_lock); 2325 rcu_read_lock();
2325 for (im=in_dev->mc_list; im; im=im->next) { 2326 for_each_pmc_rcu(in_dev, im) {
2326 if (im->multiaddr == mc_addr) 2327 if (im->multiaddr == mc_addr)
2327 break; 2328 break;
2328 } 2329 }
@@ -2343,7 +2344,7 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
2343 } else 2344 } else
2344 rv = 1; /* unspecified source; tentatively allow */ 2345 rv = 1; /* unspecified source; tentatively allow */
2345 } 2346 }
2346 read_unlock(&in_dev->mc_list_lock); 2347 rcu_read_unlock();
2347 return rv; 2348 return rv;
2348} 2349}
2349 2350
@@ -2369,13 +2370,11 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2369 in_dev = __in_dev_get_rcu(state->dev); 2370 in_dev = __in_dev_get_rcu(state->dev);
2370 if (!in_dev) 2371 if (!in_dev)
2371 continue; 2372 continue;
2372 read_lock(&in_dev->mc_list_lock); 2373 im = rcu_dereference(in_dev->mc_list);
2373 im = in_dev->mc_list;
2374 if (im) { 2374 if (im) {
2375 state->in_dev = in_dev; 2375 state->in_dev = in_dev;
2376 break; 2376 break;
2377 } 2377 }
2378 read_unlock(&in_dev->mc_list_lock);
2379 } 2378 }
2380 return im; 2379 return im;
2381} 2380}
@@ -2383,11 +2382,9 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2383static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im) 2382static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
2384{ 2383{
2385 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2384 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2386 im = im->next;
2387 while (!im) {
2388 if (likely(state->in_dev != NULL))
2389 read_unlock(&state->in_dev->mc_list_lock);
2390 2385
2386 im = rcu_dereference(im->next_rcu);
2387 while (!im) {
2391 state->dev = next_net_device_rcu(state->dev); 2388 state->dev = next_net_device_rcu(state->dev);
2392 if (!state->dev) { 2389 if (!state->dev) {
2393 state->in_dev = NULL; 2390 state->in_dev = NULL;
@@ -2396,8 +2393,7 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li
2396 state->in_dev = __in_dev_get_rcu(state->dev); 2393 state->in_dev = __in_dev_get_rcu(state->dev);
2397 if (!state->in_dev) 2394 if (!state->in_dev)
2398 continue; 2395 continue;
2399 read_lock(&state->in_dev->mc_list_lock); 2396 im = rcu_dereference(state->in_dev->mc_list);
2400 im = state->in_dev->mc_list;
2401 } 2397 }
2402 return im; 2398 return im;
2403} 2399}
@@ -2433,10 +2429,8 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
2433 __releases(rcu) 2429 __releases(rcu)
2434{ 2430{
2435 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); 2431 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2436 if (likely(state->in_dev != NULL)) { 2432
2437 read_unlock(&state->in_dev->mc_list_lock); 2433 state->in_dev = NULL;
2438 state->in_dev = NULL;
2439 }
2440 state->dev = NULL; 2434 state->dev = NULL;
2441 rcu_read_unlock(); 2435 rcu_read_unlock();
2442} 2436}
@@ -2458,7 +2452,7 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
2458 querier = "NONE"; 2452 querier = "NONE";
2459#endif 2453#endif
2460 2454
2461 if (state->in_dev->mc_list == im) { 2455 if (rcu_dereference(state->in_dev->mc_list) == im) {
2462 seq_printf(seq, "%d\t%-10s: %5d %7s\n", 2456 seq_printf(seq, "%d\t%-10s: %5d %7s\n",
2463 state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier); 2457 state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
2464 } 2458 }
@@ -2517,8 +2511,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2517 idev = __in_dev_get_rcu(state->dev); 2511 idev = __in_dev_get_rcu(state->dev);
2518 if (unlikely(idev == NULL)) 2512 if (unlikely(idev == NULL))
2519 continue; 2513 continue;
2520 read_lock(&idev->mc_list_lock); 2514 im = rcu_dereference(idev->mc_list);
2521 im = idev->mc_list;
2522 if (likely(im != NULL)) { 2515 if (likely(im != NULL)) {
2523 spin_lock_bh(&im->lock); 2516 spin_lock_bh(&im->lock);
2524 psf = im->sources; 2517 psf = im->sources;
@@ -2529,7 +2522,6 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2529 } 2522 }
2530 spin_unlock_bh(&im->lock); 2523 spin_unlock_bh(&im->lock);
2531 } 2524 }
2532 read_unlock(&idev->mc_list_lock);
2533 } 2525 }
2534 return psf; 2526 return psf;
2535} 2527}
@@ -2543,9 +2535,6 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
2543 spin_unlock_bh(&state->im->lock); 2535 spin_unlock_bh(&state->im->lock);
2544 state->im = state->im->next; 2536 state->im = state->im->next;
2545 while (!state->im) { 2537 while (!state->im) {
2546 if (likely(state->idev != NULL))
2547 read_unlock(&state->idev->mc_list_lock);
2548
2549 state->dev = next_net_device_rcu(state->dev); 2538 state->dev = next_net_device_rcu(state->dev);
2550 if (!state->dev) { 2539 if (!state->dev) {
2551 state->idev = NULL; 2540 state->idev = NULL;
@@ -2554,8 +2543,7 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
2554 state->idev = __in_dev_get_rcu(state->dev); 2543 state->idev = __in_dev_get_rcu(state->dev);
2555 if (!state->idev) 2544 if (!state->idev)
2556 continue; 2545 continue;
2557 read_lock(&state->idev->mc_list_lock); 2546 state->im = rcu_dereference(state->idev->mc_list);
2558 state->im = state->idev->mc_list;
2559 } 2547 }
2560 if (!state->im) 2548 if (!state->im)
2561 break; 2549 break;
@@ -2601,10 +2589,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
2601 spin_unlock_bh(&state->im->lock); 2589 spin_unlock_bh(&state->im->lock);
2602 state->im = NULL; 2590 state->im = NULL;
2603 } 2591 }
2604 if (likely(state->idev != NULL)) { 2592 state->idev = NULL;
2605 read_unlock(&state->idev->mc_list_lock);
2606 state->idev = NULL;
2607 }
2608 state->dev = NULL; 2593 state->dev = NULL;
2609 rcu_read_unlock(); 2594 rcu_read_unlock();
2610} 2595}
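
One pattern in the ip_mc_del_src()/ip_mc_add_src() hunks above deserves a note: the RCU section covers only the lookup, and the per-entry spinlock is taken before rcu_read_unlock(), so protection is handed over without a window. Continued validity after the handoff relies on the callers' RTNL serialization against removal. A sketch with a hypothetical helper name:

static struct ip_mc_list *find_and_lock(struct in_device *in_dev,
					__be32 group)
{
	struct ip_mc_list *pmc;

	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc)
		if (pmc->multiaddr == group)
			break;
	if (pmc)
		spin_lock_bh(&pmc->lock);	/* returned locked */
	rcu_read_unlock();
	return pmc;				/* NULL if not found */
}
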
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 70ff77f02eee..cab2057d5430 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -634,7 +634,7 @@ static int ipgre_rcv(struct sk_buff *skb)
634#ifdef CONFIG_NET_IPGRE_BROADCAST 634#ifdef CONFIG_NET_IPGRE_BROADCAST
635 if (ipv4_is_multicast(iph->daddr)) { 635 if (ipv4_is_multicast(iph->daddr)) {
636 /* Looped back packet, drop it! */ 636 /* Looped back packet, drop it! */
637 if (skb_rtable(skb)->fl.iif == 0) 637 if (rt_is_output_route(skb_rtable(skb)))
638 goto drop; 638 goto drop;
639 tunnel->dev->stats.multicast++; 639 tunnel->dev->stats.multicast++;
640 skb->pkt_type = PACKET_BROADCAST; 640 skb->pkt_type = PACKET_BROADCAST;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 86dd5691af46..ef2b0089e0ea 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1654,7 +1654,7 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
1654 if (mrt->vif_table[vif].dev != skb->dev) { 1654 if (mrt->vif_table[vif].dev != skb->dev) {
1655 int true_vifi; 1655 int true_vifi;
1656 1656
1657 if (skb_rtable(skb)->fl.iif == 0) { 1657 if (rt_is_output_route(skb_rtable(skb))) {
1658 /* It is our own packet, looped back. 1658 /* It is our own packet, looped back.
1659 * Very complicated situation... 1659 * Very complicated situation...
1660 * 1660 *
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 987bf9adb318..66610ea3c87b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -140,13 +140,15 @@ static unsigned long expires_ljiffies;
140 140
141static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie); 141static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
142static void ipv4_dst_destroy(struct dst_entry *dst); 142static void ipv4_dst_destroy(struct dst_entry *dst);
143static void ipv4_dst_ifdown(struct dst_entry *dst,
144 struct net_device *dev, int how);
145static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); 143static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
146static void ipv4_link_failure(struct sk_buff *skb); 144static void ipv4_link_failure(struct sk_buff *skb);
147static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu); 145static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
148static int rt_garbage_collect(struct dst_ops *ops); 146static int rt_garbage_collect(struct dst_ops *ops);
149 147
148static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
149 int how)
150{
151}
150 152
151static struct dst_ops ipv4_dst_ops = { 153static struct dst_ops ipv4_dst_ops = {
152 .family = AF_INET, 154 .family = AF_INET,
@@ -621,7 +623,7 @@ static inline int rt_fast_clean(struct rtable *rth)
621 /* Kill broadcast/multicast entries very aggresively, if they 623 /* Kill broadcast/multicast entries very aggresively, if they
622 collide in hash table with more useful entries */ 624 collide in hash table with more useful entries */
623 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) && 625 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
624 rth->fl.iif && rth->dst.rt_next; 626 rt_is_input_route(rth) && rth->dst.rt_next;
625} 627}
626 628
627static inline int rt_valuable(struct rtable *rth) 629static inline int rt_valuable(struct rtable *rth)
@@ -666,7 +668,7 @@ static inline u32 rt_score(struct rtable *rt)
 	if (rt_valuable(rt))
 		score |= (1<<31);
 
-	if (!rt->fl.iif ||
+	if (rt_is_output_route(rt) ||
 	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
 		score |= (1<<30);
 
@@ -1124,7 +1126,7 @@ restart:
 		 */
 
 		rt->dst.flags |= DST_NOCACHE;
-		if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
 			int err = arp_bind_neighbour(&rt->dst);
 			if (err) {
 				if (net_ratelimit())
@@ -1222,7 +1224,7 @@ restart:
 		/* Try to bind route to arp only if it is output
 		   route or unicast forwarding path.
 		 */
-		if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
 			int err = arp_bind_neighbour(&rt->dst);
 			if (err) {
 				spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1404,7 +1406,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 				if (rth->fl.fl4_dst != daddr ||
 				    rth->fl.fl4_src != skeys[i] ||
 				    rth->fl.oif != ikeys[k] ||
-				    rth->fl.iif != 0 ||
+				    rt_is_input_route(rth) ||
 				    rt_is_expired(rth) ||
 				    !net_eq(dev_net(rth->dst.dev), net)) {
 					rthp = &rth->dst.rt_next;
@@ -1433,8 +1435,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 				rt->dst.child = NULL;
 				if (rt->dst.dev)
 					dev_hold(rt->dst.dev);
-				if (rt->idev)
-					in_dev_hold(rt->idev);
 				rt->dst.obsolete = -1;
 				rt->dst.lastuse = jiffies;
 				rt->dst.path = &rt->dst;
@@ -1666,7 +1666,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 			    rth->rt_dst != daddr ||
 			    rth->rt_src != iph->saddr ||
 			    rth->fl.oif != ikeys[k] ||
-			    rth->fl.iif != 0 ||
+			    rt_is_input_route(rth) ||
 			    dst_metric_locked(&rth->dst, RTAX_MTU) ||
 			    !net_eq(dev_net(rth->dst.dev), net) ||
 			    rt_is_expired(rth))
@@ -1728,33 +1728,13 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 {
 	struct rtable *rt = (struct rtable *) dst;
 	struct inet_peer *peer = rt->peer;
-	struct in_device *idev = rt->idev;
 
 	if (peer) {
 		rt->peer = NULL;
 		inet_putpeer(peer);
 	}
-
-	if (idev) {
-		rt->idev = NULL;
-		in_dev_put(idev);
-	}
 }
 
-static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
-			    int how)
-{
-	struct rtable *rt = (struct rtable *) dst;
-	struct in_device *idev = rt->idev;
-	if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
-		struct in_device *loopback_idev =
-			in_dev_get(dev_net(dev)->loopback_dev);
-		if (loopback_idev) {
-			rt->idev = loopback_idev;
-			in_dev_put(idev);
-		}
-	}
-}
 
 static void ipv4_link_failure(struct sk_buff *skb)
 {
@@ -1790,7 +1770,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
 	__be32 src;
 	struct fib_result res;
 
-	if (rt->fl.iif == 0)
+	if (rt_is_output_route(rt))
 		src = rt->rt_src;
 	else {
 		rcu_read_lock();
@@ -1910,7 +1890,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	rth->fl.iif = dev->ifindex;
 	rth->dst.dev = init_net.loopback_dev;
 	dev_hold(rth->dst.dev);
-	rth->idev = in_dev_get(rth->dst.dev);
 	rth->fl.oif = 0;
 	rth->rt_gateway = daddr;
 	rth->rt_spec_dst= spec_dst;
@@ -2050,7 +2029,6 @@ static int __mkroute_input(struct sk_buff *skb,
 	rth->fl.iif = in_dev->dev->ifindex;
 	rth->dst.dev = (out_dev)->dev;
 	dev_hold(rth->dst.dev);
-	rth->idev = in_dev_get(rth->dst.dev);
 	rth->fl.oif = 0;
 	rth->rt_spec_dst= spec_dst;
 
@@ -2231,7 +2209,6 @@ local_input:
 	rth->fl.iif = dev->ifindex;
 	rth->dst.dev = net->loopback_dev;
 	dev_hold(rth->dst.dev);
-	rth->idev = in_dev_get(rth->dst.dev);
 	rth->rt_gateway = daddr;
 	rth->rt_spec_dst= spec_dst;
 	rth->dst.input= ip_local_deliver;
@@ -2417,9 +2394,6 @@ static int __mkroute_output(struct rtable **result,
 	if (!rth)
 		return -ENOBUFS;
 
-	in_dev_hold(in_dev);
-	rth->idev = in_dev;
-
 	atomic_set(&rth->dst.__refcnt, 1);
 	rth->dst.flags= DST_HOST;
 	if (IN_DEV_CONF_GET(in_dev, NOXFRM))
@@ -2695,7 +2669,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
 	     rth = rcu_dereference_bh(rth->dst.rt_next)) {
 		if (rth->fl.fl4_dst == flp->fl4_dst &&
 		    rth->fl.fl4_src == flp->fl4_src &&
-		    rth->fl.iif == 0 &&
+		    rt_is_output_route(rth) &&
 		    rth->fl.oif == flp->oif &&
 		    rth->fl.mark == flp->mark &&
 		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
@@ -2759,9 +2733,6 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
 
 		rt->fl = ort->fl;
 
-		rt->idev = ort->idev;
-		if (rt->idev)
-			in_dev_hold(rt->idev);
 		rt->rt_genid = rt_genid(net);
 		rt->rt_flags = ort->rt_flags;
 		rt->rt_type = ort->rt_type;
@@ -2853,7 +2824,7 @@ static int rt_fill_info(struct net *net,
 	if (rt->dst.tclassid)
 		NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
 #endif
-	if (rt->fl.iif)
+	if (rt_is_input_route(rt))
 		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
 	else if (rt->rt_src != rt->fl.fl4_src)
 		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
@@ -2878,7 +2849,7 @@ static int rt_fill_info(struct net *net,
 		}
 	}
 
-	if (rt->fl.iif) {
+	if (rt_is_input_route(rt)) {
 #ifdef CONFIG_IP_MROUTE
 		__be32 dst = rt->rt_dst;
 
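
Note: the route.c, ip_gre.c, ipmr.c and ip_vs_xmit.c hunks in this diff replace open-coded tests of rt->fl.iif with rt_is_input_route()/rt_is_output_route(); the diffstat shows include/net/route.h growing by 12 lines for these helpers. A minimal sketch of what they would look like, assuming the existing convention that iif == 0 marks a locally generated (output) route:

    /* Sketch only: inline helpers naming the old iif convention. */
    static inline bool rt_is_input_route(struct rtable *rt)
    {
            return rt->fl.iif != 0;  /* arrived via a device */
    }

    static inline bool rt_is_output_route(struct rtable *rt)
    {
            return rt->fl.iif == 0;  /* locally generated */
    }
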
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 081419969485..2bb46d55f40c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1193,7 +1193,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
 	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
-	     KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
 	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
 #endif
 
@@ -1477,10 +1477,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		 * shouldn't happen.
 		 */
 		if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
-			 KERN_INFO "recvmsg bug: copied %X "
-			 "seq %X rcvnxt %X fl %X\n", *seq,
-			 TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
-			 flags))
+			 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
+			 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+			 flags))
 			break;
 
 		offset = *seq - TCP_SKB_CB(skb)->seq;
@@ -1490,10 +1489,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			goto found_ok_skb;
 		if (tcp_hdr(skb)->fin)
 			goto found_fin_ok;
-		WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
-		     "copied %X seq %X rcvnxt %X fl %X\n",
-		     *seq, TCP_SKB_CB(skb)->seq,
-		     tp->rcv_nxt, flags);
+		WARN(!(flags & MSG_PEEK),
+		     "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
+		     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
 	}
 
 	/* Well, if we have backlog, try to process it now yet. */
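
Note: the tcp.c hunks drop the KERN_INFO prefix from WARN() format strings. WARN() emits its message through the generic warning slowpath, which applies its own log formatting, so a KERN_* marker embedded in the format would be printed literally inside the message. Illustrative usage after the change (variable names are placeholders):

    /* No printk level belongs in a WARN() format string. */
    WARN(copied < 0, "recvmsg bug: copied %X seq %X rcvnxt %X\n",
         copied, seq, rcv_nxt);
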
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 4464f3bff6a7..dd1fd8c473fc 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -80,10 +80,6 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 	xdst->u.dst.dev = dev;
 	dev_hold(dev);
 
-	xdst->u.rt.idev = in_dev_get(dev);
-	if (!xdst->u.rt.idev)
-		return -ENODEV;
-
 	xdst->u.rt.peer = rt->peer;
 	if (rt->peer)
 		atomic_inc(&rt->peer->refcnt);
@@ -189,8 +185,6 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
 {
 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
-	if (likely(xdst->u.rt.idev))
-		in_dev_put(xdst->u.rt.idev);
 	if (likely(xdst->u.rt.peer))
 		inet_putpeer(xdst->u.rt.peer);
 	xfrm_dst_destroy(xdst);
@@ -199,27 +193,9 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
 static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 			     int unregister)
 {
-	struct xfrm_dst *xdst;
-
 	if (!unregister)
 		return;
 
-	xdst = (struct xfrm_dst *)dst;
-	if (xdst->u.rt.idev->dev == dev) {
-		struct in_device *loopback_idev =
-			in_dev_get(dev_net(dev)->loopback_dev);
-		BUG_ON(!loopback_idev);
-
-		do {
-			in_dev_put(xdst->u.rt.idev);
-			xdst->u.rt.idev = loopback_idev;
-			in_dev_hold(loopback_idev);
-			xdst = (struct xfrm_dst *)xdst->u.dst.child;
-		} while (xdst->u.dst.xfrm);
-
-		__in_dev_put(loopback_idev);
-	}
-
 	xfrm_dst_ifdown(dst, dev);
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index de04ea39cde8..10bd39c0ae2d 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -169,7 +169,7 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
 	struct net *net = dev_net(dev);
 	struct iphdr *iph = ip_hdr(skb);
 
-	if (rt->fl.iif) {
+	if (rt_is_input_route(rt)) {
 		unsigned long orefdst = skb->_skb_refdst;
 
 		if (ip_route_input(skb, iph->daddr, iph->saddr,
@@ -552,7 +552,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 #endif
 
 	/* From world but DNAT to loopback address? */
-	if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) {
+	if (local && ipv4_is_loopback(rt->rt_dst) &&
+	    rt_is_input_route(skb_rtable(skb))) {
 		IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
 				 "stopping DNAT to loopback address");
 		goto tx_error_put;
@@ -1165,7 +1166,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 #endif
 
 	/* From world but DNAT to loopback address? */
-	if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) {
+	if (local && ipv4_is_loopback(rt->rt_dst) &&
+	    rt_is_input_route(skb_rtable(skb))) {
 		IP_VS_DBG(1, "%s(): "
 			  "stopping DNAT to loopback %pI4\n",
 			  __func__, &cp->daddr.ip);
diff --git a/net/socket.c b/net/socket.c
index 3ca2fd9e3720..c898df76e924 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -156,7 +156,7 @@ static const struct file_operations socket_file_ops = {
  */
 
 static DEFINE_SPINLOCK(net_family_lock);
-static const struct net_proto_family *net_families[NPROTO] __read_mostly;
+static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
 
 /*
  *	Statistics counters of the socket lists
@@ -1200,7 +1200,7 @@ int __sock_create(struct net *net, int family, int type, int protocol,
 	 * requested real, full-featured networking support upon configuration.
 	 * Otherwise module support will break!
 	 */
-	if (net_families[family] == NULL)
+	if (rcu_access_pointer(net_families[family]) == NULL)
 		request_module("net-pf-%d", family);
 #endif
 
@@ -2332,10 +2332,11 @@ int sock_register(const struct net_proto_family *ops)
 	}
 
 	spin_lock(&net_family_lock);
-	if (net_families[ops->family])
+	if (rcu_dereference_protected(net_families[ops->family],
+				      lockdep_is_held(&net_family_lock)))
 		err = -EEXIST;
 	else {
-		net_families[ops->family] = ops;
+		rcu_assign_pointer(net_families[ops->family], ops);
 		err = 0;
 	}
 	spin_unlock(&net_family_lock);
@@ -2363,7 +2364,7 @@ void sock_unregister(int family)
 	BUG_ON(family < 0 || family >= NPROTO);
 
 	spin_lock(&net_family_lock);
-	net_families[family] = NULL;
+	rcu_assign_pointer(net_families[family], NULL);
 	spin_unlock(&net_family_lock);
 
 	synchronize_rcu();
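
Note: the socket.c hunks convert net_families[] into a properly annotated RCU array: the __rcu marker lets sparse check the accesses, a lockless existence test goes through rcu_access_pointer() (no rcu_read_lock() needed since the value is never dereferenced), and writers publish under net_family_lock via rcu_assign_pointer(). The same pattern in miniature, as a sketch with hypothetical my_ops/TBL_MAX names:

    static DEFINE_SPINLOCK(tbl_lock);
    static const struct my_ops __rcu *tbl[TBL_MAX];

    static int tbl_register(int i, const struct my_ops *ops)
    {
            int err = 0;

            spin_lock(&tbl_lock);
            /* Direct read is safe here: we hold the writer lock. */
            if (rcu_dereference_protected(tbl[i],
                                          lockdep_is_held(&tbl_lock)))
                    err = -EEXIST;
            else
                    rcu_assign_pointer(tbl[i], ops);
            spin_unlock(&tbl_lock);
            return err;
    }
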
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3c95304a0817..7ff31c60186a 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -316,7 +316,8 @@ static void unix_write_space(struct sock *sk)
 	if (unix_writable(sk)) {
 		wq = rcu_dereference(sk->sk_wq);
 		if (wq_has_sleeper(wq))
-			wake_up_interruptible_sync(&wq->wait);
+			wake_up_interruptible_sync_poll(&wq->wait,
+				POLLOUT | POLLWRNORM | POLLWRBAND);
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
 	rcu_read_unlock();
@@ -1710,7 +1711,8 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
 		goto out_unlock;
 	}
 
-	wake_up_interruptible_sync(&u->peer_wait);
+	wake_up_interruptible_sync_poll(&u->peer_wait,
+					POLLOUT | POLLWRNORM | POLLWRBAND);
 
 	if (msg->msg_name)
 		unix_copy_addr(msg, skb->sk);
@@ -2072,13 +2074,12 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 		mask |= POLLERR;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP;
+		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue) ||
-	    (sk->sk_shutdown & RCV_SHUTDOWN))
+	if (!skb_queue_empty(&sk->sk_receive_queue))
 		mask |= POLLIN | POLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
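
Note: the wake_up_interruptible_sync_poll() conversions above feed into the unix_dgram_poll() hunk below. The waker now announces which poll events the wakeup carries, so a poller whose interest set (wait->key) contains no write bits can return before the comparatively expensive peer-queue checks. The two sides, condensed from the hunks:

    /* Waker: tag the wakeup with the events it signals. */
    wake_up_interruptible_sync_poll(&u->peer_wait,
                                    POLLOUT | POLLWRNORM | POLLWRBAND);

    /* Poller: skip writability work if nobody asked for it. */
    if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
            return mask;
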
@@ -2090,20 +2091,19 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
 		return mask;
 	}
 
-	/* writable? */
-	writable = unix_writable(sk);
-	if (writable) {
-		other = unix_peer_get(sk);
-		if (other) {
-			if (unix_peer(other) != sk) {
-				sock_poll_wait(file, &unix_sk(other)->peer_wait,
-					       wait);
-				if (unix_recvq_full(other))
-					writable = 0;
-			}
+	/* No write status requested, avoid expensive OUT tests. */
+	if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
+		return mask;
 
-			sock_put(other);
+	writable = unix_writable(sk);
+	other = unix_peer_get(sk);
+	if (other) {
+		if (unix_peer(other) != sk) {
+			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
+			if (unix_recvq_full(other))
+				writable = 0;
 		}
+		sock_put(other);
 	}
 
 	if (writable)