author    Linus Torvalds <torvalds@linux-foundation.org>  2011-08-12 09:43:53 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-08-12 09:43:53 -0400
commit    ce8a84ef1e4b30bcee78aa99bc1032db90a6c1c4 (patch)
tree      3faf99c6fbd99eedce3ad2193ce779c25bfc8064
parent    eeca7360f756f7e36e846f35018df20808c7ef63 (diff)
parent    d80bcf46f1dae47805260dc60fb900cc4dabe35e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
  e1000e: increase driver version number
  e1000e: alternate MAC address update
  e1000e: do not disable receiver on 82574/82583
  e1000e: alternate MAC address does not work on device id 0x1060
  PCnet: Fix section mismatch
  bnx2x: disable dcb on 578xx since not supported yet
  bnx2x: properly clean indirect addresses
  bnx2x: prevent race between undi_unload and load flows
  bnx2x: fix select_queue when FCoE is disabled
  bnx2x: init FCOE FP only once
  ipv4: some rt_iif -> rt_route_iif conversions
  net/bridge/netfilter/ebtables.c: use available error handling code
  net/netlabel/netlabel_kapi.c: add missing cleanup code
  net/irda: sh_sir: tidyup compile warning
  net/irda: sh_sir: add missing header
  net/irda: sh_irda: add missing header
  slcan: ldisc generated skbs are received in softirq context
  scm: Capture the full credentials of the scm sender
  tcp: initialize variable ecn_ok in syncookies path
  drivers/net/wireless/wl1251: add missing kfree
  ...
-rw-r--r--  Documentation/networking/bonding.txt | 29
-rw-r--r--  Documentation/networking/scaling.txt | 371
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 35
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.c | 2
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 23
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h | 26
-rw-r--r--  drivers/net/can/slcan.c | 2
-rw-r--r--  drivers/net/e1000e/82571.c | 6
-rw-r--r--  drivers/net/e1000e/e1000.h | 1
-rw-r--r--  drivers/net/e1000e/ethtool.c | 3
-rw-r--r--  drivers/net/e1000e/lib.c | 7
-rw-r--r--  drivers/net/e1000e/netdev.c | 9
-rw-r--r--  drivers/net/gianfar_ptp.c | 9
-rw-r--r--  drivers/net/irda/sh_irda.c | 2
-rw-r--r--  drivers/net/irda/sh_sir.c | 4
-rw-r--r--  drivers/net/pcnet32.c | 2
-rw-r--r--  drivers/net/phy/dp83640.c | 5
-rw-r--r--  drivers/net/slip.c | 2
-rw-r--r--  drivers/net/usb/rtl8150.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 23
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.h | 2
-rw-r--r--  drivers/net/wireless/b43/dma.c | 20
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 11
-rw-r--r--  drivers/net/wireless/wl1251/acx.c | 6
-rw-r--r--  drivers/net/wireless/wl1251/cmd.c | 2
-rw-r--r--  fs/compat_ioctl.c | 1
-rw-r--r--  include/linux/netlink.h | 2
-rw-r--r--  include/linux/socket.h | 6
-rw-r--r--  include/net/inet_sock.h | 2
-rw-r--r--  net/bridge/br_if.c | 6
-rw-r--r--  net/bridge/br_notify.c | 7
-rw-r--r--  net/bridge/netfilter/ebtables.c | 3
-rw-r--r--  net/core/scm.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 1
-rw-r--r--  net/ipv4/ip_sockglue.c | 9
-rw-r--r--  net/ipv4/netfilter.c | 18
-rw-r--r--  net/ipv4/raw.c | 3
-rw-r--r--  net/ipv4/route.c | 9
-rw-r--r--  net/ipv4/syncookies.c | 2
-rw-r--r--  net/ipv6/syncookies.c | 2
-rw-r--r--  net/netfilter/nf_queue.c | 1
-rw-r--r--  net/netlabel/netlabel_kapi.c | 20
-rw-r--r--  net/sched/sch_prio.c | 2
46 files changed, 604 insertions, 106 deletions
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 5dd960d75174..91df678fb7f8 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -238,6 +238,18 @@ ad_select

        This option was added in bonding version 3.4.0.

all_slaves_active

        Specifies that duplicate frames (received on inactive ports) should be
        dropped (0) or delivered (1).

        Normally, bonding will drop duplicate frames (received on inactive
        ports), which is desirable for most users. But there are some times
        it is nice to allow duplicate frames to be delivered.

        The default value is 0 (drop duplicate frames received on inactive
        ports).

arp_interval

        Specifies the ARP link monitoring frequency in milliseconds.
@@ -433,6 +445,23 @@ miimon
        determined. See the High Availability section for additional
        information. The default value is 0.

min_links

        Specifies the minimum number of links that must be active before
        asserting carrier. It is similar to the Cisco EtherChannel min-links
        feature. This allows setting the minimum number of member ports that
        must be up (link-up state) before marking the bond device as up
        (carrier on). This is useful for situations where higher level services
        such as clustering want to ensure a minimum number of low bandwidth
        links are active before switchover. This option only affects 802.3ad
        mode.

        The default value is 0. This will cause carrier to be asserted (for
        802.3ad mode) whenever there is an active aggregator, regardless of the
        number of available links in that aggregator. Note that, because an
        aggregator cannot be active without at least one available link,
        setting this option to 0 or to 1 has the exact same effect.

mode

        Specifies one of the bonding policies. The default is
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
new file mode 100644
index 000000000000..7254b4b5910e
--- /dev/null
+++ b/Documentation/networking/scaling.txt
@@ -0,0 +1,371 @@
Scaling in the Linux Networking Stack


Introduction
============

This document describes a set of complementary techniques in the Linux
networking stack to increase parallelism and improve performance for
multi-processor systems.

The following technologies are described:

  RSS: Receive Side Scaling
  RPS: Receive Packet Steering
  RFS: Receive Flow Steering
  Accelerated Receive Flow Steering
  XPS: Transmit Packet Steering


RSS: Receive Side Scaling
=========================

Contemporary NICs support multiple receive and transmit descriptor queues
(multi-queue). On reception, a NIC can send different packets to different
queues to distribute processing among CPUs. The NIC distributes packets by
applying a filter to each packet that assigns it to one of a small number
of logical flows. Packets for each flow are steered to a separate receive
queue, which in turn can be processed by separate CPUs. This mechanism is
generally known as “Receive-side Scaling” (RSS). The goal of RSS and
the other scaling techniques is to increase performance uniformly.
Multi-queue distribution can also be used for traffic prioritization, but
that is not the focus of these techniques.

The filter used in RSS is typically a hash function over the network
and/or transport layer headers; for example, a 4-tuple hash over
IP addresses and TCP ports of a packet. The most common hardware
implementation of RSS uses a 128-entry indirection table where each entry
stores a queue number. The receive queue for a packet is determined
by masking out the low order seven bits of the computed hash for the
packet (usually a Toeplitz hash), taking this number as a key into the
indirection table and reading the corresponding value.

Some advanced NICs allow steering packets to queues based on
programmable filters. For example, webserver-bound TCP port 80 packets
can be directed to their own receive queue. Such “n-tuple” filters can
be configured from ethtool (--config-ntuple).
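
For instance, on hardware and drivers that support n-tuple filtering, a
rule steering TCP port 80 traffic to receive queue 2 might look like the
following (the device name and queue number are only illustrative):

    ethtool --config-ntuple eth0 flow-type tcp4 dst-port 80 action 2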

==== RSS Configuration

The driver for a multi-queue capable NIC typically provides a kernel
module parameter for specifying the number of hardware queues to
configure. In the bnx2x driver, for instance, this parameter is called
num_queues. A typical RSS configuration would be to have one receive queue
for each CPU if the device supports enough queues, or otherwise at least
one for each cache domain at a particular cache level (L1, L2, etc.).

The indirection table of an RSS device, which resolves a queue by masked
hash, is usually programmed by the driver at initialization. The
default mapping is to distribute the queues evenly in the table, but the
indirection table can be retrieved and modified at runtime using ethtool
commands (--show-rxfh-indir and --set-rxfh-indir). Modifying the
indirection table could be done to give different queues different
relative weights.
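
For example, the table can be inspected and reweighted roughly as follows
(eth0 and the weights are illustrative; the weight syntax requires a
sufficiently recent ethtool):

    ethtool --show-rxfh-indir eth0
    ethtool --set-rxfh-indir eth0 weight 2 2 1 1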

== RSS IRQ Configuration

Each receive queue has a separate IRQ associated with it. The NIC triggers
this to notify a CPU when new packets arrive on the given queue. The
signaling path for PCIe devices uses message signaled interrupts (MSI-X),
which can route each interrupt to a particular CPU. The active mapping
of queues to IRQs can be determined from /proc/interrupts. By default,
an IRQ may be handled on any CPU. Because a non-negligible part of packet
processing takes place in receive interrupt handling, it is advantageous
to spread receive interrupts between CPUs. To manually adjust the IRQ
affinity of each interrupt see Documentation/IRQ-affinity.txt. Some systems
will be running irqbalance, a daemon that dynamically optimizes IRQ
assignments and as a result may override any manual settings.
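
For example, the queue-to-IRQ mapping can be read from /proc/interrupts
and an interrupt pinned to a CPU by writing a mask to its smp_affinity
file (the device name and IRQ number below are only illustrative):

    grep eth0 /proc/interrupts
    echo 4 > /proc/irq/30/smp_affinity    # mask 0x4: handle IRQ 30 on CPU 2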

== Suggested Configuration

RSS should be enabled when latency is a concern or whenever receive
interrupt processing forms a bottleneck. Spreading load between CPUs
decreases queue length. For low latency networking, the optimal setting
is to allocate as many queues as there are CPUs in the system (or the
NIC maximum, if lower). Because the aggregate number of interrupts grows
with each additional queue, the most efficient high-rate configuration
is likely the one with the smallest number of receive queues where no
CPU that processes receive interrupts reaches 100% utilization. Per-cpu
load can be observed using the mpstat utility.
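
For example, per-CPU utilization can be watched with:

    mpstat -P ALL 1    # per-CPU statistics, refreshed every second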

RPS: Receive Packet Steering
============================

Receive Packet Steering (RPS) is logically a software implementation of
RSS. Being in software, it is necessarily called later in the datapath.
Whereas RSS selects the queue and hence CPU that will run the hardware
interrupt handler, RPS selects the CPU to perform protocol processing
above the interrupt handler. This is accomplished by placing the packet
on the desired CPU’s backlog queue and waking up the CPU for processing.
RPS has some advantages over RSS: 1) it can be used with any NIC,
2) software filters can easily be added to hash over new protocols,
3) it does not increase hardware device interrupt rate (although it does
introduce inter-processor interrupts (IPIs)).

RPS is called during the bottom half of the receive interrupt handler, when
a driver sends a packet up the network stack with netif_rx() or
netif_receive_skb(). These call the get_rps_cpu() function, which
selects the queue that should process a packet.

The first step in determining the target CPU for RPS is to calculate a
flow hash over the packet’s addresses or ports (2-tuple or 4-tuple hash
depending on the protocol). This serves as a consistent hash of the
associated flow of the packet. The hash is either provided by hardware
or will be computed in the stack. Capable hardware can pass the hash in
the receive descriptor for the packet; this would usually be the same
hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in
skb->rx_hash and can be used elsewhere in the stack as a hash of the
packet’s flow.

Each receive hardware queue has an associated list of CPUs to which
RPS may enqueue packets for processing. For each received packet,
an index into the list is computed from the flow hash modulo the size
of the list. The indexed CPU is the target for processing the packet,
and the packet is queued to the tail of that CPU’s backlog queue. At
the end of the bottom half routine, IPIs are sent to any CPUs for which
packets have been queued to their backlog queue. The IPI wakes backlog
processing on the remote CPU, and any queued packets are then processed
up the networking stack.

==== RPS Configuration

RPS requires a kernel compiled with the CONFIG_RPS kconfig symbol (on
by default for SMP). Even when compiled in, RPS remains disabled until
explicitly configured. The list of CPUs to which RPS may forward traffic
can be configured for each receive queue using a sysfs file entry:

 /sys/class/net/<dev>/queues/rx-<n>/rps_cpus

This file implements a bitmap of CPUs. RPS is disabled when it is zero
(the default), in which case packets are processed on the interrupting
CPU. Documentation/IRQ-affinity.txt explains how CPUs are assigned to
the bitmap.
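
For example, to let RPS steer packets received on queue 0 of eth0 to
CPUs 0-3 (bitmap 0xf; the device name is only illustrative):

    echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus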

== Suggested Configuration

For a single queue device, a typical RPS configuration would be to set
the rps_cpus to the CPUs in the same cache domain of the interrupting
CPU. If NUMA locality is not an issue, this could also be all CPUs in
the system. At high interrupt rate, it might be wise to exclude the
interrupting CPU from the map since that already performs much work.

For a multi-queue system, if RSS is configured so that a hardware
receive queue is mapped to each CPU, then RPS is probably redundant
and unnecessary. If there are fewer hardware queues than CPUs, then
RPS might be beneficial if the rps_cpus for each queue are the ones that
share the same cache domain as the interrupting CPU for that queue.


RFS: Receive Flow Steering
==========================

While RPS steers packets solely based on hash, and thus generally
provides good load distribution, it does not take into account
application locality. This is accomplished by Receive Flow Steering
(RFS). The goal of RFS is to increase datacache hitrate by steering
kernel processing of packets to the CPU where the application thread
consuming the packet is running. RFS relies on the same RPS mechanisms
to enqueue packets onto the backlog of another CPU and to wake up that
CPU.

In RFS, packets are not forwarded directly by the value of their hash,
but the hash is used as an index into a flow lookup table. This table maps
flows to the CPUs where those flows are being processed. The flow hash
(see RPS section above) is used to calculate the index into this table.
The CPU recorded in each entry is the one which last processed the flow.
If an entry does not hold a valid CPU, then packets mapped to that entry
are steered using plain RPS. Multiple table entries may point to the
same CPU. Indeed, with many flows and few CPUs, it is very likely that
a single application thread handles flows with many different flow hashes.

rps_sock_flow_table is a global flow table that contains the *desired* CPU
for flows: the CPU that is currently processing the flow in userspace. Each
table value is a CPU index that is updated during calls to recvmsg and
sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
and tcp_splice_read()).

When the scheduler moves a thread to a new CPU while it has outstanding
receive packets on the old CPU, packets may arrive out of order. To
avoid this, RFS uses a second flow table to track outstanding packets
for each flow: rps_dev_flow_table is a table specific to each hardware
receive queue of each device. Each table value stores a CPU index and a
counter. The CPU index represents the *current* CPU onto which packets
for this flow are enqueued for further kernel processing. Ideally, kernel
and userspace processing occur on the same CPU, and hence the CPU index
in both tables is identical. This is likely false if the scheduler has
recently migrated a userspace thread while the kernel still has packets
enqueued for kernel processing on the old CPU.

The counter in rps_dev_flow_table values records the length of the current
CPU's backlog when a packet in this flow was last enqueued. Each backlog
queue has a head counter that is incremented on dequeue. A tail counter
is computed as head counter + queue length. In other words, the counter
in rps_dev_flow_table[i] records the last element in flow i that has
been enqueued onto the currently designated CPU for flow i (of course,
entry i is actually selected by hash and multiple flows may hash to the
same entry i).

And now the trick for avoiding out of order packets: when selecting the
CPU for packet processing (from get_rps_cpu()) the rps_sock_flow table
and the rps_dev_flow table of the queue that the packet was received on
are compared. If the desired CPU for the flow (found in the
rps_sock_flow table) matches the current CPU (found in the rps_dev_flow
table), the packet is enqueued onto that CPU’s backlog. If they differ,
the current CPU is updated to match the desired CPU if one of the
following is true:

- The current CPU's queue head counter >= the recorded tail counter
  value in rps_dev_flow[i]
- The current CPU is unset (equal to NR_CPUS)
- The current CPU is offline

After this check, the packet is sent to the (possibly updated) current
CPU. These rules aim to ensure that a flow only moves to a new CPU when
there are no packets outstanding on the old CPU, as the outstanding
packets could arrive later than those about to be processed on the new
CPU.

==== RFS Configuration

RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on
by default for SMP). The functionality remains disabled until explicitly
configured. The number of entries in the global flow table is set through:

 /proc/sys/net/core/rps_sock_flow_entries

The number of entries in the per-queue flow table is set through:

 /sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt
241== Suggested Configuration
242
243Both of these need to be set before RFS is enabled for a receive queue.
244Values for both are rounded up to the nearest power of two. The
245suggested flow count depends on the expected number of active connections
246at any given time, which may be significantly less than the number of open
247connections. We have found that a value of 32768 for rps_sock_flow_entries
248works fairly well on a moderately loaded server.
249
250For a single queue device, the rps_flow_cnt value for the single queue
251would normally be configured to the same value as rps_sock_flow_entries.
252For a multi-queue device, the rps_flow_cnt for each queue might be
253configured as rps_sock_flow_entries / N, where N is the number of
254queues. So for instance, if rps_flow_entries is set to 32768 and there
255are 16 configured receive queues, rps_flow_cnt for each queue might be
256configured as 2048.
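
For a hypothetical eth0 with 16 receive queues, matching the numbers above:

    echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
    echo 2048 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt    # repeat for rx-1 .. rx-15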


Accelerated RFS
===============

Accelerated RFS is to RFS what RSS is to RPS: a hardware-accelerated load
balancing mechanism that uses soft state to steer flows based on where
the application thread consuming the packets of each flow is running.
Accelerated RFS should perform better than RFS since packets are sent
directly to a CPU local to the thread consuming the data. The target CPU
will either be the same CPU where the application runs, or at least a CPU
which is local to the application thread’s CPU in the cache hierarchy.

To enable accelerated RFS, the networking stack calls the
ndo_rx_flow_steer driver function to communicate the desired hardware
queue for packets matching a particular flow. The network stack
automatically calls this function every time a flow entry in
rps_dev_flow_table is updated. The driver in turn uses a device specific
method to program the NIC to steer the packets.

The hardware queue for a flow is derived from the CPU recorded in
rps_dev_flow_table. The stack consults a CPU to hardware queue map which
is maintained by the NIC driver. This is an auto-generated reverse map of
the IRQ affinity table shown by /proc/interrupts. Drivers can use
functions in the cpu_rmap (“CPU affinity reverse map”) kernel library
to populate the map. For each CPU, the corresponding queue in the map is
set to be one whose processing CPU is closest in cache locality.

==== Accelerated RFS Configuration

Accelerated RFS is only available if the kernel is compiled with
CONFIG_RFS_ACCEL and support is provided by the NIC device and driver.
It also requires that ntuple filtering is enabled via ethtool. The map
of CPU to queues is automatically deduced from the IRQ affinities
configured for each receive queue by the driver, so no additional
configuration should be necessary.
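
For example, ntuple filtering can be turned on with (device name
illustrative):

    ethtool -K eth0 ntuple on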

== Suggested Configuration

This technique should be enabled whenever one wants to use RFS and the
NIC supports hardware acceleration.

XPS: Transmit Packet Steering
=============================

Transmit Packet Steering is a mechanism for intelligently selecting
which transmit queue to use when transmitting a packet on a multi-queue
device. To accomplish this, a mapping from CPU to hardware queue(s) is
recorded. The goal of this mapping is usually to assign queues
exclusively to a subset of CPUs, where the transmit completions for
these queues are processed on a CPU within this set. This choice
provides two benefits. First, contention on the device queue lock is
significantly reduced since fewer CPUs contend for the same queue
(contention can be eliminated completely if each CPU has its own
transmit queue). Secondly, cache miss rate on transmit completion is
reduced, in particular for data cache lines that hold the sk_buff
structures.

XPS is configured per transmit queue by setting a bitmap of CPUs that
may use that queue to transmit. The reverse mapping, from CPUs to
transmit queues, is computed and maintained for each network device.
When transmitting the first packet in a flow, the function
get_xps_queue() is called to select a queue. This function uses the ID
of the running CPU as a key into the CPU-to-queue lookup table. If the
ID matches a single queue, that is used for transmission. If multiple
queues match, one is selected by using the flow hash to compute an index
into the set.

The queue chosen for transmitting a particular flow is saved in the
corresponding socket structure for the flow (e.g. a TCP connection).
This transmit queue is used for subsequent packets sent on the flow to
prevent out of order (ooo) packets. The choice also amortizes the cost
of calling get_xps_queue() over all packets in the connection. To avoid
ooo packets, the queue for a flow can subsequently only be changed if
skb->ooo_okay is set for a packet in the flow. This flag indicates that
there are no outstanding packets in the flow, so the transmit queue can
change without the risk of generating out of order packets. The
transport layer is responsible for setting ooo_okay appropriately. TCP,
for instance, sets the flag when all data for a connection has been
acknowledged.


==== XPS Configuration

XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by
default for SMP). The functionality remains disabled until explicitly
configured. To enable XPS, the bitmap of CPUs that may use a transmit
queue is configured using the sysfs file entry:

/sys/class/net/<dev>/queues/tx-<n>/xps_cpus
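
For example, to dedicate transmit queue 0 of eth0 to CPU 0 (bitmap 0x1,
a 1:1 CPU-to-queue pairing; the device name is only illustrative):

    echo 1 > /sys/class/net/eth0/queues/tx-0/xps_cpus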

== Suggested Configuration

For a network device with a single transmission queue, XPS configuration
has no effect, since there is no choice in this case. In a multi-queue
system, XPS is preferably configured so that each CPU maps onto one queue.
If there are as many queues as there are CPUs in the system, then each
queue can also map onto one CPU, resulting in exclusive pairings that
experience no contention. If there are fewer queues than CPUs, then the
best CPUs to share a given queue are probably those that share the cache
with the CPU that processes transmit completions for that queue
(transmit interrupts).


Further Information
===================
RPS and RFS were introduced in kernel 2.6.35. XPS was incorporated into
2.6.38. Original patches were submitted by Tom Herbert
(therbert@google.com).

Accelerated RFS was introduced in 2.6.35. Original patches were
submitted by Ben Hutchings (bhutchings@solarflare.com).

Authors:
Tom Herbert (therbert@google.com)
Willem de Bruijn (willemb@google.com)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index d724a18b5285..37e5790681ad 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -63,8 +63,9 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
63 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); 63 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
64 64
65#ifdef BCM_CNIC 65#ifdef BCM_CNIC
66 /* We don't want TPA on FCoE, FWD and OOO L2 rings */ 66 /* We don't want TPA on an FCoE L2 ring */
67 bnx2x_fcoe(bp, disable_tpa) = 1; 67 if (IS_FCOE_FP(fp))
68 fp->disable_tpa = 1;
68#endif 69#endif
69} 70}
70 71
@@ -1404,10 +1405,9 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1404u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) 1405u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1405{ 1406{
1406 struct bnx2x *bp = netdev_priv(dev); 1407 struct bnx2x *bp = netdev_priv(dev);
1408
1407#ifdef BCM_CNIC 1409#ifdef BCM_CNIC
1408 if (NO_FCOE(bp)) 1410 if (!NO_FCOE(bp)) {
1409 return skb_tx_hash(dev, skb);
1410 else {
1411 struct ethhdr *hdr = (struct ethhdr *)skb->data; 1411 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1412 u16 ether_type = ntohs(hdr->h_proto); 1412 u16 ether_type = ntohs(hdr->h_proto);
1413 1413
@@ -1424,8 +1424,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1424 return bnx2x_fcoe_tx(bp, txq_index); 1424 return bnx2x_fcoe_tx(bp, txq_index);
1425 } 1425 }
1426#endif 1426#endif
1427 /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring 1427 /* select a non-FCoE queue */
1428 */
1429 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); 1428 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1430} 1429}
1431 1430
@@ -1448,6 +1447,28 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1448 bp->num_queues += NON_ETH_CONTEXT_USE; 1447 bp->num_queues += NON_ETH_CONTEXT_USE;
1449} 1448}
1450 1449
1450/**
1451 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1452 *
1453 * @bp: Driver handle
1454 *
1455 * We currently support for at most 16 Tx queues for each CoS thus we will
1456 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1457 * bp->max_cos.
1458 *
1459 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1460 * index after all ETH L2 indices.
1461 *
1462 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1463 * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
1464 * 16..31,...) with indicies that are not coupled with any real Tx queue.
1465 *
1466 * The proper configuration of skb->queue_mapping is handled by
1467 * bnx2x_select_queue() and __skb_tx_hash().
1468 *
1469 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1470 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1471 */
1451static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) 1472static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1452{ 1473{
1453 int rc, tx, rx; 1474 int rc, tx, rx;
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index a4ea35f6a456..a1e004a82f7a 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -920,7 +920,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
920 920
921void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) 921void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
922{ 922{
923 if (!CHIP_IS_E1x(bp)) { 923 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
924 bp->dcb_state = dcb_on; 924 bp->dcb_state = dcb_on;
925 bp->dcbx_enabled = dcbx_enabled; 925 bp->dcbx_enabled = dcbx_enabled;
926 } else { 926 } else {
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 150709111548..f74582a22c68 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -5798,6 +5798,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
5798 5798
5799 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); 5799 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
5800 5800
5801 /*
5802 * take the UNDI lock to protect undi_unload flow from accessing
5803 * registers while we're resetting the chip
5804 */
5805 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5806
5801 bnx2x_reset_common(bp); 5807 bnx2x_reset_common(bp);
5802 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 5808 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5803 5809
@@ -5808,6 +5814,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
5808 } 5814 }
5809 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); 5815 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
5810 5816
5817 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5818
5811 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); 5819 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
5812 5820
5813 if (!CHIP_IS_E1x(bp)) { 5821 if (!CHIP_IS_E1x(bp)) {
@@ -10251,10 +10259,17 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10251 /* clean indirect addresses */ 10259 /* clean indirect addresses */
10252 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 10260 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10253 PCICFG_VENDOR_ID_OFFSET); 10261 PCICFG_VENDOR_ID_OFFSET);
10254 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0); 10262 /* Clean the following indirect addresses for all functions since it
10255 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0); 10263 * is not used by the driver.
10256 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); 10264 */
10257 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); 10265 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
10266 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
10267 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
10268 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
10269 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
10270 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
10271 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
10272 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
10258 10273
10259 /* 10274 /*
10260 * Enable internal target-read (in case we are probed after PF FLR). 10275 * Enable internal target-read (in case we are probed after PF FLR).
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 27b5ecb11830..40266c14e6dc 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -3007,11 +3007,27 @@
3007/* [R 6] Debug only: Number of used entries in the data FIFO */ 3007/* [R 6] Debug only: Number of used entries in the data FIFO */
3008#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c 3008#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
3009/* [R 7] Debug only: Number of used entries in the header FIFO */ 3009/* [R 7] Debug only: Number of used entries in the header FIFO */
3010#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 3010#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
3011#define PXP2_REG_PGL_ADDR_88_F0 0x120534 3011#define PXP2_REG_PGL_ADDR_88_F0 0x120534
3012#define PXP2_REG_PGL_ADDR_8C_F0 0x120538 3012/* [R 32] GRC address for configuration access to PCIE config address 0x88.
3013#define PXP2_REG_PGL_ADDR_90_F0 0x12053c 3013 * any write to this PCIE address will cause a GRC write access to the
3014#define PXP2_REG_PGL_ADDR_94_F0 0x120540 3014 * address that's in t this register */
3015#define PXP2_REG_PGL_ADDR_88_F1 0x120544
3016#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
3017/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
3018 * any write to this PCIE address will cause a GRC write access to the
3019 * address that's in t this register */
3020#define PXP2_REG_PGL_ADDR_8C_F1 0x120548
3021#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
3022/* [R 32] GRC address for configuration access to PCIE config address 0x90.
3023 * any write to this PCIE address will cause a GRC write access to the
3024 * address that's in t this register */
3025#define PXP2_REG_PGL_ADDR_90_F1 0x12054c
3026#define PXP2_REG_PGL_ADDR_94_F0 0x120540
3027/* [R 32] GRC address for configuration access to PCIE config address 0x94.
3028 * any write to this PCIE address will cause a GRC write access to the
3029 * address that's in t this register */
3030#define PXP2_REG_PGL_ADDR_94_F1 0x120550
3015#define PXP2_REG_PGL_CONTROL0 0x120490 3031#define PXP2_REG_PGL_CONTROL0 0x120490
3016#define PXP2_REG_PGL_CONTROL1 0x120514 3032#define PXP2_REG_PGL_CONTROL1 0x120514
3017#define PXP2_REG_PGL_DEBUG 0x120520 3033#define PXP2_REG_PGL_DEBUG 0x120520
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f523f1cc5142..4b70b7e8bdeb 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -197,7 +197,7 @@ static void slc_bump(struct slcan *sl)
197 skb->ip_summed = CHECKSUM_UNNECESSARY; 197 skb->ip_summed = CHECKSUM_UNNECESSARY;
198 memcpy(skb_put(skb, sizeof(struct can_frame)), 198 memcpy(skb_put(skb, sizeof(struct can_frame)),
199 &cf, sizeof(struct can_frame)); 199 &cf, sizeof(struct can_frame));
200 netif_rx(skb); 200 netif_rx_ni(skb);
201 201
202 sl->dev->stats.rx_packets++; 202 sl->dev->stats.rx_packets++;
203 sl->dev->stats.rx_bytes += cf.can_dlc; 203 sl->dev->stats.rx_bytes += cf.can_dlc;
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 480f2592f8a5..536b3a55c45f 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -2085,7 +2085,8 @@ struct e1000_info e1000_82574_info = {
2085 | FLAG_HAS_AMT 2085 | FLAG_HAS_AMT
2086 | FLAG_HAS_CTRLEXT_ON_LOAD, 2086 | FLAG_HAS_CTRLEXT_ON_LOAD,
2087 .flags2 = FLAG2_CHECK_PHY_HANG 2087 .flags2 = FLAG2_CHECK_PHY_HANG
2088 | FLAG2_DISABLE_ASPM_L0S, 2088 | FLAG2_DISABLE_ASPM_L0S
2089 | FLAG2_NO_DISABLE_RX,
2089 .pba = 32, 2090 .pba = 32,
2090 .max_hw_frame_size = DEFAULT_JUMBO, 2091 .max_hw_frame_size = DEFAULT_JUMBO,
2091 .get_variants = e1000_get_variants_82571, 2092 .get_variants = e1000_get_variants_82571,
@@ -2104,7 +2105,8 @@ struct e1000_info e1000_82583_info = {
2104 | FLAG_HAS_AMT 2105 | FLAG_HAS_AMT
2105 | FLAG_HAS_JUMBO_FRAMES 2106 | FLAG_HAS_JUMBO_FRAMES
2106 | FLAG_HAS_CTRLEXT_ON_LOAD, 2107 | FLAG_HAS_CTRLEXT_ON_LOAD,
2107 .flags2 = FLAG2_DISABLE_ASPM_L0S, 2108 .flags2 = FLAG2_DISABLE_ASPM_L0S
2109 | FLAG2_NO_DISABLE_RX,
2108 .pba = 32, 2110 .pba = 32,
2109 .max_hw_frame_size = DEFAULT_JUMBO, 2111 .max_hw_frame_size = DEFAULT_JUMBO,
2110 .get_variants = e1000_get_variants_82571, 2112 .get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 638d175792cf..35916f485028 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -453,6 +453,7 @@ struct e1000_info {
453#define FLAG2_DISABLE_ASPM_L0S (1 << 7) 453#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
454#define FLAG2_DISABLE_AIM (1 << 8) 454#define FLAG2_DISABLE_AIM (1 << 8)
455#define FLAG2_CHECK_PHY_HANG (1 << 9) 455#define FLAG2_CHECK_PHY_HANG (1 << 9)
456#define FLAG2_NO_DISABLE_RX (1 << 10)
456 457
457#define E1000_RX_DESC_PS(R, i) \ 458#define E1000_RX_DESC_PS(R, i) \
458 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 459 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 06d88f316dce..6a0526a59a8a 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1206,7 +1206,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1206 rx_ring->next_to_clean = 0; 1206 rx_ring->next_to_clean = 0;
1207 1207
1208 rctl = er32(RCTL); 1208 rctl = er32(RCTL);
1209 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1209 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
1210 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1210 ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); 1211 ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
1211 ew32(RDBAH, ((u64) rx_ring->dma >> 32)); 1212 ew32(RDBAH, ((u64) rx_ring->dma >> 32));
1212 ew32(RDLEN, rx_ring->size); 1213 ew32(RDLEN, rx_ring->size);
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 7898a67d6505..0893ab107adf 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -190,7 +190,8 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
190 /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */ 190 /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
191 if (!((nvm_data & NVM_COMPAT_LOM) || 191 if (!((nvm_data & NVM_COMPAT_LOM) ||
192 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || 192 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
193 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) 193 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
194 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
194 goto out; 195 goto out;
195 196
196 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, 197 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
@@ -200,10 +201,10 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
200 goto out; 201 goto out;
201 } 202 }
202 203
203 if (nvm_alt_mac_addr_offset == 0xFFFF) { 204 if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
205 (nvm_alt_mac_addr_offset == 0x0000))
204 /* There is no Alternate MAC Address */ 206 /* There is no Alternate MAC Address */
205 goto out; 207 goto out;
206 }
207 208
208 if (hw->bus.func == E1000_FUNC_1) 209 if (hw->bus.func == E1000_FUNC_1)
209 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; 210 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index ab4be80f7ab5..362f70382cdd 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -56,7 +56,7 @@
56 56
57#define DRV_EXTRAVERSION "-k" 57#define DRV_EXTRAVERSION "-k"
58 58
59#define DRV_VERSION "1.3.16" DRV_EXTRAVERSION 59#define DRV_VERSION "1.4.4" DRV_EXTRAVERSION
60char e1000e_driver_name[] = "e1000e"; 60char e1000e_driver_name[] = "e1000e";
61const char e1000e_driver_version[] = DRV_VERSION; 61const char e1000e_driver_version[] = DRV_VERSION;
62 62
@@ -2915,7 +2915,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2915 2915
2916 /* disable receives while setting up the descriptors */ 2916 /* disable receives while setting up the descriptors */
2917 rctl = er32(RCTL); 2917 rctl = er32(RCTL);
2918 ew32(RCTL, rctl & ~E1000_RCTL_EN); 2918 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
2919 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2919 e1e_flush(); 2920 e1e_flush();
2920 usleep_range(10000, 20000); 2921 usleep_range(10000, 20000);
2921 2922
@@ -3394,7 +3395,8 @@ void e1000e_down(struct e1000_adapter *adapter)
3394 3395
3395 /* disable receives in the hardware */ 3396 /* disable receives in the hardware */
3396 rctl = er32(RCTL); 3397 rctl = er32(RCTL);
3397 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3398 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3399 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3398 /* flush and sleep below */ 3400 /* flush and sleep below */
3399 3401
3400 netif_stop_queue(netdev); 3402 netif_stop_queue(netdev);
@@ -3403,6 +3405,7 @@ void e1000e_down(struct e1000_adapter *adapter)
3403 tctl = er32(TCTL); 3405 tctl = er32(TCTL);
3404 tctl &= ~E1000_TCTL_EN; 3406 tctl &= ~E1000_TCTL_EN;
3405 ew32(TCTL, tctl); 3407 ew32(TCTL, tctl);
3408
3406 /* flush both disables and wait for them to finish */ 3409 /* flush both disables and wait for them to finish */
3407 e1e_flush(); 3410 e1e_flush();
3408 usleep_range(10000, 20000); 3411 usleep_range(10000, 20000);
diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c
index 1c97861596f0..f67b8aebc89c 100644
--- a/drivers/net/gianfar_ptp.c
+++ b/drivers/net/gianfar_ptp.c
@@ -193,14 +193,9 @@ static void set_alarm(struct etsects *etsects)
193/* Caller must hold etsects->lock. */ 193/* Caller must hold etsects->lock. */
194static void set_fipers(struct etsects *etsects) 194static void set_fipers(struct etsects *etsects)
195{ 195{
196 u32 tmr_ctrl = gfar_read(&etsects->regs->tmr_ctrl); 196 set_alarm(etsects);
197
198 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl & (~TE));
199 gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
200 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); 197 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
201 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); 198 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
202 set_alarm(etsects);
203 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|TE);
204} 199}
205 200
206/* 201/*
@@ -511,7 +506,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
511 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); 506 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
512 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); 507 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
513 set_alarm(etsects); 508 set_alarm(etsects);
514 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE); 509 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD);
515 510
516 spin_unlock_irqrestore(&etsects->lock, flags); 511 spin_unlock_irqrestore(&etsects->lock, flags);
517 512
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4488bd581eca..82660672dcd9 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -22,6 +22,8 @@
22 * - DMA transfer support 22 * - DMA transfer support
23 * - FIFO mode support 23 * - FIFO mode support
24 */ 24 */
25#include <linux/io.h>
26#include <linux/interrupt.h>
25#include <linux/module.h> 27#include <linux/module.h>
26#include <linux/platform_device.h> 28#include <linux/platform_device.h>
27#include <linux/clk.h> 29#include <linux/clk.h>
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 52a7c86af663..ed7d7d62bf68 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -12,6 +12,8 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#include <linux/io.h>
16#include <linux/interrupt.h>
15#include <linux/module.h> 17#include <linux/module.h>
16#include <linux/platform_device.h> 18#include <linux/platform_device.h>
17#include <linux/slab.h> 19#include <linux/slab.h>
@@ -511,7 +513,7 @@ static void sh_sir_tx(struct sh_sir_self *self, int phase)
511 513
512static int sh_sir_read_data(struct sh_sir_self *self) 514static int sh_sir_read_data(struct sh_sir_self *self)
513{ 515{
514 u16 val; 516 u16 val = 0;
515 int timeout = 1024; 517 int timeout = 1024;
516 518
517 while (timeout--) { 519 while (timeout--) {
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 8b3090dc4bcd..80b6f36a8074 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -82,7 +82,7 @@ static int cards_found;
82/* 82/*
83 * VLB I/O addresses 83 * VLB I/O addresses
84 */ 84 */
85static unsigned int pcnet32_portlist[] __initdata = 85static unsigned int pcnet32_portlist[] =
86 { 0x300, 0x320, 0x340, 0x360, 0 }; 86 { 0x300, 0x320, 0x340, 0x360, 0 };
87 87
88static int pcnet32_debug; 88static int pcnet32_debug;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2cd8dc5847b4..cb6e0b486b1e 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -34,8 +34,7 @@
34#define PAGESEL 0x13 34#define PAGESEL 0x13
35#define LAYER4 0x02 35#define LAYER4 0x02
36#define LAYER2 0x01 36#define LAYER2 0x01
37#define MAX_RXTS 4 37#define MAX_RXTS 64
38#define MAX_TXTS 4
39#define N_EXT_TS 1 38#define N_EXT_TS 1
40#define PSF_PTPVER 2 39#define PSF_PTPVER 2
41#define PSF_EVNT 0x4000 40#define PSF_EVNT 0x4000
@@ -218,7 +217,7 @@ static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
218 rxts->seqid = p->seqid; 217 rxts->seqid = p->seqid;
219 rxts->msgtype = (p->msgtype >> 12) & 0xf; 218 rxts->msgtype = (p->msgtype >> 12) & 0xf;
220 rxts->hash = p->msgtype & 0x0fff; 219 rxts->hash = p->msgtype & 0x0fff;
221 rxts->tmo = jiffies + HZ; 220 rxts->tmo = jiffies + 2;
222} 221}
223 222
224static u64 phy2txts(struct phy_txts *p) 223static u64 phy2txts(struct phy_txts *p)
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index f11b3f3df24f..4c617534f937 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -367,7 +367,7 @@ static void sl_bump(struct slip *sl)
367 memcpy(skb_put(skb, count), sl->rbuff, count); 367 memcpy(skb_put(skb, count), sl->rbuff, count);
368 skb_reset_mac_header(skb); 368 skb_reset_mac_header(skb);
369 skb->protocol = htons(ETH_P_IP); 369 skb->protocol = htons(ETH_P_IP);
370 netif_rx(skb); 370 netif_rx_ni(skb);
371 dev->stats.rx_packets++; 371 dev->stats.rx_packets++;
372} 372}
373 373
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 041fb7d43c4f..ef3b236b5145 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -977,7 +977,6 @@ static void rtl8150_disconnect(struct usb_interface *intf)
977 usb_set_intfdata(intf, NULL); 977 usb_set_intfdata(intf, NULL);
978 if (dev) { 978 if (dev) {
979 set_bit(RTL8150_UNPLUG, &dev->flags); 979 set_bit(RTL8150_UNPLUG, &dev->flags);
980 tasklet_disable(&dev->tl);
981 tasklet_kill(&dev->tl); 980 tasklet_kill(&dev->tl);
982 unregister_netdev(dev->netdev); 981 unregister_netdev(dev->netdev);
983 unlink_all_urbs(dev); 982 unlink_all_urbs(dev);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index f54dff44ed50..c3119a6caace 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1735,6 +1735,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
1735 1735
1736 if (dma_mapping_error(ah->dev, bf->skbaddr)) { 1736 if (dma_mapping_error(ah->dev, bf->skbaddr)) {
1737 ATH5K_ERR(ah, "beacon DMA mapping failed\n"); 1737 ATH5K_ERR(ah, "beacon DMA mapping failed\n");
1738 dev_kfree_skb_any(skb);
1739 bf->skb = NULL;
1738 return -EIO; 1740 return -EIO;
1739 } 1741 }
1740 1742
@@ -1819,8 +1821,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1819 ath5k_txbuf_free_skb(ah, avf->bbuf); 1821 ath5k_txbuf_free_skb(ah, avf->bbuf);
1820 avf->bbuf->skb = skb; 1822 avf->bbuf->skb = skb;
1821 ret = ath5k_beacon_setup(ah, avf->bbuf); 1823 ret = ath5k_beacon_setup(ah, avf->bbuf);
1822 if (ret)
1823 avf->bbuf->skb = NULL;
1824out: 1824out:
1825 return ret; 1825 return ret;
1826} 1826}
@@ -1840,6 +1840,7 @@ ath5k_beacon_send(struct ath5k_hw *ah)
1840 struct ath5k_vif *avf; 1840 struct ath5k_vif *avf;
1841 struct ath5k_buf *bf; 1841 struct ath5k_buf *bf;
1842 struct sk_buff *skb; 1842 struct sk_buff *skb;
1843 int err;
1843 1844
1844 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n"); 1845 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");
1845 1846
@@ -1888,11 +1889,6 @@ ath5k_beacon_send(struct ath5k_hw *ah)
1888 1889
1889 avf = (void *)vif->drv_priv; 1890 avf = (void *)vif->drv_priv;
1890 bf = avf->bbuf; 1891 bf = avf->bbuf;
1891 if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
1892 ah->opmode == NL80211_IFTYPE_MONITOR)) {
1893 ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
1894 return;
1895 }
1896 1892
1897 /* 1893 /*
1898 * Stop any current dma and put the new frame on the queue. 1894 * Stop any current dma and put the new frame on the queue.
@@ -1906,8 +1902,17 @@ ath5k_beacon_send(struct ath5k_hw *ah)
1906 1902
1907 /* refresh the beacon for AP or MESH mode */ 1903 /* refresh the beacon for AP or MESH mode */
1908 if (ah->opmode == NL80211_IFTYPE_AP || 1904 if (ah->opmode == NL80211_IFTYPE_AP ||
1909 ah->opmode == NL80211_IFTYPE_MESH_POINT) 1905 ah->opmode == NL80211_IFTYPE_MESH_POINT) {
1910 ath5k_beacon_update(ah->hw, vif); 1906 err = ath5k_beacon_update(ah->hw, vif);
1907 if (err)
1908 return;
1909 }
1910
1911 if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
1912 ah->opmode == NL80211_IFTYPE_MONITOR)) {
1913 ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
1914 return;
1915 }
1911 1916
1912 trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]); 1917 trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);
1913 1918
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index d109c25417f4..c34bef1bf2b0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -307,7 +307,7 @@ static const struct ar9300_eeprom ar9300_default = {
307 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 307 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
308 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, 308 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
309 309
310 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, 310 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
311 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 311 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
312 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 312 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
313 313
@@ -884,7 +884,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
884 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 884 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
885 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, 885 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
886 886
887 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, 887 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
888 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 888 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
889 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 889 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
890 890
@@ -2040,7 +2040,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
2040 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 2040 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2041 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, 2041 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
2042 2042
2043 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, 2043 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
2044 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 2044 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2045 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 2045 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2046 2046
@@ -3734,7 +3734,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
3734 } 3734 }
3735 } else { 3735 } else {
3736 reg_pmu_set = (5 << 1) | (7 << 4) | 3736 reg_pmu_set = (5 << 1) | (7 << 4) |
3737 (1 << 8) | (2 << 14) | 3737 (2 << 8) | (2 << 14) |
3738 (6 << 17) | (1 << 20) | 3738 (6 << 17) | (1 << 20) |
3739 (3 << 24) | (1 << 28); 3739 (3 << 24) | (1 << 28);
3740 } 3740 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 6de3f0bc18e6..5c590429f120 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -850,7 +850,7 @@
850#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) 850#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
851#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240) 851#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
852#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c) 852#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
853#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM_BASE + 0x450 + ((_i) << 2)) 853#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2))
854 854
855/* 855/*
856 * Channel 2 Register Map 856 * Channel 2 Register Map
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 83cba22ac6e8..481e534534eb 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -795,9 +795,23 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
795 u32 tmp; 795 u32 tmp;
796 u16 mmio_base; 796 u16 mmio_base;
797 797
798 tmp = b43_read32(dev, SSB_TMSHIGH); 798 switch (dev->dev->bus_type) {
799 if (tmp & SSB_TMSHIGH_DMA64) 799#ifdef CONFIG_B43_BCMA
800 return DMA_BIT_MASK(64); 800 case B43_BUS_BCMA:
801 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
802 if (tmp & BCMA_IOST_DMA64)
803 return DMA_BIT_MASK(64);
804 break;
805#endif
806#ifdef CONFIG_B43_SSB
807 case B43_BUS_SSB:
808 tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
809 if (tmp & SSB_TMSHIGH_DMA64)
810 return DMA_BIT_MASK(64);
811 break;
812#endif
813 }
814
801 mmio_base = b43_dmacontroller_base(0, 0); 815 mmio_base = b43_dmacontroller_base(0, 0);
802 b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); 816 b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
803 tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); 817 tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 507559361d87..939563162fb3 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -921,6 +921,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
921 { USB_DEVICE(0x07d1, 0x3c16) }, 921 { USB_DEVICE(0x07d1, 0x3c16) },
922 /* Draytek */ 922 /* Draytek */
923 { USB_DEVICE(0x07fa, 0x7712) }, 923 { USB_DEVICE(0x07fa, 0x7712) },
924 /* DVICO */
925 { USB_DEVICE(0x0fe9, 0xb307) },
924 /* Edimax */ 926 /* Edimax */
925 { USB_DEVICE(0x7392, 0x7711) }, 927 { USB_DEVICE(0x7392, 0x7711) },
926 { USB_DEVICE(0x7392, 0x7717) }, 928 { USB_DEVICE(0x7392, 0x7717) },
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 6a93939f44e8..0baeb894f093 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2420,6 +2420,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2420 /* Buffalo */ 2420 /* Buffalo */
2421 { USB_DEVICE(0x0411, 0x00d8) }, 2421 { USB_DEVICE(0x0411, 0x00d8) },
2422 { USB_DEVICE(0x0411, 0x00d9) }, 2422 { USB_DEVICE(0x0411, 0x00d9) },
2423 { USB_DEVICE(0x0411, 0x00e6) },
2423 { USB_DEVICE(0x0411, 0x00f4) }, 2424 { USB_DEVICE(0x0411, 0x00f4) },
2424 { USB_DEVICE(0x0411, 0x0116) }, 2425 { USB_DEVICE(0x0411, 0x0116) },
2425 { USB_DEVICE(0x0411, 0x0119) }, 2426 { USB_DEVICE(0x0411, 0x0119) },
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 942f7a3969a7..ef63c0df006a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -281,6 +281,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
281 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)}, 281 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
282 /* 8188CE-VAU USB minCard (b/g mode only) */ 282 /* 8188CE-VAU USB minCard (b/g mode only) */
283 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)}, 283 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
284 /* 8188RU in Alfa AWUS036NHR */
285 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
284 /* 8188 Combo for BC4 */ 286 /* 8188 Combo for BC4 */
285 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, 287 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
286 288
@@ -303,20 +305,23 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
 	{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
 	/* HP - Lite-On ,8188CUS Slim Combo */
 	{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
+	{RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
 	{RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
 	{RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
 	{RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
 	{RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
 	{RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
-	{RTL_USB_DEVICE(0x3358, 0x13d3, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
+	{RTL_USB_DEVICE(0x13d3, 0x3358, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
 	/* Russian customer -Azwave (8188CE-VAU b/g mode only) */
-	{RTL_USB_DEVICE(0x3359, 0x13d3, rtl92cu_hal_cfg)},
+	{RTL_USB_DEVICE(0x13d3, 0x3359, rtl92cu_hal_cfg)},
+	{RTL_USB_DEVICE(0x4855, 0x0090, rtl92cu_hal_cfg)}, /* Feixun */
+	{RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */
+	{RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */
 
 	/****** 8192CU ********/
 	{RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
 	{RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
 	{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
-	{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/
 	{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
 	{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
 	{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
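Besides adding IDs, this hunk repairs two AzureWave entries whose vendor and product fields had been transposed (0x3358,0x13d3 becomes 0x13d3,0x3358) and drops a second 0x07b8:0x8178 line that duplicated the entry above it. A small, illustrative duplicate scan of the kind that would have flagged the redundant entry; the struct and function names are invented.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct usb_id { uint16_t vendor, product; };

/* Report any (vendor, product) pair that appears more than once in a table. */
static int find_duplicates(const struct usb_id *tbl, size_t n)
{
	int dups = 0;
	for (size_t i = 0; i < n; i++)
		for (size_t j = i + 1; j < n; j++)
			if (tbl[i].vendor == tbl[j].vendor &&
			    tbl[i].product == tbl[j].product) {
				printf("duplicate entry %04x:%04x\n",
				       tbl[i].vendor, tbl[i].product);
				dups++;
			}
	return dups;
}

int main(void)
{
	/* The pre-patch table listed 07b8:8178 twice (Funai and Abocom comments). */
	const struct usb_id ids[] = {
		{ 0x07b8, 0x8178 }, { 0x07b8, 0x8178 }, { 0x2001, 0x3307 },
	};
	return find_duplicates(ids, sizeof(ids) / sizeof(ids[0])) ? 1 : 0;
}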
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c
index ef8370edace7..ad87a1ac6462 100644
--- a/drivers/net/wireless/wl1251/acx.c
+++ b/drivers/net/wireless/wl1251/acx.c
@@ -140,8 +140,6 @@ int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth)
 	auth->sleep_auth = sleep_auth;
 
 	ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
-	if (ret < 0)
-		return ret;
 
 out:
 	kfree(auth);
@@ -681,10 +679,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl)
 
 	ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD,
 				   detection, sizeof(*detection));
-	if (ret < 0) {
+	if (ret < 0)
 		wl1251_warning("failed to set cca threshold: %d", ret);
-		return ret;
-	}
 
 out:
 	kfree(detection);
diff --git a/drivers/net/wireless/wl1251/cmd.c b/drivers/net/wireless/wl1251/cmd.c
index 81f164bc4888..d14d69d733a0 100644
--- a/drivers/net/wireless/wl1251/cmd.c
+++ b/drivers/net/wireless/wl1251/cmd.c
@@ -241,7 +241,7 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
 	if (ret < 0) {
 		wl1251_error("tx %s cmd for channel %d failed",
 			     enable ? "start" : "stop", channel);
-		return ret;
+		goto out;
 	}
 
 	wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
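Both wl1251 hunks remove early returns (or turn them into goto out) after the firmware command, so the buffer allocated at the top of the function always reaches the out: label and is freed; the early returns leaked auth and detection and, in cmd.c, skipped the shared cleanup. A stand-alone sketch of that single-exit cleanup shape; the command helper and struct are stubs, not the wl1251 API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct acx_sleep_auth { unsigned char sleep_auth; };

/* Stub standing in for the firmware configure command; pretend it fails. */
static int send_configure_cmd(const void *buf, size_t len)
{
	(void)buf; (void)len;
	return -EIO;
}

/* Single exit point: once the buffer is allocated, every path goes through
 * out:, so it is freed exactly once and never leaked on error. */
static int set_sleep_auth(unsigned char sleep_auth)
{
	struct acx_sleep_auth *auth;
	int ret;

	auth = malloc(sizeof(*auth));
	if (!auth)
		return -ENOMEM;                 /* nothing held yet, a plain return is fine */

	auth->sleep_auth = sleep_auth;

	ret = send_configure_cmd(auth, sizeof(*auth));
	if (ret < 0) {
		fprintf(stderr, "sleep auth failed: %d\n", ret);
		goto out;                       /* an early "return ret;" here would leak auth */
	}

	printf("sleep auth set to %u\n", auth->sleep_auth);
out:
	free(auth);
	return ret;
}

int main(void)
{
	return set_sleep_auth(1) < 0;
}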
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 8be086e9abe4..51352de88ef1 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1003,6 +1003,7 @@ COMPATIBLE_IOCTL(PPPIOCCONNECT)
 COMPATIBLE_IOCTL(PPPIOCDISCONN)
 COMPATIBLE_IOCTL(PPPIOCATTCHAN)
 COMPATIBLE_IOCTL(PPPIOCGCHAN)
+COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
 /* PPPOX */
 COMPATIBLE_IOCTL(PPPOEIOCSFWD)
 COMPATIBLE_IOCTL(PPPOEIOCDFWD)
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 2e17c5dbdcb8..180540a84d37 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -29,7 +29,7 @@
 #define MAX_LINKS 32
 
 struct sockaddr_nl {
-	sa_family_t	nl_family;	/* AF_NETLINK	*/
+	__kernel_sa_family_t	nl_family;	/* AF_NETLINK	*/
 	unsigned short	nl_pad;		/* zero		*/
 	__u32		nl_pid;		/* port ID	*/
 	__u32		nl_groups;	/* multicast groups mask */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index e17f82266639..d0e77f607a79 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -8,8 +8,10 @@
 #define _K_SS_ALIGNSIZE	(__alignof__ (struct sockaddr *))
 				/* Implementation specific desired alignment */
 
+typedef unsigned short __kernel_sa_family_t;
+
 struct __kernel_sockaddr_storage {
-	unsigned short	ss_family;		/* address family */
+	__kernel_sa_family_t	ss_family;	/* address family */
 	/* Following field(s) are implementation specific */
 	char		__data[_K_SS_MAXSIZE - sizeof(unsigned short)];
 				/* space to achieve desired size, */
@@ -35,7 +37,7 @@ struct seq_file;
 extern void socket_seq_show(struct seq_file *seq);
 #endif
 
-typedef unsigned short	sa_family_t;
+typedef __kernel_sa_family_t	sa_family_t;
 
 /*
  *	1003.1g requires sa_family_t and that sa_data is char.
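The netlink.h and socket.h hunks introduce __kernel_sa_family_t and switch the exported structures (sockaddr_nl, __kernel_sockaddr_storage) over to it, with the in-kernel sa_family_t becoming an alias, presumably so that headers shipped to user space no longer depend on the libc-visible typedef. A compile-time sketch of the aliasing; the struct is shortened and _SS_MAXSIZE is an illustrative stand-in for _K_SS_MAXSIZE.

#include <stdio.h>

/* Mirrors the new typedef in the hunk above: the __kernel_ name is what the
 * exported header defines; the in-kernel sa_family_t is just an alias for it. */
typedef unsigned short __kernel_sa_family_t;
typedef __kernel_sa_family_t sa_family_t;

#define _SS_MAXSIZE 128   /* illustrative; the real _K_SS_MAXSIZE lives in socket.h */

struct sockaddr_storage_sketch {
	__kernel_sa_family_t ss_family;
	char __data[_SS_MAXSIZE - sizeof(unsigned short)];
};

/* The change is type-name only: the struct layout must stay identical. */
_Static_assert(sizeof(__kernel_sa_family_t) == sizeof(unsigned short),
	       "alias must not change the ABI");

int main(void)
{
	printf("sizeof(struct) = %zu\n", sizeof(struct sockaddr_storage_sketch));
	return 0;
}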
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index caaff5f5f39f..b897d6e6d0a5 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -238,7 +238,7 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
 {
 	__u8 flags = 0;
 
-	if (inet_sk(sk)->transparent)
+	if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl)
 		flags |= FLOWI_FLAG_ANYSRC;
 	if (sk->sk_protocol == IPPROTO_TCP)
 		flags |= FLOWI_FLAG_PRECOW_METRICS;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 3176e2e13d9b..2cdf0070419f 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -417,6 +417,7 @@ put_back:
 int br_del_if(struct net_bridge *br, struct net_device *dev)
 {
 	struct net_bridge_port *p;
+	bool changed_addr;
 
 	p = br_port_get_rtnl(dev);
 	if (!p || p->br != br)
@@ -425,9 +426,12 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 	del_nbp(p);
 
 	spin_lock_bh(&br->lock);
-	br_stp_recalculate_bridge_id(br);
+	changed_addr = br_stp_recalculate_bridge_id(br);
 	spin_unlock_bh(&br->lock);
 
+	if (changed_addr)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
+
 	netdev_update_features(br->dev);
 
 	return 0;
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 6545ee9591d1..a76b62135558 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -34,6 +34,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
 	struct net_device *dev = ptr;
 	struct net_bridge_port *p;
 	struct net_bridge *br;
+	bool changed_addr;
 	int err;
 
 	/* register of bridge completed, add sysfs entries */
@@ -57,8 +58,12 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
 	case NETDEV_CHANGEADDR:
 		spin_lock_bh(&br->lock);
 		br_fdb_changeaddr(p, dev->dev_addr);
-		br_stp_recalculate_bridge_id(br);
+		changed_addr = br_stp_recalculate_bridge_id(br);
 		spin_unlock_bh(&br->lock);
+
+		if (changed_addr)
+			call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
+
 		break;
 
 	case NETDEV_CHANGE:
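Both bridge hunks follow one pattern: br_stp_recalculate_bridge_id() now reports whether the bridge address actually changed, the result is captured while br->lock is held, and call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev) runs only after the spinlock has been dropped. A user-space sketch of that capture-under-the-lock, notify-afterwards pattern; pthreads and the struct fields below stand in for the kernel primitives.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct bridge {
	pthread_mutex_t lock;
	unsigned char addr[6];
};

/* Pretend recalculation: adopt a new address and report whether it changed. */
static bool recalc_bridge_id(struct bridge *br, const unsigned char *newaddr)
{
	if (!memcmp(br->addr, newaddr, 6))
		return false;
	memcpy(br->addr, newaddr, 6);
	return true;
}

/* Stand-in for the NETDEV_CHANGEADDR notification; in this sketch it must not
 * run while the lock is held, since listeners may do arbitrary work. */
static void notify_changeaddr(struct bridge *br)
{
	printf("bridge address now starts with %02x\n", br->addr[0]);
}

static void del_port(struct bridge *br, const unsigned char *newaddr)
{
	bool changed;

	pthread_mutex_lock(&br->lock);
	changed = recalc_bridge_id(br, newaddr);   /* capture the result under the lock */
	pthread_mutex_unlock(&br->lock);

	if (changed)
		notify_changeaddr(br);             /* notify only after dropping it */
}

int main(void)
{
	struct bridge br = { PTHREAD_MUTEX_INITIALIZER, { 0 } };
	const unsigned char a[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

	del_port(&br, a);
	return 0;
}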
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 2b5ca1a0054d..5864cc491369 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1198,7 +1198,8 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table)
 
 	if (table->check && table->check(newinfo, table->valid_hooks)) {
 		BUGPRINT("The table doesn't like its own initial data, lol\n");
-		return ERR_PTR(-EINVAL);
+		ret = -EINVAL;
+		goto free_chainstack;
 	}
 
 	table->private = newinfo;
diff --git a/net/core/scm.c b/net/core/scm.c
index 4c1ef026d695..811b53fb330e 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -192,7 +192,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
 			goto error;
 
 		cred->uid = cred->euid = p->creds.uid;
-		cred->gid = cred->egid = p->creds.uid;
+		cred->gid = cred->egid = p->creds.gid;
 		put_cred(p->cred);
 		p->cred = cred;
 	}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 77d3eded665a..8c6563361ab5 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -122,6 +122,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
 	newskb->pkt_type = PACKET_LOOPBACK;
 	newskb->ip_summed = CHECKSUM_UNNECESSARY;
 	WARN_ON(!skb_dst(newskb));
+	skb_dst_force(newskb);
 	netif_rx_ni(newskb);
 	return 0;
 }
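The single added line calls skb_dst_force() before netif_rx_ni(), apparently because the looped-back skb may be processed later in softirq context and therefore needs a real reference on its dst rather than the caller's borrowed (noref) one. A small sketch of that pin-before-handoff rule with an explicit refcount; the structs and helpers are illustrative, not the skb API.

#include <stdio.h>

struct route {
	int refcnt;
	int noref;      /* 1 while the reference is only borrowed from the caller */
};

struct packet {
	struct route *dst;
};

/* Convert a borrowed reference into a real one so the packet can safely
 * outlive the function that created it (the role skb_dst_force plays above). */
static void dst_force(struct packet *p)
{
	if (p->dst && p->dst->noref) {
		p->dst->refcnt++;
		p->dst->noref = 0;
	}
}

/* Stand-in for queueing the packet for later (softirq-time) processing,
 * after the caller has already returned. */
static void queue_for_later(struct packet *p)
{
	printf("queued, dst refcnt=%d noref=%d\n", p->dst->refcnt, p->dst->noref);
}

int main(void)
{
	struct route rt = { .refcnt = 1, .noref = 1 };
	struct packet pkt = { .dst = &rt };

	dst_force(&pkt);        /* without this, the route could be gone before the queue runs */
	queue_for_later(&pkt);
	return 0;
}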
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ab0c9efd1efa..8905e92f896a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1067,7 +1067,7 @@ EXPORT_SYMBOL(compat_ip_setsockopt);
  */
 
 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
-			    char __user *optval, int __user *optlen)
+			    char __user *optval, int __user *optlen, unsigned flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	int val;
@@ -1240,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
 
 		msg.msg_control = optval;
 		msg.msg_controllen = len;
-		msg.msg_flags = 0;
+		msg.msg_flags = flags;
 
 		if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
 			struct in_pktinfo info;
@@ -1294,7 +1294,7 @@ int ip_getsockopt(struct sock *sk, int level,
 {
 	int err;
 
-	err = do_ip_getsockopt(sk, level, optname, optval, optlen);
+	err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
 #ifdef CONFIG_NETFILTER
 	/* we need to exclude all possible ENOPROTOOPTs except default case */
 	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
@@ -1327,7 +1327,8 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
 		return compat_mc_getsockopt(sk, level, optname, optval, optlen,
 			ip_getsockopt);
 
-	err = do_ip_getsockopt(sk, level, optname, optval, optlen);
+	err = do_ip_getsockopt(sk, level, optname, optval, optlen,
+		MSG_CMSG_COMPAT);
 
 #ifdef CONFIG_NETFILTER
 	/* we need to exclude all possible ENOPROTOOPTs except default case */
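These hunks thread a flags argument through do_ip_getsockopt() so the compat entry point can pass MSG_CMSG_COMPAT while the native one passes 0, and the value lands in msg.msg_flags when IP_PKTOPTIONS generates control messages. A condensed sketch of the two-wrappers-one-worker arrangement; the helper names and the flag value are stand-ins.

#include <stdio.h>

#define MSG_CMSG_COMPAT 0x80000000U   /* illustrative value */

/* The single worker: behaviour depends only on the flags it is handed. */
static int do_getsockopt(int optname, unsigned flags)
{
	printf("opt %d, cmsg layout: %s\n", optname,
	       (flags & MSG_CMSG_COMPAT) ? "32-bit compat" : "native");
	return 0;
}

/* Native path: no special cmsg handling. */
static int getsockopt_native(int optname)
{
	return do_getsockopt(optname, 0);
}

/* Compat path: same worker, but control messages must use the 32-bit layout. */
static int getsockopt_compat(int optname)
{
	return do_getsockopt(optname, MSG_CMSG_COMPAT);
}

int main(void)
{
	getsockopt_native(1);
	getsockopt_compat(1);
	return 0;
}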
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 2e97e3ec1eb7..929b27bdeb79 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -18,17 +18,15 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
 	struct rtable *rt;
 	struct flowi4 fl4 = {};
 	__be32 saddr = iph->saddr;
-	__u8 flags = 0;
+	__u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
 	unsigned int hh_len;
 
-	if (!skb->sk && addr_type != RTN_LOCAL) {
-		if (addr_type == RTN_UNSPEC)
-			addr_type = inet_addr_type(net, saddr);
-		if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
-			flags |= FLOWI_FLAG_ANYSRC;
-		else
-			saddr = 0;
-	}
+	if (addr_type == RTN_UNSPEC)
+		addr_type = inet_addr_type(net, saddr);
+	if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
+		flags |= FLOWI_FLAG_ANYSRC;
+	else
+		saddr = 0;
 
 	/* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
 	 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
@@ -38,7 +36,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
 	fl4.flowi4_tos = RT_TOS(iph->tos);
 	fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
 	fl4.flowi4_mark = skb->mark;
-	fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : flags;
+	fl4.flowi4_flags = flags;
 	rt = ip_route_output_key(net, &fl4);
 	if (IS_ERR(rt))
 		return -1;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 1457acb39cec..61714bd52925 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -563,7 +563,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
 			   RT_SCOPE_UNIVERSE,
 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
-			   FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0);
+			   inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP,
+			   daddr, saddr, 0, 0);
 
 	if (!inet->hdrincl) {
 		err = raw_probe_proto_opt(&fl4, msg);
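The inet_sock.h, netfilter.c and raw.c hunks are one logical change: inet_sk_flowi_flags() now sets FLOWI_FLAG_ANYSRC for IP_HDRINCL sockets as well as transparent ones, ip_route_me_harder() seeds its flags from the attached socket and applies the address-type checks unconditionally, and raw_sendmsg() ORs the helper's result into the flowi4_init_output() flags, so a raw socket supplying its own source address can still get a route. A small sketch of deriving lookup flags from socket state; the flag values and struct below are invented.

#include <stdio.h>

#define FLOWI_FLAG_ANYSRC         0x01   /* illustrative bit values */
#define FLOWI_FLAG_PRECOW_METRICS 0x02

struct inet_sock_sketch {
	int transparent;   /* IP_TRANSPARENT set */
	int hdrincl;       /* IP_HDRINCL set: the app builds its own IP header */
	int is_tcp;
};

/* Mirror of the helper's shape: both "the app supplies its own source" cases
 * allow a non-local source address in the route lookup. */
static unsigned char flowi_flags(const struct inet_sock_sketch *sk)
{
	unsigned char flags = 0;

	if (sk->transparent || sk->hdrincl)
		flags |= FLOWI_FLAG_ANYSRC;
	if (sk->is_tcp)
		flags |= FLOWI_FLAG_PRECOW_METRICS;
	return flags;
}

int main(void)
{
	struct inet_sock_sketch raw_hdrincl = { .hdrincl = 1 };

	printf("flags = 0x%x\n", flowi_flags(&raw_hdrincl));
	return 0;
}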
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e3dec1c9f09d..075212e41b83 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -722,7 +722,7 @@ static inline bool compare_hash_inputs(const struct rtable *rt1,
 {
 	return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
 		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
-		(rt1->rt_iif ^ rt2->rt_iif)) == 0);
+		(rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
 }
 
 static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
@@ -731,8 +731,8 @@ static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
 		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
 		(rt1->rt_mark ^ rt2->rt_mark) |
 		(rt1->rt_key_tos ^ rt2->rt_key_tos) |
-		(rt1->rt_oif ^ rt2->rt_oif) |
-		(rt1->rt_iif ^ rt2->rt_iif)) == 0;
+		(rt1->rt_route_iif ^ rt2->rt_route_iif) |
+		(rt1->rt_oif ^ rt2->rt_oif)) == 0;
 }
 
 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
@@ -2320,8 +2320,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	     rth = rcu_dereference(rth->dst.rt_next)) {
 		if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
 		     ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
-		     (rth->rt_iif ^ iif) |
-		     rth->rt_oif |
+		     (rth->rt_route_iif ^ iif) |
 		     (rth->rt_key_tos ^ tos)) == 0 &&
 		    rth->rt_mark == skb->mark &&
 		    net_eq(dev_net(rth->dst.dev), net) &&
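The route.c hunks replace rt_iif with rt_route_iif in the cache comparisons and drop the bare rth->rt_oif term from the input-path check. The comparisons themselves use a branch-free idiom: XOR each pair of fields, OR the partial results, and test the whole expression against zero, so every field is compared with a single conditional. A stand-alone illustration of that idiom with invented fields; the payoff is simply fewer branches on a very hot lookup path.

#include <stdint.h>
#include <stdio.h>

struct key {
	uint32_t dst, src, iif, tos;
};

/* All fields equal <=> every XOR is zero <=> the OR of the XORs is zero.
 * One branch instead of four, same result as field-by-field ==. */
static int keys_equal(const struct key *a, const struct key *b)
{
	return ((a->dst ^ b->dst) |
		(a->src ^ b->src) |
		(a->iif ^ b->iif) |
		(a->tos ^ b->tos)) == 0;
}

int main(void)
{
	struct key k1 = { 1, 2, 3, 4 }, k2 = { 1, 2, 3, 4 }, k3 = { 1, 2, 9, 4 };

	printf("%d %d\n", keys_equal(&k1, &k2), keys_equal(&k1, &k3));
	return 0;
}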
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 92bb9434b338..3bc5c8f7c71b 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -276,7 +276,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	int mss;
 	struct rtable *rt;
 	__u8 rcv_wscale;
-	bool ecn_ok;
+	bool ecn_ok = false;
 
 	if (!sysctl_tcp_syncookies || !th->ack || th->rst)
 		goto out;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 89d5bf806222..ac838965ff34 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -165,7 +165,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	int mss;
 	struct dst_entry *dst;
 	__u8 rcv_wscale;
-	bool ecn_ok;
+	bool ecn_ok = false;
 
 	if (!sysctl_tcp_syncookies || !th->ack || th->rst)
 		goto out;
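Both syncookie hunks initialize ecn_ok before it is passed down to the option-parsing helpers, which can return early without ever writing to it; reading it afterwards was a use of an uninitialized variable. A tiny sketch of why the caller should initialize an out-parameter that the callee only sets on the success path; the parser below is invented.

#include <stdbool.h>
#include <stdio.h>

/* Parser that only writes *ecn_ok when the options actually carry the bit;
 * on early return the caller's variable is left untouched. */
static bool parse_options(const unsigned char *opts, int len, bool *ecn_ok)
{
	if (len <= 0)
		return false;           /* nothing parsed, *ecn_ok never written */
	*ecn_ok = (opts[0] & 0x01);
	return true;
}

int main(void)
{
	bool ecn_ok = false;            /* the fix: do not rely on the callee to set it */

	parse_options(NULL, 0, &ecn_ok);
	printf("ecn_ok = %d\n", ecn_ok);
	return 0;
}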
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5b466cd1272f..84d0fd47636a 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -312,6 +312,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 		}
 		break;
 	case NF_STOLEN:
+		break;
 	default:
 		kfree_skb(skb);
 	}
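Before this hunk the NF_STOLEN case had no body and fell through to default:, so an skb that a hook had already taken ownership of was still handed to kfree_skb(). The added break makes the case an explicit no-op. A compact illustration of the fallthrough pitfall; the verdict enum and helpers are invented.

#include <stdio.h>
#include <stdlib.h>

enum verdict { V_ACCEPT, V_STOLEN, V_DROP };

static void free_pkt(void *pkt)
{
	printf("freeing %p\n", pkt);
	free(pkt);
}

static void reinject(void *pkt, enum verdict v)
{
	switch (v) {
	case V_ACCEPT:
		printf("delivering %p\n", pkt);
		break;
	case V_STOLEN:
		/* Someone else owns the packet now; without this break control
		 * would fall through to the default case and free it anyway. */
		break;
	default:
		free_pkt(pkt);
	}
}

int main(void)
{
	void *pkt = malloc(16);

	reinject(pkt, V_STOLEN);    /* must not free */
	free(pkt);                  /* the real owner frees it */
	return 0;
}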
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 58107d060846..9c24de10a657 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -341,11 +341,11 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
 
 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 	if (entry == NULL)
-		return -ENOMEM;
+		goto out_entry;
 	if (domain != NULL) {
 		entry->domain = kstrdup(domain, GFP_ATOMIC);
 		if (entry->domain == NULL)
-			goto cfg_cipsov4_map_add_failure;
+			goto out_domain;
 	}
 
 	if (addr == NULL && mask == NULL) {
@@ -354,13 +354,13 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
 	} else if (addr != NULL && mask != NULL) {
 		addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
 		if (addrmap == NULL)
-			goto cfg_cipsov4_map_add_failure;
+			goto out_addrmap;
 		INIT_LIST_HEAD(&addrmap->list4);
 		INIT_LIST_HEAD(&addrmap->list6);
 
 		addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
 		if (addrinfo == NULL)
-			goto cfg_cipsov4_map_add_failure;
+			goto out_addrinfo;
 		addrinfo->type_def.cipsov4 = doi_def;
 		addrinfo->type = NETLBL_NLTYPE_CIPSOV4;
 		addrinfo->list.addr = addr->s_addr & mask->s_addr;
@@ -374,7 +374,7 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
 		entry->type = NETLBL_NLTYPE_ADDRSELECT;
 	} else {
 		ret_val = -EINVAL;
-		goto cfg_cipsov4_map_add_failure;
+		goto out_addrmap;
 	}
 
 	ret_val = netlbl_domhsh_add(entry, audit_info);
@@ -384,11 +384,15 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
 	return 0;
 
 cfg_cipsov4_map_add_failure:
-	cipso_v4_doi_putdef(doi_def);
+	kfree(addrinfo);
+out_addrinfo:
+	kfree(addrmap);
+out_addrmap:
 	kfree(entry->domain);
+out_domain:
 	kfree(entry);
-	kfree(addrmap);
-	kfree(addrinfo);
+out_entry:
+	cipso_v4_doi_putdef(doi_def);
 	return ret_val;
 }
 
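The netlabel hunks replace the single catch-all error label with a ladder of labels (out_addrinfo, out_addrmap, out_domain, out_entry), each jumped to from the point where the corresponding allocation has not yet happened, so the unwind frees exactly what is held; the cipso_v4_doi_putdef() call moves to the last label so the DOI reference is dropped on every failure path. The same shape appears in the ebtables and wl1251 fixes above. A stand-alone sketch of the ladder; the allocations and names are illustrative.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct entry { char *domain; };

/* Each failure jumps to the label just below the last successful allocation,
 * so the unwind releases resources in reverse order and nothing else. */
static int map_add(const char *domain)
{
	struct entry *entry;
	char *addrmap = NULL, *addrinfo = NULL;
	int ret = -ENOMEM;

	entry = calloc(1, sizeof(*entry));
	if (!entry)
		goto out_entry;

	entry->domain = strdup(domain);
	if (!entry->domain)
		goto out_domain;

	addrmap = calloc(1, 32);
	if (!addrmap)
		goto out_addrmap;

	addrinfo = calloc(1, 32);
	if (!addrinfo)
		goto out_addrinfo;

	/* Success: the real code hands ownership to a hash table; the sketch just frees. */
	free(addrinfo); free(addrmap); free(entry->domain); free(entry);
	return 0;

out_addrinfo:
	free(addrmap);
out_addrmap:
	free(entry->domain);
out_domain:
	free(entry);
out_entry:
	return ret;
}

int main(void)
{
	return map_add("example.com");
}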
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2a318f2dc3e5..b5d56a22b1d2 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -112,7 +112,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
 
 	for (prio = 0; prio < q->bands; prio++) {
 		struct Qdisc *qdisc = q->queues[prio];
-		struct sk_buff *skb = qdisc->dequeue(qdisc);
+		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
 		if (skb) {
 			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;