author    Thomas Gleixner <tglx@linutronix.de>  2013-04-03 06:27:29 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2013-04-03 06:27:29 -0400
commit    0ed2aef9b3bffe598045b62a31a50d912eee92d8
tree      d7dda12955c838f531727d2775d09c4e04bdf066
parent    cfea7d7e452f57682a0bb55a55e9f79c569558c2
parent    8011657b9e63cb2e914b9a0f75233b910c1854cb
Merge branch 'fortglx/3.10/time' of git://git.linaro.org/people/jstultz/linux into timers/core
-rw-r--r--  Documentation/input/alps.txt | 67
-rw-r--r--  Documentation/networking/tuntap.txt | 77
-rw-r--r--  Documentation/trace/ftrace.txt | 2
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/arm/mach-bcm/Kconfig | 1
-rw-r--r--  arch/arm/mach-bcm/board_bcm.c | 7
-rw-r--r--  arch/s390/include/asm/cpu_mf.h | 1
-rw-r--r--  arch/um/drivers/chan.h | 2
-rw-r--r--  arch/um/drivers/chan_kern.c | 4
-rw-r--r--  arch/um/drivers/chan_user.c | 12
-rw-r--r--  arch/um/drivers/chan_user.h | 6
-rw-r--r--  arch/um/drivers/line.c | 42
-rw-r--r--  arch/um/drivers/net_kern.c | 2
-rw-r--r--  arch/um/drivers/ssl.c | 1
-rw-r--r--  arch/um/drivers/stdio_console.c | 1
-rw-r--r--  arch/um/os-Linux/signal.c | 2
-rw-r--r--  arch/um/os-Linux/start_up.c | 2
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 1
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 12
-rw-r--r--  arch/x86/kernel/rtc.c | 69
-rw-r--r--  arch/x86/kernel/tsc.c | 6
-rw-r--r--  arch/x86/platform/efi/efi.c | 24
-rw-r--r--  arch/x86/platform/mrst/vrtc.c | 44
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/bcm_kona_timer.c | 211
-rw-r--r--  drivers/input/keyboard/tc3589x-keypad.c | 8
-rw-r--r--  drivers/input/mouse/alps.c | 85
-rw-r--r--  drivers/input/mouse/alps.h | 1
-rw-r--r--  drivers/input/mouse/cypress_ps2.c | 19
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 4
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 7
-rw-r--r--  drivers/input/touchscreen/mms114.c | 34
-rw-r--r--  drivers/isdn/i4l/isdn_tty.c | 4
-rw-r--r--  drivers/net/bonding/bond_main.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 14
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 36
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 71
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 82
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c | 14
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 76
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 86
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 2
-rw-r--r--  drivers/net/hippi/rrunner.c | 3
-rw-r--r--  drivers/net/macvlan.c | 1
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 1
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 6
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r--  drivers/net/vxlan.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/sta.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-modparams.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 20
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api.h | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c | 133
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rx.c | 37
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 34
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 266
-rw-r--r--  fs/hostfs/hostfs_kern.c | 9
-rw-r--r--  include/linux/clocksource.h | 1
-rw-r--r--  include/linux/hrtimer.h | 5
-rw-r--r--  include/linux/jiffies.h | 1
-rw-r--r--  include/linux/time.h | 3
-rw-r--r--  include/linux/timekeeper_internal.h | 7
-rw-r--r--  include/uapi/linux/time.h | 6
-rw-r--r--  kernel/hrtimer.c | 14
-rw-r--r--  kernel/posix-timers.c | 16
-rw-r--r--  kernel/time.c | 11
-rw-r--r--  kernel/time/ntp.c | 18
-rw-r--r--  kernel/time/tick-internal.h | 2
-rw-r--r--  kernel/time/timekeeping.c | 282
-rw-r--r--  kernel/trace/Kconfig | 24
-rw-r--r--  kernel/trace/trace.c | 27
-rw-r--r--  net/bridge/br_device.c | 2
-rw-r--r--  net/bridge/br_input.c | 2
-rw-r--r--  net/bridge/br_mdb.c | 4
-rw-r--r--  net/bridge/br_multicast.c | 3
-rw-r--r--  net/bridge/br_private.h | 4
-rw-r--r--  net/core/dev.c | 5
-rw-r--r--  net/core/rtnetlink.c | 1
-rw-r--r--  net/dcb/dcbnl.c | 8
-rw-r--r--  net/ieee802154/6lowpan.h | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 1
-rw-r--r--  net/ipv4/ip_options.c | 2
-rw-r--r--  net/ipv6/ip6_input.c | 3
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 29
-rw-r--r--  net/key/af_key.c | 8
-rw-r--r--  net/mac80211/cfg.c | 21
-rw-r--r--  net/mac80211/iface.c | 6
-rw-r--r--  net/mac80211/mlme.c | 28
-rw-r--r--  net/mac80211/tx.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 11
-rw-r--r--  net/netfilter/nfnetlink.c | 7
-rw-r--r--  net/netfilter/xt_AUDIT.c | 3
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 27
-rw-r--r--  net/rds/stats.c | 1
-rw-r--r--  net/sched/sch_qfq.c | 66
-rw-r--r--  net/wireless/core.c | 3
-rw-r--r--  net/wireless/nl80211.c | 51
130 files changed, 1676 insertions(+), 968 deletions(-)
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index 3262b6e4d686..e544c7ff8cfa 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -3,10 +3,26 @@ ALPS Touchpad Protocol
 
 Introduction
 ------------
-
-Currently the ALPS touchpad driver supports four protocol versions in use by
-ALPS touchpads, called versions 1, 2, 3, and 4. Information about the various
-protocol versions is contained in the following sections.
+Currently the ALPS touchpad driver supports five protocol versions in use by
+ALPS touchpads, called versions 1, 2, 3, 4 and 5.
+
+Since roughly mid-2010 several new ALPS touchpads have been released and
+integrated into a variety of laptops and netbooks. These new touchpads
+have enough behavior differences that the alps_model_data definition
+table, describing the properties of the different versions, is no longer
+adequate. The design choices were to re-define the alps_model_data
+table, with the risk of regression testing existing devices, or isolate
+the new devices outside of the alps_model_data table. The latter design
+choice was made. The new touchpad signatures are named: "Rushmore",
+"Pinnacle", and "Dolphin", which you will see in the alps.c code.
+For the purposes of this document, this group of ALPS touchpads will
+generically be called "new ALPS touchpads".
+
+We experimented with probing the ACPI interface _HID (Hardware ID)/_CID
+(Compatibility ID) definition as a way to uniquely identify the
+different ALPS variants but there did not appear to be a 1:1 mapping.
+In fact, it appeared to be an m:n mapping between the _HID and actual
+hardware type.
 
 Detection
 ---------
@@ -20,9 +36,13 @@ If the E6 report is successful, the touchpad model is identified using the "E7
 report" sequence: E8-E7-E7-E7-E9. The response is the model signature and is
 matched against known models in the alps_model_data_array.
 
-With protocol versions 3 and 4, the E7 report model signature is always
-73-02-64. To differentiate between these versions, the response from the
-"Enter Command Mode" sequence must be inspected as described below.
+For older touchpads supporting protocol versions 3 and 4, the E7 report
+model signature is always 73-02-64. To differentiate between these
+versions, the response from the "Enter Command Mode" sequence must be
+inspected as described below.
+
+The new ALPS touchpads have an E7 signature of 73-03-50 or 73-03-0A but
+seem to be better differentiated by the EC Command Mode response.
 
 Command Mode
 ------------
@@ -47,6 +67,14 @@ address of the register being read, and the third contains the value of the
 register. Registers are written by writing the value one nibble at a time
 using the same encoding used for addresses.
 
+For the new ALPS touchpads, the EC command is used to enter command
+mode. The response in the new ALPS touchpads is significantly different,
+and more important in determining the behavior. This code has been
+separated from the original alps_model_data table and put in the
+alps_identify function. For example, there seem to be two hardware init
+sequences for the "Dolphin" touchpads as determined by the second byte
+of the EC response.
+
 Packet Format
 -------------
 
@@ -187,3 +215,28 @@ There are several things worth noting here.
  well.
 
 So far no v4 devices with tracksticks have been encountered.
+
+ALPS Absolute Mode - Protocol Version 5
+---------------------------------------
+This is basically Protocol Version 3 but with different logic for packet
+decode. It uses the same alps_process_touchpad_packet_v3 call with a
+specialized decode_fields function pointer to correctly interpret the
+packets. This appears to only be used by the Dolphin devices.
+
+For single-touch, the 6-byte packet format is:
+
+ byte 0:    1    1    0    0    1    0    0    0
+ byte 1:    0   x6   x5   x4   x3   x2   x1   x0
+ byte 2:    0   y6   y5   y4   y3   y2   y1   y0
+ byte 3:    0    M    R    L    1    m    r    l
+ byte 4:  y10   y9   y8   y7  x10   x9   x8   x7
+ byte 5:    0   z6   z5   z4   z3   z2   z1   z0
+
+For mt, the format is:
+
+ byte 0:    1    1    1   n3    1   n2   n1  x24
+ byte 1:    1   y7   y6   y5   y4   y3   y2   y1
+ byte 2:    ?   x2   x1  y12  y11  y10   y9   y8
+ byte 3:    0  x23  x22  x21  x20  x19  x18  x17
+ byte 4:    0   x9   x8   x7   x6   x5   x4   x3
+ byte 5:    0  x16  x15  x14  x13  x12  x11  x10
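
As an aside (not part of the patch), the single-touch layout above can be unpacked with a few shifts and masks. The sketch below mirrors the bit positions in the table and the alps_decode_dolphin() helper added to alps.c later in this merge; the struct and function names here are hypothetical:

    #include <stdint.h>

    struct alps_v5_point {
        unsigned int x, y, z;   /* absolute position and pressure */
        unsigned int l, r, m;   /* physical button bits from byte 3 */
    };

    /* Unpack a 6-byte protocol V5 single-touch packet per the table above. */
    static void alps_v5_decode_st(const uint8_t p[6], struct alps_v5_point *f)
    {
        f->x = (p[1] & 0x7f) | ((p[4] & 0x0f) << 7);   /* x6..x0 plus x10..x7 */
        f->y = (p[2] & 0x7f) | ((p[4] & 0xf0) << 3);   /* y6..y0 plus y10..y7 */
        f->z = p[5] & 0x7f;                            /* z6..z0 */
        f->l = p[3] & 0x01;
        f->r = (p[3] >> 1) & 0x01;
        f->m = (p[3] >> 2) & 0x01;
    }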
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt
index c0aab985bad9..949d5dcdd9a3 100644
--- a/Documentation/networking/tuntap.txt
+++ b/Documentation/networking/tuntap.txt
@@ -105,6 +105,83 @@ Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com>
   Proto [2 bytes]
   Raw protocol(IP, IPv6, etc) frame.
 
+3.3 Multiqueue tuntap interface:
+
+From version 3.8, Linux supports multiqueue tuntap which can uses multiple
+file descriptors (queues) to parallelize packets sending or receiving. The
+device allocation is the same as before, and if user wants to create multiple
+queues, TUNSETIFF with the same device name must be called many times with
+IFF_MULTI_QUEUE flag.
+
+char *dev should be the name of the device, queues is the number of queues to
+be created, fds is used to store and return the file descriptors (queues)
+created to the caller. Each file descriptor were served as the interface of a
+queue which could be accessed by userspace.
+
+  #include <linux/if.h>
+  #include <linux/if_tun.h>
+
+  int tun_alloc_mq(char *dev, int queues, int *fds)
+  {
+      struct ifreq ifr;
+      int fd, err, i;
+
+      if (!dev)
+          return -1;
+
+      memset(&ifr, 0, sizeof(ifr));
+      /* Flags: IFF_TUN   - TUN device (no Ethernet headers)
+       *        IFF_TAP   - TAP device
+       *
+       *        IFF_NO_PI - Do not provide packet information
+       *        IFF_MULTI_QUEUE - Create a queue of multiqueue device
+       */
+      ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
+      strcpy(ifr.ifr_name, dev);
+
+      for (i = 0; i < queues; i++) {
+          if ((fd = open("/dev/net/tun", O_RDWR)) < 0)
+              goto err;
+          err = ioctl(fd, TUNSETIFF, (void *)&ifr);
+          if (err) {
+              close(fd);
+              goto err;
+          }
+          fds[i] = fd;
+      }
+
+      return 0;
+  err:
+      for (--i; i >= 0; i--)
+          close(fds[i]);
+      return err;
+  }
+
+A new ioctl(TUNSETQUEUE) were introduced to enable or disable a queue. When
+calling it with IFF_DETACH_QUEUE flag, the queue were disabled. And when
+calling it with IFF_ATTACH_QUEUE flag, the queue were enabled. The queue were
+enabled by default after it was created through TUNSETIFF.
+
+fd is the file descriptor (queue) that we want to enable or disable, when
+enable is true we enable it, otherwise we disable it
+
+  #include <linux/if.h>
+  #include <linux/if_tun.h>
+
+  int tun_set_queue(int fd, int enable)
+  {
+      struct ifreq ifr;
+
+      memset(&ifr, 0, sizeof(ifr));
+
+      if (enable)
+          ifr.ifr_flags = IFF_ATTACH_QUEUE;
+      else
+          ifr.ifr_flags = IFF_DETACH_QUEUE;
+
+      return ioctl(fd, TUNSETQUEUE, (void *)&ifr);
+  }
+
 Universal TUN/TAP device driver Frequently Asked Question.
 
 1. What platforms are supported by TUN/TAP driver ?
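
As a usage note (also not part of the patch), a caller of the two helpers documented above might look like the sketch below. It assumes tun_alloc_mq() and tun_set_queue() from the text above are compiled into the same program; the device name "tap0" and the queue count of four are arbitrary:

    #include <linux/if.h>
    #include <linux/if_tun.h>

    int tun_alloc_mq(char *dev, int queues, int *fds);  /* from the text above */
    int tun_set_queue(int fd, int enable);              /* from the text above */

    int main(void)
    {
        char dev[IFNAMSIZ] = "tap0";
        int fds[4];

        /* Create a multiqueue tap device backed by four queues. */
        if (tun_alloc_mq(dev, 4, fds) != 0)
            return 1;

        /* Detach queue 2 while it is not needed ... */
        tun_set_queue(fds[2], 0);
        /* ... and attach it again before resuming traffic on it. */
        tun_set_queue(fds[2], 1);

        return 0;
    }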
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 53d6a3c51d87..a372304aef10 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -1873,7 +1873,7 @@ feature:
 
  status\input  |     0      |     1      |    else    |
  --------------+------------+------------+------------+
- not allocated |(do nothing)| alloc+swap |   EINVAL   |
+ not allocated |(do nothing)| alloc+swap |(do nothing)|
  --------------+------------+------------+------------+
  allocated     |    free    |    swap    |   clear    |
  --------------+------------+------------+------------+
diff --git a/MAINTAINERS b/MAINTAINERS
index 95616582c728..c08411b27499 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6412,6 +6412,8 @@ F: Documentation/networking/LICENSE.qla3xxx
 F:	drivers/net/ethernet/qlogic/qla3xxx.*
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
+M:	Rajesh Borundia <rajesh.borundia@qlogic.com>
+M:	Shahed Shaikh <shahed.shaikh@qlogic.com>
 M:	Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:	Sony Chacko <sony.chacko@qlogic.com>
 M:	linux-driver@qlogic.com
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index bf02471d7e7c..f11289519c39 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -6,6 +6,7 @@ config ARCH_BCM
 	select ARM_ERRATA_764369 if SMP
 	select ARM_GIC
 	select CPU_V7
+	select CLKSRC_OF
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_TIME
 	select GPIO_BCM
diff --git a/arch/arm/mach-bcm/board_bcm.c b/arch/arm/mach-bcm/board_bcm.c
index f0f9abafad29..259593540477 100644
--- a/arch/arm/mach-bcm/board_bcm.c
+++ b/arch/arm/mach-bcm/board_bcm.c
@@ -16,14 +16,11 @@
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/irqchip.h>
+#include <linux/clocksource.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 
-static void timer_init(void)
-{
-}
-
 
 static void __init board_init(void)
 {
@@ -35,7 +32,7 @@ static const char * const bcm11351_dt_compat[] = { "bcm,bcm11351", NULL, };
 
 DT_MACHINE_START(BCM11351_DT, "Broadcom Application Processor")
 	.init_irq = irqchip_init,
-	.init_time = timer_init,
+	.init_time = clocksource_of_init,
 	.init_machine = board_init,
 	.dt_compat = bcm11351_dt_compat,
 MACHINE_END
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index f1eddd150dd7..c879fad404c8 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -12,6 +12,7 @@
 #ifndef _ASM_S390_CPU_MF_H
 #define _ASM_S390_CPU_MF_H
 
+#include <linux/errno.h>
 #include <asm/facility.h>
 
 #define CPU_MF_INT_SF_IAE	(1 << 31)	/* invalid entry address */
diff --git a/arch/um/drivers/chan.h b/arch/um/drivers/chan.h
index 78f1b8999964..c512b0306dd4 100644
--- a/arch/um/drivers/chan.h
+++ b/arch/um/drivers/chan.h
@@ -37,7 +37,7 @@ extern int console_write_chan(struct chan *chan, const char *buf,
 extern int console_open_chan(struct line *line, struct console *co);
 extern void deactivate_chan(struct chan *chan, int irq);
 extern void reactivate_chan(struct chan *chan, int irq);
-extern void chan_enable_winch(struct chan *chan, struct tty_struct *tty);
+extern void chan_enable_winch(struct chan *chan, struct tty_port *port);
 extern int enable_chan(struct line *line);
 extern void close_chan(struct line *line);
 extern int chan_window_size(struct line *line,
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 15c553c239a1..80b47cb71e0a 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -122,10 +122,10 @@ static int open_chan(struct list_head *chans)
 	return err;
 }
 
-void chan_enable_winch(struct chan *chan, struct tty_struct *tty)
+void chan_enable_winch(struct chan *chan, struct tty_port *port)
 {
 	if (chan && chan->primary && chan->ops->winch)
-		register_winch(chan->fd, tty);
+		register_winch(chan->fd, port);
 }
 
 static void line_timer_cb(struct work_struct *work)
diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
index 9be670ad23b5..3fd7c3efdb18 100644
--- a/arch/um/drivers/chan_user.c
+++ b/arch/um/drivers/chan_user.c
@@ -216,7 +216,7 @@ static int winch_thread(void *arg)
216 } 216 }
217} 217}
218 218
219static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out, 219static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
220 unsigned long *stack_out) 220 unsigned long *stack_out)
221{ 221{
222 struct winch_data data; 222 struct winch_data data;
@@ -271,7 +271,7 @@ static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out,
271 return err; 271 return err;
272} 272}
273 273
274void register_winch(int fd, struct tty_struct *tty) 274void register_winch(int fd, struct tty_port *port)
275{ 275{
276 unsigned long stack; 276 unsigned long stack;
277 int pid, thread, count, thread_fd = -1; 277 int pid, thread, count, thread_fd = -1;
@@ -281,17 +281,17 @@ void register_winch(int fd, struct tty_struct *tty)
281 return; 281 return;
282 282
283 pid = tcgetpgrp(fd); 283 pid = tcgetpgrp(fd);
284 if (is_skas_winch(pid, fd, tty)) { 284 if (is_skas_winch(pid, fd, port)) {
285 register_winch_irq(-1, fd, -1, tty, 0); 285 register_winch_irq(-1, fd, -1, port, 0);
286 return; 286 return;
287 } 287 }
288 288
289 if (pid == -1) { 289 if (pid == -1) {
290 thread = winch_tramp(fd, tty, &thread_fd, &stack); 290 thread = winch_tramp(fd, port, &thread_fd, &stack);
291 if (thread < 0) 291 if (thread < 0)
292 return; 292 return;
293 293
294 register_winch_irq(thread_fd, fd, thread, tty, stack); 294 register_winch_irq(thread_fd, fd, thread, port, stack);
295 295
296 count = write(thread_fd, &c, sizeof(c)); 296 count = write(thread_fd, &c, sizeof(c));
297 if (count != sizeof(c)) 297 if (count != sizeof(c))
diff --git a/arch/um/drivers/chan_user.h b/arch/um/drivers/chan_user.h
index dc693298eb8f..03f1b565c5f9 100644
--- a/arch/um/drivers/chan_user.h
+++ b/arch/um/drivers/chan_user.h
@@ -38,10 +38,10 @@ extern int generic_window_size(int fd, void *unused, unsigned short *rows_out,
 			       unsigned short *cols_out);
 extern void generic_free(void *data);
 
-struct tty_struct;
-extern void register_winch(int fd, struct tty_struct *tty);
+struct tty_port;
+extern void register_winch(int fd, struct tty_port *port);
 extern void register_winch_irq(int fd, int tty_fd, int pid,
-			       struct tty_struct *tty, unsigned long stack);
+			       struct tty_port *port, unsigned long stack);
 
 #define __channel_help(fn, prefix) \
 __uml_help(fn, prefix "[0-9]*=<channel description>\n" \
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index f1b38571f94e..be541cf69fd2 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -305,7 +305,7 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty)
305 return ret; 305 return ret;
306 306
307 if (!line->sigio) { 307 if (!line->sigio) {
308 chan_enable_winch(line->chan_out, tty); 308 chan_enable_winch(line->chan_out, port);
309 line->sigio = 1; 309 line->sigio = 1;
310 } 310 }
311 311
@@ -315,8 +315,22 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty)
315 return 0; 315 return 0;
316} 316}
317 317
318static void unregister_winch(struct tty_struct *tty);
319
320static void line_destruct(struct tty_port *port)
321{
322 struct tty_struct *tty = tty_port_tty_get(port);
323 struct line *line = tty->driver_data;
324
325 if (line->sigio) {
326 unregister_winch(tty);
327 line->sigio = 0;
328 }
329}
330
318static const struct tty_port_operations line_port_ops = { 331static const struct tty_port_operations line_port_ops = {
319 .activate = line_activate, 332 .activate = line_activate,
333 .destruct = line_destruct,
320}; 334};
321 335
322int line_open(struct tty_struct *tty, struct file *filp) 336int line_open(struct tty_struct *tty, struct file *filp)
@@ -340,18 +354,6 @@ int line_install(struct tty_driver *driver, struct tty_struct *tty,
340 return 0; 354 return 0;
341} 355}
342 356
343static void unregister_winch(struct tty_struct *tty);
344
345void line_cleanup(struct tty_struct *tty)
346{
347 struct line *line = tty->driver_data;
348
349 if (line->sigio) {
350 unregister_winch(tty);
351 line->sigio = 0;
352 }
353}
354
355void line_close(struct tty_struct *tty, struct file * filp) 357void line_close(struct tty_struct *tty, struct file * filp)
356{ 358{
357 struct line *line = tty->driver_data; 359 struct line *line = tty->driver_data;
@@ -601,7 +603,7 @@ struct winch {
601 int fd; 603 int fd;
602 int tty_fd; 604 int tty_fd;
603 int pid; 605 int pid;
604 struct tty_struct *tty; 606 struct tty_port *port;
605 unsigned long stack; 607 unsigned long stack;
606 struct work_struct work; 608 struct work_struct work;
607}; 609};
@@ -655,7 +657,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
655 goto out; 657 goto out;
656 } 658 }
657 } 659 }
658 tty = winch->tty; 660 tty = tty_port_tty_get(winch->port);
659 if (tty != NULL) { 661 if (tty != NULL) {
660 line = tty->driver_data; 662 line = tty->driver_data;
661 if (line != NULL) { 663 if (line != NULL) {
@@ -663,6 +665,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
663 &tty->winsize.ws_col); 665 &tty->winsize.ws_col);
664 kill_pgrp(tty->pgrp, SIGWINCH, 1); 666 kill_pgrp(tty->pgrp, SIGWINCH, 1);
665 } 667 }
668 tty_kref_put(tty);
666 } 669 }
667 out: 670 out:
668 if (winch->fd != -1) 671 if (winch->fd != -1)
@@ -670,7 +673,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
670 return IRQ_HANDLED; 673 return IRQ_HANDLED;
671} 674}
672 675
673void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty, 676void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port,
674 unsigned long stack) 677 unsigned long stack)
675{ 678{
676 struct winch *winch; 679 struct winch *winch;
@@ -685,7 +688,7 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
685 .fd = fd, 688 .fd = fd,
686 .tty_fd = tty_fd, 689 .tty_fd = tty_fd,
687 .pid = pid, 690 .pid = pid,
688 .tty = tty, 691 .port = port,
689 .stack = stack }); 692 .stack = stack });
690 693
691 if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, 694 if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
@@ -714,15 +717,18 @@ static void unregister_winch(struct tty_struct *tty)
714{ 717{
715 struct list_head *ele, *next; 718 struct list_head *ele, *next;
716 struct winch *winch; 719 struct winch *winch;
720 struct tty_struct *wtty;
717 721
718 spin_lock(&winch_handler_lock); 722 spin_lock(&winch_handler_lock);
719 723
720 list_for_each_safe(ele, next, &winch_handlers) { 724 list_for_each_safe(ele, next, &winch_handlers) {
721 winch = list_entry(ele, struct winch, list); 725 winch = list_entry(ele, struct winch, list);
722 if (winch->tty == tty) { 726 wtty = tty_port_tty_get(winch->port);
727 if (wtty == tty) {
723 free_winch(winch); 728 free_winch(winch);
724 break; 729 break;
725 } 730 }
731 tty_kref_put(wtty);
726 } 732 }
727 spin_unlock(&winch_handler_lock); 733 spin_unlock(&winch_handler_lock);
728} 734}
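
As background (not part of the patch), the tty_kref_put() calls added above exist because tty_port_tty_get() returns a counted reference rather than a raw pointer. The generic pattern the driver now follows is roughly the sketch below, where example_winch_action() is a hypothetical stand-in for whatever work needs the tty:

    #include <linux/tty.h>

    static void example_winch_action(struct tty_port *port)
    {
        struct tty_struct *tty = tty_port_tty_get(port);  /* takes a reference */

        if (tty) {
            /* tty stays valid here even if a hangup races with us */
            tty_kref_put(tty);                            /* drop the reference */
        }
    }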
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index d8926c303629..39f186252e02 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -218,6 +218,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
218 spin_lock_irqsave(&lp->lock, flags); 218 spin_lock_irqsave(&lp->lock, flags);
219 219
220 len = (*lp->write)(lp->fd, skb, lp); 220 len = (*lp->write)(lp->fd, skb, lp);
221 skb_tx_timestamp(skb);
221 222
222 if (len == skb->len) { 223 if (len == skb->len) {
223 dev->stats.tx_packets++; 224 dev->stats.tx_packets++;
@@ -281,6 +282,7 @@ static void uml_net_get_drvinfo(struct net_device *dev,
281static const struct ethtool_ops uml_net_ethtool_ops = { 282static const struct ethtool_ops uml_net_ethtool_ops = {
282 .get_drvinfo = uml_net_get_drvinfo, 283 .get_drvinfo = uml_net_get_drvinfo,
283 .get_link = ethtool_op_get_link, 284 .get_link = ethtool_op_get_link,
285 .get_ts_info = ethtool_op_get_ts_info,
284}; 286};
285 287
286static void uml_net_user_timer_expire(unsigned long _conn) 288static void uml_net_user_timer_expire(unsigned long _conn)
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 16fdd0a0f9d6..b8d14fa52059 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -105,7 +105,6 @@ static const struct tty_operations ssl_ops = {
 	.throttle = line_throttle,
 	.unthrottle = line_unthrottle,
 	.install = ssl_install,
-	.cleanup = line_cleanup,
 	.hangup = line_hangup,
 };
 
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 827777af3f6d..7b361f36ca96 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -110,7 +110,6 @@ static const struct tty_operations console_ops = {
 	.set_termios = line_set_termios,
 	.throttle = line_throttle,
 	.unthrottle = line_unthrottle,
-	.cleanup = line_cleanup,
 	.hangup = line_hangup,
 };
 
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index b1469fe93295..9d9f1b4bf826 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -15,7 +15,7 @@
 #include <sysdep/mcontext.h>
 #include "internal.h"
 
-void (*sig_info[NSIG])(int, siginfo_t *, struct uml_pt_regs *) = {
+void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
 	[SIGTRAP]	= relay_signal,
 	[SIGFPE]	= relay_signal,
 	[SIGILL]	= relay_signal,
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index da4b9e9999fd..337518c5042a 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -15,6 +15,8 @@
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/wait.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 #include <asm/unistd.h>
 #include <init.h>
 #include <os.h>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a4f24f5b1218..26bd79261532 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -120,6 +120,7 @@ config X86
 	select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
 	select OLD_SIGACTION if X86_32
 	select COMPAT_OLD_SIGACTION if IA32_EMULATION
+	select RTC_LIB
 
 config INSTRUCTION_DECODER
 	def_bool y
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 93fe929d1cee..a8466f203e62 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -100,6 +100,7 @@
 #define X86_FEATURE_AMD_DCM	(3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF	(3*32+28) /* APERFMPERF */
 #define X86_FEATURE_EAGER_FPU	(3*32+29) /* "eagerfpu" Non lazy FPU restore */
+#define X86_FEATURE_NONSTOP_TSC_S3 (3*32+30) /* TSC doesn't stop in S3 state */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1905ce98bee0..e7ae0d89e7e0 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -96,6 +96,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
96 sched_clock_stable = 1; 96 sched_clock_stable = 1;
97 } 97 }
98 98
99 /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
100 if (c->x86 == 6) {
101 switch (c->x86_model) {
102 case 0x27: /* Penwell */
103 case 0x35: /* Cloverview */
104 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
105 break;
106 default:
107 break;
108 }
109 }
110
99 /* 111 /*
100 * There is a known erratum on Pentium III and Core Solo 112 * There is a known erratum on Pentium III and Core Solo
101 * and Core Duo CPUs. 113 * and Core Duo CPUs.
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 2e8f3d3b5641..198eb201ed3b 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -13,6 +13,7 @@
13#include <asm/x86_init.h> 13#include <asm/x86_init.h>
14#include <asm/time.h> 14#include <asm/time.h>
15#include <asm/mrst.h> 15#include <asm/mrst.h>
16#include <asm/rtc.h>
16 17
17#ifdef CONFIG_X86_32 18#ifdef CONFIG_X86_32
18/* 19/*
@@ -36,70 +37,24 @@ EXPORT_SYMBOL(rtc_lock);
36 * nowtime is written into the registers of the CMOS clock, it will 37 * nowtime is written into the registers of the CMOS clock, it will
37 * jump to the next second precisely 500 ms later. Check the Motorola 38 * jump to the next second precisely 500 ms later. Check the Motorola
38 * MC146818A or Dallas DS12887 data sheet for details. 39 * MC146818A or Dallas DS12887 data sheet for details.
39 *
40 * BUG: This routine does not handle hour overflow properly; it just
41 * sets the minutes. Usually you'll only notice that after reboot!
42 */ 40 */
43int mach_set_rtc_mmss(unsigned long nowtime) 41int mach_set_rtc_mmss(unsigned long nowtime)
44{ 42{
45 int real_seconds, real_minutes, cmos_minutes; 43 struct rtc_time tm;
46 unsigned char save_control, save_freq_select;
47 unsigned long flags;
48 int retval = 0; 44 int retval = 0;
49 45
50 spin_lock_irqsave(&rtc_lock, flags); 46 rtc_time_to_tm(nowtime, &tm);
51 47 if (!rtc_valid_tm(&tm)) {
52 /* tell the clock it's being set */ 48 retval = set_rtc_time(&tm);
53 save_control = CMOS_READ(RTC_CONTROL); 49 if (retval)
54 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); 50 printk(KERN_ERR "%s: RTC write failed with error %d\n",
55 51 __FUNCTION__, retval);
56 /* stop and reset prescaler */
57 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
58 CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
59
60 cmos_minutes = CMOS_READ(RTC_MINUTES);
61 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
62 cmos_minutes = bcd2bin(cmos_minutes);
63
64 /*
65 * since we're only adjusting minutes and seconds,
66 * don't interfere with hour overflow. This avoids
67 * messing with unknown time zones but requires your
68 * RTC not to be off by more than 15 minutes
69 */
70 real_seconds = nowtime % 60;
71 real_minutes = nowtime / 60;
72 /* correct for half hour time zone */
73 if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
74 real_minutes += 30;
75 real_minutes %= 60;
76
77 if (abs(real_minutes - cmos_minutes) < 30) {
78 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
79 real_seconds = bin2bcd(real_seconds);
80 real_minutes = bin2bcd(real_minutes);
81 }
82 CMOS_WRITE(real_seconds, RTC_SECONDS);
83 CMOS_WRITE(real_minutes, RTC_MINUTES);
84 } else { 52 } else {
85 printk_once(KERN_NOTICE 53 printk(KERN_ERR
86 "set_rtc_mmss: can't update from %d to %d\n", 54 "%s: Invalid RTC value: write of %lx to RTC failed\n",
87 cmos_minutes, real_minutes); 55 __FUNCTION__, nowtime);
88 retval = -1; 56 retval = -EINVAL;
89 } 57 }
90
91 /* The following flags have to be released exactly in this order,
92 * otherwise the DS12887 (popular MC146818A clone with integrated
93 * battery and quartz) will not reset the oscillator and will not
94 * update precisely 500 ms later. You won't find this mentioned in
95 * the Dallas Semiconductor data sheets, but who believes data
96 * sheets anyway ... -- Markus Kuhn
97 */
98 CMOS_WRITE(save_control, RTC_CONTROL);
99 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
100
101 spin_unlock_irqrestore(&rtc_lock, flags);
102
103 return retval; 58 return retval;
104} 59}
105 60
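
For context (not part of the patch), rtc_time_to_tm() used above breaks a seconds-since-epoch value into the same broken-down UTC fields that userspace gmtime_r() produces (tm_year counts from 1900, tm_mon from 0), which is also why the efi.c hunk below writes tm.tm_year + 1900 and tm.tm_mon + 1. A userspace illustration of that split:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        time_t nowtime = time(NULL);  /* plays the role of the nowtime argument */
        struct tm tm;

        gmtime_r(&nowtime, &tm);
        printf("%04d-%02d-%02d %02d:%02d:%02d UTC\n",
               tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
               tm.tm_hour, tm.tm_min, tm.tm_sec);
        return 0;
    }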
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 4b9ea101fe3b..098b3cfda72e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -768,7 +768,8 @@ static cycle_t read_tsc(struct clocksource *cs)
768 768
769static void resume_tsc(struct clocksource *cs) 769static void resume_tsc(struct clocksource *cs)
770{ 770{
771 clocksource_tsc.cycle_last = 0; 771 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
772 clocksource_tsc.cycle_last = 0;
772} 773}
773 774
774static struct clocksource clocksource_tsc = { 775static struct clocksource clocksource_tsc = {
@@ -939,6 +940,9 @@ static int __init init_tsc_clocksource(void)
939 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; 940 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
940 } 941 }
941 942
943 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
944 clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
945
942 /* 946 /*
943 * Trust the results of the earlier calibration on systems 947 * Trust the results of the earlier calibration on systems
944 * exporting a reliable TSC. 948 * exporting a reliable TSC.
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 5f2ecaf3f9d8..28d9efacc9b6 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -48,6 +48,7 @@
48#include <asm/cacheflush.h> 48#include <asm/cacheflush.h>
49#include <asm/tlbflush.h> 49#include <asm/tlbflush.h>
50#include <asm/x86_init.h> 50#include <asm/x86_init.h>
51#include <asm/rtc.h>
51 52
52#define EFI_DEBUG 1 53#define EFI_DEBUG 1
53 54
@@ -258,10 +259,10 @@ static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
258 259
259int efi_set_rtc_mmss(unsigned long nowtime) 260int efi_set_rtc_mmss(unsigned long nowtime)
260{ 261{
261 int real_seconds, real_minutes;
262 efi_status_t status; 262 efi_status_t status;
263 efi_time_t eft; 263 efi_time_t eft;
264 efi_time_cap_t cap; 264 efi_time_cap_t cap;
265 struct rtc_time tm;
265 266
266 status = efi.get_time(&eft, &cap); 267 status = efi.get_time(&eft, &cap);
267 if (status != EFI_SUCCESS) { 268 if (status != EFI_SUCCESS) {
@@ -269,13 +270,20 @@ int efi_set_rtc_mmss(unsigned long nowtime)
269 return -1; 270 return -1;
270 } 271 }
271 272
272 real_seconds = nowtime % 60; 273 rtc_time_to_tm(nowtime, &tm);
273 real_minutes = nowtime / 60; 274 if (!rtc_valid_tm(&tm)) {
274 if (((abs(real_minutes - eft.minute) + 15)/30) & 1) 275 eft.year = tm.tm_year + 1900;
275 real_minutes += 30; 276 eft.month = tm.tm_mon + 1;
276 real_minutes %= 60; 277 eft.day = tm.tm_mday;
277 eft.minute = real_minutes; 278 eft.minute = tm.tm_min;
278 eft.second = real_seconds; 279 eft.second = tm.tm_sec;
280 eft.nanosecond = 0;
281 } else {
282 printk(KERN_ERR
283 "%s: Invalid EFI RTC value: write of %lx to EFI RTC failed\n",
284 __FUNCTION__, nowtime);
285 return -1;
286 }
279 287
280 status = efi.set_time(&eft); 288 status = efi.set_time(&eft);
281 if (status != EFI_SUCCESS) { 289 if (status != EFI_SUCCESS) {
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
index 225bd0f0f675..d62b0a3b5c14 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -85,27 +85,35 @@ unsigned long vrtc_get_time(void)
85 return mktime(year, mon, mday, hour, min, sec); 85 return mktime(year, mon, mday, hour, min, sec);
86} 86}
87 87
88/* Only care about the minutes and seconds */
89int vrtc_set_mmss(unsigned long nowtime) 88int vrtc_set_mmss(unsigned long nowtime)
90{ 89{
91 int real_sec, real_min;
92 unsigned long flags; 90 unsigned long flags;
93 int vrtc_min; 91 struct rtc_time tm;
94 92 int year;
95 spin_lock_irqsave(&rtc_lock, flags); 93 int retval = 0;
96 vrtc_min = vrtc_cmos_read(RTC_MINUTES); 94
97 95 rtc_time_to_tm(nowtime, &tm);
98 real_sec = nowtime % 60; 96 if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) {
99 real_min = nowtime / 60; 97 /*
100 if (((abs(real_min - vrtc_min) + 15)/30) & 1) 98 * tm.year is the number of years since 1900, and the
101 real_min += 30; 99 * vrtc need the years since 1972.
102 real_min %= 60; 100 */
103 101 year = tm.tm_year - 72;
104 vrtc_cmos_write(real_sec, RTC_SECONDS); 102 spin_lock_irqsave(&rtc_lock, flags);
105 vrtc_cmos_write(real_min, RTC_MINUTES); 103 vrtc_cmos_write(year, RTC_YEAR);
106 spin_unlock_irqrestore(&rtc_lock, flags); 104 vrtc_cmos_write(tm.tm_mon, RTC_MONTH);
107 105 vrtc_cmos_write(tm.tm_mday, RTC_DAY_OF_MONTH);
108 return 0; 106 vrtc_cmos_write(tm.tm_hour, RTC_HOURS);
107 vrtc_cmos_write(tm.tm_min, RTC_MINUTES);
108 vrtc_cmos_write(tm.tm_sec, RTC_SECONDS);
109 spin_unlock_irqrestore(&rtc_lock, flags);
110 } else {
111 printk(KERN_ERR
112 "%s: Invalid vRTC value: write of %lx to vRTC failed\n",
113 __FUNCTION__, nowtime);
114 retval = -EINVAL;
115 }
116 return retval;
109} 117}
110 118
111void __init mrst_rtc_init(void) 119void __init mrst_rtc_init(void)
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 4d8283aec5b5..96e25319659b 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
 obj-$(CONFIG_SUNXI_TIMER)	+= sunxi_timer.o
 obj-$(CONFIG_ARCH_TEGRA)	+= tegra20_timer.o
 obj-$(CONFIG_VT8500_TIMER)	+= vt8500_timer.o
+obj-$(CONFIG_ARCH_BCM)	+= bcm_kona_timer.o
 
 obj-$(CONFIG_ARM_ARCH_TIMER)		+= arm_arch_timer.o
 obj-$(CONFIG_CLKSRC_METAG_GENERIC)	+= metag_generic.o
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
new file mode 100644
index 000000000000..350f49356458
--- /dev/null
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -0,0 +1,211 @@
1/*
2 * Copyright (C) 2012 Broadcom Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/init.h>
15#include <linux/irq.h>
16#include <linux/interrupt.h>
17#include <linux/jiffies.h>
18#include <linux/clockchips.h>
19#include <linux/types.h>
20
21#include <linux/io.h>
22#include <asm/mach/time.h>
23
24#include <linux/of.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27
28
29#define KONA_GPTIMER_STCS_OFFSET 0x00000000
30#define KONA_GPTIMER_STCLO_OFFSET 0x00000004
31#define KONA_GPTIMER_STCHI_OFFSET 0x00000008
32#define KONA_GPTIMER_STCM0_OFFSET 0x0000000C
33
34#define KONA_GPTIMER_STCS_TIMER_MATCH_SHIFT 0
35#define KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT 4
36
37struct kona_bcm_timers {
38 int tmr_irq;
39 void __iomem *tmr_regs;
40};
41
42static struct kona_bcm_timers timers;
43
44static u32 arch_timer_rate;
45
46/*
47 * We use the peripheral timers for system tick, the cpu global timer for
48 * profile tick
49 */
50static void kona_timer_disable_and_clear(void __iomem *base)
51{
52 uint32_t reg;
53
54 /*
55 * clear and disable interrupts
56 * We are using compare/match register 0 for our system interrupts
57 */
58 reg = readl(base + KONA_GPTIMER_STCS_OFFSET);
59
60 /* Clear compare (0) interrupt */
61 reg |= 1 << KONA_GPTIMER_STCS_TIMER_MATCH_SHIFT;
62 /* disable compare */
63 reg &= ~(1 << KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT);
64
65 writel(reg, base + KONA_GPTIMER_STCS_OFFSET);
66
67}
68
69static void
70kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
71{
72 void __iomem *base = IOMEM(timer_base);
73 int loop_limit = 4;
74
75 /*
76 * Read 64-bit free running counter
77 * 1. Read hi-word
78 * 2. Read low-word
79 * 3. Read hi-word again
80 * 4.1
81 * if new hi-word is not equal to previously read hi-word, then
82 * start from #1
83 * 4.2
84 * if new hi-word is equal to previously read hi-word then stop.
85 */
86
87 while (--loop_limit) {
88 *msw = readl(base + KONA_GPTIMER_STCHI_OFFSET);
89 *lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET);
90 if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET))
91 break;
92 }
93 if (!loop_limit) {
94 pr_err("bcm_kona_timer: getting counter failed.\n");
95 pr_err(" Timer will be impacted\n");
96 }
97
98 return;
99}
100
101static const struct of_device_id bcm_timer_ids[] __initconst = {
102 {.compatible = "bcm,kona-timer"},
103 {},
104};
105
106static void __init kona_timers_init(void)
107{
108 struct device_node *node;
109 u32 freq;
110
111 node = of_find_matching_node(NULL, bcm_timer_ids);
112
113 if (!node)
114 panic("No timer");
115
116 if (!of_property_read_u32(node, "clock-frequency", &freq))
117 arch_timer_rate = freq;
118 else
119 panic("clock-frequency not set in the .dts file");
120
121 /* Setup IRQ numbers */
122 timers.tmr_irq = irq_of_parse_and_map(node, 0);
123
124 /* Setup IO addresses */
125 timers.tmr_regs = of_iomap(node, 0);
126
127 kona_timer_disable_and_clear(timers.tmr_regs);
128}
129
130static int kona_timer_set_next_event(unsigned long clc,
131 struct clock_event_device *unused)
132{
133 /*
134 * timer (0) is disabled by the timer interrupt already
135 * so, here we reload the next event value and re-enable
136 * the timer.
137 *
138 * This way, we are potentially losing the time between
139 * timer-interrupt->set_next_event. CPU local timers, when
140 * they come in should get rid of skew.
141 */
142
143 uint32_t lsw, msw;
144 uint32_t reg;
145
146 kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
147
148 /* Load the "next" event tick value */
149 writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);
150
151 /* Enable compare */
152 reg = readl(timers.tmr_regs + KONA_GPTIMER_STCS_OFFSET);
153 reg |= (1 << KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT);
154 writel(reg, timers.tmr_regs + KONA_GPTIMER_STCS_OFFSET);
155
156 return 0;
157}
158
159static void kona_timer_set_mode(enum clock_event_mode mode,
160 struct clock_event_device *unused)
161{
162 switch (mode) {
163 case CLOCK_EVT_MODE_ONESHOT:
164 /* by default mode is one shot don't do any thing */
165 break;
166 case CLOCK_EVT_MODE_UNUSED:
167 case CLOCK_EVT_MODE_SHUTDOWN:
168 default:
169 kona_timer_disable_and_clear(timers.tmr_regs);
170 }
171}
172
173static struct clock_event_device kona_clockevent_timer = {
174 .name = "timer 1",
175 .features = CLOCK_EVT_FEAT_ONESHOT,
176 .set_next_event = kona_timer_set_next_event,
177 .set_mode = kona_timer_set_mode
178};
179
180static void __init kona_timer_clockevents_init(void)
181{
182 kona_clockevent_timer.cpumask = cpumask_of(0);
183 clockevents_config_and_register(&kona_clockevent_timer,
184 arch_timer_rate, 6, 0xffffffff);
185}
186
187static irqreturn_t kona_timer_interrupt(int irq, void *dev_id)
188{
189 struct clock_event_device *evt = &kona_clockevent_timer;
190
191 kona_timer_disable_and_clear(timers.tmr_regs);
192 evt->event_handler(evt);
193 return IRQ_HANDLED;
194}
195
196static struct irqaction kona_timer_irq = {
197 .name = "Kona Timer Tick",
198 .flags = IRQF_TIMER,
199 .handler = kona_timer_interrupt,
200};
201
202static void __init kona_timer_init(void)
203{
204 kona_timers_init();
205 kona_timer_clockevents_init();
206 setup_irq(timers.tmr_irq, &kona_timer_irq);
207 kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
208}
209
210CLOCKSOURCE_OF_DECLARE(bcm_kona, "bcm,kona-timer",
211 kona_timer_init);
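
As a side note (not part of the driver), kona_timer_get_counter() above uses the classic hi/lo/hi sequence to read a 64-bit free-running counter through two 32-bit registers without locking. A generic sketch of that loop, with read_hi()/read_lo() as hypothetical stand-ins for the STCHI/STCLO register reads:

    #include <stdint.h>

    extern uint32_t read_hi(void);  /* stand-in for readl(base + STCHI) */
    extern uint32_t read_lo(void);  /* stand-in for readl(base + STCLO) */

    static uint64_t read_counter64(void)
    {
        uint32_t hi, lo;
        int tries = 4;

        while (tries--) {
            hi = read_hi();
            lo = read_lo();
            /* If the high word did not change, lo belongs to the same
             * 64-bit value and the pair is consistent. */
            if (hi == read_hi())
                break;
        }
        return ((uint64_t)hi << 32) | lo;
    }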
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 2fb0d76a04c4..208de7cbb7fa 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -70,8 +70,6 @@
70#define TC3589x_EVT_INT_CLR 0x2 70#define TC3589x_EVT_INT_CLR 0x2
71#define TC3589x_KBD_INT_CLR 0x1 71#define TC3589x_KBD_INT_CLR 0x1
72 72
73#define TC3589x_KBD_KEYMAP_SIZE 64
74
75/** 73/**
76 * struct tc_keypad - data structure used by keypad driver 74 * struct tc_keypad - data structure used by keypad driver
77 * @tc3589x: pointer to tc35893 75 * @tc3589x: pointer to tc35893
@@ -88,7 +86,7 @@ struct tc_keypad {
88 const struct tc3589x_keypad_platform_data *board; 86 const struct tc3589x_keypad_platform_data *board;
89 unsigned int krow; 87 unsigned int krow;
90 unsigned int kcol; 88 unsigned int kcol;
91 unsigned short keymap[TC3589x_KBD_KEYMAP_SIZE]; 89 unsigned short *keymap;
92 bool keypad_stopped; 90 bool keypad_stopped;
93}; 91};
94 92
@@ -338,12 +336,14 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
338 336
339 error = matrix_keypad_build_keymap(plat->keymap_data, NULL, 337 error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
340 TC3589x_MAX_KPROW, TC3589x_MAX_KPCOL, 338 TC3589x_MAX_KPROW, TC3589x_MAX_KPCOL,
341 keypad->keymap, input); 339 NULL, input);
342 if (error) { 340 if (error) {
343 dev_err(&pdev->dev, "Failed to build keymap\n"); 341 dev_err(&pdev->dev, "Failed to build keymap\n");
344 goto err_free_mem; 342 goto err_free_mem;
345 } 343 }
346 344
345 keypad->keymap = input->keycode;
346
347 input_set_capability(input, EV_MSC, MSC_SCAN); 347 input_set_capability(input, EV_MSC, MSC_SCAN);
348 if (!plat->no_autorepeat) 348 if (!plat->no_autorepeat)
349 __set_bit(EV_REP, input->evbit); 349 __set_bit(EV_REP, input->evbit);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 7b99fc7c9438..0238e0e14335 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -490,6 +490,29 @@ static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p)
490 f->y_map |= (p[5] & 0x20) << 6; 490 f->y_map |= (p[5] & 0x20) << 6;
491} 491}
492 492
493static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p)
494{
495 f->first_mp = !!(p[0] & 0x02);
496 f->is_mp = !!(p[0] & 0x20);
497
498 f->fingers = ((p[0] & 0x6) >> 1 |
499 (p[0] & 0x10) >> 2);
500 f->x_map = ((p[2] & 0x60) >> 5) |
501 ((p[4] & 0x7f) << 2) |
502 ((p[5] & 0x7f) << 9) |
503 ((p[3] & 0x07) << 16) |
504 ((p[3] & 0x70) << 15) |
505 ((p[0] & 0x01) << 22);
506 f->y_map = (p[1] & 0x7f) |
507 ((p[2] & 0x1f) << 7);
508
509 f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7));
510 f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3));
511 f->z = (p[0] & 4) ? 0 : p[5] & 0x7f;
512
513 alps_decode_buttons_v3(f, p);
514}
515
493static void alps_process_touchpad_packet_v3(struct psmouse *psmouse) 516static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
494{ 517{
495 struct alps_data *priv = psmouse->private; 518 struct alps_data *priv = psmouse->private;
@@ -874,7 +897,8 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
874 } 897 }
875 898
876 /* Bytes 2 - pktsize should have 0 in the highest bit */ 899 /* Bytes 2 - pktsize should have 0 in the highest bit */
877 if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && 900 if (priv->proto_version != ALPS_PROTO_V5 &&
901 psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
878 (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { 902 (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
879 psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", 903 psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
880 psmouse->pktcnt - 1, 904 psmouse->pktcnt - 1,
@@ -994,8 +1018,7 @@ static int alps_rpt_cmd(struct psmouse *psmouse, int init_command,
994 return 0; 1018 return 0;
995} 1019}
996 1020
997static int alps_enter_command_mode(struct psmouse *psmouse, 1021static int alps_enter_command_mode(struct psmouse *psmouse)
998 unsigned char *resp)
999{ 1022{
1000 unsigned char param[4]; 1023 unsigned char param[4];
1001 1024
@@ -1004,14 +1027,12 @@ static int alps_enter_command_mode(struct psmouse *psmouse,
1004 return -1; 1027 return -1;
1005 } 1028 }
1006 1029
1007 if (param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) { 1030 if ((param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) &&
1031 param[0] != 0x73) {
1008 psmouse_dbg(psmouse, 1032 psmouse_dbg(psmouse,
1009 "unknown response while entering command mode\n"); 1033 "unknown response while entering command mode\n");
1010 return -1; 1034 return -1;
1011 } 1035 }
1012
1013 if (resp)
1014 *resp = param[2];
1015 return 0; 1036 return 0;
1016} 1037}
1017 1038
@@ -1176,7 +1197,7 @@ static int alps_passthrough_mode_v3(struct psmouse *psmouse,
1176{ 1197{
1177 int reg_val, ret = -1; 1198 int reg_val, ret = -1;
1178 1199
1179 if (alps_enter_command_mode(psmouse, NULL)) 1200 if (alps_enter_command_mode(psmouse))
1180 return -1; 1201 return -1;
1181 1202
1182 reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x0008); 1203 reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x0008);
@@ -1216,7 +1237,7 @@ static int alps_probe_trackstick_v3(struct psmouse *psmouse, int reg_base)
1216{ 1237{
1217 int ret = -EIO, reg_val; 1238 int ret = -EIO, reg_val;
1218 1239
1219 if (alps_enter_command_mode(psmouse, NULL)) 1240 if (alps_enter_command_mode(psmouse))
1220 goto error; 1241 goto error;
1221 1242
1222 reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x08); 1243 reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x08);
@@ -1279,7 +1300,7 @@ static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
1279 * supported by this driver. If bit 1 isn't set the packet 1300 * supported by this driver. If bit 1 isn't set the packet
1280 * format is different. 1301 * format is different.
1281 */ 1302 */
1282 if (alps_enter_command_mode(psmouse, NULL) || 1303 if (alps_enter_command_mode(psmouse) ||
1283 alps_command_mode_write_reg(psmouse, 1304 alps_command_mode_write_reg(psmouse,
1284 reg_base + 0x08, 0x82) || 1305 reg_base + 0x08, 0x82) ||
1285 alps_exit_command_mode(psmouse)) 1306 alps_exit_command_mode(psmouse))
@@ -1306,7 +1327,7 @@ static int alps_hw_init_v3(struct psmouse *psmouse)
1306 alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE) == -EIO) 1327 alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE) == -EIO)
1307 goto error; 1328 goto error;
1308 1329
1309 if (alps_enter_command_mode(psmouse, NULL) || 1330 if (alps_enter_command_mode(psmouse) ||
1310 alps_absolute_mode_v3(psmouse)) { 1331 alps_absolute_mode_v3(psmouse)) {
1311 psmouse_err(psmouse, "Failed to enter absolute mode\n"); 1332 psmouse_err(psmouse, "Failed to enter absolute mode\n");
1312 goto error; 1333 goto error;
@@ -1381,7 +1402,7 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
1381 priv->flags &= ~ALPS_DUALPOINT; 1402 priv->flags &= ~ALPS_DUALPOINT;
1382 } 1403 }
1383 1404
1384 if (alps_enter_command_mode(psmouse, NULL) || 1405 if (alps_enter_command_mode(psmouse) ||
1385 alps_command_mode_read_reg(psmouse, 0xc2d9) == -1 || 1406 alps_command_mode_read_reg(psmouse, 0xc2d9) == -1 ||
1386 alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00)) 1407 alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00))
1387 goto error; 1408 goto error;
@@ -1431,7 +1452,7 @@ static int alps_hw_init_v4(struct psmouse *psmouse)
1431 struct ps2dev *ps2dev = &psmouse->ps2dev; 1452 struct ps2dev *ps2dev = &psmouse->ps2dev;
1432 unsigned char param[4]; 1453 unsigned char param[4];
1433 1454
1434 if (alps_enter_command_mode(psmouse, NULL)) 1455 if (alps_enter_command_mode(psmouse))
1435 goto error; 1456 goto error;
1436 1457
1437 if (alps_absolute_mode_v4(psmouse)) { 1458 if (alps_absolute_mode_v4(psmouse)) {
@@ -1499,6 +1520,23 @@ error:
1499 return -1; 1520 return -1;
1500} 1521}
1501 1522
1523static int alps_hw_init_dolphin_v1(struct psmouse *psmouse)
1524{
1525 struct ps2dev *ps2dev = &psmouse->ps2dev;
1526 unsigned char param[2];
1527
1528 /* This is dolphin "v1" as empirically defined by florin9doi */
1529 param[0] = 0x64;
1530 param[1] = 0x28;
1531
1532 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) ||
1533 ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
1534 ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE))
1535 return -1;
1536
1537 return 0;
1538}
1539
1502static void alps_set_defaults(struct alps_data *priv) 1540static void alps_set_defaults(struct alps_data *priv)
1503{ 1541{
1504 priv->byte0 = 0x8f; 1542 priv->byte0 = 0x8f;
@@ -1532,6 +1570,21 @@ static void alps_set_defaults(struct alps_data *priv)
1532 priv->nibble_commands = alps_v4_nibble_commands; 1570 priv->nibble_commands = alps_v4_nibble_commands;
1533 priv->addr_command = PSMOUSE_CMD_DISABLE; 1571 priv->addr_command = PSMOUSE_CMD_DISABLE;
1534 break; 1572 break;
1573 case ALPS_PROTO_V5:
1574 priv->hw_init = alps_hw_init_dolphin_v1;
1575 priv->process_packet = alps_process_packet_v3;
1576 priv->decode_fields = alps_decode_dolphin;
1577 priv->set_abs_params = alps_set_abs_params_mt;
1578 priv->nibble_commands = alps_v3_nibble_commands;
1579 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
1580 priv->byte0 = 0xc8;
1581 priv->mask0 = 0xc8;
1582 priv->flags = 0;
1583 priv->x_max = 1360;
1584 priv->y_max = 660;
1585 priv->x_bits = 23;
1586 priv->y_bits = 12;
1587 break;
1535 } 1588 }
1536} 1589}
1537 1590
@@ -1592,6 +1645,12 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
1592 1645
1593 if (alps_match_table(psmouse, priv, e7, ec) == 0) { 1646 if (alps_match_table(psmouse, priv, e7, ec) == 0) {
1594 return 0; 1647 return 0;
1648 } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
1649 ec[0] == 0x73 && ec[1] == 0x01) {
1650 priv->proto_version = ALPS_PROTO_V5;
1651 alps_set_defaults(priv);
1652
1653 return 0;
1595 } else if (ec[0] == 0x88 && ec[1] == 0x08) { 1654 } else if (ec[0] == 0x88 && ec[1] == 0x08) {
1596 priv->proto_version = ALPS_PROTO_V3; 1655 priv->proto_version = ALPS_PROTO_V3;
1597 alps_set_defaults(priv); 1656 alps_set_defaults(priv);
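The new branch in alps_identify() keys Dolphin (protocol V5) detection off the E7 and EC report bytes. As a standalone illustration of that kind of signature check only — the psmouse plumbing and command-mode handling are omitted, and the helper name is made up — a minimal sketch:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the shape of the new V5 check above:
 * e7/ec are the 3-byte responses to the "E7" and "EC" report requests. */
static bool looks_like_dolphin_v1(const unsigned char e7[3],
                                  const unsigned char ec[3])
{
        return e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
               ec[0] == 0x73 && ec[1] == 0x01;
}

int main(void)
{
        unsigned char e7[3] = { 0x73, 0x03, 0x50 };
        unsigned char ec[3] = { 0x73, 0x01, 0x00 };

        printf("dolphin v1: %s\n", looks_like_dolphin_v1(e7, ec) ? "yes" : "no");
        return 0;
}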
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 970480551b6e..eee59853b9ce 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -16,6 +16,7 @@
16#define ALPS_PROTO_V2 2 16#define ALPS_PROTO_V2 2
17#define ALPS_PROTO_V3 3 17#define ALPS_PROTO_V3 3
18#define ALPS_PROTO_V4 4 18#define ALPS_PROTO_V4 4
19#define ALPS_PROTO_V5 5
19 20
20/** 21/**
21 * struct alps_model_info - touchpad ID table 22 * struct alps_model_info - touchpad ID table
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 1673dc6c8092..f51765fff054 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -236,6 +236,13 @@ static int cypress_read_fw_version(struct psmouse *psmouse)
236 cytp->fw_version = param[2] & FW_VERSION_MASX; 236 cytp->fw_version = param[2] & FW_VERSION_MASX;
237 cytp->tp_metrics_supported = (param[2] & TP_METRICS_MASK) ? 1 : 0; 237 cytp->tp_metrics_supported = (param[2] & TP_METRICS_MASK) ? 1 : 0;
238 238
239 /*
240 * Trackpad fw_version 11 (in Dell XPS12) yields a bogus response to
241 * CYTP_CMD_READ_TP_METRICS so do not try to use it. LP: #1103594.
242 */
243 if (cytp->fw_version >= 11)
244 cytp->tp_metrics_supported = 0;
245
239 psmouse_dbg(psmouse, "cytp->fw_version = %d\n", cytp->fw_version); 246 psmouse_dbg(psmouse, "cytp->fw_version = %d\n", cytp->fw_version);
240 psmouse_dbg(psmouse, "cytp->tp_metrics_supported = %d\n", 247 psmouse_dbg(psmouse, "cytp->tp_metrics_supported = %d\n",
241 cytp->tp_metrics_supported); 248 cytp->tp_metrics_supported);
@@ -258,6 +265,9 @@ static int cypress_read_tp_metrics(struct psmouse *psmouse)
258 cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width; 265 cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width;
259 cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high; 266 cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high;
260 267
268 if (!cytp->tp_metrics_supported)
269 return 0;
270
261 memset(param, 0, sizeof(param)); 271 memset(param, 0, sizeof(param));
262 if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_TP_METRICS, param) == 0) { 272 if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_TP_METRICS, param) == 0) {
263 /* Update trackpad parameters. */ 273 /* Update trackpad parameters. */
@@ -315,18 +325,15 @@ static int cypress_read_tp_metrics(struct psmouse *psmouse)
315 325
316static int cypress_query_hardware(struct psmouse *psmouse) 326static int cypress_query_hardware(struct psmouse *psmouse)
317{ 327{
318 struct cytp_data *cytp = psmouse->private;
319 int ret; 328 int ret;
320 329
321 ret = cypress_read_fw_version(psmouse); 330 ret = cypress_read_fw_version(psmouse);
322 if (ret) 331 if (ret)
323 return ret; 332 return ret;
324 333
325 if (cytp->tp_metrics_supported) { 334 ret = cypress_read_tp_metrics(psmouse);
326 ret = cypress_read_tp_metrics(psmouse); 335 if (ret)
327 if (ret) 336 return ret;
328 return ret;
329 }
330 337
331 return 0; 338 return 0;
332} 339}
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 41b6fbf60112..1daa97913b7d 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -2017,6 +2017,9 @@ static const struct wacom_features wacom_features_0x100 =
2017static const struct wacom_features wacom_features_0x101 = 2017static const struct wacom_features wacom_features_0x101 =
2018 { "Wacom ISDv4 101", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 2018 { "Wacom ISDv4 101", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
2019 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2019 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2020static const struct wacom_features wacom_features_0x10D =
2021 { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
2022 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2020static const struct wacom_features wacom_features_0x4001 = 2023static const struct wacom_features wacom_features_0x4001 =
2021 { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 2024 { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
2022 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2025 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2201,6 +2204,7 @@ const struct usb_device_id wacom_ids[] = {
2201 { USB_DEVICE_WACOM(0xEF) }, 2204 { USB_DEVICE_WACOM(0xEF) },
2202 { USB_DEVICE_WACOM(0x100) }, 2205 { USB_DEVICE_WACOM(0x100) },
2203 { USB_DEVICE_WACOM(0x101) }, 2206 { USB_DEVICE_WACOM(0x101) },
2207 { USB_DEVICE_WACOM(0x10D) },
2204 { USB_DEVICE_WACOM(0x4001) }, 2208 { USB_DEVICE_WACOM(0x4001) },
2205 { USB_DEVICE_WACOM(0x47) }, 2209 { USB_DEVICE_WACOM(0x47) },
2206 { USB_DEVICE_WACOM(0xF4) }, 2210 { USB_DEVICE_WACOM(0xF4) },
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 4f702b3ec1a3..434c3df250ca 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -236,7 +236,12 @@ static void __ads7846_disable(struct ads7846 *ts)
236/* Must be called with ts->lock held */ 236/* Must be called with ts->lock held */
237static void __ads7846_enable(struct ads7846 *ts) 237static void __ads7846_enable(struct ads7846 *ts)
238{ 238{
239 regulator_enable(ts->reg); 239 int error;
240
241 error = regulator_enable(ts->reg);
242 if (error != 0)
243 dev_err(&ts->spi->dev, "Failed to enable supply: %d\n", error);
244
240 ads7846_restart(ts); 245 ads7846_restart(ts);
241} 246}
242 247
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 4a29ddf6bf1e..1443532fe6c4 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -314,15 +314,27 @@ static int mms114_start(struct mms114_data *data)
314 struct i2c_client *client = data->client; 314 struct i2c_client *client = data->client;
315 int error; 315 int error;
316 316
317 if (data->core_reg) 317 error = regulator_enable(data->core_reg);
318 regulator_enable(data->core_reg); 318 if (error) {
319 if (data->io_reg) 319 dev_err(&client->dev, "Failed to enable avdd: %d\n", error);
320 regulator_enable(data->io_reg); 320 return error;
321 }
322
323 error = regulator_enable(data->io_reg);
324 if (error) {
325 dev_err(&client->dev, "Failed to enable vdd: %d\n", error);
326 regulator_disable(data->core_reg);
327 return error;
328 }
329
321 mdelay(MMS114_POWERON_DELAY); 330 mdelay(MMS114_POWERON_DELAY);
322 331
323 error = mms114_setup_regs(data); 332 error = mms114_setup_regs(data);
324 if (error < 0) 333 if (error < 0) {
334 regulator_disable(data->io_reg);
335 regulator_disable(data->core_reg);
325 return error; 336 return error;
337 }
326 338
327 if (data->pdata->cfg_pin) 339 if (data->pdata->cfg_pin)
328 data->pdata->cfg_pin(true); 340 data->pdata->cfg_pin(true);
@@ -335,16 +347,20 @@ static int mms114_start(struct mms114_data *data)
335static void mms114_stop(struct mms114_data *data) 347static void mms114_stop(struct mms114_data *data)
336{ 348{
337 struct i2c_client *client = data->client; 349 struct i2c_client *client = data->client;
350 int error;
338 351
339 disable_irq(client->irq); 352 disable_irq(client->irq);
340 353
341 if (data->pdata->cfg_pin) 354 if (data->pdata->cfg_pin)
342 data->pdata->cfg_pin(false); 355 data->pdata->cfg_pin(false);
343 356
344 if (data->io_reg) 357 error = regulator_disable(data->io_reg);
345 regulator_disable(data->io_reg); 358 if (error)
346 if (data->core_reg) 359 dev_warn(&client->dev, "Failed to disable vdd: %d\n", error);
347 regulator_disable(data->core_reg); 360
361 error = regulator_disable(data->core_reg);
362 if (error)
363 dev_warn(&client->dev, "Failed to disable avdd: %d\n", error);
348} 364}
349 365
350static int mms114_input_open(struct input_dev *dev) 366static int mms114_input_open(struct input_dev *dev)
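The mms114 hunks above switch from best-effort regulator_enable() calls to checked ones: the core supply is disabled again if the I/O supply fails to come up, and the stop path disables in reverse order. A minimal userspace sketch of the same bring-up/rollback ordering, with stand-in enable/disable functions in place of the regulator API:

#include <stdio.h>

/* Stand-ins for regulator_enable()/regulator_disable(); 0 means success. */
static int enable_core(void)   { printf("core on\n");  return 0; }
static int enable_io(void)     { printf("io on\n");    return 0; }
static void disable_core(void) { printf("core off\n"); }
static void disable_io(void)   { printf("io off\n"); }

static int power_up(void)
{
        int error;

        error = enable_core();
        if (error)
                return error;           /* nothing to undo yet */

        error = enable_io();
        if (error) {
                disable_core();         /* roll back the first supply */
                return error;
        }
        return 0;
}

static void power_down(void)
{
        /* reverse order of power_up() */
        disable_io();
        disable_core();
}

int main(void)
{
        if (power_up() == 0)
                power_down();
        return 0;
}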
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index d8a7d8323414..ebaebdf30f98 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -902,7 +902,9 @@ isdn_tty_send_msg(modem_info *info, atemu *m, char *msg)
902 int j; 902 int j;
903 int l; 903 int l;
904 904
905 l = strlen(msg); 905 l = min(strlen(msg), sizeof(cmd.parm) - sizeof(cmd.parm.cmsg)
906 + sizeof(cmd.parm.cmsg.para) - 2);
907
906 if (!l) { 908 if (!l) {
907 isdn_tty_modem_result(RESULT_ERROR, info); 909 isdn_tty_modem_result(RESULT_ERROR, info);
908 return; 910 return;
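The isdn_tty change bounds the command length against the space actually available in the cmsg parameter area rather than trusting strlen() alone. A small sketch of the same clamp, using a stand-in buffer budget since the isdn_ctrl layout isn't reproduced here:

#include <stdio.h>
#include <string.h>

#define PARA_SPACE 16   /* stand-in for the cmsg parameter budget */

static size_t clamp_len(const char *msg, size_t budget)
{
        size_t l = strlen(msg);

        /* equivalent of l = min(strlen(msg), budget) in the hunk above */
        return l < budget ? l : budget;
}

int main(void)
{
        const char *msg = "a message that is clearly longer than the budget";

        printf("copy %zu of %zu bytes\n",
               clamp_len(msg, PARA_SPACE), strlen(msg));
        return 0;
}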
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 7bd068a6056a..8b4e96e01d6c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1964,7 +1964,6 @@ static int __bond_release_one(struct net_device *bond_dev,
1964 } 1964 }
1965 1965
1966 block_netpoll_tx(); 1966 block_netpoll_tx();
1967 call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
1968 write_lock_bh(&bond->lock); 1967 write_lock_bh(&bond->lock);
1969 1968
1970 slave = bond_get_slave_by_dev(bond, slave_dev); 1969 slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -2066,8 +2065,10 @@ static int __bond_release_one(struct net_device *bond_dev,
2066 write_unlock_bh(&bond->lock); 2065 write_unlock_bh(&bond->lock);
2067 unblock_netpoll_tx(); 2066 unblock_netpoll_tx();
2068 2067
2069 if (bond->slave_cnt == 0) 2068 if (bond->slave_cnt == 0) {
2070 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); 2069 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2070 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
2071 }
2071 2072
2072 bond_compute_features(bond); 2073 bond_compute_features(bond);
2073 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && 2074 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 31c5787970db..77ebae0ac64a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -8647,7 +8647,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8647 MDIO_WC_DEVAD, 8647 MDIO_WC_DEVAD,
8648 MDIO_WC_REG_DIGITAL5_MISC6, 8648 MDIO_WC_REG_DIGITAL5_MISC6,
8649 &rx_tx_in_reset); 8649 &rx_tx_in_reset);
8650 if (!rx_tx_in_reset) { 8650 if ((!rx_tx_in_reset) &&
8651 (params->link_flags &
8652 PHY_INITIALIZED)) {
8651 bnx2x_warpcore_reset_lane(bp, phy, 1); 8653 bnx2x_warpcore_reset_lane(bp, phy, 1);
8652 bnx2x_warpcore_config_sfi(phy, params); 8654 bnx2x_warpcore_config_sfi(phy, params);
8653 bnx2x_warpcore_reset_lane(bp, phy, 0); 8655 bnx2x_warpcore_reset_lane(bp, phy, 0);
@@ -12527,6 +12529,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12527 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 12529 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
12528 vars->mac_type = MAC_TYPE_NONE; 12530 vars->mac_type = MAC_TYPE_NONE;
12529 vars->phy_flags = 0; 12531 vars->phy_flags = 0;
12532 vars->check_kr2_recovery_cnt = 0;
12533 params->link_flags = PHY_INITIALIZED;
12530 /* Driver opens NIG-BRB filters */ 12534 /* Driver opens NIG-BRB filters */
12531 bnx2x_set_rx_filter(params, 1); 12535 bnx2x_set_rx_filter(params, 1);
12532 /* Check if link flap can be avoided */ 12536 /* Check if link flap can be avoided */
@@ -12691,6 +12695,7 @@ int bnx2x_lfa_reset(struct link_params *params,
12691 struct bnx2x *bp = params->bp; 12695 struct bnx2x *bp = params->bp;
12692 vars->link_up = 0; 12696 vars->link_up = 0;
12693 vars->phy_flags = 0; 12697 vars->phy_flags = 0;
12698 params->link_flags &= ~PHY_INITIALIZED;
12694 if (!params->lfa_base) 12699 if (!params->lfa_base)
12695 return bnx2x_link_reset(params, vars, 1); 12700 return bnx2x_link_reset(params, vars, 1);
12696 /* 12701 /*
@@ -13411,6 +13416,7 @@ static void bnx2x_disable_kr2(struct link_params *params,
13411 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; 13416 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
13412 bnx2x_update_link_attr(params, vars->link_attr_sync); 13417 bnx2x_update_link_attr(params, vars->link_attr_sync);
13413 13418
13419 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
13414 /* Restart AN on leading lane */ 13420 /* Restart AN on leading lane */
13415 bnx2x_warpcore_restart_AN_KR(phy, params); 13421 bnx2x_warpcore_restart_AN_KR(phy, params);
13416} 13422}
@@ -13439,6 +13445,15 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13439 return; 13445 return;
13440 } 13446 }
13441 13447
13448 /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
13449 * since some switches tend to reinit the AN process and clear the
13450 * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
13451 * and recovered many times
13452 */
13453 if (vars->check_kr2_recovery_cnt > 0) {
13454 vars->check_kr2_recovery_cnt--;
13455 return;
13456 }
13442 lane = bnx2x_get_warpcore_lane(phy, params); 13457 lane = bnx2x_get_warpcore_lane(phy, params);
13443 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 13458 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
13444 MDIO_AER_BLOCK_AER_REG, lane); 13459 MDIO_AER_BLOCK_AER_REG, lane);
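The KR2 change above adds a countdown (CHECK_KR2_RECOVERY_CNT, re-armed whenever KR2 is disabled) so the periodic worker skips the recovery check for a few ticks instead of re-enabling immediately. Stripped of the MDIO accesses, the hold-off has roughly this shape; the struct and function names below are illustrative only:

#include <stdio.h>

#define CHECK_KR2_RECOVERY_CNT 5   /* ticks to wait after disabling */

struct link_state {
        int kr2_enabled;
        int check_kr2_recovery_cnt;
};

static void disable_kr2(struct link_state *s)
{
        s->kr2_enabled = 0;
        /* arm the hold-off so the periodic check backs off for a while */
        s->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
}

static void periodic_kr2_check(struct link_state *s)
{
        if (s->check_kr2_recovery_cnt > 0) {
                s->check_kr2_recovery_cnt--;
                return;               /* still holding off */
        }
        if (!s->kr2_enabled) {
                s->kr2_enabled = 1;   /* recovery would happen here */
                printf("KR2 re-enabled\n");
        }
}

int main(void)
{
        struct link_state s = { 1, 0 };
        int tick;

        disable_kr2(&s);
        for (tick = 0; tick < 8; tick++)
                periodic_kr2_check(&s);
        return 0;
}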
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index be5c195d03dd..56c2aae4e2c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -309,6 +309,7 @@ struct link_params {
309 req_flow_ctrl is set to AUTO */ 309 req_flow_ctrl is set to AUTO */
310 u16 link_flags; 310 u16 link_flags;
311#define LINK_FLAGS_INT_DISABLED (1<<0) 311#define LINK_FLAGS_INT_DISABLED (1<<0)
312#define PHY_INITIALIZED (1<<1)
312 u32 lfa_base; 313 u32 lfa_base;
313}; 314};
314 315
@@ -342,7 +343,8 @@ struct link_vars {
342 u32 link_status; 343 u32 link_status;
343 u32 eee_status; 344 u32 eee_status;
344 u8 fault_detected; 345 u8 fault_detected;
345 u8 rsrv1; 346 u8 check_kr2_recovery_cnt;
347#define CHECK_KR2_RECOVERY_CNT 5
346 u16 periodic_flags; 348 u16 periodic_flags;
347#define PERIODIC_FLAGS_LINK_EVENT 0x0001 349#define PERIODIC_FLAGS_LINK_EVENT 0x0001
348 350
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index fdb9b5655414..93729f942358 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1869,6 +1869,8 @@ static void tg3_link_report(struct tg3 *tp)
1869 1869
1870 tg3_ump_link_report(tp); 1870 tg3_ump_link_report(tp);
1871 } 1871 }
1872
1873 tp->link_up = netif_carrier_ok(tp->dev);
1872} 1874}
1873 1875
1874static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1876static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
@@ -2522,12 +2524,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2522 return err; 2524 return err;
2523} 2525}
2524 2526
2525static void tg3_carrier_on(struct tg3 *tp)
2526{
2527 netif_carrier_on(tp->dev);
2528 tp->link_up = true;
2529}
2530
2531static void tg3_carrier_off(struct tg3 *tp) 2527static void tg3_carrier_off(struct tg3 *tp)
2532{ 2528{
2533 netif_carrier_off(tp->dev); 2529 netif_carrier_off(tp->dev);
@@ -2553,7 +2549,7 @@ static int tg3_phy_reset(struct tg3 *tp)
2553 return -EBUSY; 2549 return -EBUSY;
2554 2550
2555 if (netif_running(tp->dev) && tp->link_up) { 2551 if (netif_running(tp->dev) && tp->link_up) {
2556 tg3_carrier_off(tp); 2552 netif_carrier_off(tp->dev);
2557 tg3_link_report(tp); 2553 tg3_link_report(tp);
2558 } 2554 }
2559 2555
@@ -4262,9 +4258,9 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4262{ 4258{
4263 if (curr_link_up != tp->link_up) { 4259 if (curr_link_up != tp->link_up) {
4264 if (curr_link_up) { 4260 if (curr_link_up) {
4265 tg3_carrier_on(tp); 4261 netif_carrier_on(tp->dev);
4266 } else { 4262 } else {
4267 tg3_carrier_off(tp); 4263 netif_carrier_off(tp->dev);
4268 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4264 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4269 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 4265 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4270 } 4266 }
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 28ceb8414185..29aff55f2eea 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -349,6 +349,7 @@ struct be_adapter {
349 struct pci_dev *pdev; 349 struct pci_dev *pdev;
350 struct net_device *netdev; 350 struct net_device *netdev;
351 351
352 u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
352 u8 __iomem *db; /* Door Bell */ 353 u8 __iomem *db; /* Door Bell */
353 354
354 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ 355 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 071aea79d218..3c9b4f12e3e5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
473 return 0; 473 return 0;
474} 474}
475 475
476static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) 476static u16 be_POST_stage_get(struct be_adapter *adapter)
477{ 477{
478 u32 sem; 478 u32 sem;
479 u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
480 SLIPORT_SEMAPHORE_OFFSET_BE;
481 479
482 pci_read_config_dword(adapter->pdev, reg, &sem); 480 if (BEx_chip(adapter))
483 *stage = sem & POST_STAGE_MASK; 481 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
484
485 if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
486 return -1;
487 else 482 else
488 return 0; 483 pci_read_config_dword(adapter->pdev,
484 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
485
486 return sem & POST_STAGE_MASK;
489} 487}
490 488
491int lancer_wait_ready(struct be_adapter *adapter) 489int lancer_wait_ready(struct be_adapter *adapter)
@@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter)
579 } 577 }
580 578
581 do { 579 do {
582 status = be_POST_stage_get(adapter, &stage); 580 stage = be_POST_stage_get(adapter);
583 if (status) { 581 if (stage == POST_STAGE_ARMFW_RDY)
584 dev_err(dev, "POST error; stage=0x%x\n", stage);
585 return -1;
586 } else if (stage != POST_STAGE_ARMFW_RDY) {
587 if (msleep_interruptible(2000)) {
588 dev_err(dev, "Waiting for POST aborted\n");
589 return -EINTR;
590 }
591 timeout += 2;
592 } else {
593 return 0; 582 return 0;
583
584 dev_info(dev, "Waiting for POST, %ds elapsed\n",
585 timeout);
586 if (msleep_interruptible(2000)) {
587 dev_err(dev, "Waiting for POST aborted\n");
588 return -EINTR;
594 } 589 }
590 timeout += 2;
595 } while (timeout < 60); 591 } while (timeout < 60);
596 592
597 dev_err(dev, "POST timeout; stage=0x%x\n", stage); 593 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
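After the rework above, be_fw_wait_ready() simply polls be_POST_stage_get() until POST_STAGE_ARMFW_RDY or a 60-second timeout, sleeping 2s between reads. A generic version of that poll-with-timeout loop, with a stand-in read_stage() instead of the semaphore register access and no real sleep:

#include <stdio.h>

#define STAGE_ARMFW_RDY 0xc000       /* stand-in "firmware ready" value */

/* Stand-in for be_POST_stage_get(): pretend FW becomes ready on read 3. */
static int read_stage(void)
{
        static int reads;
        return ++reads >= 3 ? STAGE_ARMFW_RDY : 0;
}

static int wait_fw_ready(void)
{
        int timeout = 0, stage = 0;

        do {
                stage = read_stage();
                if (stage == STAGE_ARMFW_RDY)
                        return 0;

                printf("Waiting for POST, %ds elapsed\n", timeout);
                /* the driver sleeps here via msleep_interruptible(2000) */
                timeout += 2;
        } while (timeout < 60);

        fprintf(stderr, "POST timeout; stage=0x%x\n", stage);
        return -1;
}

int main(void)
{
        return wait_fw_ready() ? 1 : 0;
}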
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 541d4530d5bf..62dc220695f7 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -32,8 +32,8 @@
32#define MPU_EP_CONTROL 0 32#define MPU_EP_CONTROL 0
33 33
34/********** MPU semphore: used for SH & BE *************/ 34/********** MPU semphore: used for SH & BE *************/
35#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c 35#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
36#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 36#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
37#define POST_STAGE_MASK 0x0000FFFF 37#define POST_STAGE_MASK 0x0000FFFF
38#define POST_ERR_MASK 0x1 38#define POST_ERR_MASK 0x1
39#define POST_ERR_SHIFT 31 39#define POST_ERR_SHIFT 31
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3860888ac711..08e54f3d288b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev)
3688 3688
3689static void be_unmap_pci_bars(struct be_adapter *adapter) 3689static void be_unmap_pci_bars(struct be_adapter *adapter)
3690{ 3690{
3691 if (adapter->csr)
3692 pci_iounmap(adapter->pdev, adapter->csr);
3691 if (adapter->db) 3693 if (adapter->db)
3692 pci_iounmap(adapter->pdev, adapter->db); 3694 pci_iounmap(adapter->pdev, adapter->db);
3693} 3695}
@@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
3721 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> 3723 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3722 SLI_INTF_IF_TYPE_SHIFT; 3724 SLI_INTF_IF_TYPE_SHIFT;
3723 3725
3726 if (BEx_chip(adapter) && be_physfn(adapter)) {
3727 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3728 if (adapter->csr == NULL)
3729 return -ENOMEM;
3730 }
3731
3724 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); 3732 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3725 if (addr == NULL) 3733 if (addr == NULL)
3726 goto pci_map_err; 3734 goto pci_map_err;
@@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4329 pci_restore_state(pdev); 4337 pci_restore_state(pdev);
4330 4338
4331 /* Check if card is ok and fw is ready */ 4339 /* Check if card is ok and fw is ready */
4340 dev_info(&adapter->pdev->dev,
4341 "Waiting for FW to be ready after EEH reset\n");
4332 status = be_fw_wait_ready(adapter); 4342 status = be_fw_wait_ready(adapter);
4333 if (status) 4343 if (status)
4334 return PCI_ERS_RESULT_DISCONNECT; 4344 return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 2c1813737f6d..f91a8f3f9d48 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -36,6 +36,7 @@
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/vmalloc.h> 37#include <linux/vmalloc.h>
38#include <linux/mdio.h> 38#include <linux/mdio.h>
39#include <linux/pm_runtime.h>
39 40
40#include "e1000.h" 41#include "e1000.h"
41 42
@@ -2229,7 +2230,19 @@ static int e1000e_get_ts_info(struct net_device *netdev,
2229 return 0; 2230 return 0;
2230} 2231}
2231 2232
2233static int e1000e_ethtool_begin(struct net_device *netdev)
2234{
2235 return pm_runtime_get_sync(netdev->dev.parent);
2236}
2237
2238static void e1000e_ethtool_complete(struct net_device *netdev)
2239{
2240 pm_runtime_put_sync(netdev->dev.parent);
2241}
2242
2232static const struct ethtool_ops e1000_ethtool_ops = { 2243static const struct ethtool_ops e1000_ethtool_ops = {
2244 .begin = e1000e_ethtool_begin,
2245 .complete = e1000e_ethtool_complete,
2233 .get_settings = e1000_get_settings, 2246 .get_settings = e1000_get_settings,
2234 .set_settings = e1000_set_settings, 2247 .set_settings = e1000_set_settings,
2235 .get_drvinfo = e1000_get_drvinfo, 2248 .get_drvinfo = e1000_get_drvinfo,
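The new .begin/.complete hooks bracket every ethtool operation with a runtime-PM get/put so the device is resumed while its registers are touched. As a standalone illustration of that bracketing pattern only — a toy use count stands in for pm_runtime_get_sync()/pm_runtime_put_sync(), nothing here is e1000e code:

#include <stdio.h>

struct toy_dev {
        int usecount;   /* >0 means "resumed" in this toy model */
};

static int op_begin(struct toy_dev *d)
{
        if (d->usecount++ == 0)
                printf("resume device\n");
        return 0;       /* ethtool treats a negative return as failure */
}

static void op_complete(struct toy_dev *d)
{
        if (--d->usecount == 0)
                printf("suspend device\n");
}

static void run_op(struct toy_dev *d, const char *name)
{
        if (op_begin(d) < 0)
                return;
        printf("run %s with device awake\n", name);
        op_complete(d);
}

int main(void)
{
        struct toy_dev d = { 0 };

        run_op(&d, "get_settings");
        run_op(&d, "get_drvinfo");
        return 0;
}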
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index dff7bff8b8e0..121a865c7fbd 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -782,6 +782,59 @@ release:
782} 782}
783 783
784/** 784/**
785 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
786 * @hw: pointer to the HW structure
787 * @link: link up bool flag
788 *
789 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
790 * preventing further DMA write requests. Workaround the issue by disabling
791 * the de-assertion of the clock request when in 1Gpbs mode.
792 **/
793static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
794{
795 u32 fextnvm6 = er32(FEXTNVM6);
796 s32 ret_val = 0;
797
798 if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
799 u16 kmrn_reg;
800
801 ret_val = hw->phy.ops.acquire(hw);
802 if (ret_val)
803 return ret_val;
804
805 ret_val =
806 e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
807 &kmrn_reg);
808 if (ret_val)
809 goto release;
810
811 ret_val =
812 e1000e_write_kmrn_reg_locked(hw,
813 E1000_KMRNCTRLSTA_K1_CONFIG,
814 kmrn_reg &
815 ~E1000_KMRNCTRLSTA_K1_ENABLE);
816 if (ret_val)
817 goto release;
818
819 usleep_range(10, 20);
820
821 ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
822
823 ret_val =
824 e1000e_write_kmrn_reg_locked(hw,
825 E1000_KMRNCTRLSTA_K1_CONFIG,
826 kmrn_reg);
827release:
828 hw->phy.ops.release(hw);
829 } else {
830 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
831 ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
832 }
833
834 return ret_val;
835}
836
837/**
785 * e1000_check_for_copper_link_ich8lan - Check for link (Copper) 838 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
786 * @hw: pointer to the HW structure 839 * @hw: pointer to the HW structure
787 * 840 *
@@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
818 return ret_val; 871 return ret_val;
819 } 872 }
820 873
874 /* Work-around I218 hang issue */
875 if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
876 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
877 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
878 if (ret_val)
879 return ret_val;
880 }
881
821 /* Clear link partner's EEE ability */ 882 /* Clear link partner's EEE ability */
822 hw->dev_spec.ich8lan.eee_lp_ability = 0; 883 hw->dev_spec.ich8lan.eee_lp_ability = 0;
823 884
@@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3954 4015
3955 phy_ctrl = er32(PHY_CTRL); 4016 phy_ctrl = er32(PHY_CTRL);
3956 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; 4017 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4018
3957 if (hw->phy.type == e1000_phy_i217) { 4019 if (hw->phy.type == e1000_phy_i217) {
3958 u16 phy_reg; 4020 u16 phy_reg, device_id = hw->adapter->pdev->device;
4021
4022 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4023 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
4024 u32 fextnvm6 = er32(FEXTNVM6);
4025
4026 ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4027 }
3959 4028
3960 ret_val = hw->phy.ops.acquire(hw); 4029 ret_val = hw->phy.ops.acquire(hw);
3961 if (ret_val) 4030 if (ret_val)
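The I218 K1 workaround above boils down to a read–modify–write of FEXTNVM6: set E1000_FEXTNVM6_REQ_PLL_CLK while the link is up at 1 Gb/s, clear it on link down, at 10/100, and on the suspend path. The register update itself is the usual set/clear-bit pattern; a standalone sketch on a plain variable (the KMRN register dance is omitted):

#include <stdio.h>
#include <stdint.h>

#define FEXTNVM6_REQ_PLL_CLK 0x00000100u   /* bit 8, as in ich8lan.h */

static uint32_t update_fextnvm6(uint32_t fextnvm6, int link_up, int speed_mbps)
{
        if (link_up && speed_mbps == 1000)
                return fextnvm6 | FEXTNVM6_REQ_PLL_CLK;   /* keep PLL clock requested */
        return fextnvm6 & ~FEXTNVM6_REQ_PLL_CLK;          /* link down or 10/100 */
}

int main(void)
{
        uint32_t reg = 0;

        reg = update_fextnvm6(reg, 1, 1000);
        printf("1G link up: 0x%08x\n", (unsigned)reg);
        reg = update_fextnvm6(reg, 0, 0);
        printf("link down:  0x%08x\n", (unsigned)reg);
        return 0;
}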
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index b6d3174d7d2d..8bf4655c2e17 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -92,6 +92,8 @@
92#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 92#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
93#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 93#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
94 94
95#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
96
95#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 97#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
96 98
97#define E1000_ICH_RAR_ENTRIES 7 99#define E1000_ICH_RAR_ENTRIES 7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a177b8b65c44..948b86ffa4f0 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4303,6 +4303,7 @@ static int e1000_open(struct net_device *netdev)
4303 netif_start_queue(netdev); 4303 netif_start_queue(netdev);
4304 4304
4305 adapter->idle_check = true; 4305 adapter->idle_check = true;
4306 hw->mac.get_link_status = true;
4306 pm_runtime_put(&pdev->dev); 4307 pm_runtime_put(&pdev->dev);
4307 4308
4308 /* fire a link status change interrupt to start the watchdog */ 4309 /* fire a link status change interrupt to start the watchdog */
@@ -4662,6 +4663,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
4662 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 4663 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4663 int ret_val; 4664 int ret_val;
4664 4665
4666 pm_runtime_get_sync(&adapter->pdev->dev);
4665 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); 4667 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
4666 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); 4668 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
4667 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); 4669 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4672,6 +4674,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
4672 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); 4674 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
4673 if (ret_val) 4675 if (ret_val)
4674 e_warn("Error reading PHY register\n"); 4676 e_warn("Error reading PHY register\n");
4677 pm_runtime_put_sync(&adapter->pdev->dev);
4675 } else { 4678 } else {
4676 /* Do not read PHY registers if link is not up 4679 /* Do not read PHY registers if link is not up
4677 * Set values to typical power-on defaults 4680 * Set values to typical power-on defaults
@@ -5887,8 +5890,7 @@ release:
5887 return retval; 5890 return retval;
5888} 5891}
5889 5892
5890static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, 5893static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5891 bool runtime)
5892{ 5894{
5893 struct net_device *netdev = pci_get_drvdata(pdev); 5895 struct net_device *netdev = pci_get_drvdata(pdev);
5894 struct e1000_adapter *adapter = netdev_priv(netdev); 5896 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5912,10 +5914,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5912 } 5914 }
5913 e1000e_reset_interrupt_capability(adapter); 5915 e1000e_reset_interrupt_capability(adapter);
5914 5916
5915 retval = pci_save_state(pdev);
5916 if (retval)
5917 return retval;
5918
5919 status = er32(STATUS); 5917 status = er32(STATUS);
5920 if (status & E1000_STATUS_LU) 5918 if (status & E1000_STATUS_LU)
5921 wufc &= ~E1000_WUFC_LNKC; 5919 wufc &= ~E1000_WUFC_LNKC;
@@ -5971,13 +5969,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5971 ew32(WUFC, 0); 5969 ew32(WUFC, 0);
5972 } 5970 }
5973 5971
5974 *enable_wake = !!wufc;
5975
5976 /* make sure adapter isn't asleep if manageability is enabled */
5977 if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
5978 (hw->mac.ops.check_mng_mode(hw)))
5979 *enable_wake = true;
5980
5981 if (adapter->hw.phy.type == e1000_phy_igp_3) 5972 if (adapter->hw.phy.type == e1000_phy_igp_3)
5982 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 5973 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5983 5974
@@ -5986,27 +5977,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5986 */ 5977 */
5987 e1000e_release_hw_control(adapter); 5978 e1000e_release_hw_control(adapter);
5988 5979
5989 pci_disable_device(pdev); 5980 pci_clear_master(pdev);
5990
5991 return 0;
5992}
5993
5994static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
5995{
5996 if (sleep && wake) {
5997 pci_prepare_to_sleep(pdev);
5998 return;
5999 }
6000
6001 pci_wake_from_d3(pdev, wake);
6002 pci_set_power_state(pdev, PCI_D3hot);
6003}
6004
6005static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
6006 bool wake)
6007{
6008 struct net_device *netdev = pci_get_drvdata(pdev);
6009 struct e1000_adapter *adapter = netdev_priv(netdev);
6010 5981
6011 /* The pci-e switch on some quad port adapters will report a 5982 /* The pci-e switch on some quad port adapters will report a
6012 * correctable error when the MAC transitions from D0 to D3. To 5983 * correctable error when the MAC transitions from D0 to D3. To
@@ -6021,12 +5992,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
6021 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, 5992 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
6022 (devctl & ~PCI_EXP_DEVCTL_CERE)); 5993 (devctl & ~PCI_EXP_DEVCTL_CERE));
6023 5994
6024 e1000_power_off(pdev, sleep, wake); 5995 pci_save_state(pdev);
5996 pci_prepare_to_sleep(pdev);
6025 5997
6026 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); 5998 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
6027 } else {
6028 e1000_power_off(pdev, sleep, wake);
6029 } 5999 }
6000
6001 return 0;
6030} 6002}
6031 6003
6032#ifdef CONFIG_PCIEASPM 6004#ifdef CONFIG_PCIEASPM
@@ -6084,9 +6056,7 @@ static int __e1000_resume(struct pci_dev *pdev)
6084 if (aspm_disable_flag) 6056 if (aspm_disable_flag)
6085 e1000e_disable_aspm(pdev, aspm_disable_flag); 6057 e1000e_disable_aspm(pdev, aspm_disable_flag);
6086 6058
6087 pci_set_power_state(pdev, PCI_D0); 6059 pci_set_master(pdev);
6088 pci_restore_state(pdev);
6089 pci_save_state(pdev);
6090 6060
6091 e1000e_set_interrupt_capability(adapter); 6061 e1000e_set_interrupt_capability(adapter);
6092 if (netif_running(netdev)) { 6062 if (netif_running(netdev)) {
@@ -6152,14 +6122,8 @@ static int __e1000_resume(struct pci_dev *pdev)
6152static int e1000_suspend(struct device *dev) 6122static int e1000_suspend(struct device *dev)
6153{ 6123{
6154 struct pci_dev *pdev = to_pci_dev(dev); 6124 struct pci_dev *pdev = to_pci_dev(dev);
6155 int retval;
6156 bool wake;
6157
6158 retval = __e1000_shutdown(pdev, &wake, false);
6159 if (!retval)
6160 e1000_complete_shutdown(pdev, true, wake);
6161 6125
6162 return retval; 6126 return __e1000_shutdown(pdev, false);
6163} 6127}
6164 6128
6165static int e1000_resume(struct device *dev) 6129static int e1000_resume(struct device *dev)
@@ -6182,13 +6146,10 @@ static int e1000_runtime_suspend(struct device *dev)
6182 struct net_device *netdev = pci_get_drvdata(pdev); 6146 struct net_device *netdev = pci_get_drvdata(pdev);
6183 struct e1000_adapter *adapter = netdev_priv(netdev); 6147 struct e1000_adapter *adapter = netdev_priv(netdev);
6184 6148
6185 if (e1000e_pm_ready(adapter)) { 6149 if (!e1000e_pm_ready(adapter))
6186 bool wake; 6150 return 0;
6187
6188 __e1000_shutdown(pdev, &wake, true);
6189 }
6190 6151
6191 return 0; 6152 return __e1000_shutdown(pdev, true);
6192} 6153}
6193 6154
6194static int e1000_idle(struct device *dev) 6155static int e1000_idle(struct device *dev)
@@ -6226,12 +6187,7 @@ static int e1000_runtime_resume(struct device *dev)
6226 6187
6227static void e1000_shutdown(struct pci_dev *pdev) 6188static void e1000_shutdown(struct pci_dev *pdev)
6228{ 6189{
6229 bool wake = false; 6190 __e1000_shutdown(pdev, false);
6230
6231 __e1000_shutdown(pdev, &wake, false);
6232
6233 if (system_state == SYSTEM_POWER_OFF)
6234 e1000_complete_shutdown(pdev, false, wake);
6235} 6191}
6236 6192
6237#ifdef CONFIG_NET_POLL_CONTROLLER 6193#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6352,9 +6308,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
6352 "Cannot re-enable PCI device after reset.\n"); 6308 "Cannot re-enable PCI device after reset.\n");
6353 result = PCI_ERS_RESULT_DISCONNECT; 6309 result = PCI_ERS_RESULT_DISCONNECT;
6354 } else { 6310 } else {
6355 pci_set_master(pdev);
6356 pdev->state_saved = true; 6311 pdev->state_saved = true;
6357 pci_restore_state(pdev); 6312 pci_restore_state(pdev);
6313 pci_set_master(pdev);
6358 6314
6359 pci_enable_wake(pdev, PCI_D3hot, 0); 6315 pci_enable_wake(pdev, PCI_D3hot, 0);
6360 pci_enable_wake(pdev, PCI_D3cold, 0); 6316 pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -6783,7 +6739,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6783 6739
6784 /* initialize the wol settings based on the eeprom settings */ 6740 /* initialize the wol settings based on the eeprom settings */
6785 adapter->wol = adapter->eeprom_wol; 6741 adapter->wol = adapter->eeprom_wol;
6786 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 6742
6743 /* make sure adapter isn't asleep if manageability is enabled */
6744 if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
6745 (hw->mac.ops.check_mng_mode(hw)))
6746 device_wakeup_enable(&pdev->dev);
6787 6747
6788 /* save off EEPROM version number */ 6748 /* save off EEPROM version number */
6789 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); 6749 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 794fe1497666..a7e6a3e37257 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -42,6 +42,7 @@
42#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ 42#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
43#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ 43#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
44#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ 44#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
45#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
45#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ 46#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
46#define E1000_FCT 0x00030 /* Flow Control Type - RW */ 47#define E1000_FCT 0x00030 /* Flow Control Type - RW */
47#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ 48#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 84e7e0909def..b64542acfa34 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1361 switch (hw->phy.type) { 1361 switch (hw->phy.type) {
1362 case e1000_phy_i210: 1362 case e1000_phy_i210:
1363 case e1000_phy_m88: 1363 case e1000_phy_m88:
1364 if (hw->phy.id == I347AT4_E_PHY_ID || 1364 switch (hw->phy.id) {
1365 hw->phy.id == M88E1112_E_PHY_ID) 1365 case I347AT4_E_PHY_ID:
1366 case M88E1112_E_PHY_ID:
1367 case I210_I_PHY_ID:
1366 ret_val = igb_copper_link_setup_m88_gen2(hw); 1368 ret_val = igb_copper_link_setup_m88_gen2(hw);
1367 else 1369 break;
1370 default:
1368 ret_val = igb_copper_link_setup_m88(hw); 1371 ret_val = igb_copper_link_setup_m88(hw);
1372 break;
1373 }
1369 break; 1374 break;
1370 case e1000_phy_igp_3: 1375 case e1000_phy_igp_3:
1371 ret_val = igb_copper_link_setup_igp(hw); 1376 ret_val = igb_copper_link_setup_igp(hw);
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index d27edbc63923..25151401c2ab 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -447,7 +447,7 @@ struct igb_adapter {
447#endif 447#endif
448 struct i2c_algo_bit_data i2c_algo; 448 struct i2c_algo_bit_data i2c_algo;
449 struct i2c_adapter i2c_adap; 449 struct i2c_adapter i2c_adap;
450 struct igb_i2c_client_list *i2c_clients; 450 struct i2c_client *i2c_client;
451}; 451};
452 452
453#define IGB_FLAG_HAS_MSI (1 << 0) 453#define IGB_FLAG_HAS_MSI (1 << 0)
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 0a9b073d0b03..4623502054d5 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -39,6 +39,10 @@
39#include <linux/pci.h> 39#include <linux/pci.h>
40 40
41#ifdef CONFIG_IGB_HWMON 41#ifdef CONFIG_IGB_HWMON
42struct i2c_board_info i350_sensor_info = {
43 I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
44};
45
42/* hwmon callback functions */ 46/* hwmon callback functions */
43static ssize_t igb_hwmon_show_location(struct device *dev, 47static ssize_t igb_hwmon_show_location(struct device *dev,
44 struct device_attribute *attr, 48 struct device_attribute *attr,
@@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
188 unsigned int i; 192 unsigned int i;
189 int n_attrs; 193 int n_attrs;
190 int rc = 0; 194 int rc = 0;
195 struct i2c_client *client = NULL;
191 196
192 /* If this method isn't defined we don't support thermals */ 197 /* If this method isn't defined we don't support thermals */
193 if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) 198 if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter)
198 if (rc) 203 if (rc)
199 goto exit; 204 goto exit;
200 205
206 /* init i2c_client */
207 client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
208 if (client == NULL) {
209 dev_info(&adapter->pdev->dev,
210 "Failed to create new i2c device..\n");
211 goto exit;
212 }
213 adapter->i2c_client = client;
214
201 /* Allocation space for max attributes 215 /* Allocation space for max attributes
202 * max num sensors * values (loc, temp, max, caution) 216 * max num sensors * values (loc, temp, max, caution)
203 */ 217 */
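The board info above passes (0xf8 >> 1) because the i2c core expects a 7-bit client address, while 0xf8 is the 8-bit on-the-wire form (7-bit address shifted left, R/W bit in bit 0). A two-line demonstration of the conversion:

#include <stdio.h>

int main(void)
{
        unsigned int bus_addr = 0xf8;        /* 8-bit form seen on the wire */
        unsigned int addr7 = bus_addr >> 1;  /* what the i2c core wants */

        printf("8-bit 0x%02x -> 7-bit 0x%02x\n", bus_addr, addr7);
        return 0;
}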
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ed79a1c53b59..4dbd62968c7a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
1923 return; 1923 return;
1924} 1924}
1925 1925
1926static const struct i2c_board_info i350_sensor_info = {
1927 I2C_BOARD_INFO("i350bb", 0Xf8),
1928};
1929
1930/* igb_init_i2c - Init I2C interface 1926/* igb_init_i2c - Init I2C interface
1931 * @adapter: pointer to adapter structure 1927 * @adapter: pointer to adapter structure
1932 * 1928 *
@@ -6227,13 +6223,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
6227 /* If we spanned a buffer we have a huge mess so test for it */ 6223 /* If we spanned a buffer we have a huge mess so test for it */
6228 BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); 6224 BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
6229 6225
6230 /* Guarantee this function can be used by verifying buffer sizes */
6231 BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
6232 NET_IP_ALIGN +
6233 IGB_TS_HDR_LEN +
6234 ETH_FRAME_LEN +
6235 ETH_FCS_LEN));
6236
6237 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 6226 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6238 page = rx_buffer->page; 6227 page = rx_buffer->page;
6239 prefetchw(page); 6228 prefetchw(page);
@@ -7724,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7724 } 7713 }
7725} 7714}
7726 7715
7727static DEFINE_SPINLOCK(i2c_clients_lock);
7728
7729/* igb_get_i2c_client - returns matching client
7730 * in adapters's client list.
7731 * @adapter: adapter struct
7732 * @dev_addr: device address of i2c needed.
7733 */
7734static struct i2c_client *
7735igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
7736{
7737 ulong flags;
7738 struct igb_i2c_client_list *client_list;
7739 struct i2c_client *client = NULL;
7740 struct i2c_board_info client_info = {
7741 I2C_BOARD_INFO("igb", 0x00),
7742 };
7743
7744 spin_lock_irqsave(&i2c_clients_lock, flags);
7745 client_list = adapter->i2c_clients;
7746
7747 /* See if we already have an i2c_client */
7748 while (client_list) {
7749 if (client_list->client->addr == (dev_addr >> 1)) {
7750 client = client_list->client;
7751 goto exit;
7752 } else {
7753 client_list = client_list->next;
7754 }
7755 }
7756
7757 /* no client_list found, create a new one */
7758 client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
7759 if (client_list == NULL)
7760 goto exit;
7761
7762 /* dev_addr passed to us is left-shifted by 1 bit
7763 * i2c_new_device call expects it to be flush to the right.
7764 */
7765 client_info.addr = dev_addr >> 1;
7766 client_info.platform_data = adapter;
7767 client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
7768 if (client_list->client == NULL) {
7769 dev_info(&adapter->pdev->dev,
7770 "Failed to create new i2c device..\n");
7771 goto err_no_client;
7772 }
7773
7774 /* insert new client at head of list */
7775 client_list->next = adapter->i2c_clients;
7776 adapter->i2c_clients = client_list;
7777
7778 client = client_list->client;
7779 goto exit;
7780
7781err_no_client:
7782 kfree(client_list);
7783exit:
7784 spin_unlock_irqrestore(&i2c_clients_lock, flags);
7785 return client;
7786}
7787
7788/* igb_read_i2c_byte - Reads 8 bit word over I2C 7716/* igb_read_i2c_byte - Reads 8 bit word over I2C
7789 * @hw: pointer to hardware structure 7717 * @hw: pointer to hardware structure
7790 * @byte_offset: byte offset to read 7718 * @byte_offset: byte offset to read
@@ -7798,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7798 u8 dev_addr, u8 *data) 7726 u8 dev_addr, u8 *data)
7799{ 7727{
7800 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); 7728 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
7801 struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); 7729 struct i2c_client *this_client = adapter->i2c_client;
7802 s32 status; 7730 s32 status;
7803 u16 swfw_mask = 0; 7731 u16 swfw_mask = 0;
7804 7732
@@ -7835,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7835 u8 dev_addr, u8 data) 7763 u8 dev_addr, u8 data)
7836{ 7764{
7837 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); 7765 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
7838 struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); 7766 struct i2c_client *this_client = adapter->i2c_client;
7839 s32 status; 7767 s32 status;
7840 u16 swfw_mask = E1000_SWFW_PHY0_SM; 7768 u16 swfw_mask = E1000_SWFW_PHY0_SM;
7841 7769
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 29140502b71a..6562c736a1d8 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1081,6 +1081,45 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
1081 1081
1082 1082
1083/* mii management interface *************************************************/ 1083/* mii management interface *************************************************/
1084static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
1085{
1086 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
1087 u32 autoneg_disable = FORCE_LINK_PASS |
1088 DISABLE_AUTO_NEG_SPEED_GMII |
1089 DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
1090 DISABLE_AUTO_NEG_FOR_DUPLEX;
1091
1092 if (mp->phy->autoneg == AUTONEG_ENABLE) {
1093 /* enable auto negotiation */
1094 pscr &= ~autoneg_disable;
1095 goto out_write;
1096 }
1097
1098 pscr |= autoneg_disable;
1099
1100 if (mp->phy->speed == SPEED_1000) {
1101 /* force gigabit, half duplex not supported */
1102 pscr |= SET_GMII_SPEED_TO_1000;
1103 pscr |= SET_FULL_DUPLEX_MODE;
1104 goto out_write;
1105 }
1106
1107 pscr &= ~SET_GMII_SPEED_TO_1000;
1108
1109 if (mp->phy->speed == SPEED_100)
1110 pscr |= SET_MII_SPEED_TO_100;
1111 else
1112 pscr &= ~SET_MII_SPEED_TO_100;
1113
1114 if (mp->phy->duplex == DUPLEX_FULL)
1115 pscr |= SET_FULL_DUPLEX_MODE;
1116 else
1117 pscr &= ~SET_FULL_DUPLEX_MODE;
1118
1119out_write:
1120 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
1121}
1122
1084static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) 1123static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
1085{ 1124{
1086 struct mv643xx_eth_shared_private *msp = dev_id; 1125 struct mv643xx_eth_shared_private *msp = dev_id;
@@ -1499,6 +1538,7 @@ static int
1499mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1538mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1500{ 1539{
1501 struct mv643xx_eth_private *mp = netdev_priv(dev); 1540 struct mv643xx_eth_private *mp = netdev_priv(dev);
1541 int ret;
1502 1542
1503 if (mp->phy == NULL) 1543 if (mp->phy == NULL)
1504 return -EINVAL; 1544 return -EINVAL;
@@ -1508,7 +1548,10 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1508 */ 1548 */
1509 cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1549 cmd->advertising &= ~ADVERTISED_1000baseT_Half;
1510 1550
1511 return phy_ethtool_sset(mp->phy, cmd); 1551 ret = phy_ethtool_sset(mp->phy, cmd);
1552 if (!ret)
1553 mv643xx_adjust_pscr(mp);
1554 return ret;
1512} 1555}
1513 1556
1514static void mv643xx_eth_get_drvinfo(struct net_device *dev, 1557static void mv643xx_eth_get_drvinfo(struct net_device *dev,
@@ -2442,11 +2485,15 @@ static int mv643xx_eth_stop(struct net_device *dev)
2442static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2485static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2443{ 2486{
2444 struct mv643xx_eth_private *mp = netdev_priv(dev); 2487 struct mv643xx_eth_private *mp = netdev_priv(dev);
2488 int ret;
2445 2489
2446 if (mp->phy != NULL) 2490 if (mp->phy == NULL)
2447 return phy_mii_ioctl(mp->phy, ifr, cmd); 2491 return -ENOTSUPP;
2448 2492
2449 return -EOPNOTSUPP; 2493 ret = phy_mii_ioctl(mp->phy, ifr, cmd);
2494 if (!ret)
2495 mv643xx_adjust_pscr(mp);
2496 return ret;
2450} 2497}
2451 2498
2452static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) 2499static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
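mv643xx_adjust_pscr() re-applies the negotiated (or forced) link parameters to PORT_SERIAL_CONTROL after phy_ethtool_sset()/phy_mii_ioctl() succeed. The update is pure bit manipulation; a standalone sketch with placeholder bit values — the real masks live in the driver's sources, not here:

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit positions for illustration only. */
#define FORCE_LINK_PASS                 (1u << 1)
#define DISABLE_AUTO_NEG_FOR_DUPLEX     (1u << 2)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL  (1u << 10)
#define DISABLE_AUTO_NEG_SPEED_GMII     (1u << 13)
#define SET_FULL_DUPLEX_MODE            (1u << 21)
#define SET_GMII_SPEED_TO_1000          (1u << 23)
#define SET_MII_SPEED_TO_100            (1u << 24)

static uint32_t adjust_pscr(uint32_t pscr, int autoneg, int speed, int full_duplex)
{
        uint32_t autoneg_disable = FORCE_LINK_PASS |
                                   DISABLE_AUTO_NEG_SPEED_GMII |
                                   DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
                                   DISABLE_AUTO_NEG_FOR_DUPLEX;

        if (autoneg)
                return pscr & ~autoneg_disable;   /* let autoneg decide */

        pscr |= autoneg_disable;

        if (speed == 1000)                        /* gigabit is full duplex only */
                return pscr | SET_GMII_SPEED_TO_1000 | SET_FULL_DUPLEX_MODE;

        pscr &= ~SET_GMII_SPEED_TO_1000;
        if (speed == 100)
                pscr |= SET_MII_SPEED_TO_100;
        else
                pscr &= ~SET_MII_SPEED_TO_100;

        if (full_duplex)
                pscr |= SET_FULL_DUPLEX_MODE;
        else
                pscr &= ~SET_FULL_DUPLEX_MODE;
        return pscr;
}

int main(void)
{
        printf("forced 100/full: 0x%08x\n", (unsigned)adjust_pscr(0, 0, 100, 1));
        printf("autoneg:         0x%08x\n", (unsigned)adjust_pscr(~0u, 1, 0, 0));
        return 0;
}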
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 7e64033d7de3..0706623cfb96 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -226,7 +226,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
226 226
227static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) 227static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
228{ 228{
229 u64 in_param; 229 u64 in_param = 0;
230 int err; 230 int err;
231 231
232 if (mlx4_is_mfunc(dev)) { 232 if (mlx4_is_mfunc(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bb4d8d99f36d..995d4b6d5c1e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -565,34 +565,38 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
565 struct mlx4_en_dev *mdev = priv->mdev; 565 struct mlx4_en_dev *mdev = priv->mdev;
566 struct mlx4_dev *dev = mdev->dev; 566 struct mlx4_dev *dev = mdev->dev;
567 int qpn = priv->base_qpn; 567 int qpn = priv->base_qpn;
568 u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); 568 u64 mac;
569
570 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
571 priv->dev->dev_addr);
572 mlx4_unregister_mac(dev, priv->port, mac);
573 569
574 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { 570 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
571 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
572 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
573 priv->dev->dev_addr);
574 mlx4_unregister_mac(dev, priv->port, mac);
575 } else {
575 struct mlx4_mac_entry *entry; 576 struct mlx4_mac_entry *entry;
576 struct hlist_node *tmp; 577 struct hlist_node *tmp;
577 struct hlist_head *bucket; 578 struct hlist_head *bucket;
578 unsigned int mac_hash; 579 unsigned int i;
579 580
580 mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX]; 581 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
581 bucket = &priv->mac_hash[mac_hash]; 582 bucket = &priv->mac_hash[i];
582 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 583 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
583 if (ether_addr_equal_64bits(entry->mac, 584 mac = mlx4_en_mac_to_u64(entry->mac);
584 priv->dev->dev_addr)) { 585 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
585 en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n", 586 entry->mac);
586 priv->port, priv->dev->dev_addr, qpn);
587 mlx4_en_uc_steer_release(priv, entry->mac, 587 mlx4_en_uc_steer_release(priv, entry->mac,
588 qpn, entry->reg_id); 588 qpn, entry->reg_id);
589 mlx4_qp_release_range(dev, qpn, 1);
590 589
590 mlx4_unregister_mac(dev, priv->port, mac);
591 hlist_del_rcu(&entry->hlist); 591 hlist_del_rcu(&entry->hlist);
592 kfree_rcu(entry, rcu); 592 kfree_rcu(entry, rcu);
593 break;
594 } 593 }
595 } 594 }
595
596 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
597 priv->port, qpn);
598 mlx4_qp_release_range(dev, qpn, 1);
599 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
596 } 600 }
597} 601}
598 602
@@ -650,28 +654,10 @@ u64 mlx4_en_mac_to_u64(u8 *addr)
650 return mac; 654 return mac;
651} 655}
652 656
653static int mlx4_en_set_mac(struct net_device *dev, void *addr) 657static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
654{
655 struct mlx4_en_priv *priv = netdev_priv(dev);
656 struct mlx4_en_dev *mdev = priv->mdev;
657 struct sockaddr *saddr = addr;
658
659 if (!is_valid_ether_addr(saddr->sa_data))
660 return -EADDRNOTAVAIL;
661
662 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
663 queue_work(mdev->workqueue, &priv->mac_task);
664 return 0;
665}
666
667static void mlx4_en_do_set_mac(struct work_struct *work)
668{ 658{
669 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
670 mac_task);
671 struct mlx4_en_dev *mdev = priv->mdev;
672 int err = 0; 659 int err = 0;
673 660
674 mutex_lock(&mdev->state_lock);
675 if (priv->port_up) { 661 if (priv->port_up) {
676 /* Remove old MAC and insert the new one */ 662 /* Remove old MAC and insert the new one */
677 err = mlx4_en_replace_mac(priv, priv->base_qpn, 663 err = mlx4_en_replace_mac(priv, priv->base_qpn,
@@ -683,7 +669,26 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
683 } else 669 } else
684 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); 670 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
685 671
672 return err;
673}
674
675static int mlx4_en_set_mac(struct net_device *dev, void *addr)
676{
677 struct mlx4_en_priv *priv = netdev_priv(dev);
678 struct mlx4_en_dev *mdev = priv->mdev;
679 struct sockaddr *saddr = addr;
680 int err;
681
682 if (!is_valid_ether_addr(saddr->sa_data))
683 return -EADDRNOTAVAIL;
684
685 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
686
687 mutex_lock(&mdev->state_lock);
688 err = mlx4_en_do_set_mac(priv);
686 mutex_unlock(&mdev->state_lock); 689 mutex_unlock(&mdev->state_lock);
690
691 return err;
687} 692}
688 693
689static void mlx4_en_clear_list(struct net_device *dev) 694static void mlx4_en_clear_list(struct net_device *dev)
@@ -1348,7 +1353,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
1348 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1353 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1349 } 1354 }
1350 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { 1355 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
1351 queue_work(mdev->workqueue, &priv->mac_task); 1356 mlx4_en_do_set_mac(priv);
1352 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; 1357 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
1353 } 1358 }
1354 mutex_unlock(&mdev->state_lock); 1359 mutex_unlock(&mdev->state_lock);
@@ -1828,9 +1833,11 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1828 } 1833 }
1829 1834
1830#ifdef CONFIG_RFS_ACCEL 1835#ifdef CONFIG_RFS_ACCEL
1831 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); 1836 if (priv->mdev->dev->caps.comp_pool) {
1832 if (!priv->dev->rx_cpu_rmap) 1837 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
1833 goto err; 1838 if (!priv->dev->rx_cpu_rmap)
1839 goto err;
1840 }
1834#endif 1841#endif
1835 1842
1836 return 0; 1843 return 0;
@@ -2078,7 +2085,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2078 priv->msg_enable = MLX4_EN_MSG_LEVEL; 2085 priv->msg_enable = MLX4_EN_MSG_LEVEL;
2079 spin_lock_init(&priv->stats_lock); 2086 spin_lock_init(&priv->stats_lock);
2080 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 2087 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
2081 INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
2082 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 2088 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
2083 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 2089 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2084 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 2090 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
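In the non-A0 branch above, mlx4_en_put_qp() now walks every bucket of priv->mac_hash and releases each entry, deleting it from the list as it goes (hlist_for_each_entry_safe keeps a temporary so removal during the walk is safe). A userspace sketch of the same "iterate all buckets, free while walking" pattern, using a plain singly linked list per bucket instead of hlist/RCU:

#include <stdio.h>
#include <stdlib.h>

#define HASH_SIZE 8

struct entry {
        unsigned char mac[6];
        struct entry *next;
};

static struct entry *buckets[HASH_SIZE];

static void add_entry(unsigned int bucket, const unsigned char *mac)
{
        struct entry *e = calloc(1, sizeof(*e));
        int i;

        if (!e)
                return;
        for (i = 0; i < 6; i++)
                e->mac[i] = mac[i];
        e->next = buckets[bucket % HASH_SIZE];
        buckets[bucket % HASH_SIZE] = e;
}

static void release_all(void)
{
        unsigned int i;

        for (i = 0; i < HASH_SIZE; i++) {
                struct entry *e = buckets[i], *tmp;

                while (e) {
                        tmp = e->next;   /* saved before freeing, like the _safe variant */
                        printf("releasing %02x:%02x:...\n", e->mac[0], e->mac[1]);
                        free(e);
                        e = tmp;
                }
                buckets[i] = NULL;
        }
}

int main(void)
{
        unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };

        add_entry(3, mac);
        add_entry(5, mac);
        release_all();
        return 0;
}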
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 50917eb3013e..f6245579962d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -787,6 +787,14 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
787 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; 787 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
788 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 788 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
789 789
790 /* turn off device-managed steering capability if not enabled */
791 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
792 MLX4_GET(field, outbox->buf,
793 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
794 field &= 0x7f;
795 MLX4_PUT(outbox->buf, field,
796 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
797 }
790 return 0; 798 return 0;
791} 799}
792 800
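For slaves, the QUERY_DEV_CAP wrapper now hides the flow-steering-range capability when the master is not in device-managed steering mode; "field &= 0x7f" simply clears the top bit of that byte in the mailbox before it is returned. A minimal sketch of the same masking, with the buffer and offset purely illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t outbox[16] = { 0 };
    const int cap_offset = 8;        /* illustrative, not the real mailbox offset */

    outbox[cap_offset] = 0xff;       /* pretend the device advertised the bit */
    outbox[cap_offset] &= 0x7f;      /* clear bit 7 so the slave never sees it */
    printf("capability byte now 0x%02x\n", outbox[cap_offset]);
    return 0;
}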
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d180bc46826a..16abde20e1fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1555,7 +1555,7 @@ void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1555 1555
1556void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 1556void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1557{ 1557{
1558 u64 in_param; 1558 u64 in_param = 0;
1559 1559
1560 if (mlx4_is_mfunc(dev)) { 1560 if (mlx4_is_mfunc(dev)) {
1561 set_param_l(&in_param, idx); 1561 set_param_l(&in_param, idx);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cf883345af88..d738454116a0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1235,7 +1235,7 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
1235 1235
1236static inline void set_param_l(u64 *arg, u32 val) 1236static inline void set_param_l(u64 *arg, u32 val)
1237{ 1237{
1238 *((u32 *)arg) = val; 1238 *arg = (*arg & 0xffffffff00000000ULL) | (u64) val;
1239} 1239}
1240 1240
1241static inline void set_param_h(u64 *arg, u32 val) 1241static inline void set_param_h(u64 *arg, u32 val)
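The old set_param_l() stored the value through a u32 * cast, which touches the low half of the u64 only on little-endian machines and leaves the other 32 bits whatever they were -- hence the matching "u64 in_param = 0" / "u64 out_param = 0" initializations in main.c, mr.c, pd.c, port.c, qp.c and srq.c below. The new version is endian-independent and preserves the upper half explicitly. A standalone demonstration of the new behaviour:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* new behaviour: replace only the low 32 bits, keep the high 32 bits */
static void set_param_l(uint64_t *arg, uint32_t val)
{
    *arg = (*arg & 0xffffffff00000000ULL) | (uint64_t)val;
}

int main(void)
{
    uint64_t param = 0xdeadbeef00000000ULL;   /* upper half already meaningful */

    set_param_l(&param, 0x1234);
    printf("param = 0x%016" PRIx64 "\n", param);  /* 0xdeadbeef00001234 */
    return 0;
}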
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index c313d7e943a9..f710b7ce0dcb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -509,7 +509,6 @@ struct mlx4_en_priv {
509 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 509 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
510 struct mlx4_qp drop_qp; 510 struct mlx4_qp drop_qp;
511 struct work_struct rx_mode_task; 511 struct work_struct rx_mode_task;
512 struct work_struct mac_task;
513 struct work_struct watchdog_task; 512 struct work_struct watchdog_task;
514 struct work_struct linkstate_task; 513 struct work_struct linkstate_task;
515 struct delayed_work stats_task; 514 struct delayed_work stats_task;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 602ca9bf78e4..f91719a08cba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -183,7 +183,7 @@ u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
183 183
184static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) 184static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
185{ 185{
186 u64 in_param; 186 u64 in_param = 0;
187 u64 out_param; 187 u64 out_param;
188 int err; 188 int err;
189 189
@@ -240,7 +240,7 @@ void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
240 240
241static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) 241static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
242{ 242{
243 u64 in_param; 243 u64 in_param = 0;
244 int err; 244 int err;
245 245
246 if (mlx4_is_mfunc(dev)) { 246 if (mlx4_is_mfunc(dev)) {
@@ -351,7 +351,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
351 351
352static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) 352static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
353{ 353{
354 u64 in_param; 354 u64 in_param = 0;
355 355
356 if (mlx4_is_mfunc(dev)) { 356 if (mlx4_is_mfunc(dev)) {
357 set_param_l(&in_param, index); 357 set_param_l(&in_param, index);
@@ -374,7 +374,7 @@ int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
374 374
375static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) 375static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
376{ 376{
377 u64 param; 377 u64 param = 0;
378 378
379 if (mlx4_is_mfunc(dev)) { 379 if (mlx4_is_mfunc(dev)) {
380 set_param_l(&param, index); 380 set_param_l(&param, index);
@@ -395,7 +395,7 @@ void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
395 395
396static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) 396static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
397{ 397{
398 u64 in_param; 398 u64 in_param = 0;
399 399
400 if (mlx4_is_mfunc(dev)) { 400 if (mlx4_is_mfunc(dev)) {
401 set_param_l(&in_param, index); 401 set_param_l(&in_param, index);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 1ac88637ad9d..00f223acada7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -101,7 +101,7 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
101 101
102void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) 102void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
103{ 103{
104 u64 in_param; 104 u64 in_param = 0;
105 int err; 105 int err;
106 106
107 if (mlx4_is_mfunc(dev)) { 107 if (mlx4_is_mfunc(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 719ead15e491..10c57c86388b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -175,7 +175,7 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac);
175 175
176int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) 176int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
177{ 177{
178 u64 out_param; 178 u64 out_param = 0;
179 int err; 179 int err;
180 180
181 if (mlx4_is_mfunc(dev)) { 181 if (mlx4_is_mfunc(dev)) {
@@ -222,7 +222,7 @@ EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
222 222
223void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) 223void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
224{ 224{
225 u64 out_param; 225 u64 out_param = 0;
226 226
227 if (mlx4_is_mfunc(dev)) { 227 if (mlx4_is_mfunc(dev)) {
228 set_param_l(&out_param, port); 228 set_param_l(&out_param, port);
@@ -361,7 +361,7 @@ out:
361 361
362int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) 362int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
363{ 363{
364 u64 out_param; 364 u64 out_param = 0;
365 int err; 365 int err;
366 366
367 if (mlx4_is_mfunc(dev)) { 367 if (mlx4_is_mfunc(dev)) {
@@ -406,7 +406,7 @@ out:
406 406
407void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) 407void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
408{ 408{
409 u64 in_param; 409 u64 in_param = 0;
410 int err; 410 int err;
411 411
412 if (mlx4_is_mfunc(dev)) { 412 if (mlx4_is_mfunc(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 81e2abe07bbb..e891b058c1be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -222,7 +222,7 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
222 222
223int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) 223int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
224{ 224{
225 u64 in_param; 225 u64 in_param = 0;
226 u64 out_param; 226 u64 out_param;
227 int err; 227 int err;
228 228
@@ -255,7 +255,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
255 255
256void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) 256void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
257{ 257{
258 u64 in_param; 258 u64 in_param = 0;
259 int err; 259 int err;
260 260
261 if (mlx4_is_mfunc(dev)) { 261 if (mlx4_is_mfunc(dev)) {
@@ -319,7 +319,7 @@ err_out:
319 319
320static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) 320static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
321{ 321{
322 u64 param; 322 u64 param = 0;
323 323
324 if (mlx4_is_mfunc(dev)) { 324 if (mlx4_is_mfunc(dev)) {
325 set_param_l(&param, qpn); 325 set_param_l(&param, qpn);
@@ -344,7 +344,7 @@ void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
344 344
345static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) 345static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
346{ 346{
347 u64 in_param; 347 u64 in_param = 0;
348 348
349 if (mlx4_is_mfunc(dev)) { 349 if (mlx4_is_mfunc(dev)) {
350 set_param_l(&in_param, qpn); 350 set_param_l(&in_param, qpn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 083fb48dc3d7..2995687f1aee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2990,6 +2990,9 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2990 u8 steer_type_mask = 2; 2990 u8 steer_type_mask = 2;
2991 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; 2991 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2992 2992
2993 if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0)
2994 return -EINVAL;
2995
2993 qpn = vhcr->in_modifier & 0xffffff; 2996 qpn = vhcr->in_modifier & 0xffffff;
2994 err = get_res(dev, slave, qpn, RES_QP, &rqp); 2997 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2995 if (err) 2998 if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index feda6c00829f..e329fe1f11b7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -149,7 +149,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
149 149
150static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) 150static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
151{ 151{
152 u64 in_param; 152 u64 in_param = 0;
153 153
154 if (mlx4_is_mfunc(dev)) { 154 if (mlx4_is_mfunc(dev)) {
155 set_param_l(&in_param, srqn); 155 set_param_l(&in_param, srqn);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 50247dfe8f57..d2f790df6dcb 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
171 * TX scheduler is stopped when we're done and before 171 * TX scheduler is stopped when we're done and before
172 * netif_device_present() becomes false. 172 * netif_device_present() becomes false.
173 */ 173 */
174 netif_tx_lock(dev); 174 netif_tx_lock_bh(dev);
175 netif_device_detach(dev); 175 netif_device_detach(dev);
176 netif_tx_unlock(dev); 176 netif_tx_unlock_bh(dev);
177} 177}
178 178
179#endif /* EFX_EFX_H */ 179#endif /* EFX_EFX_H */
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 879ff5849bbd..bb579a6128c8 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -215,7 +215,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
215 rx_buf = efx_rx_buffer(rx_queue, index); 215 rx_buf = efx_rx_buffer(rx_queue, index);
216 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 216 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
217 rx_buf->u.page = page; 217 rx_buf->u.page = page;
218 rx_buf->page_offset = page_offset; 218 rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
219 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 219 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
220 rx_buf->flags = EFX_RX_BUF_PAGE; 220 rx_buf->flags = EFX_RX_BUF_PAGE;
221 ++rx_queue->added_count; 221 ++rx_queue->added_count;
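The sfc fix makes rx_buf->page_offset include the EFX_PAGE_IP_ALIGN headroom that is already folded into dma_addr, so the two fields describe the same byte of the page. A small arithmetic check of the corrected bookkeeping; the 2-byte alignment value is an assumption (typical of NET_IP_ALIGN), not taken from the driver:

#include <stdio.h>

#define EFX_PAGE_IP_ALIGN 2      /* assumed headroom value for the example */

int main(void)
{
    unsigned long page_dma = 0x100000;   /* made-up page DMA base */
    unsigned int page_offset = 256;      /* where this buffer starts in the page */
    unsigned int rx_buffer_len = 1536;

    unsigned long dma_addr = page_dma + page_offset + EFX_PAGE_IP_ALIGN;
    unsigned int buf_page_offset = page_offset + EFX_PAGE_IP_ALIGN;  /* the fix */
    unsigned int len = rx_buffer_len - EFX_PAGE_IP_ALIGN;

    /* both views now point at the same byte of the page */
    printf("dma=%#lx page_offset=%u len=%u consistent=%d\n",
           dma_addr, buf_page_offset, len,
           dma_addr - page_dma == buf_page_offset);
    return 0;
}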
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index e5b19b056909..3c4d6274bb9b 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -202,6 +202,9 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
202 return 0; 202 return 0;
203 203
204 out: 204 out:
205 if (rrpriv->evt_ring)
206 pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring,
207 rrpriv->evt_ring_dma);
205 if (rrpriv->rx_ring) 208 if (rrpriv->rx_ring)
206 pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring, 209 pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
207 rrpriv->rx_ring_dma); 210 rrpriv->rx_ring_dma);
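The rrunner change adds the event ring to the error-unwind path so every ring allocated before the failure is released, not just the RX/TX rings. The familiar pattern -- each allocation checked, the out: label freeing only what is non-NULL -- in a small user-space sketch with plain malloc/free (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *evt_ring = NULL, *rx_ring = NULL, *tx_ring = NULL;
    int err = -1;

    evt_ring = malloc(256);
    if (!evt_ring)
        goto out;
    rx_ring = malloc(4096);
    if (!rx_ring)
        goto out;
    tx_ring = NULL;          /* simulate a later allocation failing */
    if (!tx_ring)
        goto out;

    err = 0;
out:
    /* release everything allocated so far; free(NULL) is a harmless no-op */
    free(tx_ring);
    free(rx_ring);
    free(evt_ring);          /* this one used to be leaked on the error path */
    return err ? 1 : 0;
}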
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 417b2af1aa80..73abbc1655d5 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -660,6 +660,7 @@ void macvlan_common_setup(struct net_device *dev)
660 ether_setup(dev); 660 ether_setup(dev);
661 661
662 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 662 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
663 dev->priv_flags |= IFF_UNICAST_FLT;
663 dev->netdev_ops = &macvlan_netdev_ops; 664 dev->netdev_ops = &macvlan_netdev_ops;
664 dev->destructor = free_netdev; 665 dev->destructor = free_netdev;
665 dev->header_ops = &macvlan_hard_header_ops, 666 dev->header_ops = &macvlan_hard_header_ops,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 05c5efe84591..bf3419297875 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1138,6 +1138,8 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
1138 netdev_upper_dev_unlink(port_dev, dev); 1138 netdev_upper_dev_unlink(port_dev, dev);
1139 team_port_disable_netpoll(port); 1139 team_port_disable_netpoll(port);
1140 vlan_vids_del_by_dev(port_dev, dev); 1140 vlan_vids_del_by_dev(port_dev, dev);
1141 dev_uc_unsync(port_dev, dev);
1142 dev_mc_unsync(port_dev, dev);
1141 dev_close(port_dev); 1143 dev_close(port_dev);
1142 team_port_leave(team, port); 1144 team_port_leave(team, port);
1143 1145
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2c6a22e278ea..b7c457adc0dc 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -747,6 +747,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
747 goto drop; 747 goto drop;
748 skb_orphan(skb); 748 skb_orphan(skb);
749 749
750 nf_reset(skb);
751
750 /* Enqueue packet */ 752 /* Enqueue packet */
751 skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb); 753 skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
752 754
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 4aad350e4dae..eae7a03d4f9b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2958,6 +2958,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2958 2958
2959 adapter->num_rx_queues = num_rx_queues; 2959 adapter->num_rx_queues = num_rx_queues;
2960 adapter->num_tx_queues = num_tx_queues; 2960 adapter->num_tx_queues = num_tx_queues;
2961 adapter->rx_buf_per_pkt = 1;
2961 2962
2962 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; 2963 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2963 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; 2964 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index a0feb17a0238..63a124340cbe 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -472,6 +472,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
472 VMXNET3_RX_RING_MAX_SIZE) 472 VMXNET3_RX_RING_MAX_SIZE)
473 return -EINVAL; 473 return -EINVAL;
474 474
475 /* if adapter not yet initialized, do nothing */
476 if (adapter->rx_buf_per_pkt == 0) {
477 netdev_err(netdev, "adapter not completely initialized, "
478 "ring size cannot be changed yet\n");
479 return -EOPNOTSUPP;
480 }
475 481
476 /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ 482 /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
477 new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & 483 new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3198384689d9..35418146fa17 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -70,10 +70,10 @@
70/* 70/*
71 * Version numbers 71 * Version numbers
72 */ 72 */
73#define VMXNET3_DRIVER_VERSION_STRING "1.1.29.0-k" 73#define VMXNET3_DRIVER_VERSION_STRING "1.1.30.0-k"
74 74
75/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 75/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
76#define VMXNET3_DRIVER_VERSION_NUM 0x01011D00 76#define VMXNET3_DRIVER_VERSION_NUM 0x01011E00
77 77
78#if defined(CONFIG_PCI_MSI) 78#if defined(CONFIG_PCI_MSI)
79 /* RSS only makes sense if MSI-X is supported. */ 79 /* RSS only makes sense if MSI-X is supported. */
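The vmxnet3 version bump keeps the string and the packed number in step: as the neighbouring comment says, each byte of VMXNET3_DRIVER_VERSION_NUM encodes one component, so 0x01011E00 decodes to 1.1.30.0. A quick check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t v = 0x01011E00;   /* VMXNET3_DRIVER_VERSION_NUM after the bump */

    printf("%u.%u.%u.%u\n",
           (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
    return 0;                  /* prints 1.1.30.0, matching "1.1.30.0-k" */
}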
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f10e58ac9c1b..7cee7a3068ec 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -961,6 +961,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
961 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 961 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
962 tunnel_ip_select_ident(skb, old_iph, &rt->dst); 962 tunnel_ip_select_ident(skb, old_iph, &rt->dst);
963 963
964 nf_reset(skb);
965
964 vxlan_set_owner(dev, skb); 966 vxlan_set_owner(dev, skb);
965 967
966 /* See iptunnel_xmit() */ 968 /* See iptunnel_xmit() */
@@ -1504,6 +1506,14 @@ static __net_init int vxlan_init_net(struct net *net)
1504static __net_exit void vxlan_exit_net(struct net *net) 1506static __net_exit void vxlan_exit_net(struct net *net)
1505{ 1507{
1506 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 1508 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1509 struct vxlan_dev *vxlan;
1510 unsigned h;
1511
1512 rtnl_lock();
1513 for (h = 0; h < VNI_HASH_SIZE; ++h)
1514 hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist)
1515 dev_close(vxlan->dev);
1516 rtnl_unlock();
1507 1517
1508 if (vn->sock) { 1518 if (vn->sock) {
1509 sk_release_kernel(vn->sock->sk); 1519 sk_release_kernel(vn->sock->sk);
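vxlan_exit_net() now walks every VNI hash bucket under rtnl_lock() and closes each device before the per-namespace socket is released. A user-space analogue of that bucket walk, using a fixed-size table of singly linked lists; the structures and the "close" step are stand-ins, not the driver's data types:

#include <stdio.h>
#include <stdlib.h>

#define VNI_HASH_SIZE 4        /* illustrative; the driver's table is larger */

struct vxlan_dev {
    unsigned int vni;
    struct vxlan_dev *next;
};

static struct vxlan_dev *vni_list[VNI_HASH_SIZE];

static void add_dev(unsigned int vni)
{
    struct vxlan_dev *v = malloc(sizeof(*v));

    if (!v)
        return;
    v->vni = vni;
    v->next = vni_list[vni % VNI_HASH_SIZE];
    vni_list[vni % VNI_HASH_SIZE] = v;
}

int main(void)
{
    unsigned int h;

    add_dev(10);
    add_dev(42);
    add_dev(4242);

    /* analogue of the new exit path: visit every bucket, close every device */
    for (h = 0; h < VNI_HASH_SIZE; ++h) {
        struct vxlan_dev *v = vni_list[h], *next;

        for (; v; v = next) {
            next = v->next;
            printf("closing vxlan device for VNI %u\n", v->vni);
            free(v);
        }
    }
    return 0;
}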
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index 94ef33838bc6..b775769f8322 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -151,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
151 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); 151 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
152 152
153 if (!(flags & CMD_ASYNC)) { 153 if (!(flags & CMD_ASYNC)) {
154 cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD; 154 cmd.flags |= CMD_WANT_SKB;
155 might_sleep(); 155 might_sleep();
156 } 156 }
157 157
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 10f01793d7a6..81aa91fab5aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -363,7 +363,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
363 __entry->flags = cmd->flags; 363 __entry->flags = cmd->flags;
364 memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr)); 364 memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
365 365
366 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 366 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
367 if (!cmd->len[i]) 367 if (!cmd->len[i])
368 continue; 368 continue;
369 memcpy((u8 *)__get_dynamic_array(hcmd) + offset, 369 memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 6f228bb2b844..fbfd2d137117 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1102,7 +1102,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
1102 1102
1103/* shared module parameters */ 1103/* shared module parameters */
1104struct iwl_mod_params iwlwifi_mod_params = { 1104struct iwl_mod_params iwlwifi_mod_params = {
1105 .amsdu_size_8K = 1,
1106 .restart_fw = 1, 1105 .restart_fw = 1,
1107 .plcp_check = true, 1106 .plcp_check = true,
1108 .bt_coex_active = true, 1107 .bt_coex_active = true,
@@ -1207,7 +1206,7 @@ MODULE_PARM_DESC(11n_disable,
1207 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); 1206 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
1208module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, 1207module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
1209 int, S_IRUGO); 1208 int, S_IRUGO);
1210MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); 1209MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
1211module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO); 1210module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
1212MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); 1211MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
1213 1212
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index e5e3a79eae2f..2c2a729092f5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -91,7 +91,7 @@ enum iwl_power_level {
91 * @sw_crypto: using hardware encryption, default = 0 91 * @sw_crypto: using hardware encryption, default = 0
92 * @disable_11n: disable 11n capabilities, default = 0, 92 * @disable_11n: disable 11n capabilities, default = 0,
93 * use IWL_DISABLE_HT_* constants 93 * use IWL_DISABLE_HT_* constants
94 * @amsdu_size_8K: enable 8K amsdu size, default = 1 94 * @amsdu_size_8K: enable 8K amsdu size, default = 0
95 * @restart_fw: restart firmware, default = 1 95 * @restart_fw: restart firmware, default = 1
96 * @plcp_check: enable plcp health check, default = true 96 * @plcp_check: enable plcp health check, default = true
97 * @wd_disable: enable stuck queue check, default = 0 97 * @wd_disable: enable stuck queue check, default = 0
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 8c7bec6b9a0b..0cac2b7af78b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -186,19 +186,13 @@ struct iwl_rx_packet {
186 * @CMD_ASYNC: Return right away and don't want for the response 186 * @CMD_ASYNC: Return right away and don't want for the response
187 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the 187 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
188 * response. The caller needs to call iwl_free_resp when done. 188 * response. The caller needs to call iwl_free_resp when done.
189 * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
190 * response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
191 * copied. The pointer passed to the response handler is in the transport
192 * ownership and don't need to be freed by the op_mode. This also means
193 * that the pointer is invalidated after the op_mode's handler returns.
194 * @CMD_ON_DEMAND: This command is sent by the test mode pipe. 189 * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
195 */ 190 */
196enum CMD_MODE { 191enum CMD_MODE {
197 CMD_SYNC = 0, 192 CMD_SYNC = 0,
198 CMD_ASYNC = BIT(0), 193 CMD_ASYNC = BIT(0),
199 CMD_WANT_SKB = BIT(1), 194 CMD_WANT_SKB = BIT(1),
200 CMD_WANT_HCMD = BIT(2), 195 CMD_ON_DEMAND = BIT(2),
201 CMD_ON_DEMAND = BIT(3),
202}; 196};
203 197
204#define DEF_CMD_PAYLOAD_SIZE 320 198#define DEF_CMD_PAYLOAD_SIZE 320
@@ -217,7 +211,11 @@ struct iwl_device_cmd {
217 211
218#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) 212#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
219 213
220#define IWL_MAX_CMD_TFDS 2 214/*
215 * number of transfer buffers (fragments) per transmit frame descriptor;
216 * this is just the driver's idea, the hardware supports 20
217 */
218#define IWL_MAX_CMD_TBS_PER_TFD 2
221 219
222/** 220/**
223 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command 221 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
@@ -254,15 +252,15 @@ enum iwl_hcmd_dataflag {
254 * @id: id of the host command 252 * @id: id of the host command
255 */ 253 */
256struct iwl_host_cmd { 254struct iwl_host_cmd {
257 const void *data[IWL_MAX_CMD_TFDS]; 255 const void *data[IWL_MAX_CMD_TBS_PER_TFD];
258 struct iwl_rx_packet *resp_pkt; 256 struct iwl_rx_packet *resp_pkt;
259 unsigned long _rx_page_addr; 257 unsigned long _rx_page_addr;
260 u32 _rx_page_order; 258 u32 _rx_page_order;
261 int handler_status; 259 int handler_status;
262 260
263 u32 flags; 261 u32 flags;
264 u16 len[IWL_MAX_CMD_TFDS]; 262 u16 len[IWL_MAX_CMD_TBS_PER_TFD];
265 u8 dataflags[IWL_MAX_CMD_TFDS]; 263 u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
266 u8 id; 264 u8 id;
267}; 265};
268 266
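With CMD_WANT_HCMD gone, the CMD_MODE bits are renumbered and CMD_ON_DEMAND moves down to BIT(2); callers such as iwl_send_add_sta() above now request only CMD_WANT_SKB for synchronous commands. A tiny standalone check that the remaining flags still combine and test independently, with the values copied from the reduced enum:

#include <stdio.h>

enum cmd_mode {            /* mirrors the reduced enum CMD_MODE */
    CMD_SYNC      = 0,
    CMD_ASYNC     = 1 << 0,
    CMD_WANT_SKB  = 1 << 1,
    CMD_ON_DEMAND = 1 << 2,
};

int main(void)
{
    unsigned int flags = CMD_SYNC;

    if (!(flags & CMD_ASYNC))
        flags |= CMD_WANT_SKB;   /* same pattern as iwl_send_add_sta() */

    printf("flags=0x%x want_skb=%d on_demand=%d\n",
           flags, !!(flags & CMD_WANT_SKB), !!(flags & CMD_ON_DEMAND));
    return 0;
}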
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 23eebda848b0..2adb61f103f4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -762,18 +762,20 @@ struct iwl_phy_context_cmd {
762#define IWL_RX_INFO_PHY_CNT 8 762#define IWL_RX_INFO_PHY_CNT 8
763#define IWL_RX_INFO_AGC_IDX 1 763#define IWL_RX_INFO_AGC_IDX 1
764#define IWL_RX_INFO_RSSI_AB_IDX 2 764#define IWL_RX_INFO_RSSI_AB_IDX 2
765#define IWL_RX_INFO_RSSI_C_IDX 3 765#define IWL_OFDM_AGC_A_MSK 0x0000007f
766#define IWL_OFDM_AGC_DB_MSK 0xfe00 766#define IWL_OFDM_AGC_A_POS 0
767#define IWL_OFDM_AGC_DB_POS 9 767#define IWL_OFDM_AGC_B_MSK 0x00003f80
768#define IWL_OFDM_AGC_B_POS 7
769#define IWL_OFDM_AGC_CODE_MSK 0x3fe00000
770#define IWL_OFDM_AGC_CODE_POS 20
768#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff 771#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
769#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
770#define IWL_OFDM_RSSI_A_POS 0 772#define IWL_OFDM_RSSI_A_POS 0
773#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
774#define IWL_OFDM_RSSI_ALLBAND_A_POS 8
771#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000 775#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
772#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
773#define IWL_OFDM_RSSI_B_POS 16 776#define IWL_OFDM_RSSI_B_POS 16
774#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff 777#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
775#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00 778#define IWL_OFDM_RSSI_ALLBAND_B_POS 24
776#define IWL_OFDM_RSSI_C_POS 0
777 779
778/** 780/**
779 * struct iwl_rx_phy_info - phy info 781 * struct iwl_rx_phy_info - phy info
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index d3d959db03a9..500f818dba04 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -79,17 +79,8 @@
79#define UCODE_VALID_OK cpu_to_le32(0x1) 79#define UCODE_VALID_OK cpu_to_le32(0x1)
80 80
81/* Default calibration values for WkP - set to INIT image w/o running */ 81/* Default calibration values for WkP - set to INIT image w/o running */
82static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f,
83 0x00, 0x18, 0x00 };
84static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
85 0x7f, 0x7f, 0x7f };
86static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 };
87static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00,
88 0x00 };
89static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 };
90static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 }; 82static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
91static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 }; 83static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
92static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 };
93 84
94struct iwl_calib_default_data { 85struct iwl_calib_default_data {
95 u16 size; 86 u16 size;
@@ -99,12 +90,7 @@ struct iwl_calib_default_data {
99#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf} 90#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
100 91
101static const struct iwl_calib_default_data wkp_calib_default_data[12] = { 92static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
102 [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc),
103 [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter),
104 [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo),
105 [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq),
106 [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew), 93 [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
107 [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq),
108 [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew), 94 [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
109}; 95};
110 96
@@ -241,20 +227,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
241 227
242 return 0; 228 return 0;
243} 229}
244#define IWL_HW_REV_ID_RAINBOW 0x2
245#define IWL_PROJ_TYPE_LHP 0x5
246
247static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm)
248{
249 struct iwl_nvm_data *data = mvm->nvm_data;
250 /* Temp calls to static definitions, will be changed to CSR calls */
251 u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW;
252 u8 project_type = IWL_PROJ_TYPE_LHP;
253
254 return data->radio_cfg_dash | (data->radio_cfg_step << 2) |
255 (hw_rev_id << 4) | ((project_type & 0x7f) << 6) |
256 (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20);
257}
258 230
259static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) 231static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
260{ 232{
@@ -262,7 +234,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
262 enum iwl_ucode_type ucode_type = mvm->cur_ucode; 234 enum iwl_ucode_type ucode_type = mvm->cur_ucode;
263 235
264 /* Set parameters */ 236 /* Set parameters */
265 phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm)); 237 phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config);
266 phy_cfg_cmd.calib_control.event_trigger = 238 phy_cfg_cmd.calib_control.event_trigger =
267 mvm->fw->default_calib[ucode_type].event_trigger; 239 mvm->fw->default_calib[ucode_type].event_trigger;
268 phy_cfg_cmd.calib_control.flow_trigger = 240 phy_cfg_cmd.calib_control.flow_trigger =
@@ -275,103 +247,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
275 sizeof(phy_cfg_cmd), &phy_cfg_cmd); 247 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
276} 248}
277 249
278/* Starting with the new PHY DB implementation - New calibs are enabled */
279/* Value - 0x405e7 */
280#define IWL_CALIB_DEFAULT_FLOW_INIT (IWL_CALIB_CFG_XTAL_IDX |\
281 IWL_CALIB_CFG_TEMPERATURE_IDX |\
282 IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
283 IWL_CALIB_CFG_DC_IDX |\
284 IWL_CALIB_CFG_BB_FILTER_IDX |\
285 IWL_CALIB_CFG_LO_LEAKAGE_IDX |\
286 IWL_CALIB_CFG_TX_IQ_IDX |\
287 IWL_CALIB_CFG_RX_IQ_IDX |\
288 IWL_CALIB_CFG_AGC_IDX)
289
290#define IWL_CALIB_DEFAULT_EVENT_INIT 0x0
291
292/* Value 0x41567 */
293#define IWL_CALIB_DEFAULT_FLOW_RUN (IWL_CALIB_CFG_XTAL_IDX |\
294 IWL_CALIB_CFG_TEMPERATURE_IDX |\
295 IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
296 IWL_CALIB_CFG_BB_FILTER_IDX |\
297 IWL_CALIB_CFG_DC_IDX |\
298 IWL_CALIB_CFG_TX_IQ_IDX |\
299 IWL_CALIB_CFG_RX_IQ_IDX |\
300 IWL_CALIB_CFG_SENSITIVITY_IDX |\
301 IWL_CALIB_CFG_AGC_IDX)
302
303#define IWL_CALIB_DEFAULT_EVENT_RUN (IWL_CALIB_CFG_XTAL_IDX |\
304 IWL_CALIB_CFG_TEMPERATURE_IDX |\
305 IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
306 IWL_CALIB_CFG_TX_PWR_IDX |\
307 IWL_CALIB_CFG_DC_IDX |\
308 IWL_CALIB_CFG_TX_IQ_IDX |\
309 IWL_CALIB_CFG_SENSITIVITY_IDX)
310
311/*
312 * Sets the calibrations trigger values that will be sent to the FW for runtime
313 * and init calibrations.
314 * The ones given in the FW TLV are not correct.
315 */
316static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm)
317{
318 struct iwl_tlv_calib_ctrl default_calib;
319
320 /*
321 * WkP FW TLV calib bits are wrong, overwrite them.
322 * This defines the dynamic calibrations which are implemented in the
323 * uCode both for init(flow) calculation and event driven calibs.
324 */
325
326 /* Init Image */
327 default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT);
328 default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT);
329
330 if (default_calib.event_trigger !=
331 mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger)
332 IWL_ERR(mvm,
333 "Updating the event calib for INIT image: 0x%x -> 0x%x\n",
334 mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger,
335 default_calib.event_trigger);
336 if (default_calib.flow_trigger !=
337 mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger)
338 IWL_ERR(mvm,
339 "Updating the flow calib for INIT image: 0x%x -> 0x%x\n",
340 mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger,
341 default_calib.flow_trigger);
342
343 memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT],
344 &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
345 IWL_ERR(mvm,
346 "Setting uCode init calibrations event 0x%x, trigger 0x%x\n",
347 default_calib.event_trigger,
348 default_calib.flow_trigger);
349
350 /* Run time image */
351 default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN);
352 default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN);
353
354 if (default_calib.event_trigger !=
355 mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger)
356 IWL_ERR(mvm,
357 "Updating the event calib for RT image: 0x%x -> 0x%x\n",
358 mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger,
359 default_calib.event_trigger);
360 if (default_calib.flow_trigger !=
361 mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger)
362 IWL_ERR(mvm,
363 "Updating the flow calib for RT image: 0x%x -> 0x%x\n",
364 mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger,
365 default_calib.flow_trigger);
366
367 memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR],
368 &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
369 IWL_ERR(mvm,
370 "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n",
371 default_calib.event_trigger,
372 default_calib.flow_trigger);
373}
374
375static int iwl_set_default_calibrations(struct iwl_mvm *mvm) 250static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
376{ 251{
377 u8 cmd_raw[16]; /* holds the variable size commands */ 252 u8 cmd_raw[16]; /* holds the variable size commands */
@@ -446,8 +321,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
446 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); 321 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
447 WARN_ON(ret); 322 WARN_ON(ret);
448 323
449 /* Override the calibrations from TLV and the const of fw */ 324 /* Send TX valid antennas before triggering calibrations */
450 iwl_set_default_calib_trigger(mvm); 325 ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
326 if (ret)
327 goto error;
451 328
452 /* WkP doesn't have all calibrations, need to set default values */ 329 /* WkP doesn't have all calibrations, need to set default values */
453 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 330 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 537711b10478..bdae700c769e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -80,7 +80,8 @@
80 80
81#define IWL_INVALID_MAC80211_QUEUE 0xff 81#define IWL_INVALID_MAC80211_QUEUE 0xff
82#define IWL_MVM_MAX_ADDRESSES 2 82#define IWL_MVM_MAX_ADDRESSES 2
83#define IWL_RSSI_OFFSET 44 83/* RSSI offset for WkP */
84#define IWL_RSSI_OFFSET 50
84 85
85enum iwl_mvm_tx_fifo { 86enum iwl_mvm_tx_fifo {
86 IWL_MVM_TX_FIFO_BK = 0, 87 IWL_MVM_TX_FIFO_BK = 0,
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index aa59adf87db3..d0f9c1e0475e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -624,12 +624,8 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
624 ieee80211_free_txskb(mvm->hw, skb); 624 ieee80211_free_txskb(mvm->hw, skb);
625} 625}
626 626
627static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) 627static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
628{ 628{
629 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
630
631 iwl_mvm_dump_nic_error_log(mvm);
632
633 iwl_abort_notification_waits(&mvm->notif_wait); 629 iwl_abort_notification_waits(&mvm->notif_wait);
634 630
635 /* 631 /*
@@ -663,9 +659,21 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
663 } 659 }
664} 660}
665 661
662static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
663{
664 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
665
666 iwl_mvm_dump_nic_error_log(mvm);
667
668 iwl_mvm_nic_restart(mvm);
669}
670
666static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) 671static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
667{ 672{
673 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
674
668 WARN_ON(1); 675 WARN_ON(1);
676 iwl_mvm_nic_restart(mvm);
669} 677}
670 678
671static const struct iwl_op_mode_ops iwl_mvm_ops = { 679static const struct iwl_op_mode_ops iwl_mvm_ops = {
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 3f40ab05bbd8..b0b190d0ec23 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -131,33 +131,42 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
131static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, 131static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
132 struct iwl_rx_phy_info *phy_info) 132 struct iwl_rx_phy_info *phy_info)
133{ 133{
134 u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db; 134 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
135 int rssi_all_band_a, rssi_all_band_b;
136 u32 agc_a, agc_b, max_agc;
135 u32 val; 137 u32 val;
136 138
137 /* Find max rssi among 3 possible receivers. 139 /* Find max rssi among 2 possible receivers.
138 * These values are measured by the Digital Signal Processor (DSP). 140 * These values are measured by the Digital Signal Processor (DSP).
139 * They should stay fairly constant even as the signal strength varies, 141 * They should stay fairly constant even as the signal strength varies,
140 * if the radio's Automatic Gain Control (AGC) is working right. 142 * if the radio's Automatic Gain Control (AGC) is working right.
141 * AGC value (see below) will provide the "interesting" info. 143 * AGC value (see below) will provide the "interesting" info.
142 */ 144 */
145 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
146 agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
147 agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
148 max_agc = max_t(u32, agc_a, agc_b);
149
143 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]); 150 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
144 rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS; 151 rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
145 rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS; 152 rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
146 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]); 153 rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
147 rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS; 154 IWL_OFDM_RSSI_ALLBAND_A_POS;
148 155 rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
149 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); 156 IWL_OFDM_RSSI_ALLBAND_B_POS;
150 agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS;
151 157
152 max_rssi = max_t(u32, rssi_a, rssi_b); 158 /*
153 max_rssi = max_t(u32, max_rssi, rssi_c); 159 * dBm = rssi dB - agc dB - constant.
160 * Higher AGC (higher radio gain) means lower signal.
161 */
162 rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
163 rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
164 max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
154 165
155 IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", 166 IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
156 rssi_a, rssi_b, rssi_c, max_rssi, agc_db); 167 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
157 168
158 /* dBm = max_rssi dB - agc dB - constant. 169 return max_rssi_dbm;
159 * Higher AGC (higher radio gain) means lower signal. */
160 return max_rssi - agc_db - IWL_RSSI_OFFSET;
161} 170}
162 171
163/* 172/*
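The reworked RSSI path reads per-chain energy and AGC values with the masks added to fw-api.h above, converts each chain to dBm as rssi - IWL_RSSI_OFFSET - agc (with the offset now 50), and reports the stronger chain. A standalone version of the same arithmetic; the two packed PHY words are made up for the example:

#include <stdint.h>
#include <stdio.h>

#define IWL_RSSI_OFFSET            50
#define IWL_OFDM_AGC_A_MSK         0x0000007f
#define IWL_OFDM_AGC_A_POS         0
#define IWL_OFDM_AGC_B_MSK         0x00003f80
#define IWL_OFDM_AGC_B_POS         7
#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
#define IWL_OFDM_RSSI_A_POS        0
#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
#define IWL_OFDM_RSSI_B_POS        16

static int max_rssi_dbm(uint32_t agc_word, uint32_t rssi_word)
{
    int agc_a  = (agc_word & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
    int agc_b  = (agc_word & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
    int rssi_a = (rssi_word & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
    int rssi_b = (rssi_word & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;

    /* dBm = rssi dB - agc dB - constant; higher gain means weaker signal */
    int a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
    int b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;

    return a_dbm > b_dbm ? a_dbm : b_dbm;
}

int main(void)
{
    /* made-up PHY words: chain A rssi=40/agc=30, chain B rssi=35/agc=20 */
    uint32_t agc_word  = (30 << IWL_OFDM_AGC_A_POS) | (20 << IWL_OFDM_AGC_B_POS);
    uint32_t rssi_word = (40 << IWL_OFDM_RSSI_A_POS) | (35 << IWL_OFDM_RSSI_B_POS);

    printf("max rssi = %d dBm\n", max_rssi_dbm(agc_word, rssi_word));  /* -35 */
    return 0;
}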
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 861a7f9f8e7f..274f44e2ef60 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -770,6 +770,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
770 u16 txq_id; 770 u16 txq_id;
771 int err; 771 int err;
772 772
773
774 /*
775 * If mac80211 is cleaning its state, then say that we finished since
776 * our state has been cleared anyway.
777 */
778 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
779 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
780 return 0;
781 }
782
773 spin_lock_bh(&mvmsta->lock); 783 spin_lock_bh(&mvmsta->lock);
774 784
775 txq_id = tid_data->txq_id; 785 txq_id = tid_data->txq_id;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 6b67ce3f679c..6645efe5c03e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -607,12 +607,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
607 607
608 /* Single frame failure in an AMPDU queue => send BAR */ 608 /* Single frame failure in an AMPDU queue => send BAR */
609 if (txq_id >= IWL_FIRST_AMPDU_QUEUE && 609 if (txq_id >= IWL_FIRST_AMPDU_QUEUE &&
610 !(info->flags & IEEE80211_TX_STAT_ACK)) { 610 !(info->flags & IEEE80211_TX_STAT_ACK))
611 /* there must be only one skb in the skb_list */
612 WARN_ON_ONCE(skb_freed > 1 ||
613 !skb_queue_empty(&skbs));
614 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 611 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
615 }
616 612
617 /* W/A FW bug: seq_ctl is wrong when the queue is flushed */ 613 /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
618 if (status == TX_STATUS_FAIL_FIFO_FLUSHED) { 614 if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 3d62e8055352..148843e7f34f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -137,10 +137,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
137struct iwl_cmd_meta { 137struct iwl_cmd_meta {
138 /* only for SYNC commands, iff the reply skb is wanted */ 138 /* only for SYNC commands, iff the reply skb is wanted */
139 struct iwl_host_cmd *source; 139 struct iwl_host_cmd *source;
140
141 DEFINE_DMA_UNMAP_ADDR(mapping);
142 DEFINE_DMA_UNMAP_LEN(len);
143
144 u32 flags; 140 u32 flags;
145}; 141};
146 142
@@ -185,25 +181,36 @@ struct iwl_queue {
185/* 181/*
186 * The FH will write back to the first TB only, so we need 182 * The FH will write back to the first TB only, so we need
187 * to copy some data into the buffer regardless of whether 183 * to copy some data into the buffer regardless of whether
188 * it should be mapped or not. This indicates how much to 184 * it should be mapped or not. This indicates how big the
189 * copy, even for HCMDs it must be big enough to fit the 185 * first TB must be to include the scratch buffer. Since
190 * DRAM scratch from the TX cmd, at least 16 bytes. 186 * the scratch is 4 bytes at offset 12, it's 16 now. If we
187 * make it bigger then allocations will be bigger and copy
188 * slower, so that's probably not useful.
191 */ 189 */
192#define IWL_HCMD_MIN_COPY_SIZE 16 190#define IWL_HCMD_SCRATCHBUF_SIZE 16
193 191
194struct iwl_pcie_txq_entry { 192struct iwl_pcie_txq_entry {
195 struct iwl_device_cmd *cmd; 193 struct iwl_device_cmd *cmd;
196 struct iwl_device_cmd *copy_cmd;
197 struct sk_buff *skb; 194 struct sk_buff *skb;
198 /* buffer to free after command completes */ 195 /* buffer to free after command completes */
199 const void *free_buf; 196 const void *free_buf;
200 struct iwl_cmd_meta meta; 197 struct iwl_cmd_meta meta;
201}; 198};
202 199
200struct iwl_pcie_txq_scratch_buf {
201 struct iwl_cmd_header hdr;
202 u8 buf[8];
203 __le32 scratch;
204};
205
203/** 206/**
204 * struct iwl_txq - Tx Queue for DMA 207 * struct iwl_txq - Tx Queue for DMA
205 * @q: generic Rx/Tx queue descriptor 208 * @q: generic Rx/Tx queue descriptor
206 * @tfds: transmit frame descriptors (DMA memory) 209 * @tfds: transmit frame descriptors (DMA memory)
210 * @scratchbufs: start of command headers, including scratch buffers, for
211 * the writeback -- this is DMA memory and an array holding one buffer
212 * for each command on the queue
213 * @scratchbufs_dma: DMA address for the scratchbufs start
207 * @entries: transmit entries (driver state) 214 * @entries: transmit entries (driver state)
208 * @lock: queue lock 215 * @lock: queue lock
209 * @stuck_timer: timer that fires if queue gets stuck 216 * @stuck_timer: timer that fires if queue gets stuck
@@ -217,6 +224,8 @@ struct iwl_pcie_txq_entry {
217struct iwl_txq { 224struct iwl_txq {
218 struct iwl_queue q; 225 struct iwl_queue q;
219 struct iwl_tfd *tfds; 226 struct iwl_tfd *tfds;
227 struct iwl_pcie_txq_scratch_buf *scratchbufs;
228 dma_addr_t scratchbufs_dma;
220 struct iwl_pcie_txq_entry *entries; 229 struct iwl_pcie_txq_entry *entries;
221 spinlock_t lock; 230 spinlock_t lock;
222 struct timer_list stuck_timer; 231 struct timer_list stuck_timer;
@@ -225,6 +234,13 @@ struct iwl_txq {
225 u8 active; 234 u8 active;
226}; 235};
227 236
237static inline dma_addr_t
238iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
239{
240 return txq->scratchbufs_dma +
241 sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
242}
243
228/** 244/**
229 * struct iwl_trans_pcie - PCIe transport specific data 245 * struct iwl_trans_pcie - PCIe transport specific data
230 * @rxq: all the RX queue data 246 * @rxq: all the RX queue data
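Each command-queue entry now owns a fixed 16-byte scratch buffer -- command header, 8 bytes of payload, then the 4-byte scratch word the hardware writes back -- allocated as one coherent array, so the DMA address of entry idx is just base + idx * sizeof(scratch buffer). The layout check below mirrors the BUILD_BUG_ONs added in tx.c; the 4-byte header here is only an analogue of struct iwl_cmd_header:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IWL_HCMD_SCRATCHBUF_SIZE 16

struct cmd_header {              /* 4-byte stand-in for struct iwl_cmd_header */
    uint8_t cmd, flags;
    uint16_t sequence;
};

struct scratch_buf {             /* mirrors iwl_pcie_txq_scratch_buf */
    struct cmd_header hdr;
    uint8_t buf[8];
    uint32_t scratch;
};

static uintptr_t scratchbuf_dma(uintptr_t base, int idx)
{
    return base + sizeof(struct scratch_buf) * idx;   /* as in the new helper */
}

int main(void)
{
    static_assert(sizeof(struct scratch_buf) == IWL_HCMD_SCRATCHBUF_SIZE,
                  "scratch buffer must stay 16 bytes");
    static_assert(offsetof(struct scratch_buf, scratch) == 12,
                  "hardware writes the scratch word at offset 12");

    printf("entry 3 sits at DMA offset %zu\n",
           (size_t)scratchbuf_dma(0, 3));             /* 48 */
    return 0;
}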
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index b0ae06d2456f..567e67ad1f61 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -637,22 +637,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
637 index = SEQ_TO_INDEX(sequence); 637 index = SEQ_TO_INDEX(sequence);
638 cmd_index = get_cmd_index(&txq->q, index); 638 cmd_index = get_cmd_index(&txq->q, index);
639 639
640 if (reclaim) { 640 if (reclaim)
641 struct iwl_pcie_txq_entry *ent; 641 cmd = txq->entries[cmd_index].cmd;
642 ent = &txq->entries[cmd_index]; 642 else
643 cmd = ent->copy_cmd;
644 WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
645 } else {
646 cmd = NULL; 643 cmd = NULL;
647 }
648 644
649 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); 645 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
650 646
651 if (reclaim) { 647 if (reclaim) {
652 /* The original command isn't needed any more */
653 kfree(txq->entries[cmd_index].copy_cmd);
654 txq->entries[cmd_index].copy_cmd = NULL;
655 /* nor is the duplicated part of the command */
656 kfree(txq->entries[cmd_index].free_buf); 648 kfree(txq->entries[cmd_index].free_buf);
657 txq->entries[cmd_index].free_buf = NULL; 649 txq->entries[cmd_index].free_buf = NULL;
658 } 650 }
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8b625a7f5685..8595c16f74de 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -191,12 +191,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
191 } 191 }
192 192
193 for (i = q->read_ptr; i != q->write_ptr; 193 for (i = q->read_ptr; i != q->write_ptr;
194 i = iwl_queue_inc_wrap(i, q->n_bd)) { 194 i = iwl_queue_inc_wrap(i, q->n_bd))
195 struct iwl_tx_cmd *tx_cmd =
196 (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
197 IWL_ERR(trans, "scratch %d = 0x%08x\n", i, 195 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
198 get_unaligned_le32(&tx_cmd->scratch)); 196 le32_to_cpu(txq->scratchbufs[i].scratch));
199 }
200 197
201 iwl_op_mode_nic_error(trans->op_mode); 198 iwl_op_mode_nic_error(trans->op_mode);
202} 199}
@@ -367,8 +364,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
367} 364}
368 365
369static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, 366static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
370 struct iwl_cmd_meta *meta, struct iwl_tfd *tfd, 367 struct iwl_cmd_meta *meta,
371 enum dma_data_direction dma_dir) 368 struct iwl_tfd *tfd)
372{ 369{
373 int i; 370 int i;
374 int num_tbs; 371 int num_tbs;
@@ -382,17 +379,12 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
382 return; 379 return;
383 } 380 }
384 381
385 /* Unmap tx_cmd */ 382 /* first TB is never freed - it's the scratchbuf data */
386 if (num_tbs)
387 dma_unmap_single(trans->dev,
388 dma_unmap_addr(meta, mapping),
389 dma_unmap_len(meta, len),
390 DMA_BIDIRECTIONAL);
391 383
392 /* Unmap chunks, if any. */
393 for (i = 1; i < num_tbs; i++) 384 for (i = 1; i < num_tbs; i++)
394 dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), 385 dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
395 iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir); 386 iwl_pcie_tfd_tb_get_len(tfd, i),
387 DMA_TO_DEVICE);
396 388
397 tfd->num_tbs = 0; 389 tfd->num_tbs = 0;
398} 390}
@@ -406,8 +398,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
406 * Does NOT advance any TFD circular buffer read/write indexes 398 * Does NOT advance any TFD circular buffer read/write indexes
407 * Does NOT free the TFD itself (which is within circular buffer) 399 * Does NOT free the TFD itself (which is within circular buffer)
408 */ 400 */
409static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 401static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
410 enum dma_data_direction dma_dir)
411{ 402{
412 struct iwl_tfd *tfd_tmp = txq->tfds; 403 struct iwl_tfd *tfd_tmp = txq->tfds;
413 404
@@ -418,8 +409,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
418 lockdep_assert_held(&txq->lock); 409 lockdep_assert_held(&txq->lock);
419 410
420 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ 411 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
421 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], 412 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
422 dma_dir);
423 413
424 /* free SKB */ 414 /* free SKB */
425 if (txq->entries) { 415 if (txq->entries) {
@@ -479,6 +469,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
479{ 469{
480 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 470 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
481 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; 471 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
472 size_t scratchbuf_sz;
482 int i; 473 int i;
483 474
484 if (WARN_ON(txq->entries || txq->tfds)) 475 if (WARN_ON(txq->entries || txq->tfds))
@@ -514,9 +505,25 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
514 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz); 505 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
515 goto error; 506 goto error;
516 } 507 }
508
509 BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
510 BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
511 sizeof(struct iwl_cmd_header) +
512 offsetof(struct iwl_tx_cmd, scratch));
513
514 scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
515
516 txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
517 &txq->scratchbufs_dma,
518 GFP_KERNEL);
519 if (!txq->scratchbufs)
520 goto err_free_tfds;
521
517 txq->q.id = txq_id; 522 txq->q.id = txq_id;
518 523
519 return 0; 524 return 0;
525err_free_tfds:
526 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
520error: 527error:
521 if (txq->entries && txq_id == trans_pcie->cmd_queue) 528 if (txq->entries && txq_id == trans_pcie->cmd_queue)
522 for (i = 0; i < slots_num; i++) 529 for (i = 0; i < slots_num; i++)
@@ -565,22 +572,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
565 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 572 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
566 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 573 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
567 struct iwl_queue *q = &txq->q; 574 struct iwl_queue *q = &txq->q;
568 enum dma_data_direction dma_dir;
569 575
570 if (!q->n_bd) 576 if (!q->n_bd)
571 return; 577 return;
572 578
573 /* In the command queue, all the TBs are mapped as BIDI
574 * so unmap them as such.
575 */
576 if (txq_id == trans_pcie->cmd_queue)
577 dma_dir = DMA_BIDIRECTIONAL;
578 else
579 dma_dir = DMA_TO_DEVICE;
580
581 spin_lock_bh(&txq->lock); 579 spin_lock_bh(&txq->lock);
582 while (q->write_ptr != q->read_ptr) { 580 while (q->write_ptr != q->read_ptr) {
583 iwl_pcie_txq_free_tfd(trans, txq, dma_dir); 581 iwl_pcie_txq_free_tfd(trans, txq);
584 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 582 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
585 } 583 }
586 spin_unlock_bh(&txq->lock); 584 spin_unlock_bh(&txq->lock);
@@ -610,7 +608,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
610 if (txq_id == trans_pcie->cmd_queue) 608 if (txq_id == trans_pcie->cmd_queue)
611 for (i = 0; i < txq->q.n_window; i++) { 609 for (i = 0; i < txq->q.n_window; i++) {
612 kfree(txq->entries[i].cmd); 610 kfree(txq->entries[i].cmd);
613 kfree(txq->entries[i].copy_cmd);
614 kfree(txq->entries[i].free_buf); 611 kfree(txq->entries[i].free_buf);
615 } 612 }
616 613
@@ -619,6 +616,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
619 dma_free_coherent(dev, sizeof(struct iwl_tfd) * 616 dma_free_coherent(dev, sizeof(struct iwl_tfd) *
620 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 617 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
621 txq->q.dma_addr = 0; 618 txq->q.dma_addr = 0;
619
620 dma_free_coherent(dev,
621 sizeof(*txq->scratchbufs) * txq->q.n_window,
622 txq->scratchbufs, txq->scratchbufs_dma);
622 } 623 }
623 624
624 kfree(txq->entries); 625 kfree(txq->entries);
@@ -962,7 +963,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
962 963
963 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); 964 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
964 965
965 iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE); 966 iwl_pcie_txq_free_tfd(trans, txq);
966 } 967 }
967 968
968 iwl_pcie_txq_progress(trans_pcie, txq); 969 iwl_pcie_txq_progress(trans_pcie, txq);
@@ -1152,29 +1153,29 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1152 void *dup_buf = NULL; 1153 void *dup_buf = NULL;
1153 dma_addr_t phys_addr; 1154 dma_addr_t phys_addr;
1154 int idx; 1155 int idx;
1155 u16 copy_size, cmd_size, dma_size; 1156 u16 copy_size, cmd_size, scratch_size;
1156 bool had_nocopy = false; 1157 bool had_nocopy = false;
1157 int i; 1158 int i;
1158 u32 cmd_pos; 1159 u32 cmd_pos;
1159 const u8 *cmddata[IWL_MAX_CMD_TFDS]; 1160 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
1160 u16 cmdlen[IWL_MAX_CMD_TFDS]; 1161 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
1161 1162
1162 copy_size = sizeof(out_cmd->hdr); 1163 copy_size = sizeof(out_cmd->hdr);
1163 cmd_size = sizeof(out_cmd->hdr); 1164 cmd_size = sizeof(out_cmd->hdr);
1164 1165
1165 /* need one for the header if the first is NOCOPY */ 1166 /* need one for the header if the first is NOCOPY */
1166 BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); 1167 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
1167 1168
1168 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1169 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1169 cmddata[i] = cmd->data[i]; 1170 cmddata[i] = cmd->data[i];
1170 cmdlen[i] = cmd->len[i]; 1171 cmdlen[i] = cmd->len[i];
1171 1172
1172 if (!cmd->len[i]) 1173 if (!cmd->len[i])
1173 continue; 1174 continue;
1174 1175
1175 /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ 1176 /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
1176 if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { 1177 if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
1177 int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; 1178 int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
1178 1179
1179 if (copy > cmdlen[i]) 1180 if (copy > cmdlen[i])
1180 copy = cmdlen[i]; 1181 copy = cmdlen[i];
@@ -1260,15 +1261,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1260 /* and copy the data that needs to be copied */ 1261 /* and copy the data that needs to be copied */
1261 cmd_pos = offsetof(struct iwl_device_cmd, payload); 1262 cmd_pos = offsetof(struct iwl_device_cmd, payload);
1262 copy_size = sizeof(out_cmd->hdr); 1263 copy_size = sizeof(out_cmd->hdr);
1263 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1264 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1264 int copy = 0; 1265 int copy = 0;
1265 1266
1266 if (!cmd->len) 1267 if (!cmd->len)
1267 continue; 1268 continue;
1268 1269
1269 /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ 1270 /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
1270 if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { 1271 if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
1271 copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; 1272 copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
1272 1273
1273 if (copy > cmd->len[i]) 1274 if (copy > cmd->len[i])
1274 copy = cmd->len[i]; 1275 copy = cmd->len[i];
@@ -1286,50 +1287,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1286 } 1287 }
1287 } 1288 }
1288 1289
1289 WARN_ON_ONCE(txq->entries[idx].copy_cmd);
1290
1291 /*
1292 * since out_cmd will be the source address of the FH, it will write
1293 * the retry count there. So when the user needs to receive the HCMD
1294 * that corresponds to the response in the response handler, it needs
1295 * to set CMD_WANT_HCMD.
1296 */
1297 if (cmd->flags & CMD_WANT_HCMD) {
1298 txq->entries[idx].copy_cmd =
1299 kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
1300 if (unlikely(!txq->entries[idx].copy_cmd)) {
1301 idx = -ENOMEM;
1302 goto out;
1303 }
1304 }
1305
1306 IWL_DEBUG_HC(trans, 1290 IWL_DEBUG_HC(trans,
1307 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 1291 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
1308 get_cmd_string(trans_pcie, out_cmd->hdr.cmd), 1292 get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
1309 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), 1293 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
1310 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); 1294 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
1311 1295
1312 /* 1296 /* start the TFD with the scratchbuf */
1313 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must 1297 scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
1314 * still map at least that many bytes for the hardware to write back to. 1298 memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
1315 * We have enough space, so that's not a problem. 1299 iwl_pcie_txq_build_tfd(trans, txq,
1316 */ 1300 iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
1317 dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE); 1301 scratch_size, 1);
1302
1303 /* map first command fragment, if any remains */
1304 if (copy_size > scratch_size) {
1305 phys_addr = dma_map_single(trans->dev,
1306 ((u8 *)&out_cmd->hdr) + scratch_size,
1307 copy_size - scratch_size,
1308 DMA_TO_DEVICE);
1309 if (dma_mapping_error(trans->dev, phys_addr)) {
1310 iwl_pcie_tfd_unmap(trans, out_meta,
1311 &txq->tfds[q->write_ptr]);
1312 idx = -ENOMEM;
1313 goto out;
1314 }
1318 1315
1319 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size, 1316 iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
1320 DMA_BIDIRECTIONAL); 1317 copy_size - scratch_size, 0);
1321 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
1322 idx = -ENOMEM;
1323 goto out;
1324 } 1318 }
1325 1319
1326 dma_unmap_addr_set(out_meta, mapping, phys_addr);
1327 dma_unmap_len_set(out_meta, len, dma_size);
1328
1329 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
1330
1331 /* map the remaining (adjusted) nocopy/dup fragments */ 1320 /* map the remaining (adjusted) nocopy/dup fragments */
1332 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1321 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1333 const void *data = cmddata[i]; 1322 const void *data = cmddata[i];
1334 1323
1335 if (!cmdlen[i]) 1324 if (!cmdlen[i])
@@ -1340,11 +1329,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1340 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1329 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1341 data = dup_buf; 1330 data = dup_buf;
1342 phys_addr = dma_map_single(trans->dev, (void *)data, 1331 phys_addr = dma_map_single(trans->dev, (void *)data,
1343 cmdlen[i], DMA_BIDIRECTIONAL); 1332 cmdlen[i], DMA_TO_DEVICE);
1344 if (dma_mapping_error(trans->dev, phys_addr)) { 1333 if (dma_mapping_error(trans->dev, phys_addr)) {
1345 iwl_pcie_tfd_unmap(trans, out_meta, 1334 iwl_pcie_tfd_unmap(trans, out_meta,
1346 &txq->tfds[q->write_ptr], 1335 &txq->tfds[q->write_ptr]);
1347 DMA_BIDIRECTIONAL);
1348 idx = -ENOMEM; 1336 idx = -ENOMEM;
1349 goto out; 1337 goto out;
1350 } 1338 }
@@ -1418,7 +1406,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1418 cmd = txq->entries[cmd_index].cmd; 1406 cmd = txq->entries[cmd_index].cmd;
1419 meta = &txq->entries[cmd_index].meta; 1407 meta = &txq->entries[cmd_index].meta;
1420 1408
1421 iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); 1409 iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
1422 1410
1423 /* Input error checking is done when commands are added to queue. */ 1411 /* Input error checking is done when commands are added to queue. */
1424 if (meta->flags & CMD_WANT_SKB) { 1412 if (meta->flags & CMD_WANT_SKB) {
@@ -1597,10 +1585,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1597 struct iwl_cmd_meta *out_meta; 1585 struct iwl_cmd_meta *out_meta;
1598 struct iwl_txq *txq; 1586 struct iwl_txq *txq;
1599 struct iwl_queue *q; 1587 struct iwl_queue *q;
1600 dma_addr_t phys_addr = 0; 1588 dma_addr_t tb0_phys, tb1_phys, scratch_phys;
1601 dma_addr_t txcmd_phys; 1589 void *tb1_addr;
1602 dma_addr_t scratch_phys; 1590 u16 len, tb1_len, tb2_len;
1603 u16 len, firstlen, secondlen;
1604 u8 wait_write_ptr = 0; 1591 u8 wait_write_ptr = 0;
1605 __le16 fc = hdr->frame_control; 1592 __le16 fc = hdr->frame_control;
1606 u8 hdr_len = ieee80211_hdrlen(fc); 1593 u8 hdr_len = ieee80211_hdrlen(fc);
@@ -1638,85 +1625,80 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1638 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 1625 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1639 INDEX_TO_SEQ(q->write_ptr))); 1626 INDEX_TO_SEQ(q->write_ptr)));
1640 1627
1628 tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
1629 scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
1630 offsetof(struct iwl_tx_cmd, scratch);
1631
1632 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1633 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1634
1641 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 1635 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1642 out_meta = &txq->entries[q->write_ptr].meta; 1636 out_meta = &txq->entries[q->write_ptr].meta;
1643 1637
1644 /* 1638 /*
1645 * Use the first empty entry in this queue's command buffer array 1639 * The second TB (tb1) points to the remainder of the TX command
1646 * to contain the Tx command and MAC header concatenated together 1640 * and the 802.11 header - dword aligned size
1647 * (payload data will be in another buffer). 1641 * (This calculation modifies the TX command, so do it before the
1648 * Size of this varies, due to varying MAC header length. 1642 * setup of the first TB)
1649 * If end is not dword aligned, we'll have 2 extra bytes at the end
1650 * of the MAC header (device reads on dword boundaries).
1651 * We'll tell device about this padding later.
1652 */ 1643 */
1653 len = sizeof(struct iwl_tx_cmd) + 1644 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
1654 sizeof(struct iwl_cmd_header) + hdr_len; 1645 hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
1655 firstlen = (len + 3) & ~3; 1646 tb1_len = (len + 3) & ~3;
1656 1647
1657 /* Tell NIC about any 2-byte padding after MAC header */ 1648 /* Tell NIC about any 2-byte padding after MAC header */
1658 if (firstlen != len) 1649 if (tb1_len != len)
1659 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; 1650 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1660 1651
1661 /* Physical address of this Tx command's header (not MAC header!), 1652 /* The first TB points to the scratchbuf data - min_copy bytes */
1662 * within command buffer array. */ 1653 memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
1663 txcmd_phys = dma_map_single(trans->dev, 1654 IWL_HCMD_SCRATCHBUF_SIZE);
1664 &dev_cmd->hdr, firstlen, 1655 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
1665 DMA_BIDIRECTIONAL); 1656 IWL_HCMD_SCRATCHBUF_SIZE, 1);
1666 if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
1667 goto out_err;
1668 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1669 dma_unmap_len_set(out_meta, len, firstlen);
1670 1657
1671 if (!ieee80211_has_morefrags(fc)) { 1658 /* there must be data left over for TB1 or this code must be changed */
1672 txq->need_update = 1; 1659 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
1673 } else {
1674 wait_write_ptr = 1;
1675 txq->need_update = 0;
1676 }
1677 1660
1678 /* Set up TFD's 2nd entry to point directly to remainder of skb, 1661 /* map the data for TB1 */
1679 * if any (802.11 null frames have no payload). */ 1662 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
1680 secondlen = skb->len - hdr_len; 1663 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
1681 if (secondlen > 0) { 1664 if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
1682 phys_addr = dma_map_single(trans->dev, skb->data + hdr_len, 1665 goto out_err;
1683 secondlen, DMA_TO_DEVICE); 1666 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
1684 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { 1667
1685 dma_unmap_single(trans->dev, 1668 /*
1686 dma_unmap_addr(out_meta, mapping), 1669 * Set up TFD's third entry to point directly to remainder
1687 dma_unmap_len(out_meta, len), 1670 * of skb, if any (802.11 null frames have no payload).
1688 DMA_BIDIRECTIONAL); 1671 */
1672 tb2_len = skb->len - hdr_len;
1673 if (tb2_len > 0) {
1674 dma_addr_t tb2_phys = dma_map_single(trans->dev,
1675 skb->data + hdr_len,
1676 tb2_len, DMA_TO_DEVICE);
1677 if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
1678 iwl_pcie_tfd_unmap(trans, out_meta,
1679 &txq->tfds[q->write_ptr]);
1689 goto out_err; 1680 goto out_err;
1690 } 1681 }
1682 iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
1691 } 1683 }
1692 1684
1693 /* Attach buffers to TFD */
1694 iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
1695 if (secondlen > 0)
1696 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
1697
1698 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1699 offsetof(struct iwl_tx_cmd, scratch);
1700
1701 /* take back ownership of DMA buffer to enable update */
1702 dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
1703 DMA_BIDIRECTIONAL);
1704 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1705 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1706
1707 /* Set up entry for this TFD in Tx byte-count array */ 1685 /* Set up entry for this TFD in Tx byte-count array */
1708 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); 1686 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
1709 1687
1710 dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
1711 DMA_BIDIRECTIONAL);
1712
1713 trace_iwlwifi_dev_tx(trans->dev, skb, 1688 trace_iwlwifi_dev_tx(trans->dev, skb,
1714 &txq->tfds[txq->q.write_ptr], 1689 &txq->tfds[txq->q.write_ptr],
1715 sizeof(struct iwl_tfd), 1690 sizeof(struct iwl_tfd),
1716 &dev_cmd->hdr, firstlen, 1691 &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
1717 skb->data + hdr_len, secondlen); 1692 skb->data + hdr_len, tb2_len);
1718 trace_iwlwifi_dev_tx_data(trans->dev, skb, 1693 trace_iwlwifi_dev_tx_data(trans->dev, skb,
1719 skb->data + hdr_len, secondlen); 1694 skb->data + hdr_len, tb2_len);
1695
1696 if (!ieee80211_has_morefrags(fc)) {
1697 txq->need_update = 1;
1698 } else {
1699 wait_write_ptr = 1;
1700 txq->need_update = 0;
1701 }
1720 1702
1721 /* start timer if queue currently empty */ 1703 /* start timer if queue currently empty */
1722 if (txq->need_update && q->read_ptr == q->write_ptr && 1704 if (txq->need_update && q->read_ptr == q->write_ptr &&
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index fbabb906066f..178b90c229b5 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -845,15 +845,8 @@ int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
845 return err; 845 return err;
846 846
847 if ((attr->ia_valid & ATTR_SIZE) && 847 if ((attr->ia_valid & ATTR_SIZE) &&
848 attr->ia_size != i_size_read(inode)) { 848 attr->ia_size != i_size_read(inode))
849 int error;
850
851 error = inode_newsize_ok(inode, attr->ia_size);
852 if (error)
853 return error;
854
855 truncate_setsize(inode, attr->ia_size); 849 truncate_setsize(inode, attr->ia_size);
856 }
857 850
858 setattr_copy(inode, attr); 851 setattr_copy(inode, attr);
859 mark_inode_dirty(inode); 852 mark_inode_dirty(inode);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 27cfda427dd9..aa7032c7238f 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -206,6 +206,7 @@ struct clocksource {
206#define CLOCK_SOURCE_WATCHDOG 0x10 206#define CLOCK_SOURCE_WATCHDOG 0x10
207#define CLOCK_SOURCE_VALID_FOR_HRES 0x20 207#define CLOCK_SOURCE_VALID_FOR_HRES 0x20
208#define CLOCK_SOURCE_UNSTABLE 0x40 208#define CLOCK_SOURCE_UNSTABLE 0x40
209#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
209 210
210/* simplify initialization of mask field */ 211/* simplify initialization of mask field */
211#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) 212#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index cc07d2777bbe..d19a5c2d2270 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -157,6 +157,7 @@ enum hrtimer_base_type {
157 HRTIMER_BASE_MONOTONIC, 157 HRTIMER_BASE_MONOTONIC,
158 HRTIMER_BASE_REALTIME, 158 HRTIMER_BASE_REALTIME,
159 HRTIMER_BASE_BOOTTIME, 159 HRTIMER_BASE_BOOTTIME,
160 HRTIMER_BASE_TAI,
160 HRTIMER_MAX_CLOCK_BASES, 161 HRTIMER_MAX_CLOCK_BASES,
161}; 162};
162 163
@@ -327,7 +328,9 @@ extern ktime_t ktime_get(void);
327extern ktime_t ktime_get_real(void); 328extern ktime_t ktime_get_real(void);
328extern ktime_t ktime_get_boottime(void); 329extern ktime_t ktime_get_boottime(void);
329extern ktime_t ktime_get_monotonic_offset(void); 330extern ktime_t ktime_get_monotonic_offset(void);
330extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot); 331extern ktime_t ktime_get_clocktai(void);
332extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
333 ktime_t *offs_tai);
331 334
332DECLARE_PER_CPU(struct tick_device, tick_cpu_device); 335DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
333 336
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 82ed068b1ebe..8fb8edf12417 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -75,7 +75,6 @@ extern int register_refined_jiffies(long clock_tick_rate);
75 */ 75 */
76extern u64 __jiffy_data jiffies_64; 76extern u64 __jiffy_data jiffies_64;
77extern unsigned long volatile __jiffy_data jiffies; 77extern unsigned long volatile __jiffy_data jiffies;
78extern seqlock_t jiffies_lock;
79 78
80#if (BITS_PER_LONG < 64) 79#if (BITS_PER_LONG < 64)
81u64 get_jiffies_64(void); 80u64 get_jiffies_64(void);
diff --git a/include/linux/time.h b/include/linux/time.h
index d4835dfdf25e..22d81b3c955b 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -181,6 +181,9 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
181extern int timekeeping_valid_for_hres(void); 181extern int timekeeping_valid_for_hres(void);
182extern u64 timekeeping_max_deferment(void); 182extern u64 timekeeping_max_deferment(void);
183extern int timekeeping_inject_offset(struct timespec *ts); 183extern int timekeeping_inject_offset(struct timespec *ts);
184extern s32 timekeeping_get_tai_offset(void);
185extern void timekeeping_set_tai_offset(s32 tai_offset);
186extern void timekeeping_clocktai(struct timespec *ts);
184 187
185struct tms; 188struct tms;
186extern void do_sys_times(struct tms *); 189extern void do_sys_times(struct tms *);
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index e1d558e237ec..a151bd70e52b 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -62,8 +62,11 @@ struct timekeeper {
62 ktime_t offs_boot; 62 ktime_t offs_boot;
63 /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */ 63 /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
64 struct timespec raw_time; 64 struct timespec raw_time;
65 /* Seqlock for all timekeeper values */ 65 /* The current UTC to TAI offset in seconds */
66 seqlock_t lock; 66 s32 tai_offset;
67 /* Offset clock monotonic -> clock tai */
68 ktime_t offs_tai;
69
67}; 70};
68 71
69static inline struct timespec tk_xtime(struct timekeeper *tk) 72static inline struct timespec tk_xtime(struct timekeeper *tk)
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index 0d3c0edc3eda..e75e1b6ff27f 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -54,11 +54,9 @@ struct itimerval {
54#define CLOCK_BOOTTIME 7 54#define CLOCK_BOOTTIME 7
55#define CLOCK_REALTIME_ALARM 8 55#define CLOCK_REALTIME_ALARM 8
56#define CLOCK_BOOTTIME_ALARM 9 56#define CLOCK_BOOTTIME_ALARM 9
57#define CLOCK_SGI_CYCLE 10 /* Hardware specific */
58#define CLOCK_TAI 11
57 59
58/*
59 * The IDs of various hardware clocks:
60 */
61#define CLOCK_SGI_CYCLE 10
62#define MAX_CLOCKS 16 60#define MAX_CLOCKS 16
63#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC) 61#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC)
64#define CLOCKS_MONO CLOCK_MONOTONIC 62#define CLOCKS_MONO CLOCK_MONOTONIC
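With CLOCK_TAI exported to userspace as clock id 11 (and registered as a posix clock later in this series), it can be read like any other clock. A hedged example, assuming a kernel with this series applied; the fallback define reuses the value from the header above in case the libc headers predate it.

#include <stdio.h>
#include <time.h>

#ifndef CLOCK_TAI
#define CLOCK_TAI 11	/* value from the uapi change above */
#endif

int main(void)
{
	struct timespec utc, tai;

	if (clock_gettime(CLOCK_REALTIME, &utc) ||
	    clock_gettime(CLOCK_TAI, &tai)) {
		perror("clock_gettime");
		return 1;
	}
	/* until an ADJ_TAI offset is set, both clocks read the same value */
	printf("UTC: %lld.%09ld\nTAI: %lld.%09ld\n",
	       (long long)utc.tv_sec, utc.tv_nsec,
	       (long long)tai.tv_sec, tai.tv_nsec);
	return 0;
}

The two values only diverge once a TAI offset has been set, for example through adjtimex(ADJ_TAI) as shown further below.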
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 031184e3d872..d6830d5ae730 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -83,6 +83,12 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
83 .get_time = &ktime_get_boottime, 83 .get_time = &ktime_get_boottime,
84 .resolution = KTIME_LOW_RES, 84 .resolution = KTIME_LOW_RES,
85 }, 85 },
86 {
87 .index = HRTIMER_BASE_TAI,
88 .clockid = CLOCK_TAI,
89 .get_time = &ktime_get_clocktai,
90 .resolution = KTIME_LOW_RES,
91 },
86 } 92 }
87}; 93};
88 94
@@ -90,6 +96,7 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
90 [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, 96 [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
91 [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, 97 [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
92 [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME, 98 [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
99 [CLOCK_TAI] = HRTIMER_BASE_TAI,
93}; 100};
94 101
95static inline int hrtimer_clockid_to_base(clockid_t clock_id) 102static inline int hrtimer_clockid_to_base(clockid_t clock_id)
@@ -106,8 +113,10 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
106{ 113{
107 ktime_t xtim, mono, boot; 114 ktime_t xtim, mono, boot;
108 struct timespec xts, tom, slp; 115 struct timespec xts, tom, slp;
116 s32 tai_offset;
109 117
110 get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp); 118 get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp);
119 tai_offset = timekeeping_get_tai_offset();
111 120
112 xtim = timespec_to_ktime(xts); 121 xtim = timespec_to_ktime(xts);
113 mono = ktime_add(xtim, timespec_to_ktime(tom)); 122 mono = ktime_add(xtim, timespec_to_ktime(tom));
@@ -115,6 +124,8 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
115 base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim; 124 base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
116 base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono; 125 base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
117 base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot; 126 base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
127 base->clock_base[HRTIMER_BASE_TAI].softirq_time =
128 ktime_add(xtim, ktime_set(tai_offset, 0));
118} 129}
119 130
120/* 131/*
@@ -651,8 +662,9 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
651{ 662{
652 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; 663 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
653 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; 664 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
665 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
654 666
655 return ktime_get_update_offsets(offs_real, offs_boot); 667 return ktime_get_update_offsets(offs_real, offs_boot, offs_tai);
656} 668}
657 669
658/* 670/*
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 6edbb2c55c22..2a2e173d0a7a 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -221,6 +221,11 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
221 return 0; 221 return 0;
222} 222}
223 223
224static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
225{
226 timekeeping_clocktai(tp);
227 return 0;
228}
224 229
225/* 230/*
226 * Initialize everything, well, just everything in Posix clocks/timers ;) 231 * Initialize everything, well, just everything in Posix clocks/timers ;)
@@ -261,6 +266,16 @@ static __init int init_posix_timers(void)
261 .clock_getres = posix_get_coarse_res, 266 .clock_getres = posix_get_coarse_res,
262 .clock_get = posix_get_monotonic_coarse, 267 .clock_get = posix_get_monotonic_coarse,
263 }; 268 };
269 struct k_clock clock_tai = {
270 .clock_getres = hrtimer_get_res,
271 .clock_get = posix_get_tai,
272 .nsleep = common_nsleep,
273 .nsleep_restart = hrtimer_nanosleep_restart,
274 .timer_create = common_timer_create,
275 .timer_set = common_timer_set,
276 .timer_get = common_timer_get,
277 .timer_del = common_timer_del,
278 };
264 struct k_clock clock_boottime = { 279 struct k_clock clock_boottime = {
265 .clock_getres = hrtimer_get_res, 280 .clock_getres = hrtimer_get_res,
266 .clock_get = posix_get_boottime, 281 .clock_get = posix_get_boottime,
@@ -278,6 +293,7 @@ static __init int init_posix_timers(void)
278 posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse); 293 posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
279 posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse); 294 posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
280 posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime); 295 posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
296 posix_timers_register_clock(CLOCK_TAI, &clock_tai);
281 297
282 posix_timers_cache = kmem_cache_create("posix_timers_cache", 298 posix_timers_cache = kmem_cache_create("posix_timers_cache",
283 sizeof (struct k_itimer), 0, SLAB_PANIC, 299 sizeof (struct k_itimer), 0, SLAB_PANIC,
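Because the new clock_tai k_clock wires up common_nsleep and the common timer operations, CLOCK_TAI is also usable for clock_nanosleep() and POSIX timers. A minimal sketch, again assuming a kernel with this series and falling back to clock id 11 if the headers lack the constant:

#include <stdio.h>
#include <string.h>
#include <time.h>

#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

int main(void)
{
	struct timespec req = { .tv_sec = 1, .tv_nsec = 0 };
	int err;

	/* relative one-second sleep against the TAI clock */
	err = clock_nanosleep(CLOCK_TAI, 0, &req, NULL);
	if (err) {
		fprintf(stderr, "clock_nanosleep: %s\n", strerror(err));
		return 1;
	}
	puts("slept 1s on CLOCK_TAI");
	return 0;
}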
diff --git a/kernel/time.c b/kernel/time.c
index f8342a41efa6..d3617dbd3dca 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -138,13 +138,14 @@ int persistent_clock_is_local;
138 */ 138 */
139static inline void warp_clock(void) 139static inline void warp_clock(void)
140{ 140{
141 struct timespec adjust; 141 if (sys_tz.tz_minuteswest != 0) {
142 struct timespec adjust;
142 143
143 adjust = current_kernel_time();
144 if (sys_tz.tz_minuteswest != 0)
145 persistent_clock_is_local = 1; 144 persistent_clock_is_local = 1;
146 adjust.tv_sec += sys_tz.tz_minuteswest * 60; 145 adjust.tv_sec = sys_tz.tz_minuteswest * 60;
147 do_settimeofday(&adjust); 146 adjust.tv_nsec = 0;
147 timekeeping_inject_offset(&adjust);
148 }
148} 149}
149 150
150/* 151/*
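warp_clock() now injects the timezone offset through timekeeping_inject_offset() rather than re-reading and resetting the wall clock, and only does so when tz_minuteswest is non-zero. A standalone sketch of that decision; the types and names are simplified stand-ins, not the kernel's own:

#include <stdio.h>

struct adj { long tv_sec; long tv_nsec; };

/* Sketch of the reworked warp_clock(): when the timezone passed to
 * settimeofday() has a non-zero minuteswest, the offset is handed to
 * timekeeping_inject_offset() once instead of rewriting the wall time. */
static int warp_clock_sketch(int tz_minuteswest, struct adj *adjust)
{
	if (tz_minuteswest == 0)
		return 0;			/* nothing to inject */

	adjust->tv_sec = tz_minuteswest * 60L;	/* seconds added to the clock */
	adjust->tv_nsec = 0;
	return 1;				/* persistent clock is local */
}

int main(void)
{
	struct adj a;

	if (warp_clock_sketch(-120, &a))	/* e.g. RTC in local time, UTC+2 */
		printf("inject offset of %+ld s\n", a.tv_sec);
	return 0;
}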
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 072bb066bb7d..59e2749be0fa 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -53,9 +53,6 @@ static int time_state = TIME_OK;
53/* clock status bits: */ 53/* clock status bits: */
54static int time_status = STA_UNSYNC; 54static int time_status = STA_UNSYNC;
55 55
56/* TAI offset (secs): */
57static long time_tai;
58
59/* time adjustment (nsecs): */ 56/* time adjustment (nsecs): */
60static s64 time_offset; 57static s64 time_offset;
61 58
@@ -415,7 +412,6 @@ int second_overflow(unsigned long secs)
415 else if (secs % 86400 == 0) { 412 else if (secs % 86400 == 0) {
416 leap = -1; 413 leap = -1;
417 time_state = TIME_OOP; 414 time_state = TIME_OOP;
418 time_tai++;
419 printk(KERN_NOTICE 415 printk(KERN_NOTICE
420 "Clock: inserting leap second 23:59:60 UTC\n"); 416 "Clock: inserting leap second 23:59:60 UTC\n");
421 } 417 }
@@ -425,7 +421,6 @@ int second_overflow(unsigned long secs)
425 time_state = TIME_OK; 421 time_state = TIME_OK;
426 else if ((secs + 1) % 86400 == 0) { 422 else if ((secs + 1) % 86400 == 0) {
427 leap = 1; 423 leap = 1;
428 time_tai--;
429 time_state = TIME_WAIT; 424 time_state = TIME_WAIT;
430 printk(KERN_NOTICE 425 printk(KERN_NOTICE
431 "Clock: deleting leap second 23:59:59 UTC\n"); 426 "Clock: deleting leap second 23:59:59 UTC\n");
@@ -579,7 +574,9 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
579 * Called with ntp_lock held, so we can access and modify 574 * Called with ntp_lock held, so we can access and modify
580 * all the global NTP state: 575 * all the global NTP state:
581 */ 576 */
582static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts) 577static inline void process_adjtimex_modes(struct timex *txc,
578 struct timespec *ts,
579 s32 *time_tai)
583{ 580{
584 if (txc->modes & ADJ_STATUS) 581 if (txc->modes & ADJ_STATUS)
585 process_adj_status(txc, ts); 582 process_adj_status(txc, ts);
@@ -613,7 +610,7 @@ static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts
613 } 610 }
614 611
615 if (txc->modes & ADJ_TAI && txc->constant > 0) 612 if (txc->modes & ADJ_TAI && txc->constant > 0)
616 time_tai = txc->constant; 613 *time_tai = txc->constant;
617 614
618 if (txc->modes & ADJ_OFFSET) 615 if (txc->modes & ADJ_OFFSET)
619 ntp_update_offset(txc->offset); 616 ntp_update_offset(txc->offset);
@@ -632,6 +629,7 @@ static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts
632int do_adjtimex(struct timex *txc) 629int do_adjtimex(struct timex *txc)
633{ 630{
634 struct timespec ts; 631 struct timespec ts;
632 u32 time_tai, orig_tai;
635 int result; 633 int result;
636 634
637 /* Validate the data before disabling interrupts */ 635 /* Validate the data before disabling interrupts */
@@ -671,6 +669,7 @@ int do_adjtimex(struct timex *txc)
671 } 669 }
672 670
673 getnstimeofday(&ts); 671 getnstimeofday(&ts);
672 orig_tai = time_tai = timekeeping_get_tai_offset();
674 673
675 raw_spin_lock_irq(&ntp_lock); 674 raw_spin_lock_irq(&ntp_lock);
676 675
@@ -687,7 +686,7 @@ int do_adjtimex(struct timex *txc)
687 686
688 /* If there are input parameters, then process them: */ 687 /* If there are input parameters, then process them: */
689 if (txc->modes) 688 if (txc->modes)
690 process_adjtimex_modes(txc, &ts); 689 process_adjtimex_modes(txc, &ts, &time_tai);
691 690
692 txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ, 691 txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
693 NTP_SCALE_SHIFT); 692 NTP_SCALE_SHIFT);
@@ -716,6 +715,9 @@ int do_adjtimex(struct timex *txc)
716 715
717 raw_spin_unlock_irq(&ntp_lock); 716 raw_spin_unlock_irq(&ntp_lock);
718 717
718 if (time_tai != orig_tai)
719 timekeeping_set_tai_offset(time_tai);
720
719 txc->time.tv_sec = ts.tv_sec; 721 txc->time.tv_sec = ts.tv_sec;
720 txc->time.tv_usec = ts.tv_nsec; 722 txc->time.tv_usec = ts.tv_nsec;
721 if (!(time_status & STA_NANO)) 723 if (!(time_status & STA_NANO))
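On the NTP side the TAI offset now lives in the timekeeper: do_adjtimex() samples it with timekeeping_get_tai_offset(), lets process_adjtimex_modes() update the local copy when ADJ_TAI is requested, and writes it back with timekeeping_set_tai_offset() only if it changed. The same knob is reachable from userspace through adjtimex(2); a hedged sketch, which needs CAP_SYS_TIME to actually change anything:

#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

int main(void)
{
	struct timex txc;

	memset(&txc, 0, sizeof(txc));
	txc.modes = ADJ_TAI;
	txc.constant = 35;	/* example UTC->TAI offset; must be > 0 */

	if (adjtimex(&txc) == -1) {	/* requires CAP_SYS_TIME to modify */
		perror("adjtimex");
		return 1;
	}
	printf("kernel reports tai offset = %ld\n", (long)txc.tai);
	return 0;
}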
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 46d9bd02844c..f0299eae4602 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -4,6 +4,8 @@
4#include <linux/hrtimer.h> 4#include <linux/hrtimer.h>
5#include <linux/tick.h> 5#include <linux/tick.h>
6 6
7extern seqlock_t jiffies_lock;
8
7#ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD 9#ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD
8 10
9#define TICK_DO_TIMER_NONE -1 11#define TICK_DO_TIMER_NONE -1
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 9a0bc98fbe1d..c5feb7aa3acb 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -23,8 +23,11 @@
23#include <linux/stop_machine.h> 23#include <linux/stop_machine.h>
24#include <linux/pvclock_gtod.h> 24#include <linux/pvclock_gtod.h>
25 25
26#include "tick-internal.h"
26 27
27static struct timekeeper timekeeper; 28static struct timekeeper timekeeper;
29static DEFINE_RAW_SPINLOCK(timekeeper_lock);
30static seqcount_t timekeeper_seq;
28 31
29/* flag for if timekeeping is suspended */ 32/* flag for if timekeeping is suspended */
30int __read_mostly timekeeping_suspended; 33int __read_mostly timekeeping_suspended;
@@ -67,6 +70,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
67 tk->wall_to_monotonic = wtm; 70 tk->wall_to_monotonic = wtm;
68 set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec); 71 set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
69 tk->offs_real = timespec_to_ktime(tmp); 72 tk->offs_real = timespec_to_ktime(tmp);
73 tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0));
70} 74}
71 75
72static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t) 76static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
@@ -201,8 +205,6 @@ static void update_pvclock_gtod(struct timekeeper *tk)
201 205
202/** 206/**
203 * pvclock_gtod_register_notifier - register a pvclock timedata update listener 207 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
204 *
205 * Must hold write on timekeeper.lock
206 */ 208 */
207int pvclock_gtod_register_notifier(struct notifier_block *nb) 209int pvclock_gtod_register_notifier(struct notifier_block *nb)
208{ 210{
@@ -210,11 +212,10 @@ int pvclock_gtod_register_notifier(struct notifier_block *nb)
210 unsigned long flags; 212 unsigned long flags;
211 int ret; 213 int ret;
212 214
213 write_seqlock_irqsave(&tk->lock, flags); 215 raw_spin_lock_irqsave(&timekeeper_lock, flags);
214 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb); 216 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
215 /* update timekeeping data */
216 update_pvclock_gtod(tk); 217 update_pvclock_gtod(tk);
217 write_sequnlock_irqrestore(&tk->lock, flags); 218 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
218 219
219 return ret; 220 return ret;
220} 221}
@@ -223,24 +224,21 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
223/** 224/**
224 * pvclock_gtod_unregister_notifier - unregister a pvclock 225 * pvclock_gtod_unregister_notifier - unregister a pvclock
225 * timedata update listener 226 * timedata update listener
226 *
227 * Must hold write on timekeeper.lock
228 */ 227 */
229int pvclock_gtod_unregister_notifier(struct notifier_block *nb) 228int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
230{ 229{
231 struct timekeeper *tk = &timekeeper;
232 unsigned long flags; 230 unsigned long flags;
233 int ret; 231 int ret;
234 232
235 write_seqlock_irqsave(&tk->lock, flags); 233 raw_spin_lock_irqsave(&timekeeper_lock, flags);
236 ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb); 234 ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
237 write_sequnlock_irqrestore(&tk->lock, flags); 235 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
238 236
239 return ret; 237 return ret;
240} 238}
241EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier); 239EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
242 240
243/* must hold write on timekeeper.lock */ 241/* must hold timekeeper_lock */
244static void timekeeping_update(struct timekeeper *tk, bool clearntp) 242static void timekeeping_update(struct timekeeper *tk, bool clearntp)
245{ 243{
246 if (clearntp) { 244 if (clearntp) {
@@ -294,12 +292,12 @@ int __getnstimeofday(struct timespec *ts)
294 s64 nsecs = 0; 292 s64 nsecs = 0;
295 293
296 do { 294 do {
297 seq = read_seqbegin(&tk->lock); 295 seq = read_seqcount_begin(&timekeeper_seq);
298 296
299 ts->tv_sec = tk->xtime_sec; 297 ts->tv_sec = tk->xtime_sec;
300 nsecs = timekeeping_get_ns(tk); 298 nsecs = timekeeping_get_ns(tk);
301 299
302 } while (read_seqretry(&tk->lock, seq)); 300 } while (read_seqcount_retry(&timekeeper_seq, seq));
303 301
304 ts->tv_nsec = 0; 302 ts->tv_nsec = 0;
305 timespec_add_ns(ts, nsecs); 303 timespec_add_ns(ts, nsecs);
@@ -335,11 +333,11 @@ ktime_t ktime_get(void)
335 WARN_ON(timekeeping_suspended); 333 WARN_ON(timekeeping_suspended);
336 334
337 do { 335 do {
338 seq = read_seqbegin(&tk->lock); 336 seq = read_seqcount_begin(&timekeeper_seq);
339 secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; 337 secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
340 nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec; 338 nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
341 339
342 } while (read_seqretry(&tk->lock, seq)); 340 } while (read_seqcount_retry(&timekeeper_seq, seq));
343 /* 341 /*
344 * Use ktime_set/ktime_add_ns to create a proper ktime on 342 * Use ktime_set/ktime_add_ns to create a proper ktime on
345 * 32-bit architectures without CONFIG_KTIME_SCALAR. 343 * 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -366,12 +364,12 @@ void ktime_get_ts(struct timespec *ts)
366 WARN_ON(timekeeping_suspended); 364 WARN_ON(timekeeping_suspended);
367 365
368 do { 366 do {
369 seq = read_seqbegin(&tk->lock); 367 seq = read_seqcount_begin(&timekeeper_seq);
370 ts->tv_sec = tk->xtime_sec; 368 ts->tv_sec = tk->xtime_sec;
371 nsec = timekeeping_get_ns(tk); 369 nsec = timekeeping_get_ns(tk);
372 tomono = tk->wall_to_monotonic; 370 tomono = tk->wall_to_monotonic;
373 371
374 } while (read_seqretry(&tk->lock, seq)); 372 } while (read_seqcount_retry(&timekeeper_seq, seq));
375 373
376 ts->tv_sec += tomono.tv_sec; 374 ts->tv_sec += tomono.tv_sec;
377 ts->tv_nsec = 0; 375 ts->tv_nsec = 0;
@@ -379,6 +377,50 @@ void ktime_get_ts(struct timespec *ts)
379} 377}
380EXPORT_SYMBOL_GPL(ktime_get_ts); 378EXPORT_SYMBOL_GPL(ktime_get_ts);
381 379
380
381/**
382 * timekeeping_clocktai - Returns the TAI time of day in a timespec
383 * @ts: pointer to the timespec to be set
384 *
385 * Returns the time of day in a timespec.
386 */
387void timekeeping_clocktai(struct timespec *ts)
388{
389 struct timekeeper *tk = &timekeeper;
390 unsigned long seq;
391 u64 nsecs;
392
393 WARN_ON(timekeeping_suspended);
394
395 do {
396 seq = read_seqcount_begin(&timekeeper_seq);
397
398 ts->tv_sec = tk->xtime_sec + tk->tai_offset;
399 nsecs = timekeeping_get_ns(tk);
400
401 } while (read_seqcount_retry(&timekeeper_seq, seq));
402
403 ts->tv_nsec = 0;
404 timespec_add_ns(ts, nsecs);
405
406}
407EXPORT_SYMBOL(timekeeping_clocktai);
408
409
410/**
411 * ktime_get_clocktai - Returns the TAI time of day in a ktime
412 *
413 * Returns the time of day in a ktime.
414 */
415ktime_t ktime_get_clocktai(void)
416{
417 struct timespec ts;
418
419 timekeeping_clocktai(&ts);
420 return timespec_to_ktime(ts);
421}
422EXPORT_SYMBOL(ktime_get_clocktai);
423
382#ifdef CONFIG_NTP_PPS 424#ifdef CONFIG_NTP_PPS
383 425
384/** 426/**
@@ -399,7 +441,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
399 WARN_ON_ONCE(timekeeping_suspended); 441 WARN_ON_ONCE(timekeeping_suspended);
400 442
401 do { 443 do {
402 seq = read_seqbegin(&tk->lock); 444 seq = read_seqcount_begin(&timekeeper_seq);
403 445
404 *ts_raw = tk->raw_time; 446 *ts_raw = tk->raw_time;
405 ts_real->tv_sec = tk->xtime_sec; 447 ts_real->tv_sec = tk->xtime_sec;
@@ -408,7 +450,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
408 nsecs_raw = timekeeping_get_ns_raw(tk); 450 nsecs_raw = timekeeping_get_ns_raw(tk);
409 nsecs_real = timekeeping_get_ns(tk); 451 nsecs_real = timekeeping_get_ns(tk);
410 452
411 } while (read_seqretry(&tk->lock, seq)); 453 } while (read_seqcount_retry(&timekeeper_seq, seq));
412 454
413 timespec_add_ns(ts_raw, nsecs_raw); 455 timespec_add_ns(ts_raw, nsecs_raw);
414 timespec_add_ns(ts_real, nsecs_real); 456 timespec_add_ns(ts_real, nsecs_real);
@@ -448,7 +490,8 @@ int do_settimeofday(const struct timespec *tv)
448 if (!timespec_valid_strict(tv)) 490 if (!timespec_valid_strict(tv))
449 return -EINVAL; 491 return -EINVAL;
450 492
451 write_seqlock_irqsave(&tk->lock, flags); 493 raw_spin_lock_irqsave(&timekeeper_lock, flags);
494 write_seqcount_begin(&timekeeper_seq);
452 495
453 timekeeping_forward_now(tk); 496 timekeeping_forward_now(tk);
454 497
@@ -462,7 +505,8 @@ int do_settimeofday(const struct timespec *tv)
462 505
463 timekeeping_update(tk, true); 506 timekeeping_update(tk, true);
464 507
465 write_sequnlock_irqrestore(&tk->lock, flags); 508 write_seqcount_end(&timekeeper_seq);
509 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
466 510
467 /* signal hrtimers about time change */ 511 /* signal hrtimers about time change */
468 clock_was_set(); 512 clock_was_set();
@@ -487,7 +531,8 @@ int timekeeping_inject_offset(struct timespec *ts)
487 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) 531 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
488 return -EINVAL; 532 return -EINVAL;
489 533
490 write_seqlock_irqsave(&tk->lock, flags); 534 raw_spin_lock_irqsave(&timekeeper_lock, flags);
535 write_seqcount_begin(&timekeeper_seq);
491 536
492 timekeeping_forward_now(tk); 537 timekeeping_forward_now(tk);
493 538
@@ -504,7 +549,8 @@ int timekeeping_inject_offset(struct timespec *ts)
504error: /* even if we error out, we forwarded the time, so call update */ 549error: /* even if we error out, we forwarded the time, so call update */
505 timekeeping_update(tk, true); 550 timekeeping_update(tk, true);
506 551
507 write_sequnlock_irqrestore(&tk->lock, flags); 552 write_seqcount_end(&timekeeper_seq);
553 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
508 554
509 /* signal hrtimers about time change */ 555 /* signal hrtimers about time change */
510 clock_was_set(); 556 clock_was_set();
@@ -513,6 +559,51 @@ error: /* even if we error out, we forwarded the time, so call update */
513} 559}
514EXPORT_SYMBOL(timekeeping_inject_offset); 560EXPORT_SYMBOL(timekeeping_inject_offset);
515 561
562
563/**
564 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
565 *
566 */
567s32 timekeeping_get_tai_offset(void)
568{
569 struct timekeeper *tk = &timekeeper;
570 unsigned int seq;
571 s32 ret;
572
573 do {
574 seq = read_seqcount_begin(&timekeeper_seq);
575 ret = tk->tai_offset;
576 } while (read_seqcount_retry(&timekeeper_seq, seq));
577
578 return ret;
579}
580
581/**
582 * __timekeeping_set_tai_offset - Lock free worker function
583 *
584 */
585static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
586{
587 tk->tai_offset = tai_offset;
588 tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0));
589}
590
591/**
592 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
593 *
594 */
595void timekeeping_set_tai_offset(s32 tai_offset)
596{
597 struct timekeeper *tk = &timekeeper;
598 unsigned long flags;
599
600 raw_spin_lock_irqsave(&timekeeper_lock, flags);
601 write_seqcount_begin(&timekeeper_seq);
602 __timekeeping_set_tai_offset(tk, tai_offset);
603 write_seqcount_end(&timekeeper_seq);
604 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
605}
606
516/** 607/**
517 * change_clocksource - Swaps clocksources if a new one is available 608 * change_clocksource - Swaps clocksources if a new one is available
518 * 609 *
@@ -526,7 +617,8 @@ static int change_clocksource(void *data)
526 617
527 new = (struct clocksource *) data; 618 new = (struct clocksource *) data;
528 619
529 write_seqlock_irqsave(&tk->lock, flags); 620 raw_spin_lock_irqsave(&timekeeper_lock, flags);
621 write_seqcount_begin(&timekeeper_seq);
530 622
531 timekeeping_forward_now(tk); 623 timekeeping_forward_now(tk);
532 if (!new->enable || new->enable(new) == 0) { 624 if (!new->enable || new->enable(new) == 0) {
@@ -537,7 +629,8 @@ static int change_clocksource(void *data)
537 } 629 }
538 timekeeping_update(tk, true); 630 timekeeping_update(tk, true);
539 631
540 write_sequnlock_irqrestore(&tk->lock, flags); 632 write_seqcount_end(&timekeeper_seq);
633 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
541 634
542 return 0; 635 return 0;
543} 636}
@@ -587,11 +680,11 @@ void getrawmonotonic(struct timespec *ts)
587 s64 nsecs; 680 s64 nsecs;
588 681
589 do { 682 do {
590 seq = read_seqbegin(&tk->lock); 683 seq = read_seqcount_begin(&timekeeper_seq);
591 nsecs = timekeeping_get_ns_raw(tk); 684 nsecs = timekeeping_get_ns_raw(tk);
592 *ts = tk->raw_time; 685 *ts = tk->raw_time;
593 686
594 } while (read_seqretry(&tk->lock, seq)); 687 } while (read_seqcount_retry(&timekeeper_seq, seq));
595 688
596 timespec_add_ns(ts, nsecs); 689 timespec_add_ns(ts, nsecs);
597} 690}
@@ -607,11 +700,11 @@ int timekeeping_valid_for_hres(void)
607 int ret; 700 int ret;
608 701
609 do { 702 do {
610 seq = read_seqbegin(&tk->lock); 703 seq = read_seqcount_begin(&timekeeper_seq);
611 704
612 ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; 705 ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
613 706
614 } while (read_seqretry(&tk->lock, seq)); 707 } while (read_seqcount_retry(&timekeeper_seq, seq));
615 708
616 return ret; 709 return ret;
617} 710}
@@ -626,11 +719,11 @@ u64 timekeeping_max_deferment(void)
626 u64 ret; 719 u64 ret;
627 720
628 do { 721 do {
629 seq = read_seqbegin(&tk->lock); 722 seq = read_seqcount_begin(&timekeeper_seq);
630 723
631 ret = tk->clock->max_idle_ns; 724 ret = tk->clock->max_idle_ns;
632 725
633 } while (read_seqretry(&tk->lock, seq)); 726 } while (read_seqcount_retry(&timekeeper_seq, seq));
634 727
635 return ret; 728 return ret;
636} 729}
@@ -693,11 +786,10 @@ void __init timekeeping_init(void)
693 boot.tv_nsec = 0; 786 boot.tv_nsec = 0;
694 } 787 }
695 788
696 seqlock_init(&tk->lock);
697
698 ntp_init(); 789 ntp_init();
699 790
700 write_seqlock_irqsave(&tk->lock, flags); 791 raw_spin_lock_irqsave(&timekeeper_lock, flags);
792 write_seqcount_begin(&timekeeper_seq);
701 clock = clocksource_default_clock(); 793 clock = clocksource_default_clock();
702 if (clock->enable) 794 if (clock->enable)
703 clock->enable(clock); 795 clock->enable(clock);
@@ -716,7 +808,8 @@ void __init timekeeping_init(void)
716 tmp.tv_nsec = 0; 808 tmp.tv_nsec = 0;
717 tk_set_sleep_time(tk, tmp); 809 tk_set_sleep_time(tk, tmp);
718 810
719 write_sequnlock_irqrestore(&tk->lock, flags); 811 write_seqcount_end(&timekeeper_seq);
812 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
720} 813}
721 814
722/* time in seconds when suspend began */ 815/* time in seconds when suspend began */
@@ -764,7 +857,8 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
764 if (has_persistent_clock()) 857 if (has_persistent_clock())
765 return; 858 return;
766 859
767 write_seqlock_irqsave(&tk->lock, flags); 860 raw_spin_lock_irqsave(&timekeeper_lock, flags);
861 write_seqcount_begin(&timekeeper_seq);
768 862
769 timekeeping_forward_now(tk); 863 timekeeping_forward_now(tk);
770 864
@@ -772,7 +866,8 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
772 866
773 timekeeping_update(tk, true); 867 timekeeping_update(tk, true);
774 868
775 write_sequnlock_irqrestore(&tk->lock, flags); 869 write_seqcount_end(&timekeeper_seq);
870 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
776 871
777 /* signal hrtimers about time change */ 872 /* signal hrtimers about time change */
778 clock_was_set(); 873 clock_was_set();
@@ -788,26 +883,72 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
788static void timekeeping_resume(void) 883static void timekeeping_resume(void)
789{ 884{
790 struct timekeeper *tk = &timekeeper; 885 struct timekeeper *tk = &timekeeper;
886 struct clocksource *clock = tk->clock;
791 unsigned long flags; 887 unsigned long flags;
792 struct timespec ts; 888 struct timespec ts_new, ts_delta;
889 cycle_t cycle_now, cycle_delta;
890 bool suspendtime_found = false;
793 891
794 read_persistent_clock(&ts); 892 read_persistent_clock(&ts_new);
795 893
796 clockevents_resume(); 894 clockevents_resume();
797 clocksource_resume(); 895 clocksource_resume();
798 896
799 write_seqlock_irqsave(&tk->lock, flags); 897 raw_spin_lock_irqsave(&timekeeper_lock, flags);
898 write_seqcount_begin(&timekeeper_seq);
899
900 /*
901 * After system resumes, we need to calculate the suspended time and
902 * compensate it for the OS time. There are 3 sources that could be
903 * used: Nonstop clocksource during suspend, persistent clock and rtc
904 * device.
905 *
906 * One specific platform may have 1 or 2 or all of them, and the
907 * preference will be:
908 * suspend-nonstop clocksource -> persistent clock -> rtc
909 * The less preferred source will only be tried if there is no better
910 * usable source. The rtc part is handled separately in rtc core code.
911 */
912 cycle_now = clock->read(clock);
913 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
914 cycle_now > clock->cycle_last) {
915 u64 num, max = ULLONG_MAX;
916 u32 mult = clock->mult;
917 u32 shift = clock->shift;
918 s64 nsec = 0;
800 919
801 if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { 920 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
802 ts = timespec_sub(ts, timekeeping_suspend_time); 921
803 __timekeeping_inject_sleeptime(tk, &ts); 922 /*
923 * "cycle_delta * mult" may cause 64 bits overflow, if the
924 * suspended time is too long. In that case we need to do the
925 * 64-bit math carefully
926 */
927 do_div(max, mult);
928 if (cycle_delta > max) {
929 num = div64_u64(cycle_delta, max);
930 nsec = (((u64) max * mult) >> shift) * num;
931 cycle_delta -= num * max;
932 }
933 nsec += ((u64) cycle_delta * mult) >> shift;
934
935 ts_delta = ns_to_timespec(nsec);
936 suspendtime_found = true;
937 } else if (timespec_compare(&ts_new, &timekeeping_suspend_time) > 0) {
938 ts_delta = timespec_sub(ts_new, timekeeping_suspend_time);
939 suspendtime_found = true;
804 } 940 }
805 /* re-base the last cycle value */ 941
806 tk->clock->cycle_last = tk->clock->read(tk->clock); 942 if (suspendtime_found)
943 __timekeeping_inject_sleeptime(tk, &ts_delta);
944
945 /* Re-base the last cycle value */
946 clock->cycle_last = cycle_now;
807 tk->ntp_error = 0; 947 tk->ntp_error = 0;
808 timekeeping_suspended = 0; 948 timekeeping_suspended = 0;
809 timekeeping_update(tk, false); 949 timekeeping_update(tk, false);
810 write_sequnlock_irqrestore(&tk->lock, flags); 950 write_seqcount_end(&timekeeper_seq);
951 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
811 952
812 touch_softlockup_watchdog(); 953 touch_softlockup_watchdog();
813 954
@@ -826,7 +967,8 @@ static int timekeeping_suspend(void)
826 967
827 read_persistent_clock(&timekeeping_suspend_time); 968 read_persistent_clock(&timekeeping_suspend_time);
828 969
829 write_seqlock_irqsave(&tk->lock, flags); 970 raw_spin_lock_irqsave(&timekeeper_lock, flags);
971 write_seqcount_begin(&timekeeper_seq);
830 timekeeping_forward_now(tk); 972 timekeeping_forward_now(tk);
831 timekeeping_suspended = 1; 973 timekeeping_suspended = 1;
832 974
@@ -849,7 +991,8 @@ static int timekeeping_suspend(void)
849 timekeeping_suspend_time = 991 timekeeping_suspend_time =
850 timespec_add(timekeeping_suspend_time, delta_delta); 992 timespec_add(timekeeping_suspend_time, delta_delta);
851 } 993 }
852 write_sequnlock_irqrestore(&tk->lock, flags); 994 write_seqcount_end(&timekeeper_seq);
995 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
853 996
854 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); 997 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
855 clocksource_suspend(); 998 clocksource_suspend();
@@ -1099,6 +1242,8 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
1099 tk_set_wall_to_mono(tk, 1242 tk_set_wall_to_mono(tk,
1100 timespec_sub(tk->wall_to_monotonic, ts)); 1243 timespec_sub(tk->wall_to_monotonic, ts));
1101 1244
1245 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1246
1102 clock_was_set_delayed(); 1247 clock_was_set_delayed();
1103 } 1248 }
1104 } 1249 }
@@ -1116,15 +1261,16 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
1116static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, 1261static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1117 u32 shift) 1262 u32 shift)
1118{ 1263{
1264 cycle_t interval = tk->cycle_interval << shift;
1119 u64 raw_nsecs; 1265 u64 raw_nsecs;
1120 1266
1121 /* If the offset is smaller than a shifted interval, do nothing */ 1267 /* If the offset is smaller than a shifted interval, do nothing */
1122 if (offset < tk->cycle_interval<<shift) 1268 if (offset < interval)
1123 return offset; 1269 return offset;
1124 1270
1125 /* Accumulate one shifted interval */ 1271 /* Accumulate one shifted interval */
1126 offset -= tk->cycle_interval << shift; 1272 offset -= interval;
1127 tk->clock->cycle_last += tk->cycle_interval << shift; 1273 tk->clock->cycle_last += interval;
1128 1274
1129 tk->xtime_nsec += tk->xtime_interval << shift; 1275 tk->xtime_nsec += tk->xtime_interval << shift;
1130 accumulate_nsecs_to_secs(tk); 1276 accumulate_nsecs_to_secs(tk);
@@ -1186,7 +1332,8 @@ static void update_wall_time(void)
1186 int shift = 0, maxshift; 1332 int shift = 0, maxshift;
1187 unsigned long flags; 1333 unsigned long flags;
1188 1334
1189 write_seqlock_irqsave(&tk->lock, flags); 1335 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1336 write_seqcount_begin(&timekeeper_seq);
1190 1337
1191 /* Make sure we're fully resumed: */ 1338 /* Make sure we're fully resumed: */
1192 if (unlikely(timekeeping_suspended)) 1339 if (unlikely(timekeeping_suspended))
@@ -1241,7 +1388,8 @@ static void update_wall_time(void)
1241 timekeeping_update(tk, false); 1388 timekeeping_update(tk, false);
1242 1389
1243out: 1390out:
1244 write_sequnlock_irqrestore(&tk->lock, flags); 1391 write_seqcount_end(&timekeeper_seq);
1392 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1245 1393
1246} 1394}
1247 1395
@@ -1289,13 +1437,13 @@ void get_monotonic_boottime(struct timespec *ts)
1289 WARN_ON(timekeeping_suspended); 1437 WARN_ON(timekeeping_suspended);
1290 1438
1291 do { 1439 do {
1292 seq = read_seqbegin(&tk->lock); 1440 seq = read_seqcount_begin(&timekeeper_seq);
1293 ts->tv_sec = tk->xtime_sec; 1441 ts->tv_sec = tk->xtime_sec;
1294 nsec = timekeeping_get_ns(tk); 1442 nsec = timekeeping_get_ns(tk);
1295 tomono = tk->wall_to_monotonic; 1443 tomono = tk->wall_to_monotonic;
1296 sleep = tk->total_sleep_time; 1444 sleep = tk->total_sleep_time;
1297 1445
1298 } while (read_seqretry(&tk->lock, seq)); 1446 } while (read_seqcount_retry(&timekeeper_seq, seq));
1299 1447
1300 ts->tv_sec += tomono.tv_sec + sleep.tv_sec; 1448 ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
1301 ts->tv_nsec = 0; 1449 ts->tv_nsec = 0;
@@ -1354,10 +1502,10 @@ struct timespec current_kernel_time(void)
1354 unsigned long seq; 1502 unsigned long seq;
1355 1503
1356 do { 1504 do {
1357 seq = read_seqbegin(&tk->lock); 1505 seq = read_seqcount_begin(&timekeeper_seq);
1358 1506
1359 now = tk_xtime(tk); 1507 now = tk_xtime(tk);
1360 } while (read_seqretry(&tk->lock, seq)); 1508 } while (read_seqcount_retry(&timekeeper_seq, seq));
1361 1509
1362 return now; 1510 return now;
1363} 1511}
@@ -1370,11 +1518,11 @@ struct timespec get_monotonic_coarse(void)
1370 unsigned long seq; 1518 unsigned long seq;
1371 1519
1372 do { 1520 do {
1373 seq = read_seqbegin(&tk->lock); 1521 seq = read_seqcount_begin(&timekeeper_seq);
1374 1522
1375 now = tk_xtime(tk); 1523 now = tk_xtime(tk);
1376 mono = tk->wall_to_monotonic; 1524 mono = tk->wall_to_monotonic;
1377 } while (read_seqretry(&tk->lock, seq)); 1525 } while (read_seqcount_retry(&timekeeper_seq, seq));
1378 1526
1379 set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, 1527 set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
1380 now.tv_nsec + mono.tv_nsec); 1528 now.tv_nsec + mono.tv_nsec);
@@ -1405,11 +1553,11 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
1405 unsigned long seq; 1553 unsigned long seq;
1406 1554
1407 do { 1555 do {
1408 seq = read_seqbegin(&tk->lock); 1556 seq = read_seqcount_begin(&timekeeper_seq);
1409 *xtim = tk_xtime(tk); 1557 *xtim = tk_xtime(tk);
1410 *wtom = tk->wall_to_monotonic; 1558 *wtom = tk->wall_to_monotonic;
1411 *sleep = tk->total_sleep_time; 1559 *sleep = tk->total_sleep_time;
1412 } while (read_seqretry(&tk->lock, seq)); 1560 } while (read_seqcount_retry(&timekeeper_seq, seq));
1413} 1561}
1414 1562
1415#ifdef CONFIG_HIGH_RES_TIMERS 1563#ifdef CONFIG_HIGH_RES_TIMERS
@@ -1421,7 +1569,8 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
1421 * Returns current monotonic time and updates the offsets 1569 * Returns current monotonic time and updates the offsets
1422 * Called from hrtimer_interrupt() or retrigger_next_event() 1570
1423 */ 1571 */
1424ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot) 1572ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
1573 ktime_t *offs_tai)
1425{ 1574{
1426 struct timekeeper *tk = &timekeeper; 1575 struct timekeeper *tk = &timekeeper;
1427 ktime_t now; 1576 ktime_t now;
@@ -1429,14 +1578,15 @@ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
1429 u64 secs, nsecs; 1578 u64 secs, nsecs;
1430 1579
1431 do { 1580 do {
1432 seq = read_seqbegin(&tk->lock); 1581 seq = read_seqcount_begin(&timekeeper_seq);
1433 1582
1434 secs = tk->xtime_sec; 1583 secs = tk->xtime_sec;
1435 nsecs = timekeeping_get_ns(tk); 1584 nsecs = timekeeping_get_ns(tk);
1436 1585
1437 *offs_real = tk->offs_real; 1586 *offs_real = tk->offs_real;
1438 *offs_boot = tk->offs_boot; 1587 *offs_boot = tk->offs_boot;
1439 } while (read_seqretry(&tk->lock, seq)); 1588 *offs_tai = tk->offs_tai;
1589 } while (read_seqcount_retry(&timekeeper_seq, seq));
1440 1590
1441 now = ktime_add_ns(ktime_set(secs, 0), nsecs); 1591 now = ktime_add_ns(ktime_set(secs, 0), nsecs);
1442 now = ktime_sub(now, *offs_real); 1592 now = ktime_sub(now, *offs_real);
@@ -1454,9 +1604,9 @@ ktime_t ktime_get_monotonic_offset(void)
1454 struct timespec wtom; 1604 struct timespec wtom;
1455 1605
1456 do { 1606 do {
1457 seq = read_seqbegin(&tk->lock); 1607 seq = read_seqcount_begin(&timekeeper_seq);
1458 wtom = tk->wall_to_monotonic; 1608 wtom = tk->wall_to_monotonic;
1459 } while (read_seqretry(&tk->lock, seq)); 1609 } while (read_seqcount_retry(&timekeeper_seq, seq));
1460 1610
1461 return timespec_to_ktime(wtom); 1611 return timespec_to_ktime(wtom);
1462} 1612}
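The timekeeping_resume() hunk above prefers a suspend-nonstop clocksource for measuring the time spent suspended, falling back to the persistent clock, and converts the elapsed cycles to nanoseconds in a way that keeps "cycle_delta * mult" from overflowing 64 bits. A standalone rendering of that conversion, using plain 64-bit division where the kernel uses do_div()/div64_u64(); the mult/shift values in main() are illustrative, not taken from any real clocksource driver:

#include <stdio.h>
#include <stdint.h>

/* Convert a (possibly huge) cycle delta to nanoseconds with the clocksource's
 * mult/shift pair, splitting the work so that cycle_delta * mult never
 * overflows 64 bits. Mirrors the logic added to timekeeping_resume(). */
static uint64_t cycles_to_nsec_safe(uint64_t cycle_delta, uint32_t mult,
				    uint32_t shift)
{
	uint64_t max = UINT64_MAX / mult;	/* largest delta safe to multiply */
	uint64_t nsec = 0;

	if (cycle_delta > max) {
		uint64_t num = cycle_delta / max;

		nsec = ((max * mult) >> shift) * num;
		cycle_delta -= num * max;
	}
	nsec += (cycle_delta * mult) >> shift;
	return nsec;
}

int main(void)
{
	/* roughly a 1 MHz clocksource: 1000 ns per cycle at shift 20 */
	uint32_t mult = 1000u << 20, shift = 20;
	uint64_t week_of_cycles = 7ULL * 24 * 3600 * 1000000;

	printf("%llu ns\n",
	       (unsigned long long)cycles_to_nsec_safe(week_of_cycles, mult, shift));
	return 0;
}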
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 192473b22799..fc382d6e2765 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -414,24 +414,28 @@ config PROBE_EVENTS
414 def_bool n 414 def_bool n
415 415
416config DYNAMIC_FTRACE 416config DYNAMIC_FTRACE
417 bool "enable/disable ftrace tracepoints dynamically" 417 bool "enable/disable function tracing dynamically"
418 depends on FUNCTION_TRACER 418 depends on FUNCTION_TRACER
419 depends on HAVE_DYNAMIC_FTRACE 419 depends on HAVE_DYNAMIC_FTRACE
420 default y 420 default y
421 help 421 help
422 This option will modify all the calls to ftrace dynamically 422 This option will modify all the calls to function tracing
423 (will patch them out of the binary image and replace them 423 dynamically (will patch them out of the binary image and
424 with a No-Op instruction) as they are called. A table is 424 replace them with a No-Op instruction) on boot up. During
425 created to dynamically enable them again. 425 compile time, a table is made of all the locations that ftrace
426 can function trace, and this table is linked into the kernel
427 image. When this is enabled, functions can be individually
428 enabled, and the functions not enabled will not affect
429 performance of the system.
430
431 See the files in /sys/kernel/debug/tracing:
432 available_filter_functions
433 set_ftrace_filter
434 set_ftrace_notrace
426 435
427 This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but 436 This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
428 otherwise has native performance as long as no tracing is active. 437 otherwise has native performance as long as no tracing is active.
429 438
430 The changes to the code are done by a kernel thread that
431 wakes up once a second and checks to see if any ftrace calls
432 were made. If so, it runs stop_machine (stops all CPUS)
433 and modifies the code to jump over the call to ftrace.
434
435config DYNAMIC_FTRACE_WITH_REGS 439config DYNAMIC_FTRACE_WITH_REGS
436 def_bool y 440 def_bool y
437 depends on DYNAMIC_FTRACE 441 depends on DYNAMIC_FTRACE
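
The reworded DYNAMIC_FTRACE help text points at the tracefs control files. A small userspace sketch that narrows function tracing to one function by writing those files; it assumes the debugfs mount point named in the help text, the standard current_tracer and tracing_on controls, and root privileges:

#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *dir = "/sys/kernel/debug/tracing";
	char path[128];

	/* Pick one traceable function; candidates are listed in
	 * available_filter_functions. */
	snprintf(path, sizeof(path), "%s/set_ftrace_filter", dir);
	if (write_str(path, "schedule\n"))
		return 1;

	snprintf(path, sizeof(path), "%s/current_tracer", dir);
	write_str(path, "function\n");

	snprintf(path, sizeof(path), "%s/tracing_on", dir);
	write_str(path, "1\n");

	return 0;
}
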
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c2e2c2310374..1f835a83cb2c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2400,6 +2400,27 @@ static void test_ftrace_alive(struct seq_file *m)
2400 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n"); 2400 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2401} 2401}
2402 2402
2403#ifdef CONFIG_TRACER_MAX_TRACE
2404static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2405{
2406 if (iter->trace->allocated_snapshot)
2407 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2408 else
2409 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2410
2411 seq_printf(m, "# Snapshot commands:\n");
2412 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2413 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2414 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
2415 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
2416 seq_printf(m, "# (Doesn't have to be '2'; works with any number that\n");
2417 seq_printf(m, "# is not a '0' or '1')\n");
2418}
2419#else
2420/* Should never be called */
2421static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2422#endif
2423
2403static int s_show(struct seq_file *m, void *v) 2424static int s_show(struct seq_file *m, void *v)
2404{ 2425{
2405 struct trace_iterator *iter = v; 2426 struct trace_iterator *iter = v;
@@ -2411,7 +2432,9 @@ static int s_show(struct seq_file *m, void *v)
2411 seq_puts(m, "#\n"); 2432 seq_puts(m, "#\n");
2412 test_ftrace_alive(m); 2433 test_ftrace_alive(m);
2413 } 2434 }
2414 if (iter->trace && iter->trace->print_header) 2435 if (iter->snapshot && trace_empty(iter))
2436 print_snapshot_help(m, iter);
2437 else if (iter->trace && iter->trace->print_header)
2415 iter->trace->print_header(m); 2438 iter->trace->print_header(m);
2416 else 2439 else
2417 trace_default_header(m); 2440 trace_default_header(m);
@@ -4144,8 +4167,6 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4144 default: 4167 default:
4145 if (current_trace->allocated_snapshot) 4168 if (current_trace->allocated_snapshot)
4146 tracing_reset_online_cpus(&max_tr); 4169 tracing_reset_online_cpus(&max_tr);
4147 else
4148 ret = -EINVAL;
4149 break; 4170 break;
4150 } 4171 }
4151 4172
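
The new print_snapshot_help() output documents the 0/1/2 writes to the snapshot file. A minimal userspace sketch exercising that interface, assuming the same /sys/kernel/debug/tracing mount point, a kernel built with CONFIG_TRACER_MAX_TRACE, and root:

#include <stdio.h>

/* Write a single control value to the tracing snapshot file. */
static int snapshot_ctl(const char *val)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/snapshot", "w");

	if (!f) {
		perror("snapshot");
		return -1;
	}
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	snapshot_ctl("1\n");	/* allocate the buffer and take a snapshot */
	snapshot_ctl("2\n");	/* clear the snapshot buffer, keep it allocated */
	snapshot_ctl("0\n");	/* clear and free the snapshot buffer */
	return 0;
}
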
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index d5f1d3fd4b28..314c73ed418f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -66,7 +66,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
66 goto out; 66 goto out;
67 } 67 }
68 68
69 mdst = br_mdb_get(br, skb); 69 mdst = br_mdb_get(br, skb, vid);
70 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) 70 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
71 br_multicast_deliver(mdst, skb); 71 br_multicast_deliver(mdst, skb);
72 else 72 else
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 480330151898..828e2bcc1f52 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -97,7 +97,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
97 if (is_broadcast_ether_addr(dest)) 97 if (is_broadcast_ether_addr(dest))
98 skb2 = skb; 98 skb2 = skb;
99 else if (is_multicast_ether_addr(dest)) { 99 else if (is_multicast_ether_addr(dest)) {
100 mdst = br_mdb_get(br, skb); 100 mdst = br_mdb_get(br, skb, vid);
101 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { 101 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
102 if ((mdst && mdst->mglist) || 102 if ((mdst && mdst->mglist) ||
103 br_multicast_is_router(br)) 103 br_multicast_is_router(br))
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 9f97b850fc65..ee79f3f20383 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -80,6 +80,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
80 port = p->port; 80 port = p->port;
81 if (port) { 81 if (port) {
82 struct br_mdb_entry e; 82 struct br_mdb_entry e;
83 memset(&e, 0, sizeof(e));
83 e.ifindex = port->dev->ifindex; 84 e.ifindex = port->dev->ifindex;
84 e.state = p->state; 85 e.state = p->state;
85 if (p->addr.proto == htons(ETH_P_IP)) 86 if (p->addr.proto == htons(ETH_P_IP))
@@ -136,6 +137,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
136 break; 137 break;
137 138
138 bpm = nlmsg_data(nlh); 139 bpm = nlmsg_data(nlh);
140 memset(bpm, 0, sizeof(*bpm));
139 bpm->ifindex = dev->ifindex; 141 bpm->ifindex = dev->ifindex;
140 if (br_mdb_fill_info(skb, cb, dev) < 0) 142 if (br_mdb_fill_info(skb, cb, dev) < 0)
141 goto out; 143 goto out;
@@ -171,6 +173,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
171 return -EMSGSIZE; 173 return -EMSGSIZE;
172 174
173 bpm = nlmsg_data(nlh); 175 bpm = nlmsg_data(nlh);
176 memset(bpm, 0, sizeof(*bpm));
174 bpm->family = AF_BRIDGE; 177 bpm->family = AF_BRIDGE;
175 bpm->ifindex = dev->ifindex; 178 bpm->ifindex = dev->ifindex;
176 nest = nla_nest_start(skb, MDBA_MDB); 179 nest = nla_nest_start(skb, MDBA_MDB);
@@ -228,6 +231,7 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
228{ 231{
229 struct br_mdb_entry entry; 232 struct br_mdb_entry entry;
230 233
234 memset(&entry, 0, sizeof(entry));
231 entry.ifindex = port->dev->ifindex; 235 entry.ifindex = port->dev->ifindex;
232 entry.addr.proto = group->proto; 236 entry.addr.proto = group->proto;
233 entry.addr.u.ip4 = group->u.ip4; 237 entry.addr.u.ip4 = group->u.ip4;
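
The br_mdb hunks zero br_mdb_entry and br_port_msg on the stack before filling them, since padding bytes and members the code never sets would otherwise reach userspace through netlink as-is. A standalone sketch of the same pattern, using a hypothetical wire struct rather than the bridge types:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical wire struct: the hole after 'state' and the not-yet-filled
 * 'addr' field are exactly the kind of bytes that leak if left unset. */
struct mdb_entry_wire {
	uint32_t ifindex;
	uint8_t  state;
	/* 3 bytes of padding here */
	uint8_t  addr[16];
};

static void fill_entry(struct mdb_entry_wire *e, uint32_t ifindex, uint8_t state)
{
	memset(e, 0, sizeof(*e));	/* zero padding and unused members first */
	e->ifindex = ifindex;
	e->state = state;
	/* e->addr intentionally left for the IPv4/IPv6 branch to fill */
}

int main(void)
{
	struct mdb_entry_wire e;
	size_t i;

	fill_entry(&e, 4, 2);
	for (i = 0; i < sizeof(e); i++)	/* every byte is now deterministic */
		printf("%02x", ((unsigned char *)&e)[i]);
	putchar('\n');
	return 0;
}
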
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 10e6fce1bb62..923fbeaf7afd 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -132,7 +132,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
132#endif 132#endif
133 133
134struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 134struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
135 struct sk_buff *skb) 135 struct sk_buff *skb, u16 vid)
136{ 136{
137 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); 137 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
138 struct br_ip ip; 138 struct br_ip ip;
@@ -144,6 +144,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
144 return NULL; 144 return NULL;
145 145
146 ip.proto = skb->protocol; 146 ip.proto = skb->protocol;
147 ip.vid = vid;
147 148
148 switch (skb->protocol) { 149 switch (skb->protocol) {
149 case htons(ETH_P_IP): 150 case htons(ETH_P_IP):
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 6d314c4e6bcb..3cbf5beb3d4b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -442,7 +442,7 @@ extern int br_multicast_rcv(struct net_bridge *br,
442 struct net_bridge_port *port, 442 struct net_bridge_port *port,
443 struct sk_buff *skb); 443 struct sk_buff *skb);
444extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 444extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
445 struct sk_buff *skb); 445 struct sk_buff *skb, u16 vid);
446extern void br_multicast_add_port(struct net_bridge_port *port); 446extern void br_multicast_add_port(struct net_bridge_port *port);
447extern void br_multicast_del_port(struct net_bridge_port *port); 447extern void br_multicast_del_port(struct net_bridge_port *port);
448extern void br_multicast_enable_port(struct net_bridge_port *port); 448extern void br_multicast_enable_port(struct net_bridge_port *port);
@@ -504,7 +504,7 @@ static inline int br_multicast_rcv(struct net_bridge *br,
504} 504}
505 505
506static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 506static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
507 struct sk_buff *skb) 507 struct sk_buff *skb, u16 vid)
508{ 508{
509 return NULL; 509 return NULL;
510} 510}
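
Taken together, the bridge hunks above thread a vid argument into br_mdb_get() and store it in the br_ip lookup key, making multicast group lookups per-VLAN. A small sketch of why the VLAN id has to be part of the key, with a hypothetical key and linear table standing in for the bridge's hash:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical lookup key: the same group address on two VLANs must not match. */
struct grp_key {
	uint32_t addr;	/* multicast group, host order for the demo */
	uint16_t proto;
	uint16_t vid;	/* without this member, VLANs 10 and 20 collide */
};

struct grp_entry {
	struct grp_key key;
	const char *ports;
};

static const struct grp_entry table[] = {
	{ { 0xe0000001, 0x0800, 10 }, "eth1" },
	{ { 0xe0000001, 0x0800, 20 }, "eth2" },
};

static const struct grp_entry *grp_get(uint32_t addr, uint16_t proto, uint16_t vid)
{
	struct grp_key k = { addr, proto, vid };
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!memcmp(&table[i].key, &k, sizeof(k)))
			return &table[i];
	return NULL;
}

int main(void)
{
	const struct grp_entry *e = grp_get(0xe0000001, 0x0800, 20);

	printf("vid 20 -> %s\n", e ? e->ports : "(miss)");
	return 0;
}
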
diff --git a/net/core/dev.c b/net/core/dev.c
index a06a7a58dd11..dffbef70cd31 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3444,6 +3444,7 @@ ncls:
3444 } 3444 }
3445 switch (rx_handler(&skb)) { 3445 switch (rx_handler(&skb)) {
3446 case RX_HANDLER_CONSUMED: 3446 case RX_HANDLER_CONSUMED:
3447 ret = NET_RX_SUCCESS;
3447 goto unlock; 3448 goto unlock;
3448 case RX_HANDLER_ANOTHER: 3449 case RX_HANDLER_ANOTHER:
3449 goto another_round; 3450 goto another_round;
@@ -4103,7 +4104,7 @@ static void net_rx_action(struct softirq_action *h)
4103 * Allow this to run for 2 jiffies, since that will allow 4104 * Allow this to run for 2 jiffies, since that will allow
4104 * an average latency of 1.5/HZ. 4105 * an average latency of 1.5/HZ.
4105 */ 4106 */
4106 if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) 4107 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
4107 goto softnet_break; 4108 goto softnet_break;
4108 4109
4109 local_irq_enable(); 4110 local_irq_enable();
@@ -4780,7 +4781,7 @@ EXPORT_SYMBOL(dev_set_mac_address);
4780/** 4781/**
4781 * dev_change_carrier - Change device carrier 4782 * dev_change_carrier - Change device carrier
4782 * @dev: device 4783 * @dev: device
4783 * @new_carries: new value 4784 * @new_carrier: new value
4784 * 4785 *
4785 * Change device carrier 4786 * Change device carrier
4786 */ 4787 */
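
The net_rx_action() change above swaps time_after() for time_after_eq() so the softirq budget window closes exactly when time_limit is reached. Both helpers compare jiffies by signed difference, which survives counter wraparound; the sketch below defines local macros mirroring those semantics rather than including <linux/jiffies.h>:

#include <stdio.h>

/* Wraparound-safe comparisons, mirroring the semantics of the kernel's
 * time_after()/time_after_eq() helpers (signed difference trick). */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long time_limit = (unsigned long)-2;	/* 2 ticks before wrap */
	unsigned long jiffies = time_limit + 3;		/* wrapped around to 1 */

	/* A plain '>' breaks across the wrap; the signed difference does not. */
	printf("naive >       : %d\n", jiffies > time_limit);
	printf("time_after    : %d\n", time_after(jiffies, time_limit));
	printf("time_after_eq : %d\n", time_after_eq(jiffies, time_limit));

	/* The patch's point: exactly at the limit, only _eq reports expiry. */
	printf("at limit: after=%d after_eq=%d\n",
	       time_after(time_limit, time_limit),
	       time_after_eq(time_limit, time_limit));
	return 0;
}
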
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b376410ff259..a585d45cc9d9 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -979,6 +979,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
979 * report anything. 979 * report anything.
980 */ 980 */
981 ivi.spoofchk = -1; 981 ivi.spoofchk = -1;
982 memset(ivi.mac, 0, sizeof(ivi.mac));
982 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) 983 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
983 break; 984 break;
984 vf_mac.vf = 985 vf_mac.vf =
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 1b588e23cf80..21291f1abcd6 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -284,6 +284,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
284 if (!netdev->dcbnl_ops->getpermhwaddr) 284 if (!netdev->dcbnl_ops->getpermhwaddr)
285 return -EOPNOTSUPP; 285 return -EOPNOTSUPP;
286 286
287 memset(perm_addr, 0, sizeof(perm_addr));
287 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); 288 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
288 289
289 return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); 290 return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
@@ -1042,6 +1043,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1042 1043
1043 if (ops->ieee_getets) { 1044 if (ops->ieee_getets) {
1044 struct ieee_ets ets; 1045 struct ieee_ets ets;
1046 memset(&ets, 0, sizeof(ets));
1045 err = ops->ieee_getets(netdev, &ets); 1047 err = ops->ieee_getets(netdev, &ets);
1046 if (!err && 1048 if (!err &&
1047 nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) 1049 nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
@@ -1050,6 +1052,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1050 1052
1051 if (ops->ieee_getmaxrate) { 1053 if (ops->ieee_getmaxrate) {
1052 struct ieee_maxrate maxrate; 1054 struct ieee_maxrate maxrate;
1055 memset(&maxrate, 0, sizeof(maxrate));
1053 err = ops->ieee_getmaxrate(netdev, &maxrate); 1056 err = ops->ieee_getmaxrate(netdev, &maxrate);
1054 if (!err) { 1057 if (!err) {
1055 err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, 1058 err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
@@ -1061,6 +1064,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1061 1064
1062 if (ops->ieee_getpfc) { 1065 if (ops->ieee_getpfc) {
1063 struct ieee_pfc pfc; 1066 struct ieee_pfc pfc;
1067 memset(&pfc, 0, sizeof(pfc));
1064 err = ops->ieee_getpfc(netdev, &pfc); 1068 err = ops->ieee_getpfc(netdev, &pfc);
1065 if (!err && 1069 if (!err &&
1066 nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) 1070 nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
@@ -1094,6 +1098,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1094 /* get peer info if available */ 1098 /* get peer info if available */
1095 if (ops->ieee_peer_getets) { 1099 if (ops->ieee_peer_getets) {
1096 struct ieee_ets ets; 1100 struct ieee_ets ets;
1101 memset(&ets, 0, sizeof(ets));
1097 err = ops->ieee_peer_getets(netdev, &ets); 1102 err = ops->ieee_peer_getets(netdev, &ets);
1098 if (!err && 1103 if (!err &&
1099 nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) 1104 nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
@@ -1102,6 +1107,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1102 1107
1103 if (ops->ieee_peer_getpfc) { 1108 if (ops->ieee_peer_getpfc) {
1104 struct ieee_pfc pfc; 1109 struct ieee_pfc pfc;
1110 memset(&pfc, 0, sizeof(pfc));
1105 err = ops->ieee_peer_getpfc(netdev, &pfc); 1111 err = ops->ieee_peer_getpfc(netdev, &pfc);
1106 if (!err && 1112 if (!err &&
1107 nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) 1113 nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
@@ -1280,6 +1286,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1280 /* peer info if available */ 1286 /* peer info if available */
1281 if (ops->cee_peer_getpg) { 1287 if (ops->cee_peer_getpg) {
1282 struct cee_pg pg; 1288 struct cee_pg pg;
1289 memset(&pg, 0, sizeof(pg));
1283 err = ops->cee_peer_getpg(netdev, &pg); 1290 err = ops->cee_peer_getpg(netdev, &pg);
1284 if (!err && 1291 if (!err &&
1285 nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) 1292 nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
@@ -1288,6 +1295,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1288 1295
1289 if (ops->cee_peer_getpfc) { 1296 if (ops->cee_peer_getpfc) {
1290 struct cee_pfc pfc; 1297 struct cee_pfc pfc;
1298 memset(&pfc, 0, sizeof(pfc));
1291 err = ops->cee_peer_getpfc(netdev, &pfc); 1299 err = ops->cee_peer_getpfc(netdev, &pfc);
1292 if (!err && 1300 if (!err &&
1293 nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) 1301 nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 8c2251fb0a3f..bba5f8336317 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -84,7 +84,7 @@
84 (memcmp(addr1, addr2, length >> 3) == 0) 84 (memcmp(addr1, addr2, length >> 3) == 0)
85 85
86/* local link, i.e. FE80::/10 */ 86/* local link, i.e. FE80::/10 */
87#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE) 87#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80))
88 88
89/* 89/*
90 * check whether we can compress the IID to 16 bits, 90 * check whether we can compress the IID to 16 bits,
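
The 6lowpan fix compares the first 16-bit word of the address against htons(0xFE80) instead of the byte-swapped constant 0x80FE, which only happened to match on little-endian hosts. A userspace check with the same shape (it mirrors the header's single-word comparison; a strict FE80::/10 test would also mask with 0xffc0):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Same shape as the fixed macro: compare the leading 16-bit word in
 * network byte order. */
static int is_addr_link_local(const struct in6_addr *a)
{
	uint16_t w0;

	memcpy(&w0, &a->s6_addr[0], sizeof(w0));
	return w0 == htons(0xFE80);
}

int main(void)
{
	struct in6_addr ll, global;

	inet_pton(AF_INET6, "fe80::1", &ll);
	inet_pton(AF_INET6, "2001:db8::1", &global);

	printf("fe80::1     -> %d\n", is_addr_link_local(&ll));
	printf("2001:db8::1 -> %d\n", is_addr_link_local(&global));
	return 0;
}
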
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7d1874be1df3..786d97aee751 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -735,6 +735,7 @@ EXPORT_SYMBOL(inet_csk_destroy_sock);
735 * tcp/dccp_create_openreq_child(). 735 * tcp/dccp_create_openreq_child().
736 */ 736 */
737void inet_csk_prepare_forced_close(struct sock *sk) 737void inet_csk_prepare_forced_close(struct sock *sk)
738 __releases(&sk->sk_lock.slock)
738{ 739{
739 /* sk_clone_lock locked the socket and set refcnt to 2 */ 740 /* sk_clone_lock locked the socket and set refcnt to 2 */
740 bh_unlock_sock(sk); 741 bh_unlock_sock(sk);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index f6289bf6f332..310a3647c83d 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -423,7 +423,7 @@ int ip_options_compile(struct net *net,
423 put_unaligned_be32(midtime, timeptr); 423 put_unaligned_be32(midtime, timeptr);
424 opt->is_changed = 1; 424 opt->is_changed = 1;
425 } 425 }
426 } else { 426 } else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) {
427 unsigned int overflow = optptr[3]>>4; 427 unsigned int overflow = optptr[3]>>4;
428 if (overflow == 15) { 428 if (overflow == 15) {
429 pp_ptr = optptr + 3; 429 pp_ptr = optptr + 3;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index b1876e52091e..e33fe0ab2568 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -281,7 +281,8 @@ int ip6_mc_input(struct sk_buff *skb)
281 * IPv6 multicast router mode is now supported ;) 281 * IPv6 multicast router mode is now supported ;)
282 */ 282 */
283 if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && 283 if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
284 !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) && 284 !(ipv6_addr_type(&hdr->daddr) &
285 (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
285 likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { 286 likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
286 /* 287 /*
287 * Okay, we try to forward - split and duplicate 288 * Okay, we try to forward - split and duplicate
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 9a5fd3c3e530..362ba47968e4 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -280,7 +280,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
280 struct tty_port *port = &self->port; 280 struct tty_port *port = &self->port;
281 DECLARE_WAITQUEUE(wait, current); 281 DECLARE_WAITQUEUE(wait, current);
282 int retval; 282 int retval;
283 int do_clocal = 0, extra_count = 0; 283 int do_clocal = 0;
284 unsigned long flags; 284 unsigned long flags;
285 285
286 IRDA_DEBUG(2, "%s()\n", __func__ ); 286 IRDA_DEBUG(2, "%s()\n", __func__ );
@@ -289,8 +289,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
289 * If non-blocking mode is set, or the port is not enabled, 289 * If non-blocking mode is set, or the port is not enabled,
290 * then make the check up front and then exit. 290 * then make the check up front and then exit.
291 */ 291 */
292 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ 292 if (test_bit(TTY_IO_ERROR, &tty->flags)) {
293 /* nonblock mode is set or port is not enabled */ 293 port->flags |= ASYNC_NORMAL_ACTIVE;
294 return 0;
295 }
296
297 if (filp->f_flags & O_NONBLOCK) {
298 /* nonblock mode is set */
299 if (tty->termios.c_cflag & CBAUD)
300 tty_port_raise_dtr_rts(port);
294 port->flags |= ASYNC_NORMAL_ACTIVE; 301 port->flags |= ASYNC_NORMAL_ACTIVE;
295 IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); 302 IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ );
296 return 0; 303 return 0;
@@ -315,18 +322,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
315 __FILE__, __LINE__, tty->driver->name, port->count); 322 __FILE__, __LINE__, tty->driver->name, port->count);
316 323
317 spin_lock_irqsave(&port->lock, flags); 324 spin_lock_irqsave(&port->lock, flags);
318 if (!tty_hung_up_p(filp)) { 325 if (!tty_hung_up_p(filp))
319 extra_count = 1;
320 port->count--; 326 port->count--;
321 }
322 spin_unlock_irqrestore(&port->lock, flags);
323 port->blocked_open++; 327 port->blocked_open++;
328 spin_unlock_irqrestore(&port->lock, flags);
324 329
325 while (1) { 330 while (1) {
326 if (tty->termios.c_cflag & CBAUD) 331 if (tty->termios.c_cflag & CBAUD)
327 tty_port_raise_dtr_rts(port); 332 tty_port_raise_dtr_rts(port);
328 333
329 current->state = TASK_INTERRUPTIBLE; 334 set_current_state(TASK_INTERRUPTIBLE);
330 335
331 if (tty_hung_up_p(filp) || 336 if (tty_hung_up_p(filp) ||
332 !test_bit(ASYNCB_INITIALIZED, &port->flags)) { 337 !test_bit(ASYNCB_INITIALIZED, &port->flags)) {
@@ -361,13 +366,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
361 __set_current_state(TASK_RUNNING); 366 __set_current_state(TASK_RUNNING);
362 remove_wait_queue(&port->open_wait, &wait); 367 remove_wait_queue(&port->open_wait, &wait);
363 368
364 if (extra_count) { 369 spin_lock_irqsave(&port->lock, flags);
365 /* ++ is not atomic, so this should be protected - Jean II */ 370 if (!tty_hung_up_p(filp))
366 spin_lock_irqsave(&port->lock, flags);
367 port->count++; 371 port->count++;
368 spin_unlock_irqrestore(&port->lock, flags);
369 }
370 port->blocked_open--; 372 port->blocked_open--;
373 spin_unlock_irqrestore(&port->lock, flags);
371 374
372 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", 375 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
373 __FILE__, __LINE__, tty->driver->name, port->count); 376 __FILE__, __LINE__, tty->driver->name, port->count);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 556fdafdd1ea..8555f331ea60 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2201,7 +2201,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
2201 XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW); 2201 XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW);
2202 xp->priority = pol->sadb_x_policy_priority; 2202 xp->priority = pol->sadb_x_policy_priority;
2203 2203
2204 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], 2204 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
2205 xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr); 2205 xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr);
2206 if (!xp->family) { 2206 if (!xp->family) {
2207 err = -EINVAL; 2207 err = -EINVAL;
@@ -2214,7 +2214,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
2214 if (xp->selector.sport) 2214 if (xp->selector.sport)
2215 xp->selector.sport_mask = htons(0xffff); 2215 xp->selector.sport_mask = htons(0xffff);
2216 2216
2217 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], 2217 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];
2218 pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr); 2218 pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr);
2219 xp->selector.prefixlen_d = sa->sadb_address_prefixlen; 2219 xp->selector.prefixlen_d = sa->sadb_address_prefixlen;
2220 2220
@@ -2315,7 +2315,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
2315 2315
2316 memset(&sel, 0, sizeof(sel)); 2316 memset(&sel, 0, sizeof(sel));
2317 2317
2318 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], 2318 sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
2319 sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr); 2319 sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);
2320 sel.prefixlen_s = sa->sadb_address_prefixlen; 2320 sel.prefixlen_s = sa->sadb_address_prefixlen;
2321 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); 2321 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2323,7 +2323,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
2323 if (sel.sport) 2323 if (sel.sport)
2324 sel.sport_mask = htons(0xffff); 2324 sel.sport_mask = htons(0xffff);
2325 2325
2326 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], 2326 sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];
2327 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); 2327 pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
2328 sel.prefixlen_d = sa->sadb_address_prefixlen; 2328 sel.prefixlen_d = sa->sadb_address_prefixlen;
2329 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); 2329 sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
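
The af_key change replaces trailing commas with semicolons after the sa = ext_hdrs[...] assignments. The comma operator fused each pair of lines into one expression statement; evaluation still runs left to right, so behaviour was unchanged here, but the hidden statement boundary is easy to break when the code is edited. A tiny illustration with hypothetical variables:

#include <stdio.h>

int main(void)
{
	int sa, family;
	int ext_hdrs[4] = { 10, 20, 30, 40 };

	/* Comma operator: one statement, left side evaluated before right. */
	sa = ext_hdrs[1],
	family = sa + 1;

	printf("comma:     sa=%d family=%d\n", sa, family);

	/* Semicolons: two ordinary statements, same result, clearer intent. */
	sa = ext_hdrs[2];
	family = sa + 1;

	printf("semicolon: sa=%d family=%d\n", sa, family);
	return 0;
}
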
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 808f5fcd1ced..fb306814576a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3290,14 +3290,19 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
3290 int ret = -ENODATA; 3290 int ret = -ENODATA;
3291 3291
3292 rcu_read_lock(); 3292 rcu_read_lock();
3293 if (local->use_chanctx) { 3293 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
3294 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 3294 if (chanctx_conf) {
3295 if (chanctx_conf) { 3295 *chandef = chanctx_conf->def;
3296 *chandef = chanctx_conf->def; 3296 ret = 0;
3297 ret = 0; 3297 } else if (local->open_count > 0 &&
3298 } 3298 local->open_count == local->monitors &&
3299 } else if (local->open_count == local->monitors) { 3299 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
3300 *chandef = local->monitor_chandef; 3300 if (local->use_chanctx)
3301 *chandef = local->monitor_chandef;
3302 else
3303 cfg80211_chandef_create(chandef,
3304 local->_oper_channel,
3305 local->_oper_channel_type);
3301 ret = 0; 3306 ret = 0;
3302 } 3307 }
3303 rcu_read_unlock(); 3308 rcu_read_unlock();
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 640afab304d7..baaa8608e52d 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -541,6 +541,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
541 541
542 ieee80211_adjust_monitor_flags(sdata, 1); 542 ieee80211_adjust_monitor_flags(sdata, 1);
543 ieee80211_configure_filter(local); 543 ieee80211_configure_filter(local);
544 mutex_lock(&local->mtx);
545 ieee80211_recalc_idle(local);
546 mutex_unlock(&local->mtx);
544 547
545 netif_carrier_on(dev); 548 netif_carrier_on(dev);
546 break; 549 break;
@@ -812,6 +815,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
812 815
813 ieee80211_adjust_monitor_flags(sdata, -1); 816 ieee80211_adjust_monitor_flags(sdata, -1);
814 ieee80211_configure_filter(local); 817 ieee80211_configure_filter(local);
818 mutex_lock(&local->mtx);
819 ieee80211_recalc_idle(local);
820 mutex_unlock(&local->mtx);
815 break; 821 break;
816 case NL80211_IFTYPE_P2P_DEVICE: 822 case NL80211_IFTYPE_P2P_DEVICE:
817 /* relies on synchronize_rcu() below */ 823 /* relies on synchronize_rcu() below */
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 9f6464f3e05f..141577412d84 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -647,6 +647,9 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
647 our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) & 647 our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) &
648 mask) >> shift; 648 mask) >> shift;
649 649
650 if (our_mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
651 continue;
652
650 switch (ap_mcs) { 653 switch (ap_mcs) {
651 default: 654 default:
652 if (our_mcs <= ap_mcs) 655 if (our_mcs <= ap_mcs)
@@ -3503,6 +3506,14 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
3503 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3506 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3504 3507
3505 /* 3508 /*
3509 * Stop timers before deleting work items, as timers
3510 * could race and re-add the work-items. They will be
3511 * re-established on connection.
3512 */
3513 del_timer_sync(&ifmgd->conn_mon_timer);
3514 del_timer_sync(&ifmgd->bcn_mon_timer);
3515
3516 /*
3506 * we need to use atomic bitops for the running bits 3517 * we need to use atomic bitops for the running bits
3507 * only because both timers might fire at the same 3518 * only because both timers might fire at the same
3508 * time -- the code here is properly synchronised. 3519 * time -- the code here is properly synchronised.
@@ -3516,13 +3527,9 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
3516 if (del_timer_sync(&ifmgd->timer)) 3527 if (del_timer_sync(&ifmgd->timer))
3517 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); 3528 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
3518 3529
3519 cancel_work_sync(&ifmgd->chswitch_work);
3520 if (del_timer_sync(&ifmgd->chswitch_timer)) 3530 if (del_timer_sync(&ifmgd->chswitch_timer))
3521 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); 3531 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
3522 3532 cancel_work_sync(&ifmgd->chswitch_work);
3523 /* these will just be re-established on connection */
3524 del_timer_sync(&ifmgd->conn_mon_timer);
3525 del_timer_sync(&ifmgd->bcn_mon_timer);
3526} 3533}
3527 3534
3528void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) 3535void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
@@ -4315,6 +4322,17 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
4315{ 4322{
4316 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 4323 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
4317 4324
4325 /*
4326 * Make sure some work items will not run after this,
4327 * they will not do anything but might not have been
4328 * cancelled when disconnecting.
4329 */
4330 cancel_work_sync(&ifmgd->monitor_work);
4331 cancel_work_sync(&ifmgd->beacon_connection_loss_work);
4332 cancel_work_sync(&ifmgd->request_smps_work);
4333 cancel_work_sync(&ifmgd->csa_connection_drop_work);
4334 cancel_work_sync(&ifmgd->chswitch_work);
4335
4318 mutex_lock(&ifmgd->mtx); 4336 mutex_lock(&ifmgd->mtx);
4319 if (ifmgd->assoc_data) 4337 if (ifmgd->assoc_data)
4320 ieee80211_destroy_assoc_data(sdata, false); 4338 ieee80211_destroy_assoc_data(sdata, false);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index ce78d1149f1d..8914d2d2881a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2745,7 +2745,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2745 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 2745 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
2746 } 2746 }
2747 2747
2748 sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); 2748 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2749 sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
2749 if (!ieee80211_tx_prepare(sdata, &tx, skb)) 2750 if (!ieee80211_tx_prepare(sdata, &tx, skb))
2750 break; 2751 break;
2751 dev_kfree_skb_any(skb); 2752 dev_kfree_skb_any(skb);
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index a9740bd6fe54..94b4b9853f60 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -339,6 +339,13 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
339{ 339{
340 const struct nf_conn_help *help; 340 const struct nf_conn_help *help;
341 const struct nf_conntrack_helper *helper; 341 const struct nf_conntrack_helper *helper;
342 struct va_format vaf;
343 va_list args;
344
345 va_start(args, fmt);
346
347 vaf.fmt = fmt;
348 vaf.va = &args;
342 349
343 /* Called from the helper function, this call never fails */ 350 /* Called from the helper function, this call never fails */
344 help = nfct_help(ct); 351 help = nfct_help(ct);
@@ -347,7 +354,9 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
347 helper = rcu_dereference(help->helper); 354 helper = rcu_dereference(help->helper);
348 355
349 nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, 356 nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL,
350 "nf_ct_%s: dropping packet: %s ", helper->name, fmt); 357 "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf);
358
359 va_end(args);
351} 360}
352EXPORT_SYMBOL_GPL(nf_ct_helper_log); 361EXPORT_SYMBOL_GPL(nf_ct_helper_log);
353 362
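
The nf_ct_helper_log() fix stops splicing the caller's format string into another format and instead hands printk a struct va_format expanded via %pV, so the variadic arguments are consumed by the right format. A userspace analogue of the same idea, forwarding fmt and the va_list through vsnprintf in a hypothetical helper (not the kernel API):

#include <stdarg.h>
#include <stdio.h>

/* Log with a fixed prefix, forwarding the caller's format and arguments. */
static void helper_log(const char *helper, const char *fmt, ...)
{
	char msg[128];
	va_list args;

	va_start(args, fmt);
	vsnprintf(msg, sizeof(msg), fmt, args);	/* expand with caller's args */
	va_end(args);

	fprintf(stderr, "nf_ct_%s: dropping packet: %s\n", helper, msg);
}

int main(void)
{
	/* Passing fmt through as plain text would print the literal "%u";
	 * forwarding the va_list expands it properly. */
	helper_log("ftp", "offset %u out of range", 4242u);
	return 0;
}
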
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index d578ec251712..0b1b32cda307 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -62,11 +62,6 @@ void nfnl_unlock(__u8 subsys_id)
62} 62}
63EXPORT_SYMBOL_GPL(nfnl_unlock); 63EXPORT_SYMBOL_GPL(nfnl_unlock);
64 64
65static struct mutex *nfnl_get_lock(__u8 subsys_id)
66{
67 return &table[subsys_id].mutex;
68}
69
70int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) 65int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
71{ 66{
72 nfnl_lock(n->subsys_id); 67 nfnl_lock(n->subsys_id);
@@ -199,7 +194,7 @@ replay:
199 rcu_read_unlock(); 194 rcu_read_unlock();
200 nfnl_lock(subsys_id); 195 nfnl_lock(subsys_id);
201 if (rcu_dereference_protected(table[subsys_id].subsys, 196 if (rcu_dereference_protected(table[subsys_id].subsys,
202 lockdep_is_held(nfnl_get_lock(subsys_id))) != ss || 197 lockdep_is_held(&table[subsys_id].mutex)) != ss ||
203 nfnetlink_find_client(type, ss) != nc) 198 nfnetlink_find_client(type, ss) != nc)
204 err = -EAGAIN; 199 err = -EAGAIN;
205 else if (nc->call) 200 else if (nc->call)
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
index ba92824086f3..3228d7f24eb4 100644
--- a/net/netfilter/xt_AUDIT.c
+++ b/net/netfilter/xt_AUDIT.c
@@ -124,6 +124,9 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
124 const struct xt_audit_info *info = par->targinfo; 124 const struct xt_audit_info *info = par->targinfo;
125 struct audit_buffer *ab; 125 struct audit_buffer *ab;
126 126
127 if (audit_enabled == 0)
128 goto errout;
129
127 ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT); 130 ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
128 if (ab == NULL) 131 if (ab == NULL)
129 goto errout; 132 goto errout;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 847d495cd4de..8a6c6ea466d8 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1189,8 +1189,6 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
1189 struct netlbl_unlhsh_walk_arg cb_arg; 1189 struct netlbl_unlhsh_walk_arg cb_arg;
1190 u32 skip_bkt = cb->args[0]; 1190 u32 skip_bkt = cb->args[0];
1191 u32 skip_chain = cb->args[1]; 1191 u32 skip_chain = cb->args[1];
1192 u32 skip_addr4 = cb->args[2];
1193 u32 skip_addr6 = cb->args[3];
1194 u32 iter_bkt; 1192 u32 iter_bkt;
1195 u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0; 1193 u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
1196 struct netlbl_unlhsh_iface *iface; 1194 struct netlbl_unlhsh_iface *iface;
@@ -1215,7 +1213,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
1215 continue; 1213 continue;
1216 netlbl_af4list_foreach_rcu(addr4, 1214 netlbl_af4list_foreach_rcu(addr4,
1217 &iface->addr4_list) { 1215 &iface->addr4_list) {
1218 if (iter_addr4++ < skip_addr4) 1216 if (iter_addr4++ < cb->args[2])
1219 continue; 1217 continue;
1220 if (netlbl_unlabel_staticlist_gen( 1218 if (netlbl_unlabel_staticlist_gen(
1221 NLBL_UNLABEL_C_STATICLIST, 1219 NLBL_UNLABEL_C_STATICLIST,
@@ -1231,7 +1229,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
1231#if IS_ENABLED(CONFIG_IPV6) 1229#if IS_ENABLED(CONFIG_IPV6)
1232 netlbl_af6list_foreach_rcu(addr6, 1230 netlbl_af6list_foreach_rcu(addr6,
1233 &iface->addr6_list) { 1231 &iface->addr6_list) {
1234 if (iter_addr6++ < skip_addr6) 1232 if (iter_addr6++ < cb->args[3])
1235 continue; 1233 continue;
1236 if (netlbl_unlabel_staticlist_gen( 1234 if (netlbl_unlabel_staticlist_gen(
1237 NLBL_UNLABEL_C_STATICLIST, 1235 NLBL_UNLABEL_C_STATICLIST,
@@ -1250,10 +1248,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
1250 1248
1251unlabel_staticlist_return: 1249unlabel_staticlist_return:
1252 rcu_read_unlock(); 1250 rcu_read_unlock();
1253 cb->args[0] = skip_bkt; 1251 cb->args[0] = iter_bkt;
1254 cb->args[1] = skip_chain; 1252 cb->args[1] = iter_chain;
1255 cb->args[2] = skip_addr4; 1253 cb->args[2] = iter_addr4;
1256 cb->args[3] = skip_addr6; 1254 cb->args[3] = iter_addr6;
1257 return skb->len; 1255 return skb->len;
1258} 1256}
1259 1257
@@ -1273,12 +1271,9 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
1273{ 1271{
1274 struct netlbl_unlhsh_walk_arg cb_arg; 1272 struct netlbl_unlhsh_walk_arg cb_arg;
1275 struct netlbl_unlhsh_iface *iface; 1273 struct netlbl_unlhsh_iface *iface;
1276 u32 skip_addr4 = cb->args[0]; 1274 u32 iter_addr4 = 0, iter_addr6 = 0;
1277 u32 skip_addr6 = cb->args[1];
1278 u32 iter_addr4 = 0;
1279 struct netlbl_af4list *addr4; 1275 struct netlbl_af4list *addr4;
1280#if IS_ENABLED(CONFIG_IPV6) 1276#if IS_ENABLED(CONFIG_IPV6)
1281 u32 iter_addr6 = 0;
1282 struct netlbl_af6list *addr6; 1277 struct netlbl_af6list *addr6;
1283#endif 1278#endif
1284 1279
@@ -1292,7 +1287,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
1292 goto unlabel_staticlistdef_return; 1287 goto unlabel_staticlistdef_return;
1293 1288
1294 netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { 1289 netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) {
1295 if (iter_addr4++ < skip_addr4) 1290 if (iter_addr4++ < cb->args[0])
1296 continue; 1291 continue;
1297 if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, 1292 if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
1298 iface, 1293 iface,
@@ -1305,7 +1300,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
1305 } 1300 }
1306#if IS_ENABLED(CONFIG_IPV6) 1301#if IS_ENABLED(CONFIG_IPV6)
1307 netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { 1302 netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
1308 if (iter_addr6++ < skip_addr6) 1303 if (iter_addr6++ < cb->args[1])
1309 continue; 1304 continue;
1310 if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, 1305 if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
1311 iface, 1306 iface,
@@ -1320,8 +1315,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
1320 1315
1321unlabel_staticlistdef_return: 1316unlabel_staticlistdef_return:
1322 rcu_read_unlock(); 1317 rcu_read_unlock();
1323 cb->args[0] = skip_addr4; 1318 cb->args[0] = iter_addr4;
1324 cb->args[1] = skip_addr6; 1319 cb->args[1] = iter_addr6;
1325 return skb->len; 1320 return skb->len;
1326} 1321}
1327 1322
diff --git a/net/rds/stats.c b/net/rds/stats.c
index 7be790d60b90..73be187d389e 100644
--- a/net/rds/stats.c
+++ b/net/rds/stats.c
@@ -87,6 +87,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter,
87 for (i = 0; i < nr; i++) { 87 for (i = 0; i < nr; i++) {
88 BUG_ON(strlen(names[i]) >= sizeof(ctr.name)); 88 BUG_ON(strlen(names[i]) >= sizeof(ctr.name));
89 strncpy(ctr.name, names[i], sizeof(ctr.name) - 1); 89 strncpy(ctr.name, names[i], sizeof(ctr.name) - 1);
90 ctr.name[sizeof(ctr.name) - 1] = '\0';
90 ctr.value = values[i]; 91 ctr.value = values[i];
91 92
92 rds_info_copy(iter, &ctr, sizeof(ctr)); 93 rds_info_copy(iter, &ctr, sizeof(ctr));
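
The rds_stats change adds an explicit terminator because strncpy() writes no NUL when the source fills the whole count, and here the count is sizeof(ctr.name) - 1 while the name may be exactly that long. A standalone sketch of the pattern:

#include <stdio.h>
#include <string.h>

struct counter {
	char name[8];
	long value;
};

int main(void)
{
	struct counter ctr;
	const char *src = "conn_re";	/* exactly sizeof(name) - 1 chars */

	memset(&ctr, 0x41, sizeof(ctr));	/* simulate stale stack bytes */

	/* strncpy() writes no terminator when it copies 'n' non-NUL bytes,
	 * so the last byte must be set explicitly. */
	strncpy(ctr.name, src, sizeof(ctr.name) - 1);
	ctr.name[sizeof(ctr.name) - 1] = '\0';

	printf("name=\"%s\" (len=%zu)\n", ctr.name, strlen(ctr.name));
	return 0;
}
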
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index e9a77f621c3d..d51852bba01c 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -298,6 +298,10 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
298 new_num_classes == q->max_agg_classes - 1) /* agg no more full */ 298 new_num_classes == q->max_agg_classes - 1) /* agg no more full */
299 hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); 299 hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
300 300
301 /* The next assignment may let
302 * agg->initial_budget > agg->budgetmax
303 * hold, we will take it into account in charge_actual_service().
304 */
301 agg->budgetmax = new_num_classes * agg->lmax; 305 agg->budgetmax = new_num_classes * agg->lmax;
302 new_agg_weight = agg->class_weight * new_num_classes; 306 new_agg_weight = agg->class_weight * new_num_classes;
303 agg->inv_w = ONE_FP/new_agg_weight; 307 agg->inv_w = ONE_FP/new_agg_weight;
@@ -817,7 +821,7 @@ static void qfq_make_eligible(struct qfq_sched *q)
817 unsigned long old_vslot = q->oldV >> q->min_slot_shift; 821 unsigned long old_vslot = q->oldV >> q->min_slot_shift;
818 822
819 if (vslot != old_vslot) { 823 if (vslot != old_vslot) {
820 unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1; 824 unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1;
821 qfq_move_groups(q, mask, IR, ER); 825 qfq_move_groups(q, mask, IR, ER);
822 qfq_move_groups(q, mask, IB, EB); 826 qfq_move_groups(q, mask, IB, EB);
823 } 827 }
@@ -988,12 +992,23 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
988/* Update F according to the actual service received by the aggregate. */ 992/* Update F according to the actual service received by the aggregate. */
989static inline void charge_actual_service(struct qfq_aggregate *agg) 993static inline void charge_actual_service(struct qfq_aggregate *agg)
990{ 994{
991 /* compute the service received by the aggregate */ 995 /* Compute the service received by the aggregate, taking into
992 u32 service_received = agg->initial_budget - agg->budget; 996 * account that, after decreasing the number of classes in
997 * agg, it may happen that
998 * agg->initial_budget - agg->budget > agg->budgetmax
999 */
1000 u32 service_received = min(agg->budgetmax,
1001 agg->initial_budget - agg->budget);
993 1002
994 agg->F = agg->S + (u64)service_received * agg->inv_w; 1003 agg->F = agg->S + (u64)service_received * agg->inv_w;
995} 1004}
996 1005
1006static inline void qfq_update_agg_ts(struct qfq_sched *q,
1007 struct qfq_aggregate *agg,
1008 enum update_reason reason);
1009
1010static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);
1011
997static struct sk_buff *qfq_dequeue(struct Qdisc *sch) 1012static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
998{ 1013{
999 struct qfq_sched *q = qdisc_priv(sch); 1014 struct qfq_sched *q = qdisc_priv(sch);
@@ -1021,7 +1036,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
1021 in_serv_agg->initial_budget = in_serv_agg->budget = 1036 in_serv_agg->initial_budget = in_serv_agg->budget =
1022 in_serv_agg->budgetmax; 1037 in_serv_agg->budgetmax;
1023 1038
1024 if (!list_empty(&in_serv_agg->active)) 1039 if (!list_empty(&in_serv_agg->active)) {
1025 /* 1040 /*
1026 * Still active: reschedule for 1041 * Still active: reschedule for
1027 * service. Possible optimization: if no other 1042 * service. Possible optimization: if no other
@@ -1032,8 +1047,9 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
1032 * handle it, we would need to maintain an 1047 * handle it, we would need to maintain an
1033 * extra num_active_aggs field. 1048 * extra num_active_aggs field.
1034 */ 1049 */
1035 qfq_activate_agg(q, in_serv_agg, requeue); 1050 qfq_update_agg_ts(q, in_serv_agg, requeue);
1036 else if (sch->q.qlen == 0) { /* no aggregate to serve */ 1051 qfq_schedule_agg(q, in_serv_agg);
1052 } else if (sch->q.qlen == 0) { /* no aggregate to serve */
1037 q->in_serv_agg = NULL; 1053 q->in_serv_agg = NULL;
1038 return NULL; 1054 return NULL;
1039 } 1055 }
@@ -1052,7 +1068,15 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
1052 qdisc_bstats_update(sch, skb); 1068 qdisc_bstats_update(sch, skb);
1053 1069
1054 agg_dequeue(in_serv_agg, cl, len); 1070 agg_dequeue(in_serv_agg, cl, len);
1055 in_serv_agg->budget -= len; 1071 /* If lmax is lowered, through qfq_change_class, for a class
1072 * owning pending packets with larger size than the new value
1073 * of lmax, then the following condition may hold.
1074 */
1075 if (unlikely(in_serv_agg->budget < len))
1076 in_serv_agg->budget = 0;
1077 else
1078 in_serv_agg->budget -= len;
1079
1056 q->V += (u64)len * IWSUM; 1080 q->V += (u64)len * IWSUM;
1057 pr_debug("qfq dequeue: len %u F %lld now %lld\n", 1081 pr_debug("qfq dequeue: len %u F %lld now %lld\n",
1058 len, (unsigned long long) in_serv_agg->F, 1082 len, (unsigned long long) in_serv_agg->F,
@@ -1217,17 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1217 cl->deficit = agg->lmax; 1241 cl->deficit = agg->lmax;
1218 list_add_tail(&cl->alist, &agg->active); 1242 list_add_tail(&cl->alist, &agg->active);
1219 1243
1220 if (list_first_entry(&agg->active, struct qfq_class, alist) != cl) 1244 if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
1221 return err; /* aggregate was not empty, nothing else to do */ 1245 q->in_serv_agg == agg)
1246 return err; /* non-empty or in service, nothing else to do */
1222 1247
1223 /* recharge budget */ 1248 qfq_activate_agg(q, agg, enqueue);
1224 agg->initial_budget = agg->budget = agg->budgetmax;
1225
1226 qfq_update_agg_ts(q, agg, enqueue);
1227 if (q->in_serv_agg == NULL)
1228 q->in_serv_agg = agg;
1229 else if (agg != q->in_serv_agg)
1230 qfq_schedule_agg(q, agg);
1231 1249
1232 return err; 1250 return err;
1233} 1251}
@@ -1261,7 +1279,8 @@ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
1261 /* group was surely ineligible, remove */ 1279 /* group was surely ineligible, remove */
1262 __clear_bit(grp->index, &q->bitmaps[IR]); 1280 __clear_bit(grp->index, &q->bitmaps[IR]);
1263 __clear_bit(grp->index, &q->bitmaps[IB]); 1281 __clear_bit(grp->index, &q->bitmaps[IB]);
1264 } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V)) 1282 } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
1283 q->in_serv_agg == NULL)
1265 q->V = roundedS; 1284 q->V = roundedS;
1266 1285
1267 grp->S = roundedS; 1286 grp->S = roundedS;
@@ -1284,8 +1303,15 @@ skip_update:
1284static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, 1303static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
1285 enum update_reason reason) 1304 enum update_reason reason)
1286{ 1305{
1306 agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */
1307
1287 qfq_update_agg_ts(q, agg, reason); 1308 qfq_update_agg_ts(q, agg, reason);
1288 qfq_schedule_agg(q, agg); 1309 if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
1310 q->in_serv_agg = agg; /* start serving this aggregate */
1311 /* update V: to be in service, agg must be eligible */
1312 q->oldV = q->V = agg->S;
1313 } else if (agg != q->in_serv_agg)
1314 qfq_schedule_agg(q, agg);
1289} 1315}
1290 1316
1291static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, 1317static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
@@ -1357,8 +1383,6 @@ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
1357 __set_bit(grp->index, &q->bitmaps[s]); 1383 __set_bit(grp->index, &q->bitmaps[s]);
1358 } 1384 }
1359 } 1385 }
1360
1361 qfq_update_eligible(q);
1362} 1386}
1363 1387
1364static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) 1388static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
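
Two of the qfq hunks guard unsigned arithmetic: charge_actual_service() clamps the measured service to budgetmax, and the dequeue path avoids budget -= len when len exceeds what is left, since either would wrap a u32 to a huge value. A standalone sketch of the wrap and of both clamps, using plain unsigned ints rather than the qfq structures:

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int budgetmax = 2000;		/* lowered after a class left */
	unsigned int initial_budget = 3000;	/* granted before the change */
	unsigned int budget = 100, len = 1500;
	unsigned int new_budget, service;

	/* Unsigned subtraction wraps: 100 - 1500 becomes a huge number. */
	printf("naive budget -= len : %u\n", budget - len);

	/* Shape of the dequeue fix: clamp at zero instead of wrapping. */
	new_budget = (budget < len) ? 0 : budget - len;
	printf("clamped budget      : %u\n", new_budget);

	/* Shape of charge_actual_service(): never charge past budgetmax. */
	service = min_u32(budgetmax, initial_budget - new_budget);
	printf("service_received    : %u\n", service);
	return 0;
}
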
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 5ffff039b017..ea4155fe9733 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -367,8 +367,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
367 rdev->wiphy.rts_threshold = (u32) -1; 367 rdev->wiphy.rts_threshold = (u32) -1;
368 rdev->wiphy.coverage_class = 0; 368 rdev->wiphy.coverage_class = 0;
369 369
370 rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH | 370 rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH;
371 NL80211_FEATURE_ADVERTISE_CHAN_LIMITS;
372 371
373 return &rdev->wiphy; 372 return &rdev->wiphy;
374} 373}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e652d05ff712..d44ab216c0ec 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -557,18 +557,6 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
557 if ((chan->flags & IEEE80211_CHAN_RADAR) && 557 if ((chan->flags & IEEE80211_CHAN_RADAR) &&
558 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) 558 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
559 goto nla_put_failure; 559 goto nla_put_failure;
560 if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
561 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
562 goto nla_put_failure;
563 if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) &&
564 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS))
565 goto nla_put_failure;
566 if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) &&
567 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ))
568 goto nla_put_failure;
569 if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
570 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
571 goto nla_put_failure;
572 560
573 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, 561 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
574 DBM_TO_MBM(chan->max_power))) 562 DBM_TO_MBM(chan->max_power)))
@@ -1310,15 +1298,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
1310 dev->wiphy.max_acl_mac_addrs)) 1298 dev->wiphy.max_acl_mac_addrs))
1311 goto nla_put_failure; 1299 goto nla_put_failure;
1312 1300
1313 if (dev->wiphy.extended_capabilities &&
1314 (nla_put(msg, NL80211_ATTR_EXT_CAPA,
1315 dev->wiphy.extended_capabilities_len,
1316 dev->wiphy.extended_capabilities) ||
1317 nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
1318 dev->wiphy.extended_capabilities_len,
1319 dev->wiphy.extended_capabilities_mask)))
1320 goto nla_put_failure;
1321
1322 return genlmsg_end(msg, hdr); 1301 return genlmsg_end(msg, hdr);
1323 1302
1324 nla_put_failure: 1303 nla_put_failure:
@@ -1328,7 +1307,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
1328 1307
1329static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) 1308static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
1330{ 1309{
1331 int idx = 0; 1310 int idx = 0, ret;
1332 int start = cb->args[0]; 1311 int start = cb->args[0];
1333 struct cfg80211_registered_device *dev; 1312 struct cfg80211_registered_device *dev;
1334 1313
@@ -1338,9 +1317,29 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
1338 continue; 1317 continue;
1339 if (++idx <= start) 1318 if (++idx <= start)
1340 continue; 1319 continue;
1341 if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, 1320 ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid,
1342 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1321 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1343 dev) < 0) { 1322 dev);
1323 if (ret < 0) {
1324 /*
1325 * If sending the wiphy data didn't fit (ENOBUFS or
1326 * EMSGSIZE returned), this SKB is still empty (so
1327 * it's not too big because another wiphy dataset is
1328 * already in the skb) and we've not tried to adjust
1329 * the dump allocation yet ... then adjust the alloc
1330 * size to be bigger, and return 1 but with the empty
1331 * skb. This results in an empty message being RX'ed
1332 * in userspace, but that is ignored.
1333 *
1334 * We can then retry with the larger buffer.
1335 */
1336 if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
1337 !skb->len &&
1338 cb->min_dump_alloc < 4096) {
1339 cb->min_dump_alloc = 4096;
1340 mutex_unlock(&cfg80211_mutex);
1341 return 1;
1342 }
1344 idx--; 1343 idx--;
1345 break; 1344 break;
1346 } 1345 }
@@ -1357,7 +1356,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
1357 struct sk_buff *msg; 1356 struct sk_buff *msg;
1358 struct cfg80211_registered_device *dev = info->user_ptr[0]; 1357 struct cfg80211_registered_device *dev = info->user_ptr[0];
1359 1358
1360 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1359 msg = nlmsg_new(4096, GFP_KERNEL);
1361 if (!msg) 1360 if (!msg)
1362 return -ENOMEM; 1361 return -ENOMEM;
1363 1362
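
The nl80211 dump path now retries with a 4096-byte allocation when a single wiphy no longer fits the default netlink message size, and nl80211_get_wiphy allocates 4096 bytes up front. The same grow-and-retry shape in a standalone sketch, with a hypothetical fill() reporting -EMSGSIZE standing in for nl80211_send_wiphy():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical producer: needs 3000 bytes, reports -EMSGSIZE otherwise. */
static int fill(char *buf, size_t len)
{
	const size_t need = 3000;

	if (len < need)
		return -EMSGSIZE;
	memset(buf, 'x', need);
	return (int)need;
}

int main(void)
{
	size_t alloc = 1024;	/* analogue of the default message size */
	char *buf = NULL;
	int ret;

	for (;;) {
		char *tmp = realloc(buf, alloc);

		if (!tmp) {
			free(buf);
			return 1;
		}
		buf = tmp;

		ret = fill(buf, alloc);
		if (ret >= 0 || (ret != -EMSGSIZE && ret != -ENOBUFS))
			break;
		if (alloc >= 4096)
			break;		/* already retried once; give up */
		alloc = 4096;		/* retry with the bigger buffer */
	}

	printf("fill() returned %d with a %zu-byte buffer\n", ret, alloc);
	free(buf);
	return 0;
}
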