author    David S. Miller <davem@davemloft.net>  2013-11-04 13:48:30 -0500
committer David S. Miller <davem@davemloft.net>  2013-11-04 13:48:30 -0500
commit    394efd19d5fcae936261bd48e5b33b21897aacf8 (patch)
tree      c48cf3ddbb07fd87309f1abdf31a27c71330e587
parent    f421436a591d34fa5279b54a96ac07d70250cc8d (diff)
parent    be408cd3e1fef73e9408b196a79b9934697fe3b1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/emulex/benet/be.h
	drivers/net/netconsole.c
	net/bridge/br_private.h

Three mostly trivial conflicts.

The net/bridge/br_private.h conflict was a function signature (argument
addition) change overlapping with the extern removals from Joe Perches.

In drivers/net/netconsole.c we had one change adjusting a printk message
whilst another changed "printk(KERN_INFO" into "pr_info(".

Lastly, the emulex change was a new inline function addition overlapping
with Joe Perches's extern removals.

Signed-off-by: David S. Miller <davem@davemloft.net>
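For context, the netconsole conflict described above is the usual pattern of a
message-text tweak colliding with a printk(KERN_INFO ...) to pr_info()
conversion; pr_info(fmt, ...) is simply a wrapper that expands to
printk(KERN_INFO fmt, ...). A minimal sketch of the two overlapping forms
(illustrative only -- the function name and message text are placeholders, not
the actual drivers/net/netconsole.c lines):

	#include <linux/printk.h>

	/* Hypothetical example of the two forms that collided in the merge. */
	static void netconsole_log_example(void)
	{
		/* one change only reworded the message text: */
		printk(KERN_INFO "netconsole: network logging started\n");

		/* the other switched the same statement to the pr_info() helper,
		 * which logs at the same KERN_INFO level:
		 */
		pr_info("netconsole: network logging started\n");
	}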
-rw-r--r--  Documentation/networking/dccp.txt | 4
-rw-r--r--  Documentation/networking/e100.txt | 2
-rw-r--r--  Documentation/networking/ieee802154.txt | 4
-rw-r--r--  Documentation/networking/l2tp.txt | 2
-rw-r--r--  Documentation/networking/netdev-FAQ.txt | 24
-rw-r--r--  Documentation/networking/netlink_mmap.txt | 6
-rw-r--r--  Documentation/networking/operstates.txt | 4
-rw-r--r--  Documentation/networking/rxrpc.txt | 2
-rw-r--r--  Documentation/networking/stmmac.txt | 8
-rw-r--r--  Documentation/networking/vortex.txt | 4
-rw-r--r--  Documentation/networking/x25-iface.txt | 2
-rw-r--r--  MAINTAINERS | 131
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arc/mm/fault.c | 6
-rw-r--r--  arch/arm/boot/dts/integratorcp.dts | 9
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c | 4
-rw-r--r--  arch/mips/mti-malta/malta-int.c | 9
-rw-r--r--  arch/mips/ralink/timer.c | 2
-rw-r--r--  arch/parisc/kernel/head.S | 4
-rw-r--r--  arch/um/kernel/exitcode.c | 4
-rw-r--r--  arch/x86/include/asm/percpu.h | 3
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 6
-rw-r--r--  arch/x86/kernel/kvm.c | 2
-rw-r--r--  arch/x86/kernel/nmi.c | 4
-rw-r--r--  arch/xtensa/kernel/entry.S | 49
-rw-r--r--  arch/xtensa/kernel/signal.c | 2
-rw-r--r--  arch/xtensa/platforms/iss/network.c | 3
-rw-r--r--  drivers/clk/clk-nomadik.c | 21
-rw-r--r--  drivers/clk/mvebu/armada-370.c | 4
-rw-r--r--  drivers/clk/socfpga/clk.c | 2
-rw-r--r--  drivers/clk/versatile/clk-icst.c | 2
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 38
-rw-r--r--  drivers/dma/edma.c | 2
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 131
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 4
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 2
-rw-r--r--  drivers/input/input.c | 10
-rw-r--r--  drivers/input/keyboard/pxa27x_keypad.c | 11
-rw-r--r--  drivers/input/misc/cm109.c | 14
-rw-r--r--  drivers/input/mouse/alps.c | 1
-rw-r--r--  drivers/input/serio/i8042.c | 23
-rw-r--r--  drivers/input/tablet/wacom_sys.c | 4
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 8
-rw-r--r--  drivers/md/md.c | 5
-rw-r--r--  drivers/md/raid1.c | 1
-rw-r--r--  drivers/md/raid10.c | 1
-rw-r--r--  drivers/md/raid5.c | 20
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 2
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 7
-rw-r--r--  drivers/net/can/c_can/c_can.c | 6
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 20
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 20
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 16
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 10
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 6
-rw-r--r--  drivers/net/netconsole.c | 20
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 11
-rw-r--r--  drivers/net/virtio_net.c | 13
-rw-r--r--  drivers/net/wan/sbni.c | 89
-rw-r--r--  drivers/net/xen-netback/common.h | 1
-rw-r--r--  drivers/net/xen-netback/interface.c | 3
-rw-r--r--  drivers/net/xen-netback/netback.c | 10
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 6
-rw-r--r--  drivers/scsi/BusLogic.c | 16
-rw-r--r--  drivers/scsi/aacraid/linit.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 9
-rw-r--r--  drivers/scsi/sd.c | 2
-rw-r--r--  drivers/scsi/sg.c | 176
-rw-r--r--  drivers/staging/bcm/Bcmchar.c | 1
-rw-r--r--  drivers/staging/ozwpan/ozcdev.c | 3
-rw-r--r--  drivers/staging/sb105x/sb_pci_mp.c | 2
-rw-r--r--  drivers/staging/wlags49_h2/wl_priv.c | 9
-rw-r--r--  drivers/target/target_core_pscsi.c | 8
-rw-r--r--  drivers/target/target_core_sbc.c | 5
-rw-r--r--  drivers/target/target_core_xcopy.c | 53
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 9
-rw-r--r--  drivers/uio/uio.c | 17
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 6
-rw-r--r--  drivers/usb/serial/pl2303.c | 274
-rw-r--r--  drivers/vhost/scsi.c | 2
-rw-r--r--  drivers/video/au1100fb.c | 26
-rw-r--r--  drivers/video/au1200fb.c | 23
-rw-r--r--  fs/dcache.c | 5
-rw-r--r--  fs/ecryptfs/crypto.c | 2
-rw-r--r--  fs/ecryptfs/keystore.c | 3
-rw-r--r--  fs/eventpoll.c | 4
-rw-r--r--  fs/file_table.c | 4
-rw-r--r--  fs/select.c | 3
-rw-r--r--  fs/seq_file.c | 2
-rw-r--r--  include/linux/ipc_namespace.h | 6
-rw-r--r--  include/linux/netpoll.h | 5
-rw-r--r--  include/linux/percpu.h | 8
-rw-r--r--  include/net/ip6_fib.h | 1
-rw-r--r--  include/trace/events/target.h | 4
-rw-r--r--  include/uapi/linux/perf_event.h | 12
-rw-r--r--  ipc/ipc_sysctl.c | 20
-rw-r--r--  kernel/events/core.c | 4
-rw-r--r--  kernel/events/ring_buffer.c | 31
-rw-r--r--  kernel/mutex.c | 32
-rw-r--r--  kernel/power/hibernate.c | 2
-rw-r--r--  kernel/time/clockevents.c | 65
-rw-r--r--  lib/Kconfig.debug | 2
-rw-r--r--  lib/scatterlist.c | 3
-rw-r--r--  mm/huge_memory.c | 70
-rw-r--r--  mm/list_lru.c | 3
-rw-r--r--  mm/memcontrol.c | 57
-rw-r--r--  mm/memory.c | 53
-rw-r--r--  mm/migrate.c | 19
-rw-r--r--  mm/mprotect.c | 2
-rw-r--r--  mm/pagewalk.c | 2
-rw-r--r--  net/bridge/br_device.c | 2
-rw-r--r--  net/bridge/br_input.c | 2
-rw-r--r--  net/bridge/br_multicast.c | 44
-rw-r--r--  net/bridge/br_private.h | 5
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c | 9
-rw-r--r--  net/core/flow_dissector.c | 2
-rw-r--r--  net/core/netpoll.c | 31
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 5
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 5
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 7
-rw-r--r--  net/ipv4/tcp_input.c | 34
-rw-r--r--  net/ipv4/tcp_offload.c | 13
-rw-r--r--  net/ipv4/xfrm4_policy.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 5
-rw-r--r--  net/ipv6/route.c | 9
-rw-r--r--  net/ipv6/xfrm6_policy.c | 8
-rw-r--r--  net/netfilter/x_tables.c | 7
-rw-r--r--  net/netfilter/xt_NFQUEUE.c | 7
-rw-r--r--  net/openvswitch/dp_notify.c | 7
-rw-r--r--  net/openvswitch/vport-netdev.c | 16
-rw-r--r--  net/openvswitch/vport-netdev.h | 1
-rw-r--r--  net/sched/sch_fq.c | 1
-rw-r--r--  net/sctp/ipv6.c | 4
-rw-r--r--  net/sctp/sm_sideeffect.c | 1
-rw-r--r--  net/x25/Kconfig | 4
-rw-r--r--  net/xfrm/xfrm_ipcomp.c | 12
-rw-r--r--  scripts/kallsyms.c | 12
-rw-r--r--  scripts/link-vmlinux.sh | 2
-rw-r--r--  sound/core/pcm.c | 4
-rw-r--r--  sound/pci/hda/hda_codec.c | 4
-rw-r--r--  sound/pci/hda/hda_generic.c | 4
-rw-r--r--  sound/pci/hda/patch_analog.c | 18
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/soc/codecs/wm_hubs.c | 1
-rw-r--r--  sound/soc/soc-dapm.c | 4
-rw-r--r--  tools/perf/Documentation/perf-record.txt | 14
-rw-r--r--  tools/perf/Documentation/perf-top.txt | 18
-rw-r--r--  tools/perf/builtin-kvm.c | 7
-rw-r--r--  tools/perf/builtin-record.c | 73
-rw-r--r--  tools/perf/builtin-top.c | 33
-rw-r--r--  tools/perf/builtin-trace.c | 8
-rw-r--r--  tools/perf/tests/code-reading.c | 1
-rw-r--r--  tools/perf/tests/keep-tracking.c | 1
-rw-r--r--  tools/perf/tests/mmap-basic.c | 1
-rw-r--r--  tools/perf/tests/open-syscall-tp-fields.c | 4
-rw-r--r--  tools/perf/tests/perf-record.c | 2
-rw-r--r--  tools/perf/tests/perf-time-to-tsc.c | 4
-rw-r--r--  tools/perf/tests/sw-clock.c | 4
-rw-r--r--  tools/perf/tests/task-exit.c | 6
-rw-r--r--  tools/perf/ui/stdio/hist.c | 9
-rw-r--r--  tools/perf/util/callchain.h | 3
-rw-r--r--  tools/perf/util/event.c | 32
-rw-r--r--  tools/perf/util/evlist.c | 13
-rw-r--r--  tools/perf/util/evlist.h | 2
-rw-r--r--  tools/perf/util/evsel.c | 1
-rw-r--r--  tools/perf/util/hist.h | 13
-rw-r--r--  tools/perf/util/probe-finder.c | 2
-rw-r--r--  tools/perf/util/python.c | 2
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c | 2
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 37
-rw-r--r--  virt/kvm/kvm_main.c | 2
189 files changed, 1493 insertions, 1166 deletions
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index d718bc2ff1cf..bf5dbe3ab8c5 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -18,8 +18,8 @@ Introduction
 Datagram Congestion Control Protocol (DCCP) is an unreliable, connection
 oriented protocol designed to solve issues present in UDP and TCP, particularly
 for real-time and multimedia (streaming) traffic.
-It divides into a base protocol (RFC 4340) and plugable congestion control
-modules called CCIDs. Like plugable TCP congestion control, at least one CCID
+It divides into a base protocol (RFC 4340) and pluggable congestion control
+modules called CCIDs. Like pluggable TCP congestion control, at least one CCID
 needs to be enabled in order for the protocol to function properly. In the Linux
 implementation, this is the TCP-like CCID2 (RFC 4341). Additional CCIDs, such as
 the TCP-friendly CCID3 (RFC 4342), are optional.
diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt
index 13a32124bca0..f862cf3aff34 100644
--- a/Documentation/networking/e100.txt
+++ b/Documentation/networking/e100.txt
@@ -103,7 +103,7 @@ Additional Configurations
  PRO/100 Family of Adapters is e100.
 
  As an example, if you install the e100 driver for two PRO/100 adapters
- (eth0 and eth1), add the following to a configuraton file in /etc/modprobe.d/
+ (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/
 
  alias eth0 e100
  alias eth1 e100
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index 09eb57329f11..22bbc7225f8e 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -4,7 +4,7 @@
 
 Introduction
 ============
-The IEEE 802.15.4 working group focuses on standartization of bottom
+The IEEE 802.15.4 working group focuses on standardization of bottom
 two layers: Medium Access Control (MAC) and Physical (PHY). And there
 are mainly two options available for upper layers:
  - ZigBee - proprietary protocol from ZigBee Alliance
@@ -66,7 +66,7 @@ net_device, with .type = ARPHRD_IEEE802154. Data is exchanged with socket family
 code via plain sk_buffs. On skb reception skb->cb must contain additional
 info as described in the struct ieee802154_mac_cb. During packet transmission
 the skb->cb is used to provide additional data to device's header_ops->create
-function. Be aware, that this data can be overriden later (when socket code
+function. Be aware that this data can be overridden later (when socket code
 submits skb to qdisc), so if you need something from that cb later, you should
 store info in the skb->data on your own.
 
diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt
index e63fc1f7bf87..c74434de2fa5 100644
--- a/Documentation/networking/l2tp.txt
+++ b/Documentation/networking/l2tp.txt
@@ -197,7 +197,7 @@ state information because the file format is subject to change. It is
 implemented to provide extra debug information to help diagnose
 problems.) Users should use the netlink API.
 
-/proc/net/pppol2tp is also provided for backwards compaibility with
+/proc/net/pppol2tp is also provided for backwards compatibility with
 the original pppol2tp driver. It lists information about L2TPv2
 tunnels and sessions only. Its use is discouraged.
 
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
index d9112f01c44a..0fe1c6e0dbcd 100644
--- a/Documentation/networking/netdev-FAQ.txt
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -4,23 +4,23 @@ Information you need to know about netdev
 
 Q: What is netdev?
 
-A: It is a mailing list for all network related linux stuff. This includes
+A: It is a mailing list for all network-related Linux stuff. This includes
  anything found under net/ (i.e. core code like IPv6) and drivers/net
- (i.e. hardware specific drivers) in the linux source tree.
+ (i.e. hardware specific drivers) in the Linux source tree.
 
  Note that some subsystems (e.g. wireless drivers) which have a high volume
  of traffic have their own specific mailing lists.
 
- The netdev list is managed (like many other linux mailing lists) through
+ The netdev list is managed (like many other Linux mailing lists) through
  VGER ( http://vger.kernel.org/ ) and archives can be found below:
 
  http://marc.info/?l=linux-netdev
  http://www.spinics.net/lists/netdev/
 
- Aside from subsystems like that mentioned above, all network related linux
- development (i.e. RFC, review, comments, etc) takes place on netdev.
+ Aside from subsystems like that mentioned above, all network-related Linux
+ development (i.e. RFC, review, comments, etc.) takes place on netdev.
 
-Q: How do the changes posted to netdev make their way into linux?
+Q: How do the changes posted to netdev make their way into Linux?
 
 A: There are always two trees (git repositories) in play. Both are driven
  by David Miller, the main network maintainer. There is the "net" tree,
@@ -35,7 +35,7 @@ A: There are always two trees (git repositories) in play. Both are driven
 Q: How often do changes from these trees make it to the mainline Linus tree?
 
 A: To understand this, you need to know a bit of background information
- on the cadence of linux development. Each new release starts off with
+ on the cadence of Linux development. Each new release starts off with
  a two week "merge window" where the main maintainers feed their new
  stuff to Linus for merging into the mainline tree. After the two weeks,
  the merge window is closed, and it is called/tagged "-rc1". No new
@@ -46,7 +46,7 @@ A: To understand this, you need to know a bit of background information
  things are in a state of churn), and a week after the last vX.Y-rcN
  was done, the official "vX.Y" is released.
 
- Relating that to netdev: At the beginning of the 2 week merge window,
+ Relating that to netdev: At the beginning of the 2-week merge window,
  the net-next tree will be closed - no new changes/features. The
  accumulated new content of the past ~10 weeks will be passed onto
  mainline/Linus via a pull request for vX.Y -- at the same time,
@@ -59,16 +59,16 @@ A: To understand this, you need to know a bit of background information
  IMPORTANT: Do not send new net-next content to netdev during the
  period during which net-next tree is closed.
 
- Shortly after the two weeks have passed, (and vX.Y-rc1 is released) the
+ Shortly after the two weeks have passed (and vX.Y-rc1 is released), the
  tree for net-next reopens to collect content for the next (vX.Y+1) release.
 
  If you aren't subscribed to netdev and/or are simply unsure if net-next
  has re-opened yet, simply check the net-next git repository link above for
- any new networking related commits.
+ any new networking-related commits.
 
  The "net" tree continues to collect fixes for the vX.Y content, and
  is fed back to Linus at regular (~weekly) intervals. Meaning that the
- focus for "net" is on stablilization and bugfixes.
+ focus for "net" is on stabilization and bugfixes.
 
  Finally, the vX.Y gets released, and the whole cycle starts over.
 
@@ -217,7 +217,7 @@ A: Attention to detail. Re-read your own work as if you were the
  to why it happens, and then if necessary, explain why the fix proposed
  is the best way to get things done. Don't mangle whitespace, and as
  is common, don't mis-indent function arguments that span multiple lines.
- If it is your 1st patch, mail it to yourself so you can test apply
+ If it is your first patch, mail it to yourself so you can test apply
  it to an unpatched tree to confirm infrastructure didn't mangle it.
 
  Finally, go back and read Documentation/SubmittingPatches to be
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt
index 533378839546..b26122973525 100644
--- a/Documentation/networking/netlink_mmap.txt
+++ b/Documentation/networking/netlink_mmap.txt
@@ -45,7 +45,7 @@ processing.
 
 Conversion of the reception path involves calling poll() on the file
 descriptor, once the socket is readable the frames from the ring are
-processsed in order until no more messages are available, as indicated by
+processed in order until no more messages are available, as indicated by
 a status word in the frame header.
 
 On kernel side, in order to make use of memory mapped I/O on receive, the
@@ -56,7 +56,7 @@ Dumps of kernel databases automatically support memory mapped I/O.
 
 Conversion of the transmit path involves changing message construction to
 use memory from the TX ring instead of (usually) a buffer declared on the
-stack and setting up the frame header approriately. Optionally poll() can
+stack and setting up the frame header appropriately. Optionally poll() can
 be used to wait for free frames in the TX ring.
 
 Structured and definitions for using memory mapped I/O are contained in
@@ -231,7 +231,7 @@ Ring setup:
  if (setsockopt(fd, NETLINK_TX_RING, &req, sizeof(req)) < 0)
  exit(1)
 
- /* Calculate size of each invididual ring */
+ /* Calculate size of each individual ring */
  ring_size = req.nm_block_nr * req.nm_block_size;
 
  /* Map RX/TX rings. The TX ring is located after the RX ring */
diff --git a/Documentation/networking/operstates.txt b/Documentation/networking/operstates.txt
index 97694572338b..355c6d8ef8ad 100644
--- a/Documentation/networking/operstates.txt
+++ b/Documentation/networking/operstates.txt
@@ -89,8 +89,8 @@ packets. The name 'carrier' and the inversion are historical, think of
 it as lower layer.
 
 Note that for certain kind of soft-devices, which are not managing any
-real hardware, there is possible to set this bit from userpsace.
-One should use TVL IFLA_CARRIER to do so.
+real hardware, it is possible to set this bit from userspace. One
+should use TVL IFLA_CARRIER to do so.
 
 netif_carrier_ok() can be used to query that bit.
 
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 60d05eb77c64..b89bc82eed46 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -144,7 +144,7 @@ An overview of the RxRPC protocol:
  (*) Calls use ACK packets to handle reliability. Data packets are also
  explicitly sequenced per call.
 
- (*) There are two types of positive acknowledgement: hard-ACKs and soft-ACKs.
+ (*) There are two types of positive acknowledgment: hard-ACKs and soft-ACKs.
  A hard-ACK indicates to the far side that all the data received to a point
  has been received and processed; a soft-ACK indicates that the data has
  been received but may yet be discarded and re-requested. The sender may
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 457b8bbafb08..cdd916da838d 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -160,7 +160,7 @@ Where:
  o pmt: core has the embedded power module (optional).
  o force_sf_dma_mode: force DMA to use the Store and Forward mode
  instead of the Threshold.
- o force_thresh_dma_mode: force DMA to use the Shreshold mode other than
+ o force_thresh_dma_mode: force DMA to use the Threshold mode other than
  the Store and Forward mode.
  o riwt_off: force to disable the RX watchdog feature and switch to NAPI mode.
  o fix_mac_speed: this callback is used for modifying some syscfg registers
@@ -175,7 +175,7 @@ Where:
  registers.
  o custom_cfg/custom_data: this is a custom configuration that can be passed
  while initializing the resources.
- o bsp_priv: another private poiter.
+ o bsp_priv: another private pointer.
 
 For MDIO bus The we have:
 
@@ -271,7 +271,7 @@ reset procedure etc).
  o dwmac1000_dma.c: dma functions for the GMAC chip;
  o dwmac1000.h: specific header file for the GMAC;
  o dwmac100_core: MAC 100 core and dma code;
- o dwmac100_dma.c: dma funtions for the MAC chip;
+ o dwmac100_dma.c: dma functions for the MAC chip;
  o dwmac1000.h: specific header file for the MAC;
  o dwmac_lib.c: generic DMA functions shared among chips;
  o enh_desc.c: functions for handling enhanced descriptors;
@@ -364,4 +364,4 @@ Auto-negotiated Link Parter Ability.
 10) TODO:
  o XGMAC is not supported.
  o Complete the TBI & RTBI support.
- o extened VLAN support for 3.70a SYNP GMAC.
+ o extend VLAN support for 3.70a SYNP GMAC.
diff --git a/Documentation/networking/vortex.txt b/Documentation/networking/vortex.txt
index 9a8041dcbb53..97282da82b75 100644
--- a/Documentation/networking/vortex.txt
+++ b/Documentation/networking/vortex.txt
@@ -68,7 +68,7 @@ Module parameters
 
 There are several parameters which may be provided to the driver when
 its module is loaded. These are usually placed in /etc/modprobe.d/*.conf
-configuretion files. Example:
+configuration files. Example:
 
 options 3c59x debug=3 rx_copybreak=300
 
@@ -178,7 +178,7 @@ max_interrupt_work=N
 
  The driver's interrupt service routine can handle many receive and
  transmit packets in a single invocation. It does this in a loop.
- The value of max_interrupt_work governs how mnay times the interrupt
+ The value of max_interrupt_work governs how many times the interrupt
  service routine will loop. The default value is 32 loops. If this
  is exceeded the interrupt service routine gives up and generates a
  warning message "eth0: Too much work in interrupt".
diff --git a/Documentation/networking/x25-iface.txt b/Documentation/networking/x25-iface.txt
index 78f662ee0622..7f213b556e85 100644
--- a/Documentation/networking/x25-iface.txt
+++ b/Documentation/networking/x25-iface.txt
@@ -105,7 +105,7 @@ reduced by the following measures or a combination thereof:
  later.
  The lapb module interface was modified to support this. Its
  data_indication() method should now transparently pass the
- netif_rx() return value to the (lapb mopdule) caller.
+ netif_rx() return value to the (lapb module) caller.
 (2) Drivers for kernel versions 2.2.x should always check the global
  variable netdev_dropping when a new frame is received. The driver
  should only call netif_rx() if netdev_dropping is zero. Otherwise
diff --git a/MAINTAINERS b/MAINTAINERS
index 66469731e78d..727d7e359182 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1009,6 +1009,7 @@ ARM/Marvell Armada 370 and Armada XP SOC support
1009M: Jason Cooper <jason@lakedaemon.net> 1009M: Jason Cooper <jason@lakedaemon.net>
1010M: Andrew Lunn <andrew@lunn.ch> 1010M: Andrew Lunn <andrew@lunn.ch>
1011M: Gregory Clement <gregory.clement@free-electrons.com> 1011M: Gregory Clement <gregory.clement@free-electrons.com>
1012M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
1012L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1013L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1013S: Maintained 1014S: Maintained
1014F: arch/arm/mach-mvebu/ 1015F: arch/arm/mach-mvebu/
@@ -1016,6 +1017,7 @@ F: arch/arm/mach-mvebu/
1016ARM/Marvell Dove/Kirkwood/MV78xx0/Orion SOC support 1017ARM/Marvell Dove/Kirkwood/MV78xx0/Orion SOC support
1017M: Jason Cooper <jason@lakedaemon.net> 1018M: Jason Cooper <jason@lakedaemon.net>
1018M: Andrew Lunn <andrew@lunn.ch> 1019M: Andrew Lunn <andrew@lunn.ch>
1020M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
1019L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1021L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1020S: Maintained 1022S: Maintained
1021F: arch/arm/mach-dove/ 1023F: arch/arm/mach-dove/
@@ -1148,6 +1150,13 @@ F: drivers/net/ethernet/i825xx/ether1*
1148F: drivers/net/ethernet/seeq/ether3* 1150F: drivers/net/ethernet/seeq/ether3*
1149F: drivers/scsi/arm/ 1151F: drivers/scsi/arm/
1150 1152
1153ARM/Rockchip SoC support
1154M: Heiko Stuebner <heiko@sntech.de>
1155L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1156S: Maintained
1157F: arch/arm/mach-rockchip/
1158F: drivers/*/*rockchip*
1159
1151ARM/SHARK MACHINE SUPPORT 1160ARM/SHARK MACHINE SUPPORT
1152M: Alexander Schulz <alex@shark-linux.de> 1161M: Alexander Schulz <alex@shark-linux.de>
1153W: http://www.shark-linux.de/shark.html 1162W: http://www.shark-linux.de/shark.html
@@ -2719,6 +2728,8 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
2719DMA GENERIC OFFLOAD ENGINE SUBSYSTEM 2728DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
2720M: Vinod Koul <vinod.koul@intel.com> 2729M: Vinod Koul <vinod.koul@intel.com>
2721M: Dan Williams <dan.j.williams@intel.com> 2730M: Dan Williams <dan.j.williams@intel.com>
2731L: dmaengine@vger.kernel.org
2732Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
2722S: Supported 2733S: Supported
2723F: drivers/dma/ 2734F: drivers/dma/
2724F: include/linux/dma* 2735F: include/linux/dma*
@@ -2822,7 +2833,7 @@ M: Terje Bergström <tbergstrom@nvidia.com>
2822L: dri-devel@lists.freedesktop.org 2833L: dri-devel@lists.freedesktop.org
2823L: linux-tegra@vger.kernel.org 2834L: linux-tegra@vger.kernel.org
2824T: git git://anongit.freedesktop.org/tegra/linux.git 2835T: git git://anongit.freedesktop.org/tegra/linux.git
2825S: Maintained 2836S: Supported
2826F: drivers/gpu/host1x/ 2837F: drivers/gpu/host1x/
2827F: include/uapi/drm/tegra_drm.h 2838F: include/uapi/drm/tegra_drm.h
2828F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt 2839F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -4358,7 +4369,10 @@ F: arch/x86/kernel/microcode_intel.c
4358 4369
4359INTEL I/OAT DMA DRIVER 4370INTEL I/OAT DMA DRIVER
4360M: Dan Williams <dan.j.williams@intel.com> 4371M: Dan Williams <dan.j.williams@intel.com>
4361S: Maintained 4372M: Dave Jiang <dave.jiang@intel.com>
4373L: dmaengine@vger.kernel.org
4374Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
4375S: Supported
4362F: drivers/dma/ioat* 4376F: drivers/dma/ioat*
4363 4377
4364INTEL IOMMU (VT-d) 4378INTEL IOMMU (VT-d)
@@ -8310,14 +8324,72 @@ L: linux-media@vger.kernel.org
8310S: Maintained 8324S: Maintained
8311F: drivers/media/rc/ttusbir.c 8325F: drivers/media/rc/ttusbir.c
8312 8326
8313TEGRA SUPPORT 8327TEGRA ARCHITECTURE SUPPORT
8314M: Stephen Warren <swarren@wwwdotorg.org> 8328M: Stephen Warren <swarren@wwwdotorg.org>
8329M: Thierry Reding <thierry.reding@gmail.com>
8315L: linux-tegra@vger.kernel.org 8330L: linux-tegra@vger.kernel.org
8316Q: http://patchwork.ozlabs.org/project/linux-tegra/list/ 8331Q: http://patchwork.ozlabs.org/project/linux-tegra/list/
8317T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git 8332T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
8318S: Supported 8333S: Supported
8319N: [^a-z]tegra 8334N: [^a-z]tegra
8320 8335
8336TEGRA ASOC DRIVER
8337M: Stephen Warren <swarren@wwwdotorg.org>
8338S: Supported
8339F: sound/soc/tegra/
8340
8341TEGRA CLOCK DRIVER
8342M: Peter De Schrijver <pdeschrijver@nvidia.com>
8343M: Prashant Gaikwad <pgaikwad@nvidia.com>
8344S: Supported
8345F: drivers/clk/tegra/
8346
8347TEGRA DMA DRIVER
8348M: Laxman Dewangan <ldewangan@nvidia.com>
8349S: Supported
8350F: drivers/dma/tegra20-apb-dma.c
8351
8352TEGRA GPIO DRIVER
8353M: Stephen Warren <swarren@wwwdotorg.org>
8354S: Supported
8355F: drivers/gpio/gpio-tegra.c
8356
8357TEGRA I2C DRIVER
8358M: Laxman Dewangan <ldewangan@nvidia.com>
8359S: Supported
8360F: drivers/i2c/busses/i2c-tegra.c
8361
8362TEGRA IOMMU DRIVERS
8363M: Hiroshi Doyu <hdoyu@nvidia.com>
8364S: Supported
8365F: drivers/iommu/tegra*
8366
8367TEGRA KBC DRIVER
8368M: Rakesh Iyer <riyer@nvidia.com>
8369M: Laxman Dewangan <ldewangan@nvidia.com>
8370S: Supported
8371F: drivers/input/keyboard/tegra-kbc.c
8372
8373TEGRA PINCTRL DRIVER
8374M: Stephen Warren <swarren@wwwdotorg.org>
8375S: Supported
8376F: drivers/pinctrl/pinctrl-tegra*
8377
8378TEGRA PWM DRIVER
8379M: Thierry Reding <thierry.reding@gmail.com>
8380S: Supported
8381F: drivers/pwm/pwm-tegra.c
8382
8383TEGRA SERIAL DRIVER
8384M: Laxman Dewangan <ldewangan@nvidia.com>
8385S: Supported
8386F: drivers/tty/serial/serial-tegra.c
8387
8388TEGRA SPI DRIVER
8389M: Laxman Dewangan <ldewangan@nvidia.com>
8390S: Supported
8391F: drivers/spi/spi-tegra*
8392
8321TEHUTI ETHERNET DRIVER 8393TEHUTI ETHERNET DRIVER
8322M: Andy Gospodarek <andy@greyhouse.net> 8394M: Andy Gospodarek <andy@greyhouse.net>
8323L: netdev@vger.kernel.org 8395L: netdev@vger.kernel.org
@@ -8853,61 +8925,14 @@ W: http://pegasus2.sourceforge.net/
8853S: Maintained 8925S: Maintained
8854F: drivers/net/usb/rtl8150.c 8926F: drivers/net/usb/rtl8150.c
8855 8927
8856USB SERIAL BELKIN F5U103 DRIVER 8928USB SERIAL SUBSYSTEM
8857M: William Greathouse <wgreathouse@smva.com> 8929M: Johan Hovold <jhovold@gmail.com>
8858L: linux-usb@vger.kernel.org
8859S: Maintained
8860F: drivers/usb/serial/belkin_sa.*
8861
8862USB SERIAL CYPRESS M8 DRIVER
8863M: Lonnie Mendez <dignome@gmail.com>
8864L: linux-usb@vger.kernel.org
8865S: Maintained
8866W: http://geocities.com/i0xox0i
8867W: http://firstlight.net/cvs
8868F: drivers/usb/serial/cypress_m8.*
8869
8870USB SERIAL CYBERJACK DRIVER
8871M: Matthias Bruestle and Harald Welte <support@reiner-sct.com>
8872W: http://www.reiner-sct.de/support/treiber_cyberjack.php
8873S: Maintained
8874F: drivers/usb/serial/cyberjack.c
8875
8876USB SERIAL DIGI ACCELEPORT DRIVER
8877M: Peter Berger <pberger@brimson.com>
8878M: Al Borchers <alborchers@steinerpoint.com>
8879L: linux-usb@vger.kernel.org 8930L: linux-usb@vger.kernel.org
8880S: Maintained 8931S: Maintained
8881F: drivers/usb/serial/digi_acceleport.c
8882
8883USB SERIAL DRIVER
8884M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
8885L: linux-usb@vger.kernel.org
8886S: Supported
8887F: Documentation/usb/usb-serial.txt 8932F: Documentation/usb/usb-serial.txt
8888F: drivers/usb/serial/generic.c 8933F: drivers/usb/serial/
8889F: drivers/usb/serial/usb-serial.c
8890F: include/linux/usb/serial.h 8934F: include/linux/usb/serial.h
8891 8935
8892USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER
8893M: Gary Brubaker <xavyer@ix.netcom.com>
8894L: linux-usb@vger.kernel.org
8895S: Maintained
8896F: drivers/usb/serial/empeg.c
8897
8898USB SERIAL KEYSPAN DRIVER
8899M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
8900L: linux-usb@vger.kernel.org
8901S: Maintained
8902F: drivers/usb/serial/*keyspan*
8903
8904USB SERIAL WHITEHEAT DRIVER
8905M: Support Department <support@connecttech.com>
8906L: linux-usb@vger.kernel.org
8907W: http://www.connecttech.com
8908S: Supported
8909F: drivers/usb/serial/whiteheat*
8910
8911USB SMSC75XX ETHERNET DRIVER 8936USB SMSC75XX ETHERNET DRIVER
8912M: Steve Glendinning <steve.glendinning@shawell.net> 8937M: Steve Glendinning <steve.glendinning@shawell.net>
8913L: netdev@vger.kernel.org 8938L: netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index 126321d2e6ad..67077ad6edbb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index d63f3de0cd5b..0c14d8a52683 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -17,7 +17,7 @@
17#include <asm/pgalloc.h> 17#include <asm/pgalloc.h>
18#include <asm/mmu.h> 18#include <asm/mmu.h>
19 19
20static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address) 20static int handle_vmalloc_fault(unsigned long address)
21{ 21{
22 /* 22 /*
23 * Synchronize this task's top level page-table 23 * Synchronize this task's top level page-table
@@ -27,7 +27,7 @@ static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
27 pud_t *pud, *pud_k; 27 pud_t *pud, *pud_k;
28 pmd_t *pmd, *pmd_k; 28 pmd_t *pmd, *pmd_k;
29 29
30 pgd = pgd_offset_fast(mm, address); 30 pgd = pgd_offset_fast(current->active_mm, address);
31 pgd_k = pgd_offset_k(address); 31 pgd_k = pgd_offset_k(address);
32 32
33 if (!pgd_present(*pgd_k)) 33 if (!pgd_present(*pgd_k))
@@ -72,7 +72,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address)
72 * nothing more. 72 * nothing more.
73 */ 73 */
74 if (address >= VMALLOC_START && address <= VMALLOC_END) { 74 if (address >= VMALLOC_START && address <= VMALLOC_END) {
75 ret = handle_vmalloc_fault(mm, address); 75 ret = handle_vmalloc_fault(address);
76 if (unlikely(ret)) 76 if (unlikely(ret))
77 goto bad_area_nosemaphore; 77 goto bad_area_nosemaphore;
78 else 78 else
diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts
index ff1aea0ee043..72693a69f830 100644
--- a/arch/arm/boot/dts/integratorcp.dts
+++ b/arch/arm/boot/dts/integratorcp.dts
@@ -9,11 +9,6 @@
9 model = "ARM Integrator/CP"; 9 model = "ARM Integrator/CP";
10 compatible = "arm,integrator-cp"; 10 compatible = "arm,integrator-cp";
11 11
12 aliases {
13 arm,timer-primary = &timer2;
14 arm,timer-secondary = &timer1;
15 };
16
17 chosen { 12 chosen {
18 bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk"; 13 bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk";
19 }; 14 };
@@ -24,14 +19,18 @@
24 }; 19 };
25 20
26 timer0: timer@13000000 { 21 timer0: timer@13000000 {
22 /* TIMER0 runs @ 25MHz */
27 compatible = "arm,integrator-cp-timer"; 23 compatible = "arm,integrator-cp-timer";
24 status = "disabled";
28 }; 25 };
29 26
30 timer1: timer@13000100 { 27 timer1: timer@13000100 {
28 /* TIMER1 runs @ 1MHz */
31 compatible = "arm,integrator-cp-timer"; 29 compatible = "arm,integrator-cp-timer";
32 }; 30 };
33 31
34 timer2: timer@13000200 { 32 timer2: timer@13000200 {
33 /* TIMER2 runs @ 1MHz */
35 compatible = "arm,integrator-cp-timer"; 34 compatible = "arm,integrator-cp-timer";
36 }; 35 };
37 36
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 45f1ffcf1a4b..24cdf64789c3 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -971,11 +971,11 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
971[C(LL)] = { 971[C(LL)] = {
972 [C(OP_READ)] = { 972 [C(OP_READ)] = {
973 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, 973 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
974 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, 974 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
975 }, 975 },
976 [C(OP_WRITE)] = { 976 [C(OP_WRITE)] = {
977 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, 977 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
978 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, 978 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
979 }, 979 },
980}, 980},
981[C(ITLB)] = { 981[C(ITLB)] = {
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index c69da3734699..5b28e81d94a0 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -473,7 +473,7 @@ static void __init fill_ipi_map(void)
473{ 473{
474 int cpu; 474 int cpu;
475 475
476 for (cpu = 0; cpu < NR_CPUS; cpu++) { 476 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
477 fill_ipi_map1(gic_resched_int_base, cpu, GIC_CPU_INT1); 477 fill_ipi_map1(gic_resched_int_base, cpu, GIC_CPU_INT1);
478 fill_ipi_map1(gic_call_int_base, cpu, GIC_CPU_INT2); 478 fill_ipi_map1(gic_call_int_base, cpu, GIC_CPU_INT2);
479 } 479 }
@@ -574,8 +574,9 @@ void __init arch_init_irq(void)
574 /* FIXME */ 574 /* FIXME */
575 int i; 575 int i;
576#if defined(CONFIG_MIPS_MT_SMP) 576#if defined(CONFIG_MIPS_MT_SMP)
577 gic_call_int_base = GIC_NUM_INTRS - NR_CPUS; 577 gic_call_int_base = GIC_NUM_INTRS -
578 gic_resched_int_base = gic_call_int_base - NR_CPUS; 578 (NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids;
579 gic_resched_int_base = gic_call_int_base - nr_cpu_ids;
579 fill_ipi_map(); 580 fill_ipi_map();
580#endif 581#endif
581 gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map, 582 gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map,
@@ -599,7 +600,7 @@ void __init arch_init_irq(void)
599 printk("CPU%d: status register now %08x\n", smp_processor_id(), read_c0_status()); 600 printk("CPU%d: status register now %08x\n", smp_processor_id(), read_c0_status());
600 write_c0_status(0x1100dc00); 601 write_c0_status(0x1100dc00);
601 printk("CPU%d: status register frc %08x\n", smp_processor_id(), read_c0_status()); 602 printk("CPU%d: status register frc %08x\n", smp_processor_id(), read_c0_status());
602 for (i = 0; i < NR_CPUS; i++) { 603 for (i = 0; i < nr_cpu_ids; i++) {
603 arch_init_ipiirq(MIPS_GIC_IRQ_BASE + 604 arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
604 GIC_RESCHED_INT(i), &irq_resched); 605 GIC_RESCHED_INT(i), &irq_resched);
605 arch_init_ipiirq(MIPS_GIC_IRQ_BASE + 606 arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index e49241a2c39a..202785709441 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -126,7 +126,7 @@ static int rt_timer_probe(struct platform_device *pdev)
126 return -ENOENT; 126 return -ENOENT;
127 } 127 }
128 128
129 rt->membase = devm_request_and_ioremap(&pdev->dev, res); 129 rt->membase = devm_ioremap_resource(&pdev->dev, res);
130 if (IS_ERR(rt->membase)) 130 if (IS_ERR(rt->membase))
131 return PTR_ERR(rt->membase); 131 return PTR_ERR(rt->membase);
132 132
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 37aabd772fbb..d2d58258aea6 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -195,6 +195,8 @@ common_stext:
195 ldw MEM_PDC_HI(%r0),%r6 195 ldw MEM_PDC_HI(%r0),%r6
196 depd %r6, 31, 32, %r3 /* move to upper word */ 196 depd %r6, 31, 32, %r3 /* move to upper word */
197 197
198 mfctl %cr30,%r6 /* PCX-W2 firmware bug */
199
198 ldo PDC_PSW(%r0),%arg0 /* 21 */ 200 ldo PDC_PSW(%r0),%arg0 /* 21 */
199 ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */ 201 ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
200 ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */ 202 ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
@@ -203,6 +205,8 @@ common_stext:
203 copy %r0,%arg3 205 copy %r0,%arg3
204 206
205stext_pdc_ret: 207stext_pdc_ret:
208 mtctl %r6,%cr30 /* restore task thread info */
209
206 /* restore rfi target address*/ 210 /* restore rfi target address*/
207 ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10 211 ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
208 tophys_r1 %r10 212 tophys_r1 %r10
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index 829df49dee99..41ebbfebb333 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
40 const char __user *buffer, size_t count, loff_t *pos) 40 const char __user *buffer, size_t count, loff_t *pos)
41{ 41{
42 char *end, buf[sizeof("nnnnn\0")]; 42 char *end, buf[sizeof("nnnnn\0")];
43 size_t size;
43 int tmp; 44 int tmp;
44 45
45 if (copy_from_user(buf, buffer, count)) 46 size = min(count, sizeof(buf));
47 if (copy_from_user(buf, buffer, size))
46 return -EFAULT; 48 return -EFAULT;
47 49
48 tmp = simple_strtol(buf, &end, 0); 50 tmp = simple_strtol(buf, &end, 0);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0da5200ee79d..b3e18f800302 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -128,7 +128,8 @@ do { \
128do { \ 128do { \
129 typedef typeof(var) pao_T__; \ 129 typedef typeof(var) pao_T__; \
130 const int pao_ID__ = (__builtin_constant_p(val) && \ 130 const int pao_ID__ = (__builtin_constant_p(val) && \
131 ((val) == 1 || (val) == -1)) ? (val) : 0; \ 131 ((val) == 1 || (val) == -1)) ? \
132 (int)(val) : 0; \
132 if (0) { \ 133 if (0) { \
133 pao_T__ pao_tmp__; \ 134 pao_T__ pao_tmp__; \
134 pao_tmp__ = (val); \ 135 pao_tmp__ = (val); \
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9d8449158cf9..8a87a3224121 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1276,16 +1276,16 @@ void perf_events_lapic_init(void)
1276static int __kprobes 1276static int __kprobes
1277perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) 1277perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1278{ 1278{
1279 int ret;
1280 u64 start_clock; 1279 u64 start_clock;
1281 u64 finish_clock; 1280 u64 finish_clock;
1281 int ret;
1282 1282
1283 if (!atomic_read(&active_events)) 1283 if (!atomic_read(&active_events))
1284 return NMI_DONE; 1284 return NMI_DONE;
1285 1285
1286 start_clock = local_clock(); 1286 start_clock = sched_clock();
1287 ret = x86_pmu.handle_irq(regs); 1287 ret = x86_pmu.handle_irq(regs);
1288 finish_clock = local_clock(); 1288 finish_clock = sched_clock();
1289 1289
1290 perf_sample_event_took(finish_clock - start_clock); 1290 perf_sample_event_took(finish_clock - start_clock);
1291 1291
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a0e2a8a80c94..b2046e4d0b59 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -609,7 +609,7 @@ static struct dentry *d_kvm_debug;
609 609
610struct dentry *kvm_init_debugfs(void) 610struct dentry *kvm_init_debugfs(void)
611{ 611{
612 d_kvm_debug = debugfs_create_dir("kvm", NULL); 612 d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
613 if (!d_kvm_debug) 613 if (!d_kvm_debug)
614 printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n"); 614 printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
615 615
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index ba77ebc2c353..6fcb49ce50a1 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -113,10 +113,10 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
113 u64 before, delta, whole_msecs; 113 u64 before, delta, whole_msecs;
114 int remainder_ns, decimal_msecs, thishandled; 114 int remainder_ns, decimal_msecs, thishandled;
115 115
116 before = local_clock(); 116 before = sched_clock();
117 thishandled = a->handler(type, regs); 117 thishandled = a->handler(type, regs);
118 handled += thishandled; 118 handled += thishandled;
119 delta = local_clock() - before; 119 delta = sched_clock() - before;
120 trace_nmi_handler(a->handler, (int)delta, thishandled); 120 trace_nmi_handler(a->handler, (int)delta, thishandled);
121 121
122 if (delta < nmi_longest_ns) 122 if (delta < nmi_longest_ns)
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index de1dfa18d0a1..21dbe6bdb8ed 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1122,7 +1122,7 @@ ENDPROC(fast_syscall_spill_registers)
1122 * a3: exctable, original value in excsave1 1122 * a3: exctable, original value in excsave1
1123 */ 1123 */
1124 1124
1125fast_syscall_spill_registers_fixup: 1125ENTRY(fast_syscall_spill_registers_fixup)
1126 1126
1127 rsr a2, windowbase # get current windowbase (a2 is saved) 1127 rsr a2, windowbase # get current windowbase (a2 is saved)
1128 xsr a0, depc # restore depc and a0 1128 xsr a0, depc # restore depc and a0
@@ -1134,22 +1134,26 @@ fast_syscall_spill_registers_fixup:
1134 */ 1134 */
1135 1135
1136 xsr a3, excsave1 # get spill-mask 1136 xsr a3, excsave1 # get spill-mask
1137 slli a2, a3, 1 # shift left by one 1137 slli a3, a3, 1 # shift left by one
1138 1138
1139 slli a3, a2, 32-WSBITS 1139 slli a2, a3, 32-WSBITS
1140 src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy...... 1140 src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
1141 wsr a2, windowstart # set corrected windowstart 1141 wsr a2, windowstart # set corrected windowstart
1142 1142
1143 rsr a3, excsave1 1143 srli a3, a3, 1
1144 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2 1144 rsr a2, excsave1
1145 l32i a3, a3, EXC_TABLE_PARAM # original WB (in user task) 1145 l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
1146 xsr a2, excsave1
1147 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
1148 l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
1149 xsr a2, excsave1
1146 1150
1147 /* Return to the original (user task) WINDOWBASE. 1151 /* Return to the original (user task) WINDOWBASE.
1148 * We leave the following frame behind: 1152 * We leave the following frame behind:
1149 * a0, a1, a2 same 1153 * a0, a1, a2 same
1150 * a3: trashed (saved in excsave_1) 1154 * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
1151 * depc: depc (we have to return to that address) 1155 * depc: depc (we have to return to that address)
1152 * excsave_1: a3 1156 * excsave_1: exctable
1153 */ 1157 */
1154 1158
1155 wsr a3, windowbase 1159 wsr a3, windowbase
@@ -1159,9 +1163,9 @@ fast_syscall_spill_registers_fixup:
1159 * a0: return address 1163 * a0: return address
1160 * a1: used, stack pointer 1164 * a1: used, stack pointer
1161 * a2: kernel stack pointer 1165 * a2: kernel stack pointer
1162 * a3: available, saved in EXCSAVE_1 1166 * a3: available
1163 * depc: exception address 1167 * depc: exception address
1164 * excsave: a3 1168 * excsave: exctable
1165 * Note: This frame might be the same as above. 1169 * Note: This frame might be the same as above.
1166 */ 1170 */
1167 1171
@@ -1181,9 +1185,12 @@ fast_syscall_spill_registers_fixup:
1181 rsr a0, exccause 1185 rsr a0, exccause
1182 addx4 a0, a0, a3 # find entry in table 1186 addx4 a0, a0, a3 # find entry in table
1183 l32i a0, a0, EXC_TABLE_FAST_USER # load handler 1187 l32i a0, a0, EXC_TABLE_FAST_USER # load handler
1188 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
1184 jx a0 1189 jx a0
1185 1190
1186fast_syscall_spill_registers_fixup_return: 1191ENDPROC(fast_syscall_spill_registers_fixup)
1192
1193ENTRY(fast_syscall_spill_registers_fixup_return)
1187 1194
1188 /* When we return here, all registers have been restored (a2: DEPC) */ 1195 /* When we return here, all registers have been restored (a2: DEPC) */
1189 1196
@@ -1191,13 +1198,13 @@ fast_syscall_spill_registers_fixup_return:
1191 1198
1192 /* Restore fixup handler. */ 1199 /* Restore fixup handler. */
1193 1200
1194 xsr a3, excsave1 1201 rsr a2, excsave1
1195 movi a2, fast_syscall_spill_registers_fixup 1202 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
1196 s32i a2, a3, EXC_TABLE_FIXUP 1203 movi a3, fast_syscall_spill_registers_fixup
1197 s32i a0, a3, EXC_TABLE_DOUBLE_SAVE 1204 s32i a3, a2, EXC_TABLE_FIXUP
1198 rsr a2, windowbase 1205 rsr a3, windowbase
1199 s32i a2, a3, EXC_TABLE_PARAM 1206 s32i a3, a2, EXC_TABLE_PARAM
1200 l32i a2, a3, EXC_TABLE_KSTK 1207 l32i a2, a2, EXC_TABLE_KSTK
1201 1208
1202 /* Load WB at the time the exception occurred. */ 1209 /* Load WB at the time the exception occurred. */
1203 1210
@@ -1206,8 +1213,12 @@ fast_syscall_spill_registers_fixup_return:
1206 wsr a3, windowbase 1213 wsr a3, windowbase
1207 rsync 1214 rsync
1208 1215
1216 rsr a3, excsave1
1217 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
1218
1209 rfde 1219 rfde
1210 1220
1221ENDPROC(fast_syscall_spill_registers_fixup_return)
1211 1222
1212/* 1223/*
1213 * spill all registers. 1224 * spill all registers.
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 718eca1850bd..98b67d5f1514 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -341,7 +341,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
341 341
342 sp = regs->areg[1]; 342 sp = regs->areg[1];
343 343
344 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) { 344 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
345 sp = current->sas_ss_sp + current->sas_ss_size; 345 sp = current->sas_ss_sp + current->sas_ss_size;
346 } 346 }
347 347
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 56f88b7afe2f..e9e1aad8c271 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -737,7 +737,8 @@ static int __init iss_net_setup(char *str)
737 return 1; 737 return 1;
738 } 738 }
739 739
740 if ((new = alloc_bootmem(sizeof new)) == NULL) { 740 new = alloc_bootmem(sizeof(*new));
741 if (new == NULL) {
741 printk("Alloc_bootmem failed\n"); 742 printk("Alloc_bootmem failed\n");
742 return 1; 743 return 1;
743 } 744 }
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 51410c2ac2cb..4d978a3c88f7 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -27,6 +27,14 @@
27 */ 27 */
28 28
29#define SRC_CR 0x00U 29#define SRC_CR 0x00U
30#define SRC_CR_T0_ENSEL BIT(15)
31#define SRC_CR_T1_ENSEL BIT(17)
32#define SRC_CR_T2_ENSEL BIT(19)
33#define SRC_CR_T3_ENSEL BIT(21)
34#define SRC_CR_T4_ENSEL BIT(23)
35#define SRC_CR_T5_ENSEL BIT(25)
36#define SRC_CR_T6_ENSEL BIT(27)
37#define SRC_CR_T7_ENSEL BIT(29)
30#define SRC_XTALCR 0x0CU 38#define SRC_XTALCR 0x0CU
31#define SRC_XTALCR_XTALTIMEN BIT(20) 39#define SRC_XTALCR_XTALTIMEN BIT(20)
32#define SRC_XTALCR_SXTALDIS BIT(19) 40#define SRC_XTALCR_SXTALDIS BIT(19)
@@ -543,6 +551,19 @@ void __init nomadik_clk_init(void)
543 __func__, np->name); 551 __func__, np->name);
544 return; 552 return;
545 } 553 }
554
555 /* Set all timers to use the 2.4 MHz TIMCLK */
556 val = readl(src_base + SRC_CR);
557 val |= SRC_CR_T0_ENSEL;
558 val |= SRC_CR_T1_ENSEL;
559 val |= SRC_CR_T2_ENSEL;
560 val |= SRC_CR_T3_ENSEL;
561 val |= SRC_CR_T4_ENSEL;
562 val |= SRC_CR_T5_ENSEL;
563 val |= SRC_CR_T6_ENSEL;
564 val |= SRC_CR_T7_ENSEL;
565 writel(val, src_base + SRC_CR);
566
546 val = readl(src_base + SRC_XTALCR); 567 val = readl(src_base + SRC_XTALCR);
547 pr_info("SXTALO is %s\n", 568 pr_info("SXTALO is %s\n",
548 (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled"); 569 (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index fc777bdc1886..81a202d12a7a 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -39,8 +39,8 @@ static const struct coreclk_ratio a370_coreclk_ratios[] __initconst = {
39}; 39};
40 40
41static const u32 a370_tclk_freqs[] __initconst = { 41static const u32 a370_tclk_freqs[] __initconst = {
42 16600000, 42 166000000,
43 20000000, 43 200000000,
44}; 44};
45 45
46static u32 __init a370_get_tclk_freq(void __iomem *sar) 46static u32 __init a370_get_tclk_freq(void __iomem *sar)
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 5bb848cac6ec..81dd31a686df 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -49,7 +49,7 @@
49#define SOCFPGA_L4_SP_CLK "l4_sp_clk" 49#define SOCFPGA_L4_SP_CLK "l4_sp_clk"
50#define SOCFPGA_NAND_CLK "nand_clk" 50#define SOCFPGA_NAND_CLK "nand_clk"
51#define SOCFPGA_NAND_X_CLK "nand_x_clk" 51#define SOCFPGA_NAND_X_CLK "nand_x_clk"
52#define SOCFPGA_MMC_CLK "mmc_clk" 52#define SOCFPGA_MMC_CLK "sdmmc_clk"
53#define SOCFPGA_DB_CLK "gpio_db_clk" 53#define SOCFPGA_DB_CLK "gpio_db_clk"
54 54
55#define div_mask(width) ((1 << (width)) - 1) 55#define div_mask(width) ((1 << (width)) - 1)
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 67ccf4aa7277..f5e4c21b301f 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -107,7 +107,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
107 107
108 vco = icst_hz_to_vco(icst->params, rate); 108 vco = icst_hz_to_vco(icst->params, rate);
109 icst->rate = icst_hz(icst->params, vco); 109 icst->rate = icst_hz(icst->params, vco);
110 vco_set(icst->vcoreg, icst->lockreg, vco); 110 vco_set(icst->lockreg, icst->vcoreg, vco);
111 return 0; 111 return 0;
112} 112}
113 113
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d2c3253e015e..506fd23c7550 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -986,12 +986,12 @@ static int __init acpi_cpufreq_init(void)
986{ 986{
987 int ret; 987 int ret;
988 988
989 if (acpi_disabled)
990 return -ENODEV;
991
989 /* don't keep reloading if cpufreq_driver exists */ 992 /* don't keep reloading if cpufreq_driver exists */
990 if (cpufreq_get_current_driver()) 993 if (cpufreq_get_current_driver())
991 return 0; 994 return -EEXIST;
992
993 if (acpi_disabled)
994 return 0;
995 995
996 pr_debug("acpi_cpufreq_init\n"); 996 pr_debug("acpi_cpufreq_init\n");
997 997
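
The reordered init path now checks acpi_disabled first and gives each early exit a distinct, non-zero error (-ENODEV when ACPI is off, -EEXIST when another cpufreq driver is already registered) instead of silently returning 0. In miniature, with hypothetical predicate functions standing in for the real checks:

    #include <stdio.h>

    #define ENODEV 19
    #define EEXIST 17

    /* Hypothetical stand-ins for acpi_disabled and cpufreq_get_current_driver(). */
    static int acpi_is_disabled(void)       { return 0; }
    static int driver_already_loaded(void)  { return 1; }

    static int init_sketch(void)
    {
            if (acpi_is_disabled())
                    return -ENODEV;         /* nothing to drive */
            if (driver_already_loaded())
                    return -EEXIST;         /* someone registered first */
            return 0;
    }

    int main(void)
    {
            printf("init = %d\n", init_sketch());   /* prints init = -17 */
            return 0;
    }
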
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index badf6206b2b2..eb3fdc755000 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
48} 48}
49 49
50struct sample { 50struct sample {
51 int core_pct_busy; 51 int32_t core_pct_busy;
52 u64 aperf; 52 u64 aperf;
53 u64 mperf; 53 u64 mperf;
54 int freq; 54 int freq;
@@ -68,7 +68,7 @@ struct _pid {
68 int32_t i_gain; 68 int32_t i_gain;
69 int32_t d_gain; 69 int32_t d_gain;
70 int deadband; 70 int deadband;
71 int last_err; 71 int32_t last_err;
72}; 72};
73 73
74struct cpudata { 74struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
153 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); 153 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
154} 154}
155 155
156static signed int pid_calc(struct _pid *pid, int busy) 156static signed int pid_calc(struct _pid *pid, int32_t busy)
157{ 157{
158 signed int err, result; 158 signed int result;
159 int32_t pterm, dterm, fp_error; 159 int32_t pterm, dterm, fp_error;
160 int32_t integral_limit; 160 int32_t integral_limit;
161 161
162 err = pid->setpoint - busy; 162 fp_error = int_tofp(pid->setpoint) - busy;
163 fp_error = int_tofp(err);
164 163
165 if (abs(err) <= pid->deadband) 164 if (abs(fp_error) <= int_tofp(pid->deadband))
166 return 0; 165 return 0;
167 166
168 pterm = mul_fp(pid->p_gain, fp_error); 167 pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
176 if (pid->integral < -integral_limit) 175 if (pid->integral < -integral_limit)
177 pid->integral = -integral_limit; 176 pid->integral = -integral_limit;
178 177
179 dterm = mul_fp(pid->d_gain, (err - pid->last_err)); 178 dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
180 pid->last_err = err; 179 pid->last_err = fp_error;
181 180
182 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; 181 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
183 182
@@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
367static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) 366static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
368{ 367{
369 int max_perf = cpu->pstate.turbo_pstate; 368 int max_perf = cpu->pstate.turbo_pstate;
369 int max_perf_adj;
370 int min_perf; 370 int min_perf;
371 if (limits.no_turbo) 371 if (limits.no_turbo)
372 max_perf = cpu->pstate.max_pstate; 372 max_perf = cpu->pstate.max_pstate;
373 373
374 max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); 374 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
375 *max = clamp_t(int, max_perf, 375 *max = clamp_t(int, max_perf_adj,
376 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); 376 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
377 377
378 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf)); 378 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
@@ -436,8 +436,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
436 struct sample *sample) 436 struct sample *sample)
437{ 437{
438 u64 core_pct; 438 u64 core_pct;
439 core_pct = div64_u64(sample->aperf * 100, sample->mperf); 439 core_pct = div64_u64(int_tofp(sample->aperf * 100),
440 sample->freq = cpu->pstate.max_pstate * core_pct * 1000; 440 sample->mperf);
441 sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
441 442
442 sample->core_pct_busy = core_pct; 443 sample->core_pct_busy = core_pct;
443} 444}
@@ -469,22 +470,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
469 mod_timer_pinned(&cpu->timer, jiffies + delay); 470 mod_timer_pinned(&cpu->timer, jiffies + delay);
470} 471}
471 472
472static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) 473static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
473{ 474{
474 int32_t busy_scaled;
475 int32_t core_busy, max_pstate, current_pstate; 475 int32_t core_busy, max_pstate, current_pstate;
476 476
477 core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); 477 core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
478 max_pstate = int_tofp(cpu->pstate.max_pstate); 478 max_pstate = int_tofp(cpu->pstate.max_pstate);
479 current_pstate = int_tofp(cpu->pstate.current_pstate); 479 current_pstate = int_tofp(cpu->pstate.current_pstate);
480 busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 480 return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
481
482 return fp_toint(busy_scaled);
483} 481}
484 482
485static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) 483static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
486{ 484{
487 int busy_scaled; 485 int32_t busy_scaled;
488 struct _pid *pid; 486 struct _pid *pid;
489 signed int ctl = 0; 487 signed int ctl = 0;
490 int steps; 488 int steps;
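
The intel_pstate hunks keep core_pct_busy, the PID error and the scaled-busy value in fixed point end to end (int_tofp()/fp_toint()/mul_fp()/div_fp()) instead of truncating to int between steps. A self-contained sketch of that arithmetic; the 8-bit fraction width and the sample numbers are assumptions chosen for illustration, not taken from the driver:

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 8                             /* assumed fraction width */
    #define int_tofp(x) ((int32_t)((x) << FRAC_BITS))
    #define fp_toint(x) ((x) >> FRAC_BITS)

    static int32_t mul_fp(int32_t x, int32_t y)
    {
            /* 64-bit intermediate keeps the shifted product from overflowing. */
            return (int32_t)(((int64_t)x * y) >> FRAC_BITS);
    }

    static int32_t div_fp(int32_t x, int32_t y)
    {
            return (int32_t)(((int64_t)x << FRAC_BITS) / y);
    }

    int main(void)
    {
            int setpoint = 97;                                      /* percent */
            int32_t busy = int_tofp(94) + (1 << (FRAC_BITS - 1));   /* 94.5% */

            /* As in the patched pid_calc(): the error never leaves fixed point,
             * so the fractional half percent is not rounded away. */
            int32_t fp_error = int_tofp(setpoint) - busy;

            /* As in intel_pstate_get_scaled_busy(): busy * max_pstate/current_pstate,
             * here with made-up pstates 26 and 20. */
            int32_t scaled = mul_fp(busy, div_fp(int_tofp(26), int_tofp(20)));

            printf("error  = %.2f%%\n", (double)fp_error / (1 << FRAC_BITS));
            printf("scaled = %.2f%%\n", (double)scaled / (1 << FRAC_BITS));
            return 0;
    }
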
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3519111c566b..10b577fcf48d 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -305,6 +305,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
305 edma_alloc_slot(EDMA_CTLR(echan->ch_num), 305 edma_alloc_slot(EDMA_CTLR(echan->ch_num),
306 EDMA_SLOT_ANY); 306 EDMA_SLOT_ANY);
307 if (echan->slot[i] < 0) { 307 if (echan->slot[i] < 0) {
308 kfree(edesc);
308 dev_err(dev, "Failed to allocate slot\n"); 309 dev_err(dev, "Failed to allocate slot\n");
309 kfree(edesc); 310 kfree(edesc);
310 return NULL; 311 return NULL;
@@ -346,6 +347,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
346 ccnt = sg_dma_len(sg) / (acnt * bcnt); 347 ccnt = sg_dma_len(sg) / (acnt * bcnt);
347 if (ccnt > (SZ_64K - 1)) { 348 if (ccnt > (SZ_64K - 1)) {
348 dev_err(dev, "Exceeded max SG segment size\n"); 349 dev_err(dev, "Exceeded max SG segment size\n");
350 kfree(edesc);
349 return NULL; 351 return NULL;
350 } 352 }
351 cidx = acnt * bcnt; 353 cidx = acnt * bcnt;
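
Both edma hunks make sure the partially built edesc is released (kfree(edesc)) before the early return NULL, so a failed slot allocation or an oversized segment no longer leaks the descriptor. The same rule in a small userspace sketch, with all names hypothetical:

    #include <stdlib.h>

    struct desc_sketch {
            int nslots;
            int *slots;
    };

    static struct desc_sketch *build_desc(int nslots)
    {
            struct desc_sketch *d = calloc(1, sizeof(*d));

            if (!d)
                    return NULL;

            d->slots = calloc((size_t)nslots, sizeof(*d->slots));
            if (!d->slots) {
                    free(d);                /* the counterpart of kfree(edesc) */
                    return NULL;
            }

            d->nslots = nslots;
            return d;
    }

    int main(void)
    {
            struct desc_sketch *d = build_desc(8);

            if (d) {
                    free(d->slots);
                    free(d);
            }
            return 0;
    }
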
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 05ad9ba0a67e..fe58d0833a11 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -61,7 +61,7 @@ static int drm_version(struct drm_device *dev, void *data,
61 61
62/** Ioctl table */ 62/** Ioctl table */
63static const struct drm_ioctl_desc drm_ioctls[] = { 63static const struct drm_ioctl_desc drm_ioctls[] = {
64 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED), 64 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
65 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), 65 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
66 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), 66 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
67 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), 67 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ea9022ef15d5..10d1de5bce6f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -83,8 +83,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
83 return true; 83 return true;
84} 84}
85 85
86static void intel_crt_get_config(struct intel_encoder *encoder, 86static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
87 struct intel_crtc_config *pipe_config)
88{ 87{
89 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 88 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
90 struct intel_crt *crt = intel_encoder_to_crt(encoder); 89 struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -102,7 +101,25 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
102 else 101 else
103 flags |= DRM_MODE_FLAG_NVSYNC; 102 flags |= DRM_MODE_FLAG_NVSYNC;
104 103
105 pipe_config->adjusted_mode.flags |= flags; 104 return flags;
105}
106
107static void intel_crt_get_config(struct intel_encoder *encoder,
108 struct intel_crtc_config *pipe_config)
109{
110 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
111}
112
113static void hsw_crt_get_config(struct intel_encoder *encoder,
114 struct intel_crtc_config *pipe_config)
115{
116 intel_ddi_get_config(encoder, pipe_config);
117
118 pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
119 DRM_MODE_FLAG_NHSYNC |
120 DRM_MODE_FLAG_PVSYNC |
121 DRM_MODE_FLAG_NVSYNC);
122 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
106} 123}
107 124
108/* Note: The caller is required to filter out dpms modes not supported by the 125/* Note: The caller is required to filter out dpms modes not supported by the
@@ -799,7 +816,10 @@ void intel_crt_init(struct drm_device *dev)
799 crt->base.mode_set = intel_crt_mode_set; 816 crt->base.mode_set = intel_crt_mode_set;
800 crt->base.disable = intel_disable_crt; 817 crt->base.disable = intel_disable_crt;
801 crt->base.enable = intel_enable_crt; 818 crt->base.enable = intel_enable_crt;
802 crt->base.get_config = intel_crt_get_config; 819 if (IS_HASWELL(dev))
820 crt->base.get_config = hsw_crt_get_config;
821 else
822 crt->base.get_config = intel_crt_get_config;
803 if (I915_HAS_HOTPLUG(dev)) 823 if (I915_HAS_HOTPLUG(dev))
804 crt->base.hpd_pin = HPD_CRT; 824 crt->base.hpd_pin = HPD_CRT;
805 if (HAS_DDI(dev)) 825 if (HAS_DDI(dev))
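
The intel_crt change factors the hsync/vsync decode into intel_crt_get_flags() and, on Haswell only, installs hsw_crt_get_config(), which lets intel_ddi_get_config() fill the state first and then reapplies the shared flag decode. A loose sketch of that shared-helper-plus-per-platform-callback shape; every name below is made up:

    #include <stdio.h>

    struct cfg_sketch {
            unsigned int flags;
    };

    /* Shared decode used by both callbacks. */
    static unsigned int decode_sync_flags(void)
    {
            return 0x1;                     /* say, positive hsync */
    }

    static void base_get_config(struct cfg_sketch *c)
    {
            c->flags |= decode_sync_flags();
    }

    static void hsw_get_config(struct cfg_sketch *c)
    {
            c->flags = 0x8;                  /* pretend platform code ran first */
            c->flags &= ~0x7u;               /* clear the sync bits it set ...  */
            c->flags |= decode_sync_flags(); /* ... and reapply the shared decode */
    }

    int main(void)
    {
            int is_haswell = 1;
            void (*get_config)(struct cfg_sketch *) =
                    is_haswell ? hsw_get_config : base_get_config;
            struct cfg_sketch cfg = { 0 };

            get_config(&cfg);
            printf("flags = %#x\n", cfg.flags);
            return 0;
    }
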
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 63de2701b974..b53fff84a7d5 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1249,8 +1249,8 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
1249 intel_dp_check_link_status(intel_dp); 1249 intel_dp_check_link_status(intel_dp);
1250} 1250}
1251 1251
1252static void intel_ddi_get_config(struct intel_encoder *encoder, 1252void intel_ddi_get_config(struct intel_encoder *encoder,
1253 struct intel_crtc_config *pipe_config) 1253 struct intel_crtc_config *pipe_config)
1254{ 1254{
1255 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1255 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1256 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1256 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1268,6 +1268,23 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
1268 flags |= DRM_MODE_FLAG_NVSYNC; 1268 flags |= DRM_MODE_FLAG_NVSYNC;
1269 1269
1270 pipe_config->adjusted_mode.flags |= flags; 1270 pipe_config->adjusted_mode.flags |= flags;
1271
1272 switch (temp & TRANS_DDI_BPC_MASK) {
1273 case TRANS_DDI_BPC_6:
1274 pipe_config->pipe_bpp = 18;
1275 break;
1276 case TRANS_DDI_BPC_8:
1277 pipe_config->pipe_bpp = 24;
1278 break;
1279 case TRANS_DDI_BPC_10:
1280 pipe_config->pipe_bpp = 30;
1281 break;
1282 case TRANS_DDI_BPC_12:
1283 pipe_config->pipe_bpp = 36;
1284 break;
1285 default:
1286 break;
1287 }
1271} 1288}
1272 1289
1273static void intel_ddi_destroy(struct drm_encoder *encoder) 1290static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 581fb4b2f766..d78d33f9337d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2327,9 +2327,10 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
2327 FDI_FE_ERRC_ENABLE); 2327 FDI_FE_ERRC_ENABLE);
2328} 2328}
2329 2329
2330static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc) 2330static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2331{ 2331{
2332 return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder; 2332 return crtc->base.enabled && crtc->active &&
2333 crtc->config.has_pch_encoder;
2333} 2334}
2334 2335
2335static void ivb_modeset_global_resources(struct drm_device *dev) 2336static void ivb_modeset_global_resources(struct drm_device *dev)
@@ -2979,6 +2980,48 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
2979 I915_READ(VSYNCSHIFT(cpu_transcoder))); 2980 I915_READ(VSYNCSHIFT(cpu_transcoder)));
2980} 2981}
2981 2982
2983static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
2984{
2985 struct drm_i915_private *dev_priv = dev->dev_private;
2986 uint32_t temp;
2987
2988 temp = I915_READ(SOUTH_CHICKEN1);
2989 if (temp & FDI_BC_BIFURCATION_SELECT)
2990 return;
2991
2992 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2993 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2994
2995 temp |= FDI_BC_BIFURCATION_SELECT;
2996 DRM_DEBUG_KMS("enabling fdi C rx\n");
2997 I915_WRITE(SOUTH_CHICKEN1, temp);
2998 POSTING_READ(SOUTH_CHICKEN1);
2999}
3000
3001static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3002{
3003 struct drm_device *dev = intel_crtc->base.dev;
3004 struct drm_i915_private *dev_priv = dev->dev_private;
3005
3006 switch (intel_crtc->pipe) {
3007 case PIPE_A:
3008 break;
3009 case PIPE_B:
3010 if (intel_crtc->config.fdi_lanes > 2)
3011 WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3012 else
3013 cpt_enable_fdi_bc_bifurcation(dev);
3014
3015 break;
3016 case PIPE_C:
3017 cpt_enable_fdi_bc_bifurcation(dev);
3018
3019 break;
3020 default:
3021 BUG();
3022 }
3023}
3024
2982/* 3025/*
2983 * Enable PCH resources required for PCH ports: 3026 * Enable PCH resources required for PCH ports:
2984 * - PCH PLLs 3027 * - PCH PLLs
@@ -2997,6 +3040,9 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2997 3040
2998 assert_pch_transcoder_disabled(dev_priv, pipe); 3041 assert_pch_transcoder_disabled(dev_priv, pipe);
2999 3042
3043 if (IS_IVYBRIDGE(dev))
3044 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3045
3000 /* Write the TU size bits before fdi link training, so that error 3046 /* Write the TU size bits before fdi link training, so that error
3001 * detection works. */ 3047 * detection works. */
3002 I915_WRITE(FDI_RX_TUSIZE1(pipe), 3048 I915_WRITE(FDI_RX_TUSIZE1(pipe),
@@ -4983,6 +5029,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4983 if (!(tmp & PIPECONF_ENABLE)) 5029 if (!(tmp & PIPECONF_ENABLE))
4984 return false; 5030 return false;
4985 5031
5032 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5033 switch (tmp & PIPECONF_BPC_MASK) {
5034 case PIPECONF_6BPC:
5035 pipe_config->pipe_bpp = 18;
5036 break;
5037 case PIPECONF_8BPC:
5038 pipe_config->pipe_bpp = 24;
5039 break;
5040 case PIPECONF_10BPC:
5041 pipe_config->pipe_bpp = 30;
5042 break;
5043 default:
5044 break;
5045 }
5046 }
5047
4986 intel_get_pipe_timings(crtc, pipe_config); 5048 intel_get_pipe_timings(crtc, pipe_config);
4987 5049
4988 i9xx_get_pfit_config(crtc, pipe_config); 5050 i9xx_get_pfit_config(crtc, pipe_config);
@@ -5576,48 +5638,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
5576 return true; 5638 return true;
5577} 5639}
5578 5640
5579static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
5580{
5581 struct drm_i915_private *dev_priv = dev->dev_private;
5582 uint32_t temp;
5583
5584 temp = I915_READ(SOUTH_CHICKEN1);
5585 if (temp & FDI_BC_BIFURCATION_SELECT)
5586 return;
5587
5588 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5589 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5590
5591 temp |= FDI_BC_BIFURCATION_SELECT;
5592 DRM_DEBUG_KMS("enabling fdi C rx\n");
5593 I915_WRITE(SOUTH_CHICKEN1, temp);
5594 POSTING_READ(SOUTH_CHICKEN1);
5595}
5596
5597static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
5598{
5599 struct drm_device *dev = intel_crtc->base.dev;
5600 struct drm_i915_private *dev_priv = dev->dev_private;
5601
5602 switch (intel_crtc->pipe) {
5603 case PIPE_A:
5604 break;
5605 case PIPE_B:
5606 if (intel_crtc->config.fdi_lanes > 2)
5607 WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
5608 else
5609 cpt_enable_fdi_bc_bifurcation(dev);
5610
5611 break;
5612 case PIPE_C:
5613 cpt_enable_fdi_bc_bifurcation(dev);
5614
5615 break;
5616 default:
5617 BUG();
5618 }
5619}
5620
5621int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 5641int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
5622{ 5642{
5623 /* 5643 /*
@@ -5811,9 +5831,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5811 &intel_crtc->config.fdi_m_n); 5831 &intel_crtc->config.fdi_m_n);
5812 } 5832 }
5813 5833
5814 if (IS_IVYBRIDGE(dev))
5815 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
5816
5817 ironlake_set_pipeconf(crtc); 5834 ironlake_set_pipeconf(crtc);
5818 5835
5819 /* Set up the display plane register */ 5836 /* Set up the display plane register */
@@ -5881,6 +5898,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5881 if (!(tmp & PIPECONF_ENABLE)) 5898 if (!(tmp & PIPECONF_ENABLE))
5882 return false; 5899 return false;
5883 5900
5901 switch (tmp & PIPECONF_BPC_MASK) {
5902 case PIPECONF_6BPC:
5903 pipe_config->pipe_bpp = 18;
5904 break;
5905 case PIPECONF_8BPC:
5906 pipe_config->pipe_bpp = 24;
5907 break;
5908 case PIPECONF_10BPC:
5909 pipe_config->pipe_bpp = 30;
5910 break;
5911 case PIPECONF_12BPC:
5912 pipe_config->pipe_bpp = 36;
5913 break;
5914 default:
5915 break;
5916 }
5917
5884 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 5918 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
5885 struct intel_shared_dpll *pll; 5919 struct intel_shared_dpll *pll;
5886 5920
@@ -8612,6 +8646,9 @@ intel_pipe_config_compare(struct drm_device *dev,
8612 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 8646 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8613 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 8647 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8614 8648
8649 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
8650 PIPE_CONF_CHECK_I(pipe_bpp);
8651
8615#undef PIPE_CONF_CHECK_X 8652#undef PIPE_CONF_CHECK_X
8616#undef PIPE_CONF_CHECK_I 8653#undef PIPE_CONF_CHECK_I
8617#undef PIPE_CONF_CHECK_FLAGS 8654#undef PIPE_CONF_CHECK_FLAGS
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2c555f91bfae..1a431377d83b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1401,6 +1401,26 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1401 else 1401 else
1402 pipe_config->port_clock = 270000; 1402 pipe_config->port_clock = 270000;
1403 } 1403 }
1404
1405 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
1406 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
1407 /*
1408 * This is a big fat ugly hack.
1409 *
1410 * Some machines in UEFI boot mode provide us a VBT that has 18
1411 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
1412 * unknown we fail to light up. Yet the same BIOS boots up with
1413 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
1414 * max, not what it tells us to use.
1415 *
1416 * Note: This will still be broken if the eDP panel is not lit
1417 * up by the BIOS, and thus we can't get the mode at module
1418 * load.
1419 */
1420 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
1421 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
1422 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
1423 }
1404} 1424}
1405 1425
1406static bool is_edp_psr(struct intel_dp *intel_dp) 1426static bool is_edp_psr(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9b7b68fd5d47..7f2b384ac939 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -765,6 +765,8 @@ extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
765extern bool 765extern bool
766intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); 766intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
767extern void intel_ddi_fdi_disable(struct drm_crtc *crtc); 767extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
768extern void intel_ddi_get_config(struct intel_encoder *encoder,
769 struct intel_crtc_config *pipe_config);
768 770
769extern void intel_display_handle_reset(struct drm_device *dev); 771extern void intel_display_handle_reset(struct drm_device *dev);
770extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 772extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 831a5c021c4b..b8af94a5be39 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -700,6 +700,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
700 }, 700 },
701 { 701 {
702 .callback = intel_no_lvds_dmi_callback, 702 .callback = intel_no_lvds_dmi_callback,
703 .ident = "Intel D410PT",
704 .matches = {
705 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
706 DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
707 },
708 },
709 {
710 .callback = intel_no_lvds_dmi_callback,
711 .ident = "Intel D425KT",
712 .matches = {
713 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
714 DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
715 },
716 },
717 {
718 .callback = intel_no_lvds_dmi_callback,
703 .ident = "Intel D510MO", 719 .ident = "Intel D510MO",
704 .matches = { 720 .matches = {
705 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), 721 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index fe1de855775e..57fcc4b16a52 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -291,6 +291,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
291 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */ 291 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
292 292
293 WREG32(HDMI_ACR_PACKET_CONTROL + offset, 293 WREG32(HDMI_ACR_PACKET_CONTROL + offset,
294 HDMI_ACR_SOURCE | /* select SW CTS value */
294 HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ 295 HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
295 296
296 evergreen_hdmi_update_ACR(encoder, mode->clock); 297 evergreen_hdmi_update_ACR(encoder, mode->clock);
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 71399065db04..b41905573cd2 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2635,7 +2635,7 @@ int kv_dpm_init(struct radeon_device *rdev)
2635 pi->caps_sclk_ds = true; 2635 pi->caps_sclk_ds = true;
2636 pi->enable_auto_thermal_throttling = true; 2636 pi->enable_auto_thermal_throttling = true;
2637 pi->disable_nb_ps3_in_battery = false; 2637 pi->disable_nb_ps3_in_battery = false;
2638 pi->bapm_enable = true; 2638 pi->bapm_enable = false;
2639 pi->voltage_drop_t = 0; 2639 pi->voltage_drop_t = 0;
2640 pi->caps_sclk_throttle_low_notification = false; 2640 pi->caps_sclk_throttle_low_notification = false;
2641 pi->caps_fps = false; /* true? */ 2641 pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a400ac1c4147..24f4960f59ee 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1272,8 +1272,8 @@ struct radeon_blacklist_clocks
1272struct radeon_clock_and_voltage_limits { 1272struct radeon_clock_and_voltage_limits {
1273 u32 sclk; 1273 u32 sclk;
1274 u32 mclk; 1274 u32 mclk;
1275 u32 vddc; 1275 u16 vddc;
1276 u32 vddci; 1276 u16 vddci;
1277}; 1277};
1278 1278
1279struct radeon_clock_array { 1279struct radeon_clock_array {
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 3591855cc5b5..6df23502059a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -594,7 +594,7 @@ isert_connect_release(struct isert_conn *isert_conn)
594 594
595 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 595 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
596 596
597 if (device->use_frwr) 597 if (device && device->use_frwr)
598 isert_conn_free_frwr_pool(isert_conn); 598 isert_conn_free_frwr_pool(isert_conn);
599 599
600 if (isert_conn->conn_qp) { 600 if (isert_conn->conn_qp) {
diff --git a/drivers/input/input.c b/drivers/input/input.c
index c04469928925..e75d015024a1 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1734,6 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
1734 */ 1734 */
1735struct input_dev *input_allocate_device(void) 1735struct input_dev *input_allocate_device(void)
1736{ 1736{
1737 static atomic_t input_no = ATOMIC_INIT(0);
1737 struct input_dev *dev; 1738 struct input_dev *dev;
1738 1739
1739 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL); 1740 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1743,9 +1744,13 @@ struct input_dev *input_allocate_device(void)
1743 device_initialize(&dev->dev); 1744 device_initialize(&dev->dev);
1744 mutex_init(&dev->mutex); 1745 mutex_init(&dev->mutex);
1745 spin_lock_init(&dev->event_lock); 1746 spin_lock_init(&dev->event_lock);
1747 init_timer(&dev->timer);
1746 INIT_LIST_HEAD(&dev->h_list); 1748 INIT_LIST_HEAD(&dev->h_list);
1747 INIT_LIST_HEAD(&dev->node); 1749 INIT_LIST_HEAD(&dev->node);
1748 1750
1751 dev_set_name(&dev->dev, "input%ld",
1752 (unsigned long) atomic_inc_return(&input_no) - 1);
1753
1749 __module_get(THIS_MODULE); 1754 __module_get(THIS_MODULE);
1750 } 1755 }
1751 1756
@@ -2019,7 +2024,6 @@ static void devm_input_device_unregister(struct device *dev, void *res)
2019 */ 2024 */
2020int input_register_device(struct input_dev *dev) 2025int input_register_device(struct input_dev *dev)
2021{ 2026{
2022 static atomic_t input_no = ATOMIC_INIT(0);
2023 struct input_devres *devres = NULL; 2027 struct input_devres *devres = NULL;
2024 struct input_handler *handler; 2028 struct input_handler *handler;
2025 unsigned int packet_size; 2029 unsigned int packet_size;
@@ -2059,7 +2063,6 @@ int input_register_device(struct input_dev *dev)
2059 * If delay and period are pre-set by the driver, then autorepeating 2063 * If delay and period are pre-set by the driver, then autorepeating
2060 * is handled by the driver itself and we don't do it in input.c. 2064 * is handled by the driver itself and we don't do it in input.c.
2061 */ 2065 */
2062 init_timer(&dev->timer);
2063 if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) { 2066 if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) {
2064 dev->timer.data = (long) dev; 2067 dev->timer.data = (long) dev;
2065 dev->timer.function = input_repeat_key; 2068 dev->timer.function = input_repeat_key;
@@ -2073,9 +2076,6 @@ int input_register_device(struct input_dev *dev)
2073 if (!dev->setkeycode) 2076 if (!dev->setkeycode)
2074 dev->setkeycode = input_default_setkeycode; 2077 dev->setkeycode = input_default_setkeycode;
2075 2078
2076 dev_set_name(&dev->dev, "input%ld",
2077 (unsigned long) atomic_inc_return(&input_no) - 1);
2078
2079 error = device_add(&dev->dev); 2079 error = device_add(&dev->dev);
2080 if (error) 2080 if (error)
2081 goto err_free_vals; 2081 goto err_free_vals;
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 134c3b404a54..a2e758d27584 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -786,10 +786,17 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
786 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); 786 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
787 input_set_capability(input_dev, EV_MSC, MSC_SCAN); 787 input_set_capability(input_dev, EV_MSC, MSC_SCAN);
788 788
789 if (pdata) 789 if (pdata) {
790 error = pxa27x_keypad_build_keycode(keypad); 790 error = pxa27x_keypad_build_keycode(keypad);
791 else 791 } else {
792 error = pxa27x_keypad_build_keycode_from_dt(keypad); 792 error = pxa27x_keypad_build_keycode_from_dt(keypad);
793 /*
794 * Data that we get from DT resides in dynamically
795 * allocated memory so we need to update our pdata
796 * pointer.
797 */
798 pdata = keypad->pdata;
799 }
793 if (error) { 800 if (error) {
794 dev_err(&pdev->dev, "failed to build keycode\n"); 801 dev_err(&pdev->dev, "failed to build keycode\n");
795 goto failed_put_clk; 802 goto failed_put_clk;
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 082684e7f390..9365535ba7f1 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -351,7 +351,9 @@ static void cm109_urb_irq_callback(struct urb *urb)
351 if (status) { 351 if (status) {
352 if (status == -ESHUTDOWN) 352 if (status == -ESHUTDOWN)
353 return; 353 return;
354 dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status); 354 dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
355 __func__, status);
356 goto out;
355 } 357 }
356 358
357 /* Special keys */ 359 /* Special keys */
@@ -418,8 +420,12 @@ static void cm109_urb_ctl_callback(struct urb *urb)
418 dev->ctl_data->byte[2], 420 dev->ctl_data->byte[2],
419 dev->ctl_data->byte[3]); 421 dev->ctl_data->byte[3]);
420 422
421 if (status) 423 if (status) {
422 dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status); 424 if (status == -ESHUTDOWN)
425 return;
426 dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
427 __func__, status);
428 }
423 429
424 spin_lock(&dev->ctl_submit_lock); 430 spin_lock(&dev->ctl_submit_lock);
425 431
@@ -427,7 +433,7 @@ static void cm109_urb_ctl_callback(struct urb *urb)
427 433
428 if (likely(!dev->shutdown)) { 434 if (likely(!dev->shutdown)) {
429 435
430 if (dev->buzzer_pending) { 436 if (dev->buzzer_pending || status) {
431 dev->buzzer_pending = 0; 437 dev->buzzer_pending = 0;
432 dev->ctl_urb_pending = 1; 438 dev->ctl_urb_pending = 1;
433 cm109_submit_buzz_toggle(dev); 439 cm109_submit_buzz_toggle(dev);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 7c5d72a6a26a..83658472ad25 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -103,6 +103,7 @@ static const struct alps_model_info alps_model_data[] = {
103 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ 103 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
104 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, 104 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
105 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, 105 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
106 { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT }, /* Dell XT2 */
106 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ 107 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
107 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, 108 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
108 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ 109 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 78e4de42efaa..52c9ebf94729 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -223,21 +223,26 @@ static int i8042_flush(void)
223{ 223{
224 unsigned long flags; 224 unsigned long flags;
225 unsigned char data, str; 225 unsigned char data, str;
226 int i = 0; 226 int count = 0;
227 int retval = 0;
227 228
228 spin_lock_irqsave(&i8042_lock, flags); 229 spin_lock_irqsave(&i8042_lock, flags);
229 230
230 while (((str = i8042_read_status()) & I8042_STR_OBF) && (i < I8042_BUFFER_SIZE)) { 231 while ((str = i8042_read_status()) & I8042_STR_OBF) {
231 udelay(50); 232 if (count++ < I8042_BUFFER_SIZE) {
232 data = i8042_read_data(); 233 udelay(50);
233 i++; 234 data = i8042_read_data();
234 dbg("%02x <- i8042 (flush, %s)\n", 235 dbg("%02x <- i8042 (flush, %s)\n",
235 data, str & I8042_STR_AUXDATA ? "aux" : "kbd"); 236 data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
237 } else {
238 retval = -EIO;
239 break;
240 }
236 } 241 }
237 242
238 spin_unlock_irqrestore(&i8042_lock, flags); 243 spin_unlock_irqrestore(&i8042_lock, flags);
239 244
240 return i; 245 return retval;
241} 246}
242 247
243/* 248/*
@@ -849,7 +854,7 @@ static int __init i8042_check_aux(void)
849 854
850static int i8042_controller_check(void) 855static int i8042_controller_check(void)
851{ 856{
852 if (i8042_flush() == I8042_BUFFER_SIZE) { 857 if (i8042_flush()) {
853 pr_err("No controller found\n"); 858 pr_err("No controller found\n");
854 return -ENODEV; 859 return -ENODEV;
855 } 860 }
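
i8042_flush() is now bounded by an explicit count and returns 0 or -EIO, so i8042_controller_check() only tests for failure instead of comparing the flushed-byte count against I8042_BUFFER_SIZE. A userspace sketch of that bounded-drain pattern; the fake status/data reads below stand in for the real port accessors:

    #include <stdio.h>

    #define BUFFER_SIZE 16
    #define STR_OBF     0x01
    #define EIO         5

    /* Fake controller: pretend four bytes are waiting in the output buffer. */
    static int pending = 4;

    static unsigned char read_status(void) { return pending > 0 ? STR_OBF : 0; }
    static unsigned char read_data(void)   { pending--; return 0x55; }

    static int flush(void)
    {
            int count = 0;

            while (read_status() & STR_OBF) {
                    if (count++ < BUFFER_SIZE)
                            (void)read_data();      /* drain one byte */
                    else
                            return -EIO;            /* buffer never empties */
            }
            return 0;
    }

    int main(void)
    {
            printf("flush() = %d\n", flush());      /* prints flush() = 0 */
            return 0;
    }
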
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 79b69ea47f74..e53416a4d7f3 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -1031,6 +1031,7 @@ static void wacom_destroy_leds(struct wacom *wacom)
1031} 1031}
1032 1032
1033static enum power_supply_property wacom_battery_props[] = { 1033static enum power_supply_property wacom_battery_props[] = {
1034 POWER_SUPPLY_PROP_SCOPE,
1034 POWER_SUPPLY_PROP_CAPACITY 1035 POWER_SUPPLY_PROP_CAPACITY
1035}; 1036};
1036 1037
@@ -1042,6 +1043,9 @@ static int wacom_battery_get_property(struct power_supply *psy,
1042 int ret = 0; 1043 int ret = 0;
1043 1044
1044 switch (psp) { 1045 switch (psp) {
1046 case POWER_SUPPLY_PROP_SCOPE:
1047 val->intval = POWER_SUPPLY_SCOPE_DEVICE;
1048 break;
1045 case POWER_SUPPLY_PROP_CAPACITY: 1049 case POWER_SUPPLY_PROP_CAPACITY:
1046 val->intval = 1050 val->intval =
1047 wacom->wacom_wac.battery_capacity * 100 / 31; 1051 wacom->wacom_wac.battery_capacity * 100 / 31;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index b2aa503c16b1..c59b797eeafa 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -2054,6 +2054,12 @@ static const struct wacom_features wacom_features_0x101 =
2054static const struct wacom_features wacom_features_0x10D = 2054static const struct wacom_features wacom_features_0x10D =
2055 { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 2055 { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
2056 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2056 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2057static const struct wacom_features wacom_features_0x10E =
2058 { "Wacom ISDv4 10E", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
2059 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2060static const struct wacom_features wacom_features_0x10F =
2061 { "Wacom ISDv4 10F", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
2062 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2057static const struct wacom_features wacom_features_0x4001 = 2063static const struct wacom_features wacom_features_0x4001 =
2058 { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 2064 { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
2059 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2065 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2248,6 +2254,8 @@ const struct usb_device_id wacom_ids[] = {
2248 { USB_DEVICE_WACOM(0x100) }, 2254 { USB_DEVICE_WACOM(0x100) },
2249 { USB_DEVICE_WACOM(0x101) }, 2255 { USB_DEVICE_WACOM(0x101) },
2250 { USB_DEVICE_WACOM(0x10D) }, 2256 { USB_DEVICE_WACOM(0x10D) },
2257 { USB_DEVICE_WACOM(0x10E) },
2258 { USB_DEVICE_WACOM(0x10F) },
2251 { USB_DEVICE_WACOM(0x300) }, 2259 { USB_DEVICE_WACOM(0x300) },
2252 { USB_DEVICE_WACOM(0x301) }, 2260 { USB_DEVICE_WACOM(0x301) },
2253 { USB_DEVICE_WACOM(0x304) }, 2261 { USB_DEVICE_WACOM(0x304) },
diff --git a/drivers/md/md.c b/drivers/md/md.c
index adf4d7e1d5e1..561a65f82e26 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8111,6 +8111,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8111 u64 *p; 8111 u64 *p;
8112 int lo, hi; 8112 int lo, hi;
8113 int rv = 1; 8113 int rv = 1;
8114 unsigned long flags;
8114 8115
8115 if (bb->shift < 0) 8116 if (bb->shift < 0)
8116 /* badblocks are disabled */ 8117 /* badblocks are disabled */
@@ -8125,7 +8126,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8125 sectors = next - s; 8126 sectors = next - s;
8126 } 8127 }
8127 8128
8128 write_seqlock_irq(&bb->lock); 8129 write_seqlock_irqsave(&bb->lock, flags);
8129 8130
8130 p = bb->page; 8131 p = bb->page;
8131 lo = 0; 8132 lo = 0;
@@ -8241,7 +8242,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8241 bb->changed = 1; 8242 bb->changed = 1;
8242 if (!acknowledged) 8243 if (!acknowledged)
8243 bb->unacked_exist = 1; 8244 bb->unacked_exist = 1;
8244 write_sequnlock_irq(&bb->lock); 8245 write_sequnlock_irqrestore(&bb->lock, flags);
8245 8246
8246 return rv; 8247 return rv;
8247} 8248}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d60412c7f995..aacf6bf352d8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1479,6 +1479,7 @@ static int raid1_spare_active(struct mddev *mddev)
1479 } 1479 }
1480 } 1480 }
1481 if (rdev 1481 if (rdev
1482 && rdev->recovery_offset == MaxSector
1482 && !test_bit(Faulty, &rdev->flags) 1483 && !test_bit(Faulty, &rdev->flags)
1483 && !test_and_set_bit(In_sync, &rdev->flags)) { 1484 && !test_and_set_bit(In_sync, &rdev->flags)) {
1484 count++; 1485 count++;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index df7b0a06b0ea..73dc8a377522 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1782,6 +1782,7 @@ static int raid10_spare_active(struct mddev *mddev)
1782 } 1782 }
1783 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 1783 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1784 } else if (tmp->rdev 1784 } else if (tmp->rdev
1785 && tmp->rdev->recovery_offset == MaxSector
1785 && !test_bit(Faulty, &tmp->rdev->flags) 1786 && !test_bit(Faulty, &tmp->rdev->flags)
1786 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 1787 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1787 count++; 1788 count++;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7ff4f252ca1a..f8b906843926 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -778,6 +778,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
778 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 778 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
779 bi->bi_io_vec[0].bv_offset = 0; 779 bi->bi_io_vec[0].bv_offset = 0;
780 bi->bi_size = STRIPE_SIZE; 780 bi->bi_size = STRIPE_SIZE;
781 /*
782 * If this is discard request, set bi_vcnt 0. We don't
783 * want to confuse SCSI because SCSI will replace payload
784 */
785 if (rw & REQ_DISCARD)
786 bi->bi_vcnt = 0;
781 if (rrdev) 787 if (rrdev)
782 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 788 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
783 789
@@ -816,6 +822,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
816 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 822 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
817 rbi->bi_io_vec[0].bv_offset = 0; 823 rbi->bi_io_vec[0].bv_offset = 0;
818 rbi->bi_size = STRIPE_SIZE; 824 rbi->bi_size = STRIPE_SIZE;
825 /*
826 * If this is discard request, set bi_vcnt 0. We don't
827 * want to confuse SCSI because SCSI will replace payload
828 */
829 if (rw & REQ_DISCARD)
830 rbi->bi_vcnt = 0;
819 if (conf->mddev->gendisk) 831 if (conf->mddev->gendisk)
820 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), 832 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
821 rbi, disk_devt(conf->mddev->gendisk), 833 rbi, disk_devt(conf->mddev->gendisk),
@@ -2910,6 +2922,14 @@ static void handle_stripe_clean_event(struct r5conf *conf,
2910 } 2922 }
2911 /* now that discard is done we can proceed with any sync */ 2923 /* now that discard is done we can proceed with any sync */
2912 clear_bit(STRIPE_DISCARD, &sh->state); 2924 clear_bit(STRIPE_DISCARD, &sh->state);
2925 /*
2926 * SCSI discard will change some bio fields and the stripe has
2927 * no updated data, so remove it from hash list and the stripe
2928 * will be reinitialized
2929 */
2930 spin_lock_irq(&conf->device_lock);
2931 remove_hash(sh);
2932 spin_unlock_irq(&conf->device_lock);
2913 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 2933 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
2914 set_bit(STRIPE_HANDLE, &sh->state); 2934 set_bit(STRIPE_HANDLE, &sh->state);
2915 2935
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 59ab0692f0b9..a9830ff8e3f3 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -349,7 +349,7 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
349 349
350int common_nfc_set_geometry(struct gpmi_nand_data *this) 350int common_nfc_set_geometry(struct gpmi_nand_data *this)
351{ 351{
352 return set_geometry_by_ecc_info(this) ? 0 : legacy_set_geometry(this); 352 return legacy_set_geometry(this);
353} 353}
354 354
355struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) 355struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index dd03dfdfb0d6..c28d4e29af1a 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,7 +1320,12 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1320 for (cs = 0; cs < pdata->num_cs; cs++) { 1320 for (cs = 0; cs < pdata->num_cs; cs++) {
1321 struct mtd_info *mtd = info->host[cs]->mtd; 1321 struct mtd_info *mtd = info->host[cs]->mtd;
1322 1322
1323 mtd->name = pdev->name; 1323 /*
1324 * The mtd name matches the one used in 'mtdparts' kernel
1325 * parameter. This name cannot be changed or otherwise
1326 * user's mtd partitions configuration would get broken.
1327 */
1328 mtd->name = "pxa3xx_nand-0";
1324 info->cs = cs; 1329 info->cs = cs;
1325 ret = pxa3xx_nand_scan(mtd); 1330 ret = pxa3xx_nand_scan(mtd);
1326 if (ret) { 1331 if (ret) {
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a668cd491cb3..e3fc07cf2f62 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -814,9 +814,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
814 msg_ctrl_save = priv->read_reg(priv, 814 msg_ctrl_save = priv->read_reg(priv,
815 C_CAN_IFACE(MSGCTRL_REG, 0)); 815 C_CAN_IFACE(MSGCTRL_REG, 0));
816 816
817 if (msg_ctrl_save & IF_MCONT_EOB)
818 return num_rx_pkts;
819
820 if (msg_ctrl_save & IF_MCONT_MSGLST) { 817 if (msg_ctrl_save & IF_MCONT_MSGLST) {
821 c_can_handle_lost_msg_obj(dev, 0, msg_obj); 818 c_can_handle_lost_msg_obj(dev, 0, msg_obj);
822 num_rx_pkts++; 819 num_rx_pkts++;
@@ -824,6 +821,9 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
824 continue; 821 continue;
825 } 822 }
826 823
824 if (msg_ctrl_save & IF_MCONT_EOB)
825 return num_rx_pkts;
826
827 if (!(msg_ctrl_save & IF_MCONT_NEWDAT)) 827 if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
828 continue; 828 continue;
829 829
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 3b9546588240..4b2d5ed62b11 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1544,9 +1544,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1544 return 0; 1544 return 0;
1545} 1545}
1546 1546
1547static void kvaser_usb_get_endpoints(const struct usb_interface *intf, 1547static int kvaser_usb_get_endpoints(const struct usb_interface *intf,
1548 struct usb_endpoint_descriptor **in, 1548 struct usb_endpoint_descriptor **in,
1549 struct usb_endpoint_descriptor **out) 1549 struct usb_endpoint_descriptor **out)
1550{ 1550{
1551 const struct usb_host_interface *iface_desc; 1551 const struct usb_host_interface *iface_desc;
1552 struct usb_endpoint_descriptor *endpoint; 1552 struct usb_endpoint_descriptor *endpoint;
@@ -1557,12 +1557,18 @@ static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
1557 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { 1557 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
1558 endpoint = &iface_desc->endpoint[i].desc; 1558 endpoint = &iface_desc->endpoint[i].desc;
1559 1559
1560 if (usb_endpoint_is_bulk_in(endpoint)) 1560 if (!*in && usb_endpoint_is_bulk_in(endpoint))
1561 *in = endpoint; 1561 *in = endpoint;
1562 1562
1563 if (usb_endpoint_is_bulk_out(endpoint)) 1563 if (!*out && usb_endpoint_is_bulk_out(endpoint))
1564 *out = endpoint; 1564 *out = endpoint;
1565
1566 /* use first bulk endpoint for in and out */
1567 if (*in && *out)
1568 return 0;
1565 } 1569 }
1570
1571 return -ENODEV;
1566} 1572}
1567 1573
1568static int kvaser_usb_probe(struct usb_interface *intf, 1574static int kvaser_usb_probe(struct usb_interface *intf,
@@ -1576,8 +1582,8 @@ static int kvaser_usb_probe(struct usb_interface *intf,
1576 if (!dev) 1582 if (!dev)
1577 return -ENOMEM; 1583 return -ENOMEM;
1578 1584
1579 kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out); 1585 err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
1580 if (!dev->bulk_in || !dev->bulk_out) { 1586 if (err) {
1581 dev_err(&intf->dev, "Cannot get usb endpoint(s)"); 1587 dev_err(&intf->dev, "Cannot get usb endpoint(s)");
1582 return err; 1588 return err;
1583 } 1589 }
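
kvaser_usb_get_endpoints() now reports -ENODEV itself when either bulk endpoint is missing and deliberately stops at the first bulk-in/bulk-out pair it finds. The same search-and-fail-early shape over a plain array, with hypothetical types:

    #include <stddef.h>
    #include <stdio.h>

    #define ENODEV 19

    struct ep_sketch {
            int is_bulk_in;
            int is_bulk_out;
    };

    static int get_endpoints(const struct ep_sketch *ep, size_t n,
                             const struct ep_sketch **in,
                             const struct ep_sketch **out)
    {
            size_t i;

            *in = NULL;
            *out = NULL;

            for (i = 0; i < n; i++) {
                    if (!*in && ep[i].is_bulk_in)
                            *in = &ep[i];
                    if (!*out && ep[i].is_bulk_out)
                            *out = &ep[i];
                    /* use the first bulk endpoint for in and out */
                    if (*in && *out)
                            return 0;
            }
            return -ENODEV;
    }

    int main(void)
    {
            const struct ep_sketch eps[] = { { 1, 0 }, { 0, 1 }, { 1, 0 } };
            const struct ep_sketch *in, *out;

            printf("%d\n", get_endpoints(eps, 3, &in, &out));   /* prints 0 */
            return 0;
    }
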
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index a98778e3af84..e2aa09ce6af7 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -252,25 +252,33 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
252 struct bgmac_slot_info *slot) 252 struct bgmac_slot_info *slot)
253{ 253{
254 struct device *dma_dev = bgmac->core->dma_dev; 254 struct device *dma_dev = bgmac->core->dma_dev;
255 struct sk_buff *skb;
256 dma_addr_t dma_addr;
255 struct bgmac_rx_header *rx; 257 struct bgmac_rx_header *rx;
256 258
257 /* Alloc skb */ 259 /* Alloc skb */
258 slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE); 260 skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
259 if (!slot->skb) 261 if (!skb)
260 return -ENOMEM; 262 return -ENOMEM;
261 263
262 /* Poison - if everything goes fine, hardware will overwrite it */ 264 /* Poison - if everything goes fine, hardware will overwrite it */
263 rx = (struct bgmac_rx_header *)slot->skb->data; 265 rx = (struct bgmac_rx_header *)skb->data;
264 rx->len = cpu_to_le16(0xdead); 266 rx->len = cpu_to_le16(0xdead);
265 rx->flags = cpu_to_le16(0xbeef); 267 rx->flags = cpu_to_le16(0xbeef);
266 268
267 /* Map skb for the DMA */ 269 /* Map skb for the DMA */
268 slot->dma_addr = dma_map_single(dma_dev, slot->skb->data, 270 dma_addr = dma_map_single(dma_dev, skb->data,
269 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); 271 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
270 if (dma_mapping_error(dma_dev, slot->dma_addr)) { 272 if (dma_mapping_error(dma_dev, dma_addr)) {
271 bgmac_err(bgmac, "DMA mapping error\n"); 273 bgmac_err(bgmac, "DMA mapping error\n");
274 dev_kfree_skb(skb);
272 return -ENOMEM; 275 return -ENOMEM;
273 } 276 }
277
278 /* Update the slot */
279 slot->skb = skb;
280 slot->dma_addr = dma_addr;
281
274 if (slot->dma_addr & 0xC0000000) 282 if (slot->dma_addr & 0xC0000000)
275 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); 283 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
276 284
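
The bgmac refill path builds the skb and DMA address in locals, frees the skb when dma_mapping_error() fires, and writes slot->skb/slot->dma_addr only after every failure point, so an error can no longer leave a half-initialised slot behind. A hedged userspace sketch of that commit-only-on-success rule (malloc() stands in for netdev_alloc_skb(), the null check for dma_mapping_error()):

    #include <stdlib.h>

    struct slot_sketch {
            void *buf;
            unsigned long dma_addr;
    };

    /* Returns 0 on success, -1 on failure; the slot is untouched on failure. */
    static int refill_slot(struct slot_sketch *slot)
    {
            void *buf = malloc(2048);
            unsigned long dma_addr;

            if (!buf)
                    return -1;

            dma_addr = (unsigned long)buf;  /* stand-in for dma_map_single() */
            if (!dma_addr) {                /* stand-in for dma_mapping_error() */
                    free(buf);              /* don't leak the buffer */
                    return -1;
            }

            /* Commit to the slot only now, after every failure point. */
            slot->buf = buf;
            slot->dma_addr = dma_addr;
            return 0;
    }

    int main(void)
    {
            struct slot_sketch s = { 0, 0 };

            if (refill_slot(&s) == 0)
                    free(s.buf);
            return 0;
    }
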
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 6e46cff5236d..dcafbda3e5be 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2545,10 +2545,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2545 } 2545 }
2546 } 2546 }
2547 2547
2548 /* Allocated memory for FW statistics */
2549 if (bnx2x_alloc_fw_stats_mem(bp))
2550 LOAD_ERROR_EXIT(bp, load_error0);
2551
2552 /* need to be done after alloc mem, since it's self adjusting to amount 2548 /* need to be done after alloc mem, since it's self adjusting to amount
2553 * of memory available for RSS queues 2549 * of memory available for RSS queues
2554 */ 2550 */
@@ -2558,6 +2554,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2558 LOAD_ERROR_EXIT(bp, load_error0); 2554 LOAD_ERROR_EXIT(bp, load_error0);
2559 } 2555 }
2560 2556
2557 /* Allocated memory for FW statistics */
2558 if (bnx2x_alloc_fw_stats_mem(bp))
2559 LOAD_ERROR_EXIT(bp, load_error0);
2560
2561 /* request pf to initialize status blocks */ 2561 /* request pf to initialize status blocks */
2562 if (IS_VF(bp)) { 2562 if (IS_VF(bp)) {
2563 rc = bnx2x_vfpf_init(bp); 2563 rc = bnx2x_vfpf_init(bp);
@@ -2812,8 +2812,8 @@ load_error1:
2812 if (IS_PF(bp)) 2812 if (IS_PF(bp))
2813 bnx2x_clear_pf_load(bp); 2813 bnx2x_clear_pf_load(bp);
2814load_error0: 2814load_error0:
2815 bnx2x_free_fp_mem(bp);
2816 bnx2x_free_fw_stats_mem(bp); 2815 bnx2x_free_fw_stats_mem(bp);
2816 bnx2x_free_fp_mem(bp);
2817 bnx2x_free_mem(bp); 2817 bnx2x_free_mem(bp);
2818 2818
2819 return rc; 2819 return rc;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 71fffad94aff..0216d592d0ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2018,6 +2018,8 @@ failed:
2018 2018
2019void bnx2x_iov_remove_one(struct bnx2x *bp) 2019void bnx2x_iov_remove_one(struct bnx2x *bp)
2020{ 2020{
2021 int vf_idx;
2022
2021 /* if SRIOV is not enabled there's nothing to do */ 2023 /* if SRIOV is not enabled there's nothing to do */
2022 if (!IS_SRIOV(bp)) 2024 if (!IS_SRIOV(bp))
2023 return; 2025 return;
@@ -2026,6 +2028,18 @@ void bnx2x_iov_remove_one(struct bnx2x *bp)
2026 pci_disable_sriov(bp->pdev); 2028 pci_disable_sriov(bp->pdev);
2027 DP(BNX2X_MSG_IOV, "sriov disabled\n"); 2029 DP(BNX2X_MSG_IOV, "sriov disabled\n");
2028 2030
2031 /* disable access to all VFs */
2032 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
2033 bnx2x_pretend_func(bp,
2034 HW_VF_HANDLE(bp,
2035 bp->vfdb->sriov.first_vf_in_pf +
2036 vf_idx));
2037 DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
2038 bp->vfdb->sriov.first_vf_in_pf + vf_idx);
2039 bnx2x_vf_enable_internal(bp, 0);
2040 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2041 }
2042
2029 /* free vf database */ 2043 /* free vf database */
2030 __bnx2x_iov_free_vfdb(bp); 2044 __bnx2x_iov_free_vfdb(bp);
2031} 2045}
@@ -3197,7 +3211,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3197 * the "acquire" messages to appear on the VF PF channel. 3211 * the "acquire" messages to appear on the VF PF channel.
3198 */ 3212 */
3199 DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); 3213 DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
3200 pci_disable_sriov(bp->pdev); 3214 bnx2x_disable_sriov(bp);
3201 rc = pci_enable_sriov(bp->pdev, req_vfs); 3215 rc = pci_enable_sriov(bp->pdev, req_vfs);
3202 if (rc) { 3216 if (rc) {
3203 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 3217 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 9c89dc8fe105..632b318eb38a 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1599,7 +1599,8 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1599 flits = skb_transport_offset(skb) / 8; 1599 flits = skb_transport_offset(skb) / 8;
1600 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; 1600 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1601 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), 1601 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1602 skb->tail - skb->transport_header, 1602 skb_tail_pointer(skb) -
1603 skb_transport_header(skb),
1603 adap->pdev); 1604 adap->pdev);
1604 if (need_skb_unmap()) { 1605 if (need_skb_unmap()) {
1605 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); 1606 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 2c88ac295eea..f4825db5d179 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -840,6 +840,16 @@ int be_load_fw(struct be_adapter *adapter, u8 *func);
840bool be_is_wol_supported(struct be_adapter *adapter); 840bool be_is_wol_supported(struct be_adapter *adapter);
841bool be_pause_supported(struct be_adapter *adapter); 841bool be_pause_supported(struct be_adapter *adapter);
842u32 be_get_fw_log_level(struct be_adapter *adapter); 842u32 be_get_fw_log_level(struct be_adapter *adapter);
843
844static inline int fw_major_num(const char *fw_ver)
845{
846 int fw_major = 0;
847
848 sscanf(fw_ver, "%d.", &fw_major);
849
850 return fw_major;
851}
852
843int be_update_queues(struct be_adapter *adapter); 853int be_update_queues(struct be_adapter *adapter);
844int be_poll(struct napi_struct *napi, int budget); 854int be_poll(struct napi_struct *napi, int budget);
845 855
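
The fw_major_num() helper added to be.h pulls the leading integer out of the firmware version string with sscanf(); be_main.c uses it to warn when a BE2 card runs firmware older than major version 4. The helper is small enough to exercise directly in plain C (the version strings below are invented):

    #include <stdio.h>

    static int fw_major_num(const char *fw_ver)
    {
            int fw_major = 0;

            sscanf(fw_ver, "%d.", &fw_major);
            return fw_major;
    }

    int main(void)
    {
            printf("%d\n", fw_major_num("3.102.148.0"));    /* prints 3 */
            printf("%d\n", fw_major_num("4.9.297.0"));      /* prints 4 */
            return 0;
    }
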
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 8080a1a5cee7..741d3bff5ae7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3379,6 +3379,12 @@ static int be_setup(struct be_adapter *adapter)
3379 3379
3380 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash); 3380 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3381 3381
3382 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3383 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3384 adapter->fw_ver);
3385 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3386 }
3387
3382 if (adapter->vlans_added) 3388 if (adapter->vlans_added)
3383 be_vid_config(adapter); 3389 be_vid_config(adapter);
3384 3390
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index dac564c25440..e7847510eda2 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -263,7 +263,9 @@ static inline void mal_schedule_poll(struct mal_instance *mal)
263{ 263{
264 if (likely(napi_schedule_prep(&mal->napi))) { 264 if (likely(napi_schedule_prep(&mal->napi))) {
265 MAL_DBG2(mal, "schedule_poll" NL); 265 MAL_DBG2(mal, "schedule_poll" NL);
266 spin_lock(&mal->lock);
266 mal_disable_eob_irq(mal); 267 mal_disable_eob_irq(mal);
268 spin_unlock(&mal->lock);
267 __napi_schedule(&mal->napi); 269 __napi_schedule(&mal->napi);
268 } else 270 } else
269 MAL_DBG2(mal, "already in poll" NL); 271 MAL_DBG2(mal, "already in poll" NL);
@@ -442,15 +444,13 @@ static int mal_poll(struct napi_struct *napi, int budget)
442 if (unlikely(mc->ops->peek_rx(mc->dev) || 444 if (unlikely(mc->ops->peek_rx(mc->dev) ||
443 test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) { 445 test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
444 MAL_DBG2(mal, "rotting packet" NL); 446 MAL_DBG2(mal, "rotting packet" NL);
445 if (napi_reschedule(napi)) 447 if (!napi_reschedule(napi))
446 mal_disable_eob_irq(mal);
447 else
448 MAL_DBG2(mal, "already in poll list" NL);
449
450 if (budget > 0)
451 goto again;
452 else
453 goto more_work; 448 goto more_work;
449
450 spin_lock_irqsave(&mal->lock, flags);
451 mal_disable_eob_irq(mal);
452 spin_unlock_irqrestore(&mal->lock, flags);
453 goto again;
454 } 454 }
455 mc->ops->poll_tx(mc->dev); 455 mc->ops->poll_tx(mc->dev);
456 } 456 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 735765c21c95..ae8eb4c4fb6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1691,7 +1691,7 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
1691 vp_oper->vlan_idx = NO_INDX; 1691 vp_oper->vlan_idx = NO_INDX;
1692 } 1692 }
1693 if (NO_INDX != vp_oper->mac_idx) { 1693 if (NO_INDX != vp_oper->mac_idx) {
1694 __mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx); 1694 __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
1695 vp_oper->mac_idx = NO_INDX; 1695 vp_oper->mac_idx = NO_INDX;
1696 } 1696 }
1697 } 1697 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index a126bdf27952..a44b9395b37a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2276,9 +2276,9 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
2276 temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17; 2276 temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17;
2277 npar_info->max_linkspeed_reg_offset = temp; 2277 npar_info->max_linkspeed_reg_offset = temp;
2278 } 2278 }
2279 if (npar_info->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) 2279
2280 memcpy(ahw->extra_capability, &cmd.rsp.arg[16], 2280 memcpy(ahw->extra_capability, &cmd.rsp.arg[16],
2281 sizeof(ahw->extra_capability)); 2281 sizeof(ahw->extra_capability));
2282 2282
2283out: 2283out:
2284 qlcnic_free_mbx_args(&cmd); 2284 qlcnic_free_mbx_args(&cmd);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 73e72eb83bdf..6f7f60c09f07 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -785,8 +785,6 @@ void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
785 785
786#define QLCNIC_ENABLE_IPV4_LRO 1 786#define QLCNIC_ENABLE_IPV4_LRO 1
787#define QLCNIC_ENABLE_IPV6_LRO 2 787#define QLCNIC_ENABLE_IPV6_LRO 2
788#define QLCNIC_NO_DEST_IPV4_CHECK (1 << 8)
789#define QLCNIC_NO_DEST_IPV6_CHECK (2 << 8)
790 788
791int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable) 789int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
792{ 790{
@@ -806,11 +804,10 @@ int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
806 804
807 word = 0; 805 word = 0;
808 if (enable) { 806 if (enable) {
809 word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK; 807 word = QLCNIC_ENABLE_IPV4_LRO;
810 if (adapter->ahw->extra_capability[0] & 808 if (adapter->ahw->extra_capability[0] &
811 QLCNIC_FW_CAP2_HW_LRO_IPV6) 809 QLCNIC_FW_CAP2_HW_LRO_IPV6)
812 word |= QLCNIC_ENABLE_IPV6_LRO | 810 word |= QLCNIC_ENABLE_IPV6_LRO;
813 QLCNIC_NO_DEST_IPV6_CHECK;
814 } 811 }
815 812
816 req.words[0] = cpu_to_le64(word); 813 req.words[0] = cpu_to_le64(word);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index dcf4a4e7ce23..b97e4a0079d1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1133,7 +1133,10 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
1133 if (err == -EIO) 1133 if (err == -EIO)
1134 return err; 1134 return err;
1135 adapter->ahw->extra_capability[0] = temp; 1135 adapter->ahw->extra_capability[0] = temp;
1136 } else {
1137 adapter->ahw->extra_capability[0] = 0;
1136 } 1138 }
1139
1137 adapter->ahw->max_mac_filters = nic_info.max_mac_filters; 1140 adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
1138 adapter->ahw->max_mtu = nic_info.max_mtu; 1141 adapter->ahw->max_mtu = nic_info.max_mtu;
1139 1142
@@ -2161,8 +2164,7 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
2161 else if (qlcnic_83xx_check(adapter)) 2164 else if (qlcnic_83xx_check(adapter))
2162 fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER; 2165 fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER;
2163 2166
2164 if ((ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) && 2167 if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER)
2165 (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER))
2166 qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); 2168 qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
2167} 2169}
2168 2170
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index a8ef4c4b94be..ba2f5e710af1 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -312,6 +312,7 @@ static ssize_t store_enabled(struct netconsole_target *nt,
312 const char *buf, 312 const char *buf,
313 size_t count) 313 size_t count)
314{ 314{
315 unsigned long flags;
315 int enabled; 316 int enabled;
316 int err; 317 int err;
317 318
@@ -326,9 +327,7 @@ static ssize_t store_enabled(struct netconsole_target *nt,
326 return -EINVAL; 327 return -EINVAL;
327 } 328 }
328 329
329 mutex_lock(&nt->mutex);
330 if (enabled) { /* 1 */ 330 if (enabled) { /* 1 */
331
332 /* 331 /*
333 * Skip netpoll_parse_options() -- all the attributes are 332 * Skip netpoll_parse_options() -- all the attributes are
334 * already configured via configfs. Just print them out. 333 * already configured via configfs. Just print them out.
@@ -336,19 +335,22 @@ static ssize_t store_enabled(struct netconsole_target *nt,
336 netpoll_print_options(&nt->np); 335 netpoll_print_options(&nt->np);
337 336
338 err = netpoll_setup(&nt->np); 337 err = netpoll_setup(&nt->np);
339 if (err) { 338 if (err)
340 mutex_unlock(&nt->mutex);
341 return err; 339 return err;
342 }
343
344 pr_info("network logging started\n");
345 340
341 pr_info("netconsole: network logging started\n");
346 } else { /* 0 */ 342 } else { /* 0 */
343 /* We need to disable the netconsole before cleaning it up
344 * otherwise we might end up in write_msg() with
345 * nt->np.dev == NULL and nt->enabled == 1
346 */
347 spin_lock_irqsave(&target_list_lock, flags);
348 nt->enabled = 0;
349 spin_unlock_irqrestore(&target_list_lock, flags);
347 netpoll_cleanup(&nt->np); 350 netpoll_cleanup(&nt->np);
348 } 351 }
349 352
350 nt->enabled = enabled; 353 nt->enabled = enabled;
351 mutex_unlock(&nt->mutex);
352 354
353 return strnlen(buf, count); 355 return strnlen(buf, count);
354} 356}
@@ -559,8 +561,10 @@ static ssize_t netconsole_target_attr_store(struct config_item *item,
559 struct netconsole_target_attr *na = 561 struct netconsole_target_attr *na =
560 container_of(attr, struct netconsole_target_attr, attr); 562 container_of(attr, struct netconsole_target_attr, attr);
561 563
564 mutex_lock(&nt->mutex);
562 if (na->store) 565 if (na->store)
563 ret = na->store(nt, buf, count); 566 ret = na->store(nt, buf, count);
567 mutex_unlock(&nt->mutex);
564 568
565 return ret; 569 return ret;
566} 570}
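
Aside: the netconsole change clears nt->enabled under target_list_lock before calling netpoll_cleanup(), and moves nt->mutex into the configfs store wrapper, so write_msg() can never observe enabled == 1 with a torn-down netpoll. A rough userspace analogue of that disable-before-cleanup ordering, with a pthread mutex standing in for the kernel spinlock; all names are invented.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t target_lock = PTHREAD_MUTEX_INITIALIZER;
static int enabled;          /* analogous to nt->enabled */
static char *resource;       /* analogous to nt->np.dev  */

static void write_msg(const char *msg)
{
	pthread_mutex_lock(&target_lock);
	if (enabled && resource)            /* only touch a live resource */
		printf("%s -> %s\n", msg, resource);
	pthread_mutex_unlock(&target_lock);
}

static void disable_target(void)
{
	pthread_mutex_lock(&target_lock);
	enabled = 0;                        /* step 1: writers now bail out */
	pthread_mutex_unlock(&target_lock);

	free(resource);                     /* step 2: safe to clean up     */
	resource = NULL;
}

int main(void)
{
	resource = strdup("eth0");
	enabled = 1;

	write_msg("hello");
	disable_target();
	write_msg("after disable");         /* silently dropped */
	return 0;
}
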
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 846cc19c04f2..8e8d0fcd4979 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -78,7 +78,6 @@
78#define AX_MEDIUM_STATUS_MODE 0x22 78#define AX_MEDIUM_STATUS_MODE 0x22
79 #define AX_MEDIUM_GIGAMODE 0x01 79 #define AX_MEDIUM_GIGAMODE 0x01
80 #define AX_MEDIUM_FULL_DUPLEX 0x02 80 #define AX_MEDIUM_FULL_DUPLEX 0x02
81 #define AX_MEDIUM_ALWAYS_ONE 0x04
82 #define AX_MEDIUM_EN_125MHZ 0x08 81 #define AX_MEDIUM_EN_125MHZ 0x08
83 #define AX_MEDIUM_RXFLOW_CTRLEN 0x10 82 #define AX_MEDIUM_RXFLOW_CTRLEN 0x10
84 #define AX_MEDIUM_TXFLOW_CTRLEN 0x20 83 #define AX_MEDIUM_TXFLOW_CTRLEN 0x20
@@ -1065,8 +1064,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
1065 1064
1066 /* Configure default medium type => giga */ 1065 /* Configure default medium type => giga */
1067 *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN | 1066 *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
1068 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE | 1067 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_FULL_DUPLEX |
1069 AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE; 1068 AX_MEDIUM_GIGAMODE;
1070 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE, 1069 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
1071 2, 2, tmp16); 1070 2, 2, tmp16);
1072 1071
@@ -1225,7 +1224,7 @@ static int ax88179_link_reset(struct usbnet *dev)
1225 } 1224 }
1226 1225
1227 mode = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN | 1226 mode = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
1228 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE; 1227 AX_MEDIUM_RXFLOW_CTRLEN;
1229 1228
1230 ax88179_read_cmd(dev, AX_ACCESS_MAC, PHYSICAL_LINK_STATUS, 1229 ax88179_read_cmd(dev, AX_ACCESS_MAC, PHYSICAL_LINK_STATUS,
1231 1, 1, &link_sts); 1230 1, 1, &link_sts);
@@ -1339,8 +1338,8 @@ static int ax88179_reset(struct usbnet *dev)
1339 1338
1340 /* Configure default medium type => giga */ 1339 /* Configure default medium type => giga */
1341 *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN | 1340 *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
1342 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE | 1341 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_FULL_DUPLEX |
1343 AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE; 1342 AX_MEDIUM_GIGAMODE;
1344 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE, 1343 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
1345 2, 2, tmp16); 1344 2, 2, tmp16);
1346 1345
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 113ee93dbb2e..656a02e28e26 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1160,11 +1160,6 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
1160{ 1160{
1161 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); 1161 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
1162 1162
1163 mutex_lock(&vi->config_lock);
1164
1165 if (!vi->config_enable)
1166 goto done;
1167
1168 switch(action & ~CPU_TASKS_FROZEN) { 1163 switch(action & ~CPU_TASKS_FROZEN) {
1169 case CPU_ONLINE: 1164 case CPU_ONLINE:
1170 case CPU_DOWN_FAILED: 1165 case CPU_DOWN_FAILED:
@@ -1178,8 +1173,6 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
1178 break; 1173 break;
1179 } 1174 }
1180 1175
1181done:
1182 mutex_unlock(&vi->config_lock);
1183 return NOTIFY_OK; 1176 return NOTIFY_OK;
1184} 1177}
1185 1178
@@ -1747,6 +1740,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
1747 struct virtnet_info *vi = vdev->priv; 1740 struct virtnet_info *vi = vdev->priv;
1748 int i; 1741 int i;
1749 1742
1743 unregister_hotcpu_notifier(&vi->nb);
1744
1750 /* Prevent config work handler from accessing the device */ 1745 /* Prevent config work handler from accessing the device */
1751 mutex_lock(&vi->config_lock); 1746 mutex_lock(&vi->config_lock);
1752 vi->config_enable = false; 1747 vi->config_enable = false;
@@ -1795,6 +1790,10 @@ static int virtnet_restore(struct virtio_device *vdev)
1795 virtnet_set_queues(vi, vi->curr_queue_pairs); 1790 virtnet_set_queues(vi, vi->curr_queue_pairs);
1796 rtnl_unlock(); 1791 rtnl_unlock();
1797 1792
1793 err = register_hotcpu_notifier(&vi->nb);
1794 if (err)
1795 return err;
1796
1798 return 0; 1797 return 0;
1799} 1798}
1800#endif 1799#endif
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 5bbcb5e3ee0c..388ddf60a66d 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -148,10 +148,6 @@ static int enslave( struct net_device *, struct net_device * );
148static int emancipate( struct net_device * ); 148static int emancipate( struct net_device * );
149#endif 149#endif
150 150
151#ifdef __i386__
152#define ASM_CRC 1
153#endif
154
155static const char version[] = 151static const char version[] =
156 "Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n"; 152 "Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
157 153
@@ -1551,88 +1547,6 @@ __setup( "sbni=", sbni_setup );
1551 1547
1552/* -------------------------------------------------------------------------- */ 1548/* -------------------------------------------------------------------------- */
1553 1549
1554#ifdef ASM_CRC
1555
1556static u32
1557calc_crc32( u32 crc, u8 *p, u32 len )
1558{
1559 register u32 _crc;
1560 _crc = crc;
1561
1562 __asm__ __volatile__ (
1563 "xorl %%ebx, %%ebx\n"
1564 "movl %2, %%esi\n"
1565 "movl %3, %%ecx\n"
1566 "movl $crc32tab, %%edi\n"
1567 "shrl $2, %%ecx\n"
1568 "jz 1f\n"
1569
1570 ".align 4\n"
1571 "0:\n"
1572 "movb %%al, %%bl\n"
1573 "movl (%%esi), %%edx\n"
1574 "shrl $8, %%eax\n"
1575 "xorb %%dl, %%bl\n"
1576 "shrl $8, %%edx\n"
1577 "xorl (%%edi,%%ebx,4), %%eax\n"
1578
1579 "movb %%al, %%bl\n"
1580 "shrl $8, %%eax\n"
1581 "xorb %%dl, %%bl\n"
1582 "shrl $8, %%edx\n"
1583 "xorl (%%edi,%%ebx,4), %%eax\n"
1584
1585 "movb %%al, %%bl\n"
1586 "shrl $8, %%eax\n"
1587 "xorb %%dl, %%bl\n"
1588 "movb %%dh, %%dl\n"
1589 "xorl (%%edi,%%ebx,4), %%eax\n"
1590
1591 "movb %%al, %%bl\n"
1592 "shrl $8, %%eax\n"
1593 "xorb %%dl, %%bl\n"
1594 "addl $4, %%esi\n"
1595 "xorl (%%edi,%%ebx,4), %%eax\n"
1596
1597 "decl %%ecx\n"
1598 "jnz 0b\n"
1599
1600 "1:\n"
1601 "movl %3, %%ecx\n"
1602 "andl $3, %%ecx\n"
1603 "jz 2f\n"
1604
1605 "movb %%al, %%bl\n"
1606 "shrl $8, %%eax\n"
1607 "xorb (%%esi), %%bl\n"
1608 "xorl (%%edi,%%ebx,4), %%eax\n"
1609
1610 "decl %%ecx\n"
1611 "jz 2f\n"
1612
1613 "movb %%al, %%bl\n"
1614 "shrl $8, %%eax\n"
1615 "xorb 1(%%esi), %%bl\n"
1616 "xorl (%%edi,%%ebx,4), %%eax\n"
1617
1618 "decl %%ecx\n"
1619 "jz 2f\n"
1620
1621 "movb %%al, %%bl\n"
1622 "shrl $8, %%eax\n"
1623 "xorb 2(%%esi), %%bl\n"
1624 "xorl (%%edi,%%ebx,4), %%eax\n"
1625 "2:\n"
1626 : "=a" (_crc)
1627 : "0" (_crc), "g" (p), "g" (len)
1628 : "bx", "cx", "dx", "si", "di"
1629 );
1630
1631 return _crc;
1632}
1633
1634#else /* ASM_CRC */
1635
1636static u32 1550static u32
1637calc_crc32( u32 crc, u8 *p, u32 len ) 1551calc_crc32( u32 crc, u8 *p, u32 len )
1638{ 1552{
@@ -1642,9 +1556,6 @@ calc_crc32( u32 crc, u8 *p, u32 len )
1642 return crc; 1556 return crc;
1643} 1557}
1644 1558
1645#endif /* ASM_CRC */
1646
1647
1648static u32 crc32tab[] __attribute__ ((aligned(8))) = { 1559static u32 crc32tab[] __attribute__ ((aligned(8))) = {
1649 0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37, 1560 0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37,
1650 0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E, 1561 0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E,
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 55b8dec86233..08ae01b41c83 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -169,6 +169,7 @@ struct xenvif {
169 unsigned long credit_usec; 169 unsigned long credit_usec;
170 unsigned long remaining_credit; 170 unsigned long remaining_credit;
171 struct timer_list credit_timeout; 171 struct timer_list credit_timeout;
172 u64 credit_window_start;
172 173
173 /* Statistics */ 174 /* Statistics */
174 unsigned long rx_gso_checksum_fixup; 175 unsigned long rx_gso_checksum_fixup;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index e4aa26748f80..b78ee10a956a 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -316,8 +316,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
316 vif->credit_bytes = vif->remaining_credit = ~0UL; 316 vif->credit_bytes = vif->remaining_credit = ~0UL;
317 vif->credit_usec = 0UL; 317 vif->credit_usec = 0UL;
318 init_timer(&vif->credit_timeout); 318 init_timer(&vif->credit_timeout);
319 /* Initialize 'expires' now: it's used to track the credit window. */ 319 vif->credit_window_start = get_jiffies_64();
320 vif->credit_timeout.expires = jiffies;
321 320
322 dev->netdev_ops = &xenvif_netdev_ops; 321 dev->netdev_ops = &xenvif_netdev_ops;
323 dev->hw_features = NETIF_F_SG | 322 dev->hw_features = NETIF_F_SG |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 828fdab4f1a4..919b6509455c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1380,9 +1380,8 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1380 1380
1381static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) 1381static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1382{ 1382{
1383 unsigned long now = jiffies; 1383 u64 now = get_jiffies_64();
1384 unsigned long next_credit = 1384 u64 next_credit = vif->credit_window_start +
1385 vif->credit_timeout.expires +
1386 msecs_to_jiffies(vif->credit_usec / 1000); 1385 msecs_to_jiffies(vif->credit_usec / 1000);
1387 1386
1388 /* Timer could already be pending in rare cases. */ 1387 /* Timer could already be pending in rare cases. */
@@ -1390,8 +1389,8 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1390 return true; 1389 return true;
1391 1390
1392 /* Passed the point where we can replenish credit? */ 1391 /* Passed the point where we can replenish credit? */
1393 if (time_after_eq(now, next_credit)) { 1392 if (time_after_eq64(now, next_credit)) {
1394 vif->credit_timeout.expires = now; 1393 vif->credit_window_start = now;
1395 tx_add_credit(vif); 1394 tx_add_credit(vif);
1396 } 1395 }
1397 1396
@@ -1403,6 +1402,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1403 tx_credit_callback; 1402 tx_credit_callback;
1404 mod_timer(&vif->credit_timeout, 1403 mod_timer(&vif->credit_timeout,
1405 next_credit); 1404 next_credit);
1405 vif->credit_window_start = next_credit;
1406 1406
1407 return true; 1407 return true;
1408 } 1408 }
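
Aside: the netback change tracks the credit window start as a u64 read with get_jiffies_64() and compares it with time_after_eq64(), so a stale window start cannot appear to sit a full jiffies wrap in the future. A standalone sketch of the wrap-safe comparison; the macro below mimics the kernel's signed-difference trick and the values are invented.

#include <stdint.h>
#include <stdio.h>

/* Same idea as the kernel's time_after_eq64(): cast the difference to
 * a signed 64-bit value, so ordering survives counter wrap-around. */
#define time_after_eq64(a, b)  ((int64_t)((a) - (b)) >= 0)

int main(void)
{
	uint64_t now = 1000000;
	uint64_t next_credit = 999000;      /* window already passed */

	printf("replenish credit: %d\n", time_after_eq64(now, next_credit));

	next_credit = now + 50;             /* window still in the future */
	printf("replenish credit: %d\n", time_after_eq64(now, next_credit));
	return 0;
}
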
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index be12fbfcae10..1ea75236a15f 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -552,9 +552,8 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
552 struct acpiphp_func *func; 552 struct acpiphp_func *func;
553 int max, pass; 553 int max, pass;
554 LIST_HEAD(add_list); 554 LIST_HEAD(add_list);
555 int nr_found;
556 555
557 nr_found = acpiphp_rescan_slot(slot); 556 acpiphp_rescan_slot(slot);
558 max = acpiphp_max_busnr(bus); 557 max = acpiphp_max_busnr(bus);
559 for (pass = 0; pass < 2; pass++) { 558 for (pass = 0; pass < 2; pass++) {
560 list_for_each_entry(dev, &bus->devices, bus_list) { 559 list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -574,9 +573,6 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
574 } 573 }
575 } 574 }
576 __pci_bus_assign_resources(bus, &add_list, NULL); 575 __pci_bus_assign_resources(bus, &add_list, NULL);
577 /* Nothing more to do here if there are no new devices on this bus. */
578 if (!nr_found && (slot->flags & SLOT_ENABLED))
579 return;
580 576
581 acpiphp_sanitize_bus(bus); 577 acpiphp_sanitize_bus(bus);
582 acpiphp_set_hpp_values(bus); 578 acpiphp_set_hpp_values(bus);
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index feab3a5e50b5..757eb0716d45 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -696,7 +696,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
696 while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, 696 while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
697 PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, 697 PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
698 pci_device)) != NULL) { 698 pci_device)) != NULL) {
699 struct blogic_adapter *adapter = adapter; 699 struct blogic_adapter *host_adapter = adapter;
700 struct blogic_adapter_info adapter_info; 700 struct blogic_adapter_info adapter_info;
701 enum blogic_isa_ioport mod_ioaddr_req; 701 enum blogic_isa_ioport mod_ioaddr_req;
702 unsigned char bus; 702 unsigned char bus;
@@ -744,9 +744,9 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
744 known and enabled, note that the particular Standard ISA I/O 744 known and enabled, note that the particular Standard ISA I/O
745 Address should not be probed. 745 Address should not be probed.
746 */ 746 */
747 adapter->io_addr = io_addr; 747 host_adapter->io_addr = io_addr;
748 blogic_intreset(adapter); 748 blogic_intreset(host_adapter);
749 if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, 749 if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
750 &adapter_info, sizeof(adapter_info)) == 750 &adapter_info, sizeof(adapter_info)) ==
751 sizeof(adapter_info)) { 751 sizeof(adapter_info)) {
752 if (adapter_info.isa_port < 6) 752 if (adapter_info.isa_port < 6)
@@ -762,7 +762,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
762 I/O Address assigned at system initialization. 762 I/O Address assigned at system initialization.
763 */ 763 */
764 mod_ioaddr_req = BLOGIC_IO_DISABLE; 764 mod_ioaddr_req = BLOGIC_IO_DISABLE;
765 blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req, 765 blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
766 sizeof(mod_ioaddr_req), NULL, 0); 766 sizeof(mod_ioaddr_req), NULL, 0);
767 /* 767 /*
768 For the first MultiMaster Host Adapter enumerated, 768 For the first MultiMaster Host Adapter enumerated,
@@ -779,12 +779,12 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
779 779
780 fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45; 780 fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
781 fetch_localram.count = sizeof(autoscsi_byte45); 781 fetch_localram.count = sizeof(autoscsi_byte45);
782 blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM, 782 blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
783 &fetch_localram, sizeof(fetch_localram), 783 &fetch_localram, sizeof(fetch_localram),
784 &autoscsi_byte45, 784 &autoscsi_byte45,
785 sizeof(autoscsi_byte45)); 785 sizeof(autoscsi_byte45));
786 blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id, 786 blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
787 sizeof(id)); 787 &id, sizeof(id));
788 if (id.fw_ver_digit1 == '5') 788 if (id.fw_ver_digit1 == '5')
789 force_scan_order = 789 force_scan_order =
790 autoscsi_byte45.force_scan_order; 790 autoscsi_byte45.force_scan_order;
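
Aside: the BusLogic hunk renames the inner variable because "struct blogic_adapter *adapter = adapter;" declares a new adapter that shadows the function parameter and initializes it from itself, i.e. from an indeterminate value. A tiny sketch of the pitfall and of the renamed fix; the type and values are invented.

#include <stdio.h>

struct adapter { int io_addr; };

static void probe(struct adapter *adapter)
{
	{
		/* BUG pattern: "struct adapter *adapter = adapter;" would
		 * initialize the inner variable from itself, leaving it
		 * indeterminate instead of copying the outer pointer.   */

		/* Fixed pattern: a distinct name copies the parameter.  */
		struct adapter *host_adapter = adapter;

		host_adapter->io_addr = 0x330;
	}
	printf("io_addr = 0x%x\n", adapter->io_addr);
}

int main(void)
{
	struct adapter a = { 0 };

	probe(&a);
	return 0;
}
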
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 408a42ef787a..f0d432c139d0 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
771static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 771static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
772{ 772{
773 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; 773 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
774 if (!capable(CAP_SYS_RAWIO))
775 return -EPERM;
774 return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg); 776 return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
775} 777}
776 778
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 2ef497ebadc0..ee5c1833eb73 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -20,7 +20,7 @@
20 * | Device Discovery | 0x2095 | 0x2020-0x2022, | 20 * | Device Discovery | 0x2095 | 0x2020-0x2022, |
21 * | | | 0x2011-0x2012, | 21 * | | | 0x2011-0x2012, |
22 * | | | 0x2016 | 22 * | | | 0x2016 |
23 * | Queue Command and IO tracing | 0x3058 | 0x3006-0x300b | 23 * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b |
24 * | | | 0x3027-0x3028 | 24 * | | | 0x3027-0x3028 |
25 * | | | 0x303d-0x3041 | 25 * | | | 0x303d-0x3041 |
26 * | | | 0x302d,0x3033 | 26 * | | | 0x302d,0x3033 |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index df1b30ba938c..ff9c86b1a0d8 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1957,6 +1957,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1957 que = MSW(sts->handle); 1957 que = MSW(sts->handle);
1958 req = ha->req_q_map[que]; 1958 req = ha->req_q_map[que];
1959 1959
1960 /* Check for invalid queue pointer */
1961 if (req == NULL ||
1962 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
1963 ql_dbg(ql_dbg_io, vha, 0x3059,
1964 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
1965 "que=%u.\n", sts->handle, req, que);
1966 return;
1967 }
1968
1960 /* Validate handle. */ 1969 /* Validate handle. */
1961 if (handle < req->num_outstanding_cmds) 1970 if (handle < req->num_outstanding_cmds)
1962 sp = req->outstanding_cmds[handle]; 1971 sp = req->outstanding_cmds[handle];
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e62d17d41d4e..5693f6d7eddb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2854,6 +2854,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2854 gd->events |= DISK_EVENT_MEDIA_CHANGE; 2854 gd->events |= DISK_EVENT_MEDIA_CHANGE;
2855 } 2855 }
2856 2856
2857 blk_pm_runtime_init(sdp->request_queue, dev);
2857 add_disk(gd); 2858 add_disk(gd);
2858 if (sdkp->capacity) 2859 if (sdkp->capacity)
2859 sd_dif_config_host(sdkp); 2860 sd_dif_config_host(sdkp);
@@ -2862,7 +2863,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2862 2863
2863 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 2864 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
2864 sdp->removable ? "removable " : ""); 2865 sdp->removable ? "removable " : "");
2865 blk_pm_runtime_init(sdp->request_queue, dev);
2866 scsi_autopm_put_device(sdp); 2866 scsi_autopm_put_device(sdp);
2867 put_device(&sdkp->dev); 2867 put_device(&sdkp->dev);
2868} 2868}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5cbc4bb1b395..df5e961484e1 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -105,8 +105,11 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
105static int sg_add(struct device *, struct class_interface *); 105static int sg_add(struct device *, struct class_interface *);
106static void sg_remove(struct device *, struct class_interface *); 106static void sg_remove(struct device *, struct class_interface *);
107 107
108static DEFINE_SPINLOCK(sg_open_exclusive_lock);
109
108static DEFINE_IDR(sg_index_idr); 110static DEFINE_IDR(sg_index_idr);
109static DEFINE_RWLOCK(sg_index_lock); 111static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
112 file descriptor list for device */
110 113
111static struct class_interface sg_interface = { 114static struct class_interface sg_interface = {
112 .add_dev = sg_add, 115 .add_dev = sg_add,
@@ -143,7 +146,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
143} Sg_request; 146} Sg_request;
144 147
145typedef struct sg_fd { /* holds the state of a file descriptor */ 148typedef struct sg_fd { /* holds the state of a file descriptor */
146 struct list_head sfd_siblings; /* protected by sfd_lock of device */ 149 /* sfd_siblings is protected by sg_index_lock */
150 struct list_head sfd_siblings;
147 struct sg_device *parentdp; /* owning device */ 151 struct sg_device *parentdp; /* owning device */
148 wait_queue_head_t read_wait; /* queue read until command done */ 152 wait_queue_head_t read_wait; /* queue read until command done */
149 rwlock_t rq_list_lock; /* protect access to list in req_arr */ 153 rwlock_t rq_list_lock; /* protect access to list in req_arr */
@@ -166,12 +170,13 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
166 170
167typedef struct sg_device { /* holds the state of each scsi generic device */ 171typedef struct sg_device { /* holds the state of each scsi generic device */
168 struct scsi_device *device; 172 struct scsi_device *device;
173 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
169 int sg_tablesize; /* adapter's max scatter-gather table size */ 174 int sg_tablesize; /* adapter's max scatter-gather table size */
170 u32 index; /* device index number */ 175 u32 index; /* device index number */
171 spinlock_t sfd_lock; /* protect file descriptor list for device */ 176 /* sfds is protected by sg_index_lock */
172 struct list_head sfds; 177 struct list_head sfds;
173 struct rw_semaphore o_sem; /* exclude open should hold this rwsem */
174 volatile char detached; /* 0->attached, 1->detached pending removal */ 178 volatile char detached; /* 0->attached, 1->detached pending removal */
179 /* exclude protected by sg_open_exclusive_lock */
175 char exclude; /* opened for exclusive access */ 180 char exclude; /* opened for exclusive access */
176 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ 181 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
177 struct gendisk *disk; 182 struct gendisk *disk;
@@ -220,14 +225,35 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
220 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); 225 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
221} 226}
222 227
228static int get_exclude(Sg_device *sdp)
229{
230 unsigned long flags;
231 int ret;
232
233 spin_lock_irqsave(&sg_open_exclusive_lock, flags);
234 ret = sdp->exclude;
235 spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
236 return ret;
237}
238
239static int set_exclude(Sg_device *sdp, char val)
240{
241 unsigned long flags;
242
243 spin_lock_irqsave(&sg_open_exclusive_lock, flags);
244 sdp->exclude = val;
245 spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
246 return val;
247}
248
223static int sfds_list_empty(Sg_device *sdp) 249static int sfds_list_empty(Sg_device *sdp)
224{ 250{
225 unsigned long flags; 251 unsigned long flags;
226 int ret; 252 int ret;
227 253
228 spin_lock_irqsave(&sdp->sfd_lock, flags); 254 read_lock_irqsave(&sg_index_lock, flags);
229 ret = list_empty(&sdp->sfds); 255 ret = list_empty(&sdp->sfds);
230 spin_unlock_irqrestore(&sdp->sfd_lock, flags); 256 read_unlock_irqrestore(&sg_index_lock, flags);
231 return ret; 257 return ret;
232} 258}
233 259
@@ -239,6 +265,7 @@ sg_open(struct inode *inode, struct file *filp)
239 struct request_queue *q; 265 struct request_queue *q;
240 Sg_device *sdp; 266 Sg_device *sdp;
241 Sg_fd *sfp; 267 Sg_fd *sfp;
268 int res;
242 int retval; 269 int retval;
243 270
244 nonseekable_open(inode, filp); 271 nonseekable_open(inode, filp);
@@ -267,52 +294,54 @@ sg_open(struct inode *inode, struct file *filp)
267 goto error_out; 294 goto error_out;
268 } 295 }
269 296
270 if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) { 297 if (flags & O_EXCL) {
271 retval = -EPERM; /* Can't lock it with read only access */ 298 if (O_RDONLY == (flags & O_ACCMODE)) {
272 goto error_out; 299 retval = -EPERM; /* Can't lock it with read only access */
273 } 300 goto error_out;
274 if (flags & O_NONBLOCK) { 301 }
275 if (flags & O_EXCL) { 302 if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
276 if (!down_write_trylock(&sdp->o_sem)) { 303 retval = -EBUSY;
277 retval = -EBUSY; 304 goto error_out;
278 goto error_out; 305 }
279 } 306 res = wait_event_interruptible(sdp->o_excl_wait,
280 } else { 307 ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
281 if (!down_read_trylock(&sdp->o_sem)) { 308 if (res) {
282 retval = -EBUSY; 309 retval = res; /* -ERESTARTSYS because signal hit process */
283 goto error_out; 310 goto error_out;
284 } 311 }
312 } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
313 if (flags & O_NONBLOCK) {
314 retval = -EBUSY;
315 goto error_out;
316 }
317 res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
318 if (res) {
319 retval = res; /* -ERESTARTSYS because signal hit process */
320 goto error_out;
285 } 321 }
286 } else {
287 if (flags & O_EXCL)
288 down_write(&sdp->o_sem);
289 else
290 down_read(&sdp->o_sem);
291 } 322 }
292 /* Since write lock is held, no need to check sfd_list */ 323 if (sdp->detached) {
293 if (flags & O_EXCL) 324 retval = -ENODEV;
294 sdp->exclude = 1; /* used by release lock */ 325 goto error_out;
295 326 }
296 if (sfds_list_empty(sdp)) { /* no existing opens on this device */ 327 if (sfds_list_empty(sdp)) { /* no existing opens on this device */
297 sdp->sgdebug = 0; 328 sdp->sgdebug = 0;
298 q = sdp->device->request_queue; 329 q = sdp->device->request_queue;
299 sdp->sg_tablesize = queue_max_segments(q); 330 sdp->sg_tablesize = queue_max_segments(q);
300 } 331 }
301 sfp = sg_add_sfp(sdp, dev); 332 if ((sfp = sg_add_sfp(sdp, dev)))
302 if (!IS_ERR(sfp))
303 filp->private_data = sfp; 333 filp->private_data = sfp;
304 /* retval is already provably zero at this point because of the
305 * check after retval = scsi_autopm_get_device(sdp->device))
306 */
307 else { 334 else {
308 retval = PTR_ERR(sfp);
309
310 if (flags & O_EXCL) { 335 if (flags & O_EXCL) {
311 sdp->exclude = 0; /* undo if error */ 336 set_exclude(sdp, 0); /* undo if error */
312 up_write(&sdp->o_sem); 337 wake_up_interruptible(&sdp->o_excl_wait);
313 } else 338 }
314 up_read(&sdp->o_sem); 339 retval = -ENOMEM;
340 goto error_out;
341 }
342 retval = 0;
315error_out: 343error_out:
344 if (retval) {
316 scsi_autopm_put_device(sdp->device); 345 scsi_autopm_put_device(sdp->device);
317sdp_put: 346sdp_put:
318 scsi_device_put(sdp->device); 347 scsi_device_put(sdp->device);
@@ -329,18 +358,13 @@ sg_release(struct inode *inode, struct file *filp)
329{ 358{
330 Sg_device *sdp; 359 Sg_device *sdp;
331 Sg_fd *sfp; 360 Sg_fd *sfp;
332 int excl;
333 361
334 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 362 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
335 return -ENXIO; 363 return -ENXIO;
336 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); 364 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
337 365
338 excl = sdp->exclude; 366 set_exclude(sdp, 0);
339 sdp->exclude = 0; 367 wake_up_interruptible(&sdp->o_excl_wait);
340 if (excl)
341 up_write(&sdp->o_sem);
342 else
343 up_read(&sdp->o_sem);
344 368
345 scsi_autopm_put_device(sdp->device); 369 scsi_autopm_put_device(sdp->device);
346 kref_put(&sfp->f_ref, sg_remove_sfp); 370 kref_put(&sfp->f_ref, sg_remove_sfp);
@@ -1391,9 +1415,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1391 disk->first_minor = k; 1415 disk->first_minor = k;
1392 sdp->disk = disk; 1416 sdp->disk = disk;
1393 sdp->device = scsidp; 1417 sdp->device = scsidp;
1394 spin_lock_init(&sdp->sfd_lock);
1395 INIT_LIST_HEAD(&sdp->sfds); 1418 INIT_LIST_HEAD(&sdp->sfds);
1396 init_rwsem(&sdp->o_sem); 1419 init_waitqueue_head(&sdp->o_excl_wait);
1397 sdp->sg_tablesize = queue_max_segments(q); 1420 sdp->sg_tablesize = queue_max_segments(q);
1398 sdp->index = k; 1421 sdp->index = k;
1399 kref_init(&sdp->d_ref); 1422 kref_init(&sdp->d_ref);
@@ -1526,13 +1549,11 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
1526 1549
1527 /* Need a write lock to set sdp->detached. */ 1550 /* Need a write lock to set sdp->detached. */
1528 write_lock_irqsave(&sg_index_lock, iflags); 1551 write_lock_irqsave(&sg_index_lock, iflags);
1529 spin_lock(&sdp->sfd_lock);
1530 sdp->detached = 1; 1552 sdp->detached = 1;
1531 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { 1553 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
1532 wake_up_interruptible(&sfp->read_wait); 1554 wake_up_interruptible(&sfp->read_wait);
1533 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); 1555 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
1534 } 1556 }
1535 spin_unlock(&sdp->sfd_lock);
1536 write_unlock_irqrestore(&sg_index_lock, iflags); 1557 write_unlock_irqrestore(&sg_index_lock, iflags);
1537 1558
1538 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); 1559 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
@@ -2043,7 +2064,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2043 2064
2044 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); 2065 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2045 if (!sfp) 2066 if (!sfp)
2046 return ERR_PTR(-ENOMEM); 2067 return NULL;
2047 2068
2048 init_waitqueue_head(&sfp->read_wait); 2069 init_waitqueue_head(&sfp->read_wait);
2049 rwlock_init(&sfp->rq_list_lock); 2070 rwlock_init(&sfp->rq_list_lock);
@@ -2057,13 +2078,9 @@ sg_add_sfp(Sg_device * sdp, int dev)
2057 sfp->cmd_q = SG_DEF_COMMAND_Q; 2078 sfp->cmd_q = SG_DEF_COMMAND_Q;
2058 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; 2079 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2059 sfp->parentdp = sdp; 2080 sfp->parentdp = sdp;
2060 spin_lock_irqsave(&sdp->sfd_lock, iflags); 2081 write_lock_irqsave(&sg_index_lock, iflags);
2061 if (sdp->detached) {
2062 spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
2063 return ERR_PTR(-ENODEV);
2064 }
2065 list_add_tail(&sfp->sfd_siblings, &sdp->sfds); 2082 list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
2066 spin_unlock_irqrestore(&sdp->sfd_lock, iflags); 2083 write_unlock_irqrestore(&sg_index_lock, iflags);
2067 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); 2084 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2068 if (unlikely(sg_big_buff != def_reserved_size)) 2085 if (unlikely(sg_big_buff != def_reserved_size))
2069 sg_big_buff = def_reserved_size; 2086 sg_big_buff = def_reserved_size;
@@ -2113,9 +2130,10 @@ static void sg_remove_sfp(struct kref *kref)
2113 struct sg_device *sdp = sfp->parentdp; 2130 struct sg_device *sdp = sfp->parentdp;
2114 unsigned long iflags; 2131 unsigned long iflags;
2115 2132
2116 spin_lock_irqsave(&sdp->sfd_lock, iflags); 2133 write_lock_irqsave(&sg_index_lock, iflags);
2117 list_del(&sfp->sfd_siblings); 2134 list_del(&sfp->sfd_siblings);
2118 spin_unlock_irqrestore(&sdp->sfd_lock, iflags); 2135 write_unlock_irqrestore(&sg_index_lock, iflags);
2136 wake_up_interruptible(&sdp->o_excl_wait);
2119 2137
2120 INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); 2138 INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
2121 schedule_work(&sfp->ew.work); 2139 schedule_work(&sfp->ew.work);
@@ -2502,7 +2520,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2502 return 0; 2520 return 0;
2503} 2521}
2504 2522
2505/* must be called while holding sg_index_lock and sfd_lock */ 2523/* must be called while holding sg_index_lock */
2506static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) 2524static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2507{ 2525{
2508 int k, m, new_interface, blen, usg; 2526 int k, m, new_interface, blen, usg;
@@ -2587,26 +2605,22 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2587 2605
2588 read_lock_irqsave(&sg_index_lock, iflags); 2606 read_lock_irqsave(&sg_index_lock, iflags);
2589 sdp = it ? sg_lookup_dev(it->index) : NULL; 2607 sdp = it ? sg_lookup_dev(it->index) : NULL;
2590 if (sdp) { 2608 if (sdp && !list_empty(&sdp->sfds)) {
2591 spin_lock(&sdp->sfd_lock); 2609 struct scsi_device *scsidp = sdp->device;
2592 if (!list_empty(&sdp->sfds)) {
2593 struct scsi_device *scsidp = sdp->device;
2594 2610
2595 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); 2611 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
2596 if (sdp->detached) 2612 if (sdp->detached)
2597 seq_printf(s, "detached pending close "); 2613 seq_printf(s, "detached pending close ");
2598 else 2614 else
2599 seq_printf 2615 seq_printf
2600 (s, "scsi%d chan=%d id=%d lun=%d em=%d", 2616 (s, "scsi%d chan=%d id=%d lun=%d em=%d",
2601 scsidp->host->host_no, 2617 scsidp->host->host_no,
2602 scsidp->channel, scsidp->id, 2618 scsidp->channel, scsidp->id,
2603 scsidp->lun, 2619 scsidp->lun,
2604 scsidp->host->hostt->emulated); 2620 scsidp->host->hostt->emulated);
2605 seq_printf(s, " sg_tablesize=%d excl=%d\n", 2621 seq_printf(s, " sg_tablesize=%d excl=%d\n",
2606 sdp->sg_tablesize, sdp->exclude); 2622 sdp->sg_tablesize, get_exclude(sdp));
2607 sg_proc_debug_helper(s, sdp); 2623 sg_proc_debug_helper(s, sdp);
2608 }
2609 spin_unlock(&sdp->sfd_lock);
2610 } 2624 }
2611 read_unlock_irqrestore(&sg_index_lock, iflags); 2625 read_unlock_irqrestore(&sg_index_lock, iflags);
2612 return 0; 2626 return 0;
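
Aside: the sg.c rework drops the per-device rwsem and sfd_lock in favour of an o_excl_wait waitqueue plus an exclude flag guarded by sg_open_exclusive_lock: an O_EXCL opener sleeps until no file descriptors are open and no one else holds the flag, while an ordinary opener sleeps only until the flag clears. A loose userspace analogue using a pthread condition variable in place of the waitqueue; all names are invented and the O_NONBLOCK and error paths are omitted.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  o_excl_wait = PTHREAD_COND_INITIALIZER;
static int open_fds;      /* like !list_empty(&sdp->sfds) */
static int exclude;       /* like sdp->exclude            */

static void open_excl(void)
{
	pthread_mutex_lock(&lock);
	while (open_fds || exclude)        /* wait_event_...() condition */
		pthread_cond_wait(&o_excl_wait, &lock);
	exclude = 1;                       /* set_exclude(sdp, 1)        */
	open_fds++;
	pthread_mutex_unlock(&lock);
}

static void open_shared(void)
{
	pthread_mutex_lock(&lock);
	while (exclude)                    /* another fd holds O_EXCL    */
		pthread_cond_wait(&o_excl_wait, &lock);
	open_fds++;
	pthread_mutex_unlock(&lock);
}

static void release_fd(int was_excl)
{
	pthread_mutex_lock(&lock);
	if (was_excl)
		exclude = 0;               /* set_exclude(sdp, 0)        */
	open_fds--;
	pthread_cond_broadcast(&o_excl_wait); /* wake_up_interruptible() */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	open_excl();
	release_fd(1);
	open_shared();
	release_fd(0);
	printf("done\n");
	return 0;
}
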
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index f91bc1fdd895..639ba96adb36 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1960,6 +1960,7 @@ cntrlEnd:
1960 1960
1961 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n"); 1961 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
1962 1962
1963 memset(&DevInfo, 0, sizeof(DevInfo));
1963 DevInfo.MaxRDMBufferSize = BUFFER_4K; 1964 DevInfo.MaxRDMBufferSize = BUFFER_4K;
1964 DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START; 1965 DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
1965 DevInfo.u32RxAlignmentCorrection = 0; 1966 DevInfo.u32RxAlignmentCorrection = 0;
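
Aside: the Bcmchar.c fix zeroes DevInfo before filling in the handful of fields the ioctl actually sets, so the later copy to user space cannot leak uninitialized stack bytes through padding or untouched members. A minimal sketch of the zero-before-copy pattern; the struct layout is invented.

#include <stdio.h>
#include <string.h>

struct dev_info {
	unsigned int max_rdm_buffer_size;
	unsigned int dsd_start_offset;
	unsigned int rx_alignment_correction;
	unsigned int reserved[4];   /* fields the handler never writes */
};

int main(void)
{
	struct dev_info info;

	memset(&info, 0, sizeof(info));      /* the added memset()        */
	info.max_rdm_buffer_size = 4096;
	info.dsd_start_offset = 0x200;
	info.rx_alignment_correction = 0;

	/* A copy_to_user(arg, &info, sizeof(info)) step would now expose
	 * only zeros in the untouched fields and padding. */
	printf("reserved[0] = %u\n", info.reserved[0]);
	return 0;
}
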
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 6ccb64fb0786..6ce0af9977d8 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
155 struct oz_app_hdr *app_hdr; 155 struct oz_app_hdr *app_hdr;
156 struct oz_serial_ctx *ctx; 156 struct oz_serial_ctx *ctx;
157 157
158 if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
159 return -EINVAL;
160
158 spin_lock_bh(&g_cdev.lock); 161 spin_lock_bh(&g_cdev.lock);
159 pd = g_cdev.active_pd; 162 pd = g_cdev.active_pd;
160 if (pd) 163 if (pd)
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index 23db32f07fd5..a10cdb17038b 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -1063,7 +1063,7 @@ static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg)
1063 1063
1064static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt) 1064static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
1065{ 1065{
1066 struct serial_icounter_struct icount; 1066 struct serial_icounter_struct icount = {};
1067 struct sb_uart_icount cnow; 1067 struct sb_uart_icount cnow;
1068 struct sb_uart_port *port = state->port; 1068 struct sb_uart_port *port = state->port;
1069 1069
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index c97e0e154d28..7e10dcdc3090 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -570,6 +570,7 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
570 ltv_t *pLtv; 570 ltv_t *pLtv;
571 bool_t ltvAllocated = FALSE; 571 bool_t ltvAllocated = FALSE;
572 ENCSTRCT sEncryption; 572 ENCSTRCT sEncryption;
573 size_t len;
573 574
574#ifdef USE_WDS 575#ifdef USE_WDS
575 hcf_16 hcfPort = HCF_PORT_0; 576 hcf_16 hcfPort = HCF_PORT_0;
@@ -686,7 +687,8 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
686 break; 687 break;
687 case CFG_CNF_OWN_NAME: 688 case CFG_CNF_OWN_NAME:
688 memset(lp->StationName, 0, sizeof(lp->StationName)); 689 memset(lp->StationName, 0, sizeof(lp->StationName));
689 memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]); 690 len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
691 strlcpy(lp->StationName, &pLtv->u.u8[2], len);
690 pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]); 692 pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
691 break; 693 break;
692 case CFG_CNF_LOAD_BALANCING: 694 case CFG_CNF_LOAD_BALANCING:
@@ -1783,6 +1785,7 @@ int wvlan_set_station_nickname(struct net_device *dev,
1783{ 1785{
1784 struct wl_private *lp = wl_priv(dev); 1786 struct wl_private *lp = wl_priv(dev);
1785 unsigned long flags; 1787 unsigned long flags;
1788 size_t len;
1786 int ret = 0; 1789 int ret = 0;
1787 /*------------------------------------------------------------------------*/ 1790 /*------------------------------------------------------------------------*/
1788 1791
@@ -1793,8 +1796,8 @@ int wvlan_set_station_nickname(struct net_device *dev,
1793 wl_lock(lp, &flags); 1796 wl_lock(lp, &flags);
1794 1797
1795 memset(lp->StationName, 0, sizeof(lp->StationName)); 1798 memset(lp->StationName, 0, sizeof(lp->StationName));
1796 1799 len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
1797 memcpy(lp->StationName, extra, wrqu->data.length); 1800 strlcpy(lp->StationName, extra, len);
1798 1801
1799 /* Commit the adapter parameters */ 1802 /* Commit the adapter parameters */
1800 wl_apply(lp); 1803 wl_apply(lp);
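
Aside: both wl_priv.c hunks replace a memcpy() driven by a caller-supplied length with a copy clamped to sizeof(lp->StationName) and terminated via strlcpy(). A portable sketch of the same bounded-copy idea, using snprintf() where the kernel code uses strlcpy(); the buffer size and inputs are invented.

#include <stdio.h>
#include <string.h>

#define STATION_NAME_LEN 32

static void set_station_name(char dst[STATION_NAME_LEN],
			     const char *src, size_t user_len)
{
	/* Clamp the caller-supplied length to the destination size,
	 * like min_t(size_t, ..., sizeof(lp->StationName)). */
	size_t len = user_len < STATION_NAME_LEN ? user_len : STATION_NAME_LEN;

	memset(dst, 0, STATION_NAME_LEN);
	snprintf(dst, len, "%s", src);   /* at most len-1 bytes plus NUL */
}

int main(void)
{
	char name[STATION_NAME_LEN];

	set_station_name(name, "a-very-long-user-supplied-nickname-string", 200);
	printf("%s\n", name);            /* safely truncated and terminated */
	return 0;
}
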
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 551c96ca60ac..0f199f6a0738 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
134 * pSCSI Host ID and enable for phba mode 134 * pSCSI Host ID and enable for phba mode
135 */ 135 */
136 sh = scsi_host_lookup(phv->phv_host_id); 136 sh = scsi_host_lookup(phv->phv_host_id);
137 if (IS_ERR(sh)) { 137 if (!sh) {
138 pr_err("pSCSI: Unable to locate SCSI Host for" 138 pr_err("pSCSI: Unable to locate SCSI Host for"
139 " phv_host_id: %d\n", phv->phv_host_id); 139 " phv_host_id: %d\n", phv->phv_host_id);
140 return PTR_ERR(sh); 140 return -EINVAL;
141 } 141 }
142 142
143 phv->phv_lld_host = sh; 143 phv->phv_lld_host = sh;
@@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev)
515 sh = phv->phv_lld_host; 515 sh = phv->phv_lld_host;
516 } else { 516 } else {
517 sh = scsi_host_lookup(pdv->pdv_host_id); 517 sh = scsi_host_lookup(pdv->pdv_host_id);
518 if (IS_ERR(sh)) { 518 if (!sh) {
519 pr_err("pSCSI: Unable to locate" 519 pr_err("pSCSI: Unable to locate"
520 " pdv_host_id: %d\n", pdv->pdv_host_id); 520 " pdv_host_id: %d\n", pdv->pdv_host_id);
521 return PTR_ERR(sh); 521 return -EINVAL;
522 } 522 }
523 } 523 }
524 } else { 524 } else {
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 4714c6f8da4b..d9b92b2c524d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -263,6 +263,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
263 sectors, cmd->se_dev->dev_attrib.max_write_same_len); 263 sectors, cmd->se_dev->dev_attrib.max_write_same_len);
264 return TCM_INVALID_CDB_FIELD; 264 return TCM_INVALID_CDB_FIELD;
265 } 265 }
266 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
267 if (flags[0] & 0x10) {
268 pr_warn("WRITE SAME with ANCHOR not supported\n");
269 return TCM_INVALID_CDB_FIELD;
270 }
266 /* 271 /*
267 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting 272 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
268 * translated into block discard requests within backend code. 273 * translated into block discard requests within backend code.
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 3da4fd10b9f8..474cd44fac14 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -82,6 +82,9 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
82 mutex_lock(&g_device_mutex); 82 mutex_lock(&g_device_mutex);
83 list_for_each_entry(se_dev, &g_device_list, g_dev_node) { 83 list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
84 84
85 if (!se_dev->dev_attrib.emulate_3pc)
86 continue;
87
85 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); 88 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
86 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); 89 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
87 90
@@ -357,6 +360,7 @@ struct xcopy_pt_cmd {
357 struct se_cmd se_cmd; 360 struct se_cmd se_cmd;
358 struct xcopy_op *xcopy_op; 361 struct xcopy_op *xcopy_op;
359 struct completion xpt_passthrough_sem; 362 struct completion xpt_passthrough_sem;
363 unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
360}; 364};
361 365
362static struct se_port xcopy_pt_port; 366static struct se_port xcopy_pt_port;
@@ -675,7 +679,8 @@ static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
675 679
676 pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n", 680 pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
677 se_cmd->scsi_status); 681 se_cmd->scsi_status);
678 return 0; 682
683 return (se_cmd->scsi_status) ? -EINVAL : 0;
679} 684}
680 685
681static int target_xcopy_read_source( 686static int target_xcopy_read_source(
@@ -708,7 +713,7 @@ static int target_xcopy_read_source(
708 (unsigned long long)src_lba, src_sectors, length); 713 (unsigned long long)src_lba, src_sectors, length);
709 714
710 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 715 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
711 DMA_FROM_DEVICE, 0, NULL); 716 DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
712 xop->src_pt_cmd = xpt_cmd; 717 xop->src_pt_cmd = xpt_cmd;
713 718
714 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], 719 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
@@ -768,7 +773,7 @@ static int target_xcopy_write_destination(
768 (unsigned long long)dst_lba, dst_sectors, length); 773 (unsigned long long)dst_lba, dst_sectors, length);
769 774
770 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 775 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
771 DMA_TO_DEVICE, 0, NULL); 776 DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
772 xop->dst_pt_cmd = xpt_cmd; 777 xop->dst_pt_cmd = xpt_cmd;
773 778
774 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0], 779 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
@@ -884,30 +889,42 @@ out:
884 889
885sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) 890sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
886{ 891{
892 struct se_device *dev = se_cmd->se_dev;
887 struct xcopy_op *xop = NULL; 893 struct xcopy_op *xop = NULL;
888 unsigned char *p = NULL, *seg_desc; 894 unsigned char *p = NULL, *seg_desc;
889 unsigned int list_id, list_id_usage, sdll, inline_dl, sa; 895 unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
896 sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
890 int rc; 897 int rc;
891 unsigned short tdll; 898 unsigned short tdll;
892 899
900 if (!dev->dev_attrib.emulate_3pc) {
901 pr_err("EXTENDED_COPY operation explicitly disabled\n");
902 return TCM_UNSUPPORTED_SCSI_OPCODE;
903 }
904
893 sa = se_cmd->t_task_cdb[1] & 0x1f; 905 sa = se_cmd->t_task_cdb[1] & 0x1f;
894 if (sa != 0x00) { 906 if (sa != 0x00) {
895 pr_err("EXTENDED_COPY(LID4) not supported\n"); 907 pr_err("EXTENDED_COPY(LID4) not supported\n");
896 return TCM_UNSUPPORTED_SCSI_OPCODE; 908 return TCM_UNSUPPORTED_SCSI_OPCODE;
897 } 909 }
898 910
911 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
912 if (!xop) {
913 pr_err("Unable to allocate xcopy_op\n");
914 return TCM_OUT_OF_RESOURCES;
915 }
916 xop->xop_se_cmd = se_cmd;
917
899 p = transport_kmap_data_sg(se_cmd); 918 p = transport_kmap_data_sg(se_cmd);
900 if (!p) { 919 if (!p) {
901 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n"); 920 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
921 kfree(xop);
902 return TCM_OUT_OF_RESOURCES; 922 return TCM_OUT_OF_RESOURCES;
903 } 923 }
904 924
905 list_id = p[0]; 925 list_id = p[0];
906 if (list_id != 0x00) { 926 list_id_usage = (p[1] & 0x18) >> 3;
907 pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id); 927
908 goto out;
909 }
910 list_id_usage = (p[1] & 0x18);
911 /* 928 /*
912 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH 929 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
913 */ 930 */
@@ -920,13 +937,6 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
920 goto out; 937 goto out;
921 } 938 }
922 939
923 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
924 if (!xop) {
925 pr_err("Unable to allocate xcopy_op\n");
926 goto out;
927 }
928 xop->xop_se_cmd = se_cmd;
929
930 pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x" 940 pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
931 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, 941 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
932 tdll, sdll, inline_dl); 942 tdll, sdll, inline_dl);
@@ -935,6 +945,17 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
935 if (rc <= 0) 945 if (rc <= 0)
936 goto out; 946 goto out;
937 947
948 if (xop->src_dev->dev_attrib.block_size !=
949 xop->dst_dev->dev_attrib.block_size) {
950 pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
951 " block_size: %u currently unsupported\n",
952 xop->src_dev->dev_attrib.block_size,
953 xop->dst_dev->dev_attrib.block_size);
954 xcopy_pt_undepend_remotedev(xop);
955 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
956 goto out;
957 }
958
938 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, 959 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
939 rc * XCOPY_TARGET_DESC_LEN); 960 rc * XCOPY_TARGET_DESC_LEN);
940 seg_desc = &p[16]; 961 seg_desc = &p[16];
@@ -957,7 +978,7 @@ out:
957 if (p) 978 if (p)
958 transport_kunmap_data_sg(se_cmd); 979 transport_kunmap_data_sg(se_cmd);
959 kfree(xop); 980 kfree(xop);
960 return TCM_INVALID_CDB_FIELD; 981 return ret;
961} 982}
962 983
963static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd) 984static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index d067285a2d20..6b0f75eac8a2 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1499,7 +1499,7 @@ static void atmel_set_ops(struct uart_port *port)
1499/* 1499/*
1500 * Get ip name usart or uart 1500 * Get ip name usart or uart
1501 */ 1501 */
1502static int atmel_get_ip_name(struct uart_port *port) 1502static void atmel_get_ip_name(struct uart_port *port)
1503{ 1503{
1504 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1504 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1505 int name = UART_GET_IP_NAME(port); 1505 int name = UART_GET_IP_NAME(port);
@@ -1518,10 +1518,7 @@ static int atmel_get_ip_name(struct uart_port *port)
1518 atmel_port->is_usart = false; 1518 atmel_port->is_usart = false;
1519 } else { 1519 } else {
1520 dev_err(port->dev, "Not supported ip name, set to uart\n"); 1520 dev_err(port->dev, "Not supported ip name, set to uart\n");
1521 return -EINVAL;
1522 } 1521 }
1523
1524 return 0;
1525} 1522}
1526 1523
1527/* 1524/*
@@ -2405,9 +2402,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
2405 /* 2402 /*
2406 * Get port name of usart or uart 2403 * Get port name of usart or uart
2407 */ 2404 */
2408 ret = atmel_get_ip_name(&port->uart); 2405 atmel_get_ip_name(&port->uart);
2409 if (ret < 0)
2410 goto err_add_port;
2411 2406
2412 return 0; 2407 return 0;
2413 2408
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index ba475632c5fa..0e808cf91d97 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -642,16 +642,29 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
642{ 642{
643 struct uio_device *idev = vma->vm_private_data; 643 struct uio_device *idev = vma->vm_private_data;
644 int mi = uio_find_mem_index(vma); 644 int mi = uio_find_mem_index(vma);
645 struct uio_mem *mem;
645 if (mi < 0) 646 if (mi < 0)
646 return -EINVAL; 647 return -EINVAL;
648 mem = idev->info->mem + mi;
647 649
648 vma->vm_ops = &uio_physical_vm_ops; 650 if (vma->vm_end - vma->vm_start > mem->size)
651 return -EINVAL;
649 652
653 vma->vm_ops = &uio_physical_vm_ops;
650 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 654 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
651 655
656 /*
657 * We cannot use the vm_iomap_memory() helper here,
658 * because vma->vm_pgoff is the map index we looked
659 * up above in uio_find_mem_index(), rather than an
660 * actual page offset into the mmap.
661 *
662 * So we just do the physical mmap without a page
663 * offset.
664 */
652 return remap_pfn_range(vma, 665 return remap_pfn_range(vma,
653 vma->vm_start, 666 vma->vm_start,
654 idev->info->mem[mi].addr >> PAGE_SHIFT, 667 mem->addr >> PAGE_SHIFT,
655 vma->vm_end - vma->vm_start, 668 vma->vm_end - vma->vm_start,
656 vma->vm_page_prot); 669 vma->vm_page_prot);
657} 670}
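
Note: the uio hunk above bounds-checks the requested mapping against the selected region and keeps remap_pfn_range() because vma->vm_pgoff carries the map index looked up by uio_find_mem_index(), not a page offset. A minimal sketch of the same bounds-checked physical mmap for a hypothetical character device follows; the mydev_* names and the region layout are illustrative, not part of this patch.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Hypothetical per-device description of one physical window */
struct mydev_region {
        phys_addr_t     phys;
        resource_size_t size;
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct mydev_region *r = file->private_data;
        unsigned long vsize = vma->vm_end - vma->vm_start;

        /* Refuse mappings that would run past the end of the region */
        if (vsize > r->size)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* vm_pgoff is reused as an index here, so pass the pfn explicitly
         * instead of letting a helper derive it from the mmap offset.
         */
        return remap_pfn_range(vma, vma->vm_start, r->phys >> PAGE_SHIFT,
                               vsize, vma->vm_page_prot);
}
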
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index c45f9c0a1b34..b21d553c245b 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -904,6 +904,7 @@ static struct usb_device_id id_table_combined [] = {
904 { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, 904 { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
905 /* Crucible Devices */ 905 /* Crucible Devices */
906 { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) }, 906 { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
907 { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
907 { } /* Terminating entry */ 908 { } /* Terminating entry */
908}; 909};
909 910
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 1b8af461b522..a7019d1e3058 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1307,3 +1307,9 @@
1307 * Manufacturer: Crucible Technologies 1307 * Manufacturer: Crucible Technologies
1308 */ 1308 */
1309#define FTDI_CT_COMET_PID 0x8e08 1309#define FTDI_CT_COMET_PID 0x8e08
1310
1311/*
1312 * Product: Z3X Box
1313 * Manufacturer: Smart GSM Team
1314 */
1315#define FTDI_Z3X_PID 0x0011
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index bedf8e47713b..1e6de4cd079d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -4,11 +4,6 @@
4 * Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com) 4 * Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com)
5 * Copyright (C) 2003 IBM Corp. 5 * Copyright (C) 2003 IBM Corp.
6 * 6 *
7 * Copyright (C) 2009, 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
8 * - fixes, improvements and documentation for the baud rate encoding methods
9 * Copyright (C) 2013 Reinhard Max <max@suse.de>
10 * - fixes and improvements for the divisor based baud rate encoding method
11 *
12 * Original driver for 2.2.x by anonymous 7 * Original driver for 2.2.x by anonymous
13 * 8 *
14 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -134,18 +129,10 @@ MODULE_DEVICE_TABLE(usb, id_table);
134 129
135 130
136enum pl2303_type { 131enum pl2303_type {
137 type_0, /* H version ? */ 132 type_0, /* don't know the difference between type 0 and */
138 type_1, /* H version ? */ 133 type_1, /* type 1, until someone from prolific tells us... */
139 HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */ 134 HX, /* HX version of the pl2303 chip */
140 HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */
141 TB, /* TB version */
142 HX_CLONE, /* Cheap and less functional clone of the HX chip */
143}; 135};
144/*
145 * NOTE: don't know the difference between type 0 and type 1,
146 * until someone from Prolific tells us...
147 * TODO: distinguish between X/HX, TA and HXD, EA, RA, SA variants
148 */
149 136
150struct pl2303_serial_private { 137struct pl2303_serial_private {
151 enum pl2303_type type; 138 enum pl2303_type type;
@@ -185,7 +172,6 @@ static int pl2303_startup(struct usb_serial *serial)
185{ 172{
186 struct pl2303_serial_private *spriv; 173 struct pl2303_serial_private *spriv;
187 enum pl2303_type type = type_0; 174 enum pl2303_type type = type_0;
188 char *type_str = "unknown (treating as type_0)";
189 unsigned char *buf; 175 unsigned char *buf;
190 176
191 spriv = kzalloc(sizeof(*spriv), GFP_KERNEL); 177 spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
@@ -198,53 +184,15 @@ static int pl2303_startup(struct usb_serial *serial)
198 return -ENOMEM; 184 return -ENOMEM;
199 } 185 }
200 186
201 if (serial->dev->descriptor.bDeviceClass == 0x02) { 187 if (serial->dev->descriptor.bDeviceClass == 0x02)
202 type = type_0; 188 type = type_0;
203 type_str = "type_0"; 189 else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40)
204 } else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40) { 190 type = HX;
205 /* 191 else if (serial->dev->descriptor.bDeviceClass == 0x00)
206 * NOTE: The bcdDevice version is the only difference between
207 * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB
208 */
209 if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) {
210 /* Check if the device is a clone */
211 pl2303_vendor_read(0x9494, 0, serial, buf);
212 /*
213 * NOTE: Not sure if this read is really needed.
214 * The HX returns 0x00, the clone 0x02, but the Windows
215 * driver seems to ignore the value and continues.
216 */
217 pl2303_vendor_write(0x0606, 0xaa, serial);
218 pl2303_vendor_read(0x8686, 0, serial, buf);
219 if (buf[0] != 0xaa) {
220 type = HX_CLONE;
221 type_str = "X/HX clone (limited functionality)";
222 } else {
223 type = HX_TA;
224 type_str = "X/HX/TA";
225 }
226 pl2303_vendor_write(0x0606, 0x00, serial);
227 } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
228 == 0x400) {
229 type = HXD_EA_RA_SA;
230 type_str = "HXD/EA/RA/SA";
231 } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
232 == 0x500) {
233 type = TB;
234 type_str = "TB";
235 } else {
236 dev_info(&serial->interface->dev,
237 "unknown/unsupported device type\n");
238 kfree(spriv);
239 kfree(buf);
240 return -ENODEV;
241 }
242 } else if (serial->dev->descriptor.bDeviceClass == 0x00
243 || serial->dev->descriptor.bDeviceClass == 0xFF) {
244 type = type_1; 192 type = type_1;
245 type_str = "type_1"; 193 else if (serial->dev->descriptor.bDeviceClass == 0xFF)
246 } 194 type = type_1;
247 dev_dbg(&serial->interface->dev, "device type: %s\n", type_str); 195 dev_dbg(&serial->interface->dev, "device type: %d\n", type);
248 196
249 spriv->type = type; 197 spriv->type = type;
250 usb_set_serial_data(serial, spriv); 198 usb_set_serial_data(serial, spriv);
@@ -259,10 +207,10 @@ static int pl2303_startup(struct usb_serial *serial)
259 pl2303_vendor_read(0x8383, 0, serial, buf); 207 pl2303_vendor_read(0x8383, 0, serial, buf);
260 pl2303_vendor_write(0, 1, serial); 208 pl2303_vendor_write(0, 1, serial);
261 pl2303_vendor_write(1, 0, serial); 209 pl2303_vendor_write(1, 0, serial);
262 if (type == type_0 || type == type_1) 210 if (type == HX)
263 pl2303_vendor_write(2, 0x24, serial);
264 else
265 pl2303_vendor_write(2, 0x44, serial); 211 pl2303_vendor_write(2, 0x44, serial);
212 else
213 pl2303_vendor_write(2, 0x24, serial);
266 214
267 kfree(buf); 215 kfree(buf);
268 return 0; 216 return 0;
@@ -316,174 +264,65 @@ static int pl2303_set_control_lines(struct usb_serial_port *port, u8 value)
316 return retval; 264 return retval;
317} 265}
318 266
319static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type, 267static void pl2303_encode_baudrate(struct tty_struct *tty,
320 u8 buf[4]) 268 struct usb_serial_port *port,
269 u8 buf[4])
321{ 270{
322 /*
323 * NOTE: Only the values defined in baud_sup are supported !
324 * => if unsupported values are set, the PL2303 uses 9600 baud instead
325 * => HX clones just don't work at unsupported baud rates < 115200 baud,
326 * for baud rates > 115200 they run at 115200 baud
327 */
328 const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600, 271 const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
329 4800, 7200, 9600, 14400, 19200, 28800, 38400, 272 4800, 7200, 9600, 14400, 19200, 28800, 38400,
330 57600, 115200, 230400, 460800, 614400, 921600, 273 57600, 115200, 230400, 460800, 500000, 614400,
331 1228800, 2457600, 3000000, 6000000, 12000000 }; 274 921600, 1228800, 2457600, 3000000, 6000000 };
275
276 struct usb_serial *serial = port->serial;
277 struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
278 int baud;
279 int i;
280
332 /* 281 /*
333 * NOTE: With the exception of type_0/1 devices, the following 282 * NOTE: Only the values defined in baud_sup are supported!
334 * additional baud rates are supported (tested with HX rev. 3A only): 283 * => if unsupported values are set, the PL2303 seems to use
335 * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800, 284 * 9600 baud (at least my PL2303X always does)
336 * 403200, 806400. (*: not HX and HX clones)
337 *
338 * Maximum values: HXD, TB: 12000000; HX, TA: 6000000;
339 * type_0+1: 1228800; RA: 921600; HX clones, SA: 115200
340 *
341 * As long as we are not using this encoding method for anything else
342 * than the type_0+1, HX and HX clone chips, there is no point in
343 * complicating the code to support them.
344 */ 285 */
345 int i; 286 baud = tty_get_baud_rate(tty);
287 dev_dbg(&port->dev, "baud requested = %d\n", baud);
288 if (!baud)
289 return;
346 290
347 /* Set baudrate to nearest supported value */ 291 /* Set baudrate to nearest supported value */
348 for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) { 292 for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) {
349 if (baud_sup[i] > baud) 293 if (baud_sup[i] > baud)
350 break; 294 break;
351 } 295 }
296
352 if (i == ARRAY_SIZE(baud_sup)) 297 if (i == ARRAY_SIZE(baud_sup))
353 baud = baud_sup[i - 1]; 298 baud = baud_sup[i - 1];
354 else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1])) 299 else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1]))
355 baud = baud_sup[i - 1]; 300 baud = baud_sup[i - 1];
356 else 301 else
357 baud = baud_sup[i]; 302 baud = baud_sup[i];
358 /* Respect the chip type specific baud rate limits */
359 /*
360 * FIXME: as long as we don't know how to distinguish between the
361 * HXD, EA, RA, and SA chip variants, allow the max. value of 12M.
362 */
363 if (type == HX_TA)
364 baud = min_t(int, baud, 6000000);
365 else if (type == type_0 || type == type_1)
366 baud = min_t(int, baud, 1228800);
367 else if (type == HX_CLONE)
368 baud = min_t(int, baud, 115200);
369 /* Direct (standard) baud rate encoding method */
370 put_unaligned_le32(baud, buf);
371
372 return baud;
373}
374 303
375static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type, 304 /* type_0, type_1 only support up to 1228800 baud */
376 u8 buf[4]) 305 if (spriv->type != HX)
377{ 306 baud = min_t(int, baud, 1228800);
378 /*
379 * Divisor based baud rate encoding method
380 *
381 * NOTE: HX clones do NOT support this method.
382 * It's not clear if the type_0/1 chips support it.
383 *
384 * divisor = 12MHz * 32 / baudrate = 2^A * B
385 *
386 * with
387 *
388 * A = buf[1] & 0x0e
389 * B = buf[0] + (buf[1] & 0x01) << 8
390 *
391 * Special cases:
392 * => 8 < B < 16: device seems to work not properly
393 * => B <= 8: device uses the max. value B = 512 instead
394 */
395 unsigned int A, B;
396 307
397 /* 308 if (baud <= 115200) {
398 * NOTE: The Windows driver allows maximum baud rates of 110% of the 309 put_unaligned_le32(baud, buf);
399 * specified maximum value.
400 * Quick tests with early (2004) HX (rev. A) chips suggest, that even
401 * higher baud rates (up to the maximum of 24M baud !) are working fine,
402 * but that should really be tested carefully in "real life" scenarios
403 * before removing the upper limit completely.
404 * Baud rates smaller than the specified 75 baud are definitely working
405 * fine.
406 */
407 if (type == type_0 || type == type_1)
408 baud = min_t(int, baud, 1228800 * 1.1);
409 else if (type == HX_TA)
410 baud = min_t(int, baud, 6000000 * 1.1);
411 else if (type == HXD_EA_RA_SA)
412 /* HXD, EA: 12Mbps; RA: 1Mbps; SA: 115200 bps */
413 /*
414 * FIXME: as long as we don't know how to distinguish between
415 * these chip variants, allow the max. of these values
416 */
417 baud = min_t(int, baud, 12000000 * 1.1);
418 else if (type == TB)
419 baud = min_t(int, baud, 12000000 * 1.1);
420 /* Determine factors A and B */
421 A = 0;
422 B = 12000000 * 32 / baud; /* 12MHz */
423 B <<= 1; /* Add one bit for rounding */
424 while (B > (512 << 1) && A <= 14) {
425 A += 2;
426 B >>= 2;
427 }
428 if (A > 14) { /* max. divisor = min. baudrate reached */
429 A = 14;
430 B = 512;
431 /* => ~45.78 baud */
432 } else { 310 } else {
433 B = (B + 1) >> 1; /* Round the last bit */
434 }
435 /* Handle special cases */
436 if (B == 512)
437 B = 0; /* also: 1 to 8 */
438 else if (B < 16)
439 /* 311 /*
440 * NOTE: With the current algorithm this happens 312 * Apparently the formula for higher speeds is:
441 * only for A=0 and means that the min. divisor 313 * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
442 * (respectively: the max. baudrate) is reached.
443 */ 314 */
444 B = 16; /* => 24 MBaud */ 315 unsigned tmp = 12000000 * 32 / baud;
445 /* Encode the baud rate */ 316 buf[3] = 0x80;
446 buf[3] = 0x80; /* Select divisor encoding method */ 317 buf[2] = 0;
447 buf[2] = 0; 318 buf[1] = (tmp >= 256);
448 buf[1] = (A & 0x0e); /* A */ 319 while (tmp >= 256) {
449 buf[1] |= ((B & 0x100) >> 8); /* MSB of B */ 320 tmp >>= 2;
450 buf[0] = B & 0xff; /* 8 LSBs of B */ 321 buf[1] <<= 1;
451 /* Calculate the actual/resulting baud rate */ 322 }
452 if (B <= 8) 323 buf[0] = tmp;
453 B = 512; 324 }
454 baud = 12000000 * 32 / ((1 << A) * B);
455
456 return baud;
457}
458
459static void pl2303_encode_baudrate(struct tty_struct *tty,
460 struct usb_serial_port *port,
461 enum pl2303_type type,
462 u8 buf[4])
463{
464 int baud;
465 325
466 baud = tty_get_baud_rate(tty);
467 dev_dbg(&port->dev, "baud requested = %d\n", baud);
468 if (!baud)
469 return;
470 /*
471 * There are two methods for setting/encoding the baud rate
472 * 1) Direct method: encodes the baud rate value directly
473 * => supported by all chip types
474 * 2) Divisor based method: encodes a divisor to a base value (12MHz*32)
475 * => not supported by HX clones (and likely type_0/1 chips)
476 *
477 * NOTE: Although the divisor based baud rate encoding method is much
478 * more flexible, some of the standard baud rate values can not be
479 * realized exactly. But the difference is very small (max. 0.2%) and
480 * the device likely uses the same baud rate generator for both methods
481 * so that there is likely no difference.
482 */
483 if (type == type_0 || type == type_1 || type == HX_CLONE)
484 baud = pl2303_baudrate_encode_direct(baud, type, buf);
485 else
486 baud = pl2303_baudrate_encode_divisor(baud, type, buf);
487 /* Save resulting baud rate */ 326 /* Save resulting baud rate */
488 tty_encode_baud_rate(tty, baud, baud); 327 tty_encode_baud_rate(tty, baud, baud);
489 dev_dbg(&port->dev, "baud set = %d\n", baud); 328 dev_dbg(&port->dev, "baud set = %d\n", baud);
@@ -540,8 +379,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
540 dev_dbg(&port->dev, "data bits = %d\n", buf[6]); 379 dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
541 } 380 }
542 381
543 /* For reference: buf[0]:buf[3] baud rate value */ 382 /* For reference buf[0]:buf[3] baud rate value */
544 pl2303_encode_baudrate(tty, port, spriv->type, buf); 383 pl2303_encode_baudrate(tty, port, &buf[0]);
545 384
546 /* For reference buf[4]=0 is 1 stop bits */ 385 /* For reference buf[4]=0 is 1 stop bits */
547 /* For reference buf[4]=1 is 1.5 stop bits */ 386 /* For reference buf[4]=1 is 1.5 stop bits */
@@ -618,10 +457,10 @@ static void pl2303_set_termios(struct tty_struct *tty,
618 dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf); 457 dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
619 458
620 if (C_CRTSCTS(tty)) { 459 if (C_CRTSCTS(tty)) {
621 if (spriv->type == type_0 || spriv->type == type_1) 460 if (spriv->type == HX)
622 pl2303_vendor_write(0x0, 0x41, serial);
623 else
624 pl2303_vendor_write(0x0, 0x61, serial); 461 pl2303_vendor_write(0x0, 0x61, serial);
462 else
463 pl2303_vendor_write(0x0, 0x41, serial);
625 } else { 464 } else {
626 pl2303_vendor_write(0x0, 0x0, serial); 465 pl2303_vendor_write(0x0, 0x0, serial);
627 } 466 }
@@ -658,7 +497,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
658 struct pl2303_serial_private *spriv = usb_get_serial_data(serial); 497 struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
659 int result; 498 int result;
660 499
661 if (spriv->type == type_0 || spriv->type == type_1) { 500 if (spriv->type != HX) {
662 usb_clear_halt(serial->dev, port->write_urb->pipe); 501 usb_clear_halt(serial->dev, port->write_urb->pipe);
663 usb_clear_halt(serial->dev, port->read_urb->pipe); 502 usb_clear_halt(serial->dev, port->read_urb->pipe);
664 } else { 503 } else {
@@ -833,7 +672,6 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
833 result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 672 result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
834 BREAK_REQUEST, BREAK_REQUEST_TYPE, state, 673 BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
835 0, NULL, 0, 100); 674 0, NULL, 0, 100);
836 /* NOTE: HX clones don't support sending breaks, -EPIPE is returned */
837 if (result) 675 if (result)
838 dev_err(&port->dev, "error sending break = %d\n", result); 676 dev_err(&port->dev, "error sending break = %d\n", result);
839} 677}
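
Note: for rates above 115200 the reverted pl2303_encode_baudrate() uses the divisor form quoted in its comment, baudrate = 12MHz * 32 / (2^buf[1]) / buf[0]. A stand-alone sketch of that encoding loop with one worked value (plain user-space C, not driver code; encode_high_baud is my name for it):

#include <stdio.h>

/* Sketch of the divisor-based encoding used above 115200 baud:
 * baudrate = 12MHz * 32 / (2^buf[1]) / buf[0]
 */
static void encode_high_baud(unsigned int baud, unsigned char buf[4])
{
        unsigned int tmp = 12000000 * 32 / baud;

        buf[3] = 0x80;                  /* select the divisor method */
        buf[2] = 0;
        buf[1] = (tmp >= 256);
        while (tmp >= 256) {            /* shrink the mantissa, grow the exponent */
                tmp >>= 2;
                buf[1] <<= 1;
        }
        buf[0] = tmp;
}

int main(void)
{
        unsigned char buf[4];

        /* 230400 baud: tmp = 384000000 / 230400 = 1666, which encodes as
         * buf[1] = 4, buf[0] = 104 (0x68) and decodes back to
         * 384000000 / 16 / 104, roughly 230769 baud.
         */
        encode_high_baud(230400, buf);
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
}
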
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ce5221fa393a..e663921eebb6 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1056,7 +1056,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1056 if (data_direction != DMA_NONE) { 1056 if (data_direction != DMA_NONE) {
1057 ret = vhost_scsi_map_iov_to_sgl(cmd, 1057 ret = vhost_scsi_map_iov_to_sgl(cmd,
1058 &vq->iov[data_first], data_num, 1058 &vq->iov[data_first], data_num,
1059 data_direction == DMA_TO_DEVICE); 1059 data_direction == DMA_FROM_DEVICE);
1060 if (unlikely(ret)) { 1060 if (unlikely(ret)) {
1061 vq_err(vq, "Failed to map iov to sgl\n"); 1061 vq_err(vq, "Failed to map iov to sgl\n");
1062 goto err_free; 1062 goto err_free;
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index a54ccdc4d661..22ad85242e5b 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -361,37 +361,13 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
361int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) 361int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
362{ 362{
363 struct au1100fb_device *fbdev; 363 struct au1100fb_device *fbdev;
364 unsigned int len;
365 unsigned long start=0, off;
366 364
367 fbdev = to_au1100fb_device(fbi); 365 fbdev = to_au1100fb_device(fbi);
368 366
369 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
370 return -EINVAL;
371 }
372
373 start = fbdev->fb_phys & PAGE_MASK;
374 len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
375
376 off = vma->vm_pgoff << PAGE_SHIFT;
377
378 if ((vma->vm_end - vma->vm_start + off) > len) {
379 return -EINVAL;
380 }
381
382 off += start;
383 vma->vm_pgoff = off >> PAGE_SHIFT;
384
385 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 367 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
386 pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6 368 pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
387 369
388 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, 370 return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
389 vma->vm_end - vma->vm_start,
390 vma->vm_page_prot)) {
391 return -EAGAIN;
392 }
393
394 return 0;
395} 371}
396 372
397static struct fb_ops au1100fb_ops = 373static struct fb_ops au1100fb_ops =
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 301224ecc950..1d02897d17f2 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1233,34 +1233,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
1233 * method mainly to allow the use of the TLB streaming flag (CCA=6) 1233 * method mainly to allow the use of the TLB streaming flag (CCA=6)
1234 */ 1234 */
1235static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) 1235static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
1236
1237{ 1236{
1238 unsigned int len;
1239 unsigned long start=0, off;
1240 struct au1200fb_device *fbdev = info->par; 1237 struct au1200fb_device *fbdev = info->par;
1241 1238
1242 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
1243 return -EINVAL;
1244 }
1245
1246 start = fbdev->fb_phys & PAGE_MASK;
1247 len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
1248
1249 off = vma->vm_pgoff << PAGE_SHIFT;
1250
1251 if ((vma->vm_end - vma->vm_start + off) > len) {
1252 return -EINVAL;
1253 }
1254
1255 off += start;
1256 vma->vm_pgoff = off >> PAGE_SHIFT;
1257
1258 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1239 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1259 pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */ 1240 pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
1260 1241
1261 return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, 1242 return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
1262 vma->vm_end - vma->vm_start,
1263 vma->vm_page_prot);
1264} 1243}
1265 1244
1266static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata) 1245static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
diff --git a/fs/dcache.c b/fs/dcache.c
index 20532cb0b06e..ae6ebb88ceff 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -542,7 +542,7 @@ EXPORT_SYMBOL(d_drop);
542 * If ref is non-zero, then decrement the refcount too. 542 * If ref is non-zero, then decrement the refcount too.
543 * Returns dentry requiring refcount drop, or NULL if we're done. 543 * Returns dentry requiring refcount drop, or NULL if we're done.
544 */ 544 */
545static inline struct dentry * 545static struct dentry *
546dentry_kill(struct dentry *dentry, int unlock_on_failure) 546dentry_kill(struct dentry *dentry, int unlock_on_failure)
547 __releases(dentry->d_lock) 547 __releases(dentry->d_lock)
548{ 548{
@@ -630,7 +630,8 @@ repeat:
630 goto kill_it; 630 goto kill_it;
631 } 631 }
632 632
633 dentry->d_flags |= DCACHE_REFERENCED; 633 if (!(dentry->d_flags & DCACHE_REFERENCED))
634 dentry->d_flags |= DCACHE_REFERENCED;
634 dentry_lru_add(dentry); 635 dentry_lru_add(dentry);
635 636
636 dentry->d_lockref.count--; 637 dentry->d_lockref.count--;
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index c88e355f7635..000eae2782b6 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -408,7 +408,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
408 struct page *page) 408 struct page *page)
409{ 409{
410 return ecryptfs_lower_header_size(crypt_stat) + 410 return ecryptfs_lower_header_size(crypt_stat) +
411 (page->index << PAGE_CACHE_SHIFT); 411 ((loff_t)page->index << PAGE_CACHE_SHIFT);
412} 412}
413 413
414/** 414/**
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 7d52806c2119..4725a07f003c 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1149,7 +1149,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
1149 struct ecryptfs_msg_ctx *msg_ctx; 1149 struct ecryptfs_msg_ctx *msg_ctx;
1150 struct ecryptfs_message *msg = NULL; 1150 struct ecryptfs_message *msg = NULL;
1151 char *auth_tok_sig; 1151 char *auth_tok_sig;
1152 char *payload; 1152 char *payload = NULL;
1153 size_t payload_len = 0; 1153 size_t payload_len = 0;
1154 int rc; 1154 int rc;
1155 1155
@@ -1203,6 +1203,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
1203 } 1203 }
1204out: 1204out:
1205 kfree(msg); 1205 kfree(msg);
1206 kfree(payload);
1206 return rc; 1207 return rc;
1207} 1208}
1208 1209
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 473e09da7d02..810c28fb8c3c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -34,7 +34,6 @@
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35#include <linux/anon_inodes.h> 35#include <linux/anon_inodes.h>
36#include <linux/device.h> 36#include <linux/device.h>
37#include <linux/freezer.h>
38#include <asm/uaccess.h> 37#include <asm/uaccess.h>
39#include <asm/io.h> 38#include <asm/io.h>
40#include <asm/mman.h> 39#include <asm/mman.h>
@@ -1605,8 +1604,7 @@ fetch_events:
1605 } 1604 }
1606 1605
1607 spin_unlock_irqrestore(&ep->lock, flags); 1606 spin_unlock_irqrestore(&ep->lock, flags);
1608 if (!freezable_schedule_hrtimeout_range(to, slack, 1607 if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
1609 HRTIMER_MODE_ABS))
1610 timed_out = 1; 1608 timed_out = 1;
1611 1609
1612 spin_lock_irqsave(&ep->lock, flags); 1610 spin_lock_irqsave(&ep->lock, flags);
diff --git a/fs/file_table.c b/fs/file_table.c
index abdd15ad13c9..e900ca518635 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -297,7 +297,7 @@ void flush_delayed_fput(void)
297 delayed_fput(NULL); 297 delayed_fput(NULL);
298} 298}
299 299
300static DECLARE_WORK(delayed_fput_work, delayed_fput); 300static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
301 301
302void fput(struct file *file) 302void fput(struct file *file)
303{ 303{
@@ -317,7 +317,7 @@ void fput(struct file *file)
317 } 317 }
318 318
319 if (llist_add(&file->f_u.fu_llist, &delayed_fput_list)) 319 if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
320 schedule_work(&delayed_fput_work); 320 schedule_delayed_work(&delayed_fput_work, 1);
321 } 321 }
322} 322}
323 323
diff --git a/fs/select.c b/fs/select.c
index 35d4adc749d9..dfd5cb18c012 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -238,8 +238,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
238 238
239 set_current_state(state); 239 set_current_state(state);
240 if (!pwq->triggered) 240 if (!pwq->triggered)
241 rc = freezable_schedule_hrtimeout_range(expires, slack, 241 rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
242 HRTIMER_MODE_ABS);
243 __set_current_state(TASK_RUNNING); 242 __set_current_state(TASK_RUNNING);
244 243
245 /* 244 /*
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 3135c2525c76..a290157265ef 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -328,6 +328,8 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence)
328 m->read_pos = offset; 328 m->read_pos = offset;
329 retval = file->f_pos = offset; 329 retval = file->f_pos = offset;
330 } 330 }
331 } else {
332 file->f_pos = offset;
331 } 333 }
332 } 334 }
333 file->f_version = m->version; 335 file->f_version = m->version;
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 19c19a5eee29..f6c82de12541 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -34,9 +34,9 @@ struct ipc_namespace {
34 int sem_ctls[4]; 34 int sem_ctls[4];
35 int used_sems; 35 int used_sems;
36 36
37 int msg_ctlmax; 37 unsigned int msg_ctlmax;
38 int msg_ctlmnb; 38 unsigned int msg_ctlmnb;
39 int msg_ctlmni; 39 unsigned int msg_ctlmni;
40 atomic_t msg_bytes; 40 atomic_t msg_bytes;
41 atomic_t msg_hdrs; 41 atomic_t msg_hdrs;
42 int auto_msgmni; 42 int auto_msgmni;
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index f3c7c24bec1c..fbfdb9d8d3a7 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -24,7 +24,8 @@ struct netpoll {
24 struct net_device *dev; 24 struct net_device *dev;
25 char dev_name[IFNAMSIZ]; 25 char dev_name[IFNAMSIZ];
26 const char *name; 26 const char *name;
27 void (*rx_hook)(struct netpoll *, int, char *, int); 27 void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
28 int offset, int len);
28 29
29 union inet_addr local_ip, remote_ip; 30 union inet_addr local_ip, remote_ip;
30 bool ipv6; 31 bool ipv6;
@@ -41,7 +42,7 @@ struct netpoll_info {
41 unsigned long rx_flags; 42 unsigned long rx_flags;
42 spinlock_t rx_lock; 43 spinlock_t rx_lock;
43 struct semaphore dev_lock; 44 struct semaphore dev_lock;
44 struct list_head rx_np; /* netpolls that registered an rx_hook */ 45 struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
45 46
46 struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */ 47 struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
47 struct sk_buff_head txq; 48 struct sk_buff_head txq;
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index cc88172c7d9a..c74088ab103b 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -332,7 +332,7 @@ do { \
332#endif 332#endif
333 333
334#ifndef this_cpu_sub 334#ifndef this_cpu_sub
335# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val)) 335# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val))
336#endif 336#endif
337 337
338#ifndef this_cpu_inc 338#ifndef this_cpu_inc
@@ -418,7 +418,7 @@ do { \
418# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) 418# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
419#endif 419#endif
420 420
421#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val)) 421#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val))
422#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) 422#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
423#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) 423#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
424 424
@@ -586,7 +586,7 @@ do { \
586#endif 586#endif
587 587
588#ifndef __this_cpu_sub 588#ifndef __this_cpu_sub
589# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val)) 589# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val))
590#endif 590#endif
591 591
592#ifndef __this_cpu_inc 592#ifndef __this_cpu_inc
@@ -668,7 +668,7 @@ do { \
668 __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val) 668 __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
669#endif 669#endif
670 670
671#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val)) 671#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
672#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) 672#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
673#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) 673#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
674 674
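
Note: the this_cpu_sub() hunks above cast val to typeof(pcp) before negating it; without the cast, negating an unsigned val produces a huge positive addend instead of a subtraction. A stand-alone illustration of the promotion problem (plain C, the variable names are mine, not the kernel macros):

#include <stdio.h>

int main(void)
{
        unsigned long counter = 100;    /* stands in for the per-cpu variable */
        unsigned int val = 7;           /* an unsigned amount to subtract */

        /* Without a cast, -val is the unsigned int value 0xfffffff9, which is
         * then zero-extended to unsigned long: the "subtraction" adds ~4 billion.
         */
        unsigned long broken = counter + -val;

        /* Casting to the counter's type first makes the negation wrap in the
         * full width, so adding it really subtracts 7.
         */
        unsigned long fixed = counter + -(unsigned long)val;

        printf("broken = %lu\n", broken);       /* 4294967389 on LP64 */
        printf("fixed  = %lu\n", fixed);        /* 93 */
        return 0;
}
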
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 6738f3409a6f..2182525e4d74 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -165,6 +165,7 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
165static inline void rt6_clean_expires(struct rt6_info *rt) 165static inline void rt6_clean_expires(struct rt6_info *rt)
166{ 166{
167 rt->rt6i_flags &= ~RTF_EXPIRES; 167 rt->rt6i_flags &= ~RTF_EXPIRES;
168 rt->dst.expires = 0;
168} 169}
169 170
170static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires) 171static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index aef8fc354025..da9cc0f05c93 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -144,7 +144,7 @@ TRACE_EVENT(target_sequencer_start,
144 ), 144 ),
145 145
146 TP_fast_assign( 146 TP_fast_assign(
147 __entry->unpacked_lun = cmd->se_lun->unpacked_lun; 147 __entry->unpacked_lun = cmd->orig_fe_lun;
148 __entry->opcode = cmd->t_task_cdb[0]; 148 __entry->opcode = cmd->t_task_cdb[0];
149 __entry->data_length = cmd->data_length; 149 __entry->data_length = cmd->data_length;
150 __entry->task_attribute = cmd->sam_task_attr; 150 __entry->task_attribute = cmd->sam_task_attr;
@@ -182,7 +182,7 @@ TRACE_EVENT(target_cmd_complete,
182 ), 182 ),
183 183
184 TP_fast_assign( 184 TP_fast_assign(
185 __entry->unpacked_lun = cmd->se_lun->unpacked_lun; 185 __entry->unpacked_lun = cmd->orig_fe_lun;
186 __entry->opcode = cmd->t_task_cdb[0]; 186 __entry->opcode = cmd->t_task_cdb[0];
187 __entry->data_length = cmd->data_length; 187 __entry->data_length = cmd->data_length;
188 __entry->task_attribute = cmd->sam_task_attr; 188 __entry->task_attribute = cmd->sam_task_attr;
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 009a655a5d35..2fc1602e23bb 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -456,13 +456,15 @@ struct perf_event_mmap_page {
456 /* 456 /*
457 * Control data for the mmap() data buffer. 457 * Control data for the mmap() data buffer.
458 * 458 *
459 * User-space reading the @data_head value should issue an rmb(), on 459 * User-space reading the @data_head value should issue an smp_rmb(),
460 * SMP capable platforms, after reading this value -- see 460 * after reading this value.
461 * perf_event_wakeup().
462 * 461 *
463 * When the mapping is PROT_WRITE the @data_tail value should be 462 * When the mapping is PROT_WRITE the @data_tail value should be
464 * written by userspace to reflect the last read data. In this case 463 * written by userspace to reflect the last read data, after issuing
465 * the kernel will not over-write unread data. 464 * an smp_mb() to separate the data read from the ->data_tail store.
465 * In this case the kernel will not over-write unread data.
466 *
467 * See perf_output_put_handle() for the data ordering.
466 */ 468 */
467 __u64 data_head; /* head in the data section */ 469 __u64 data_head; /* head in the data section */
468 __u64 data_tail; /* user-space written tail */ 470 __u64 data_tail; /* user-space written tail */
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 130dfece27ac..b0e99deb6d05 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -62,7 +62,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
62 return err; 62 return err;
63} 63}
64 64
65static int proc_ipc_callback_dointvec(ctl_table *table, int write, 65static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
66 void __user *buffer, size_t *lenp, loff_t *ppos) 66 void __user *buffer, size_t *lenp, loff_t *ppos)
67{ 67{
68 struct ctl_table ipc_table; 68 struct ctl_table ipc_table;
@@ -72,7 +72,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
72 memcpy(&ipc_table, table, sizeof(ipc_table)); 72 memcpy(&ipc_table, table, sizeof(ipc_table));
73 ipc_table.data = get_ipc(table); 73 ipc_table.data = get_ipc(table);
74 74
75 rc = proc_dointvec(&ipc_table, write, buffer, lenp, ppos); 75 rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
76 76
77 if (write && !rc && lenp_bef == *lenp) 77 if (write && !rc && lenp_bef == *lenp)
78 /* 78 /*
@@ -152,15 +152,13 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
152#define proc_ipc_dointvec NULL 152#define proc_ipc_dointvec NULL
153#define proc_ipc_dointvec_minmax NULL 153#define proc_ipc_dointvec_minmax NULL
154#define proc_ipc_dointvec_minmax_orphans NULL 154#define proc_ipc_dointvec_minmax_orphans NULL
155#define proc_ipc_callback_dointvec NULL 155#define proc_ipc_callback_dointvec_minmax NULL
156#define proc_ipcauto_dointvec_minmax NULL 156#define proc_ipcauto_dointvec_minmax NULL
157#endif 157#endif
158 158
159static int zero; 159static int zero;
160static int one = 1; 160static int one = 1;
161#ifdef CONFIG_CHECKPOINT_RESTORE
162static int int_max = INT_MAX; 161static int int_max = INT_MAX;
163#endif
164 162
165static struct ctl_table ipc_kern_table[] = { 163static struct ctl_table ipc_kern_table[] = {
166 { 164 {
@@ -198,21 +196,27 @@ static struct ctl_table ipc_kern_table[] = {
198 .data = &init_ipc_ns.msg_ctlmax, 196 .data = &init_ipc_ns.msg_ctlmax,
199 .maxlen = sizeof (init_ipc_ns.msg_ctlmax), 197 .maxlen = sizeof (init_ipc_ns.msg_ctlmax),
200 .mode = 0644, 198 .mode = 0644,
201 .proc_handler = proc_ipc_dointvec, 199 .proc_handler = proc_ipc_dointvec_minmax,
200 .extra1 = &zero,
201 .extra2 = &int_max,
202 }, 202 },
203 { 203 {
204 .procname = "msgmni", 204 .procname = "msgmni",
205 .data = &init_ipc_ns.msg_ctlmni, 205 .data = &init_ipc_ns.msg_ctlmni,
206 .maxlen = sizeof (init_ipc_ns.msg_ctlmni), 206 .maxlen = sizeof (init_ipc_ns.msg_ctlmni),
207 .mode = 0644, 207 .mode = 0644,
208 .proc_handler = proc_ipc_callback_dointvec, 208 .proc_handler = proc_ipc_callback_dointvec_minmax,
209 .extra1 = &zero,
210 .extra2 = &int_max,
209 }, 211 },
210 { 212 {
211 .procname = "msgmnb", 213 .procname = "msgmnb",
212 .data = &init_ipc_ns.msg_ctlmnb, 214 .data = &init_ipc_ns.msg_ctlmnb,
213 .maxlen = sizeof (init_ipc_ns.msg_ctlmnb), 215 .maxlen = sizeof (init_ipc_ns.msg_ctlmnb),
214 .mode = 0644, 216 .mode = 0644,
215 .proc_handler = proc_ipc_dointvec, 217 .proc_handler = proc_ipc_dointvec_minmax,
218 .extra1 = &zero,
219 .extra2 = &int_max,
216 }, 220 },
217 { 221 {
218 .procname = "sem", 222 .procname = "sem",
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d49a9d29334c..953c14348375 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6767,6 +6767,10 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
6767 if (ret) 6767 if (ret)
6768 return -EFAULT; 6768 return -EFAULT;
6769 6769
6770 /* disabled for now */
6771 if (attr->mmap2)
6772 return -EINVAL;
6773
6770 if (attr->__reserved_1) 6774 if (attr->__reserved_1)
6771 return -EINVAL; 6775 return -EINVAL;
6772 6776
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index cd55144270b5..9c2ddfbf4525 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -87,10 +87,31 @@ again:
87 goto out; 87 goto out;
88 88
89 /* 89 /*
90 * Publish the known good head. Rely on the full barrier implied 90 * Since the mmap() consumer (userspace) can run on a different CPU:
91 * by atomic_dec_and_test() order the rb->head read and this 91 *
92 * write. 92 * kernel user
93 *
94 * READ ->data_tail READ ->data_head
95 * smp_mb() (A) smp_rmb() (C)
96 * WRITE $data READ $data
97 * smp_wmb() (B) smp_mb() (D)
98 * STORE ->data_head WRITE ->data_tail
99 *
100 * Where A pairs with D, and B pairs with C.
101 *
102 * I don't think A needs to be a full barrier because we won't in fact
103 * write data until we see the store from userspace. So we simply don't
104 * issue the data WRITE until we observe it. Be conservative for now.
105 *
106 * OTOH, D needs to be a full barrier since it separates the data READ
107 * from the tail WRITE.
108 *
109 * For B a WMB is sufficient since it separates two WRITEs, and for C
110 * an RMB is sufficient since it separates two READs.
111 *
112 * See perf_output_begin().
93 */ 113 */
114 smp_wmb();
94 rb->user_page->data_head = head; 115 rb->user_page->data_head = head;
95 116
96 /* 117 /*
@@ -154,9 +175,11 @@ int perf_output_begin(struct perf_output_handle *handle,
154 * Userspace could choose to issue a mb() before updating the 175 * Userspace could choose to issue a mb() before updating the
155 * tail pointer. So that all reads will be completed before the 176 * tail pointer. So that all reads will be completed before the
156 * write is issued. 177 * write is issued.
178 *
179 * See perf_output_put_handle().
157 */ 180 */
158 tail = ACCESS_ONCE(rb->user_page->data_tail); 181 tail = ACCESS_ONCE(rb->user_page->data_tail);
159 smp_rmb(); 182 smp_mb();
160 offset = head = local_read(&rb->head); 183 offset = head = local_read(&rb->head);
161 head += size; 184 head += size;
162 if (unlikely(!perf_output_space(rb, tail, offset, head))) 185 if (unlikely(!perf_output_space(rb, tail, offset, head)))
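
Note: the ordering table added above pairs the kernel's smp_wmb() before the data_head store (B/C) with a full barrier on the user side between reading records and publishing data_tail (A/D). A hedged sketch of the matching user-space consumer follows; record_size() is a made-up record parser, plain accesses stand in for ACCESS_ONCE-style loads, and both fences are conservatively mapped to __sync_synchronize().

#include <linux/perf_event.h>   /* struct perf_event_mmap_page, __u64 */

#define rmb()   __sync_synchronize()    /* (C) order the data_head read vs. data reads */
#define mb()    __sync_synchronize()    /* (D) order data reads vs. the data_tail store */

extern __u64 record_size(void *data, __u64 off);        /* hypothetical helper */

static void drain_ring(struct perf_event_mmap_page *pg, void *data, __u64 mask)
{
        __u64 tail = pg->data_tail;
        __u64 head = pg->data_head;     /* READ ->data_head              (C) */

        rmb();

        while (tail != head) {
                /* READ $data: consume one record at data + (tail & mask) */
                tail += record_size(data, tail & mask);
        }

        mb();                           /* separate the reads from the tail store (D) */
        pg->data_tail = tail;           /* WRITE ->data_tail */
}
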
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 6d647aedffea..d24105b1b794 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -410,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
410static __always_inline int __sched 410static __always_inline int __sched
411__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, 411__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
412 struct lockdep_map *nest_lock, unsigned long ip, 412 struct lockdep_map *nest_lock, unsigned long ip,
413 struct ww_acquire_ctx *ww_ctx) 413 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
414{ 414{
415 struct task_struct *task = current; 415 struct task_struct *task = current;
416 struct mutex_waiter waiter; 416 struct mutex_waiter waiter;
@@ -450,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
450 struct task_struct *owner; 450 struct task_struct *owner;
451 struct mspin_node node; 451 struct mspin_node node;
452 452
453 if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) { 453 if (use_ww_ctx && ww_ctx->acquired > 0) {
454 struct ww_mutex *ww; 454 struct ww_mutex *ww;
455 455
456 ww = container_of(lock, struct ww_mutex, base); 456 ww = container_of(lock, struct ww_mutex, base);
@@ -480,7 +480,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
480 if ((atomic_read(&lock->count) == 1) && 480 if ((atomic_read(&lock->count) == 1) &&
481 (atomic_cmpxchg(&lock->count, 1, 0) == 1)) { 481 (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
482 lock_acquired(&lock->dep_map, ip); 482 lock_acquired(&lock->dep_map, ip);
483 if (!__builtin_constant_p(ww_ctx == NULL)) { 483 if (use_ww_ctx) {
484 struct ww_mutex *ww; 484 struct ww_mutex *ww;
485 ww = container_of(lock, struct ww_mutex, base); 485 ww = container_of(lock, struct ww_mutex, base);
486 486
@@ -551,7 +551,7 @@ slowpath:
551 goto err; 551 goto err;
552 } 552 }
553 553
554 if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) { 554 if (use_ww_ctx && ww_ctx->acquired > 0) {
555 ret = __mutex_lock_check_stamp(lock, ww_ctx); 555 ret = __mutex_lock_check_stamp(lock, ww_ctx);
556 if (ret) 556 if (ret)
557 goto err; 557 goto err;
@@ -575,7 +575,7 @@ skip_wait:
575 lock_acquired(&lock->dep_map, ip); 575 lock_acquired(&lock->dep_map, ip);
576 mutex_set_owner(lock); 576 mutex_set_owner(lock);
577 577
578 if (!__builtin_constant_p(ww_ctx == NULL)) { 578 if (use_ww_ctx) {
579 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 579 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
580 struct mutex_waiter *cur; 580 struct mutex_waiter *cur;
581 581
@@ -615,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
615{ 615{
616 might_sleep(); 616 might_sleep();
617 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 617 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
618 subclass, NULL, _RET_IP_, NULL); 618 subclass, NULL, _RET_IP_, NULL, 0);
619} 619}
620 620
621EXPORT_SYMBOL_GPL(mutex_lock_nested); 621EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -625,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
625{ 625{
626 might_sleep(); 626 might_sleep();
627 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 627 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
628 0, nest, _RET_IP_, NULL); 628 0, nest, _RET_IP_, NULL, 0);
629} 629}
630 630
631EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock); 631EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -635,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
635{ 635{
636 might_sleep(); 636 might_sleep();
637 return __mutex_lock_common(lock, TASK_KILLABLE, 637 return __mutex_lock_common(lock, TASK_KILLABLE,
638 subclass, NULL, _RET_IP_, NULL); 638 subclass, NULL, _RET_IP_, NULL, 0);
639} 639}
640EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); 640EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
641 641
@@ -644,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
644{ 644{
645 might_sleep(); 645 might_sleep();
646 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 646 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
647 subclass, NULL, _RET_IP_, NULL); 647 subclass, NULL, _RET_IP_, NULL, 0);
648} 648}
649 649
650EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); 650EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -682,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
682 682
683 might_sleep(); 683 might_sleep();
684 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 684 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
685 0, &ctx->dep_map, _RET_IP_, ctx); 685 0, &ctx->dep_map, _RET_IP_, ctx, 1);
686 if (!ret && ctx->acquired > 1) 686 if (!ret && ctx->acquired > 1)
687 return ww_mutex_deadlock_injection(lock, ctx); 687 return ww_mutex_deadlock_injection(lock, ctx);
688 688
@@ -697,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
697 697
698 might_sleep(); 698 might_sleep();
699 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 699 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
700 0, &ctx->dep_map, _RET_IP_, ctx); 700 0, &ctx->dep_map, _RET_IP_, ctx, 1);
701 701
702 if (!ret && ctx->acquired > 1) 702 if (!ret && ctx->acquired > 1)
703 return ww_mutex_deadlock_injection(lock, ctx); 703 return ww_mutex_deadlock_injection(lock, ctx);
@@ -809,28 +809,28 @@ __mutex_lock_slowpath(atomic_t *lock_count)
809 struct mutex *lock = container_of(lock_count, struct mutex, count); 809 struct mutex *lock = container_of(lock_count, struct mutex, count);
810 810
811 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, 811 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
812 NULL, _RET_IP_, NULL); 812 NULL, _RET_IP_, NULL, 0);
813} 813}
814 814
815static noinline int __sched 815static noinline int __sched
816__mutex_lock_killable_slowpath(struct mutex *lock) 816__mutex_lock_killable_slowpath(struct mutex *lock)
817{ 817{
818 return __mutex_lock_common(lock, TASK_KILLABLE, 0, 818 return __mutex_lock_common(lock, TASK_KILLABLE, 0,
819 NULL, _RET_IP_, NULL); 819 NULL, _RET_IP_, NULL, 0);
820} 820}
821 821
822static noinline int __sched 822static noinline int __sched
823__mutex_lock_interruptible_slowpath(struct mutex *lock) 823__mutex_lock_interruptible_slowpath(struct mutex *lock)
824{ 824{
825 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, 825 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
826 NULL, _RET_IP_, NULL); 826 NULL, _RET_IP_, NULL, 0);
827} 827}
828 828
829static noinline int __sched 829static noinline int __sched
830__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 830__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
831{ 831{
832 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0, 832 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
833 NULL, _RET_IP_, ctx); 833 NULL, _RET_IP_, ctx, 1);
834} 834}
835 835
836static noinline int __sched 836static noinline int __sched
@@ -838,7 +838,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
838 struct ww_acquire_ctx *ctx) 838 struct ww_acquire_ctx *ctx)
839{ 839{
840 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0, 840 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
841 NULL, _RET_IP_, ctx); 841 NULL, _RET_IP_, ctx, 1);
842} 842}
843 843
844#endif 844#endif
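
Note: the mutex hunks above replace the __builtin_constant_p(ww_ctx == NULL) test with an explicit const bool use_ww_ctx argument; since __mutex_lock_common() is __always_inline and every caller passes a literal 0 or 1, the compiler can still drop the unused wait/wound branches. A toy stand-alone example of the pattern (the names are mine, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct ctx { int acquired; };

/* With "use_ctx" constant at every call site, the dead branch is eliminated
 * after inlining, so the plain fast path carries no extra checks.
 */
static inline int lock_common(struct ctx *c, const bool use_ctx)
{
        if (use_ctx && c->acquired > 0)
                return -1;              /* ctx-specific handling */
        return 0;                       /* common path */
}

static int lock_plain(void)              { return lock_common(NULL, false); }
static int lock_with_ctx(struct ctx *c)  { return lock_common(c, true); }

int main(void)
{
        struct ctx c = { .acquired = 1 };

        printf("%d %d\n", lock_plain(), lock_with_ctx(&c));
        return 0;
}
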
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c9c759d5a15c..0121dab83f43 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -846,7 +846,7 @@ static int software_resume(void)
846 goto Finish; 846 goto Finish;
847} 847}
848 848
849late_initcall(software_resume); 849late_initcall_sync(software_resume);
850 850
851 851
852static const char * const hibernation_modes[] = { 852static const char * const hibernation_modes[] = {
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 38959c866789..662c5798a685 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -33,29 +33,64 @@ struct ce_unbind {
33 int res; 33 int res;
34}; 34};
35 35
36/** 36static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
37 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds 37 bool ismax)
38 * @latch: value to convert
39 * @evt: pointer to clock event device descriptor
40 *
41 * Math helper, returns latch value converted to nanoseconds (bound checked)
42 */
43u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
44{ 38{
45 u64 clc = (u64) latch << evt->shift; 39 u64 clc = (u64) latch << evt->shift;
40 u64 rnd;
46 41
47 if (unlikely(!evt->mult)) { 42 if (unlikely(!evt->mult)) {
48 evt->mult = 1; 43 evt->mult = 1;
49 WARN_ON(1); 44 WARN_ON(1);
50 } 45 }
46 rnd = (u64) evt->mult - 1;
47
48 /*
49 * Upper bound sanity check. If the backwards conversion is
50 * not equal to latch, we know that the above shift overflowed.
51 */
52 if ((clc >> evt->shift) != (u64)latch)
53 clc = ~0ULL;
54
55 /*
56 * Scaled math oddities:
57 *
58 * For mult <= (1 << shift) we can safely add mult - 1 to
59 * prevent integer rounding loss. So the backwards conversion
60 * from nsec to device ticks will be correct.
61 *
62 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
63 * need to be careful. Adding mult - 1 will result in a value
64 * which when converted back to device ticks can be larger
65 * than latch by up to (mult - 1) >> shift. For the min_delta
66 * calculation we still want to apply this in order to stay
67 * above the minimum device ticks limit. For the upper limit
68 * we would end up with a latch value larger than the upper
69 * limit of the device, so we omit the add to stay below the
70 * device upper boundary.
71 *
72 * Also omit the add if it would overflow the u64 boundary.
73 */
74 if ((~0ULL - clc > rnd) &&
75 (!ismax || evt->mult <= (1U << evt->shift)))
76 clc += rnd;
51 77
52 do_div(clc, evt->mult); 78 do_div(clc, evt->mult);
53 if (clc < 1000)
54 clc = 1000;
55 if (clc > KTIME_MAX)
56 clc = KTIME_MAX;
57 79
58 return clc; 80 /* Deltas less than 1usec are pointless noise */
81 return clc > 1000 ? clc : 1000;
82}
83
84/**
85 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
86 * @latch: value to convert
87 * @evt: pointer to clock event device descriptor
88 *
89 * Math helper, returns latch value converted to nanoseconds (bound checked)
90 */
91u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
92{
93 return cev_delta2ns(latch, evt, false);
59} 94}
60EXPORT_SYMBOL_GPL(clockevent_delta2ns); 95EXPORT_SYMBOL_GPL(clockevent_delta2ns);
61 96
@@ -380,8 +415,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq)
380 sec = 600; 415 sec = 600;
381 416
382 clockevents_calc_mult_shift(dev, freq, sec); 417 clockevents_calc_mult_shift(dev, freq, sec);
383 dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev); 418 dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
384 dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev); 419 dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
385} 420}
386 421
387/** 422/**
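
Note: cev_delta2ns() above converts device ticks to nanoseconds as clc = (latch << shift) / mult and adds mult - 1 before dividing (omitted when it would overflow or, for the upper bound, when mult > 1 << shift) so that converting the result back to ticks does not land below latch. A small numeric illustration with invented mult/shift/latch values, not taken from any real clock event device:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Illustrative device: ns = ticks * mult >> shift (values invented) */
        uint64_t mult = 429497, shift = 32, latch = 10;

        uint64_t clc     = (latch << shift) / mult;                     /* truncates */
        uint64_t clc_rnd = ((latch << shift) + mult - 1) / mult;        /* rounds up */

        /* Convert back to ticks the way the tick code programs the device */
        uint64_t back     = clc * mult >> shift;
        uint64_t back_rnd = clc_rnd * mult >> shift;

        printf("plain:   %llu ns -> %llu ticks (can fall below latch)\n",
               (unsigned long long)clc, (unsigned long long)back);
        printf("rounded: %llu ns -> %llu ticks (stays >= latch)\n",
               (unsigned long long)clc_rnd, (unsigned long long)back_rnd);
        return 0;
}
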
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 06344d986eb9..094f3152ec2b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -983,7 +983,7 @@ config DEBUG_KOBJECT
983 983
984config DEBUG_KOBJECT_RELEASE 984config DEBUG_KOBJECT_RELEASE
985 bool "kobject release debugging" 985 bool "kobject release debugging"
986 depends on DEBUG_KERNEL 986 depends on DEBUG_OBJECTS_TIMERS
987 help 987 help
988 kobjects are reference counted objects. This means that their 988 kobjects are reference counted objects. This means that their
989 last reference count put is not predictable, and the kobject can 989 last reference count put is not predictable, and the kobject can
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a685c8a79578..d16fa295ae1d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -577,7 +577,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
577 miter->__offset += miter->consumed; 577 miter->__offset += miter->consumed;
578 miter->__remaining -= miter->consumed; 578 miter->__remaining -= miter->consumed;
579 579
580 if (miter->__flags & SG_MITER_TO_SG) 580 if ((miter->__flags & SG_MITER_TO_SG) &&
581 !PageSlab(miter->page))
581 flush_kernel_dcache_page(miter->page); 582 flush_kernel_dcache_page(miter->page);
582 583
583 if (miter->__flags & SG_MITER_ATOMIC) { 584 if (miter->__flags & SG_MITER_ATOMIC) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 610e3df2768a..cca80d96e509 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1278,64 +1278,90 @@ out:
1278int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, 1278int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1279 unsigned long addr, pmd_t pmd, pmd_t *pmdp) 1279 unsigned long addr, pmd_t pmd, pmd_t *pmdp)
1280{ 1280{
1281 struct anon_vma *anon_vma = NULL;
1281 struct page *page; 1282 struct page *page;
1282 unsigned long haddr = addr & HPAGE_PMD_MASK; 1283 unsigned long haddr = addr & HPAGE_PMD_MASK;
1284 int page_nid = -1, this_nid = numa_node_id();
1283 int target_nid; 1285 int target_nid;
1284 int current_nid = -1; 1286 bool page_locked;
1285 bool migrated; 1287 bool migrated = false;
1286 1288
1287 spin_lock(&mm->page_table_lock); 1289 spin_lock(&mm->page_table_lock);
1288 if (unlikely(!pmd_same(pmd, *pmdp))) 1290 if (unlikely(!pmd_same(pmd, *pmdp)))
1289 goto out_unlock; 1291 goto out_unlock;
1290 1292
1291 page = pmd_page(pmd); 1293 page = pmd_page(pmd);
1292 get_page(page); 1294 page_nid = page_to_nid(page);
1293 current_nid = page_to_nid(page);
1294 count_vm_numa_event(NUMA_HINT_FAULTS); 1295 count_vm_numa_event(NUMA_HINT_FAULTS);
1295 if (current_nid == numa_node_id()) 1296 if (page_nid == this_nid)
1296 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 1297 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
1297 1298
1299 /*
1300 * Acquire the page lock to serialise THP migrations but avoid dropping
1301 * page_table_lock if at all possible
1302 */
1303 page_locked = trylock_page(page);
1298 target_nid = mpol_misplaced(page, vma, haddr); 1304 target_nid = mpol_misplaced(page, vma, haddr);
1299 if (target_nid == -1) { 1305 if (target_nid == -1) {
1300 put_page(page); 1306 /* If the page was locked, there are no parallel migrations */
1301 goto clear_pmdnuma; 1307 if (page_locked)
1308 goto clear_pmdnuma;
1309
1310 /*
1311 * Otherwise wait for potential migrations and retry. We do
1312 * relock and check_same as the page may no longer be mapped.
1313 * As the fault is being retried, do not account for it.
1314 */
1315 spin_unlock(&mm->page_table_lock);
1316 wait_on_page_locked(page);
1317 page_nid = -1;
1318 goto out;
1302 } 1319 }
1303 1320
1304 /* Acquire the page lock to serialise THP migrations */ 1321 /* Page is misplaced, serialise migrations and parallel THP splits */
1322 get_page(page);
1305 spin_unlock(&mm->page_table_lock); 1323 spin_unlock(&mm->page_table_lock);
1306 lock_page(page); 1324 if (!page_locked)
1325 lock_page(page);
1326 anon_vma = page_lock_anon_vma_read(page);
1307 1327
1308 /* Confirm the PTE did not change while locked */ 1328 /* Confirm the PTE did not change while locked */
1309 spin_lock(&mm->page_table_lock); 1329 spin_lock(&mm->page_table_lock);
1310 if (unlikely(!pmd_same(pmd, *pmdp))) { 1330 if (unlikely(!pmd_same(pmd, *pmdp))) {
1311 unlock_page(page); 1331 unlock_page(page);
1312 put_page(page); 1332 put_page(page);
1333 page_nid = -1;
1313 goto out_unlock; 1334 goto out_unlock;
1314 } 1335 }
1315 spin_unlock(&mm->page_table_lock);
1316 1336
1317 /* Migrate the THP to the requested node */ 1337 /*
1338 * Migrate the THP to the requested node, returns with page unlocked
1339 * and pmd_numa cleared.
1340 */
1341 spin_unlock(&mm->page_table_lock);
1318 migrated = migrate_misplaced_transhuge_page(mm, vma, 1342 migrated = migrate_misplaced_transhuge_page(mm, vma,
1319 pmdp, pmd, addr, page, target_nid); 1343 pmdp, pmd, addr, page, target_nid);
1320 if (!migrated) 1344 if (migrated)
1321 goto check_same; 1345 page_nid = target_nid;
1322
1323 task_numa_fault(target_nid, HPAGE_PMD_NR, true);
1324 return 0;
1325 1346
1326check_same: 1347 goto out;
1327 spin_lock(&mm->page_table_lock);
1328 if (unlikely(!pmd_same(pmd, *pmdp)))
1329 goto out_unlock;
1330clear_pmdnuma: 1348clear_pmdnuma:
1349 BUG_ON(!PageLocked(page));
1331 pmd = pmd_mknonnuma(pmd); 1350 pmd = pmd_mknonnuma(pmd);
1332 set_pmd_at(mm, haddr, pmdp, pmd); 1351 set_pmd_at(mm, haddr, pmdp, pmd);
1333 VM_BUG_ON(pmd_numa(*pmdp)); 1352 VM_BUG_ON(pmd_numa(*pmdp));
1334 update_mmu_cache_pmd(vma, addr, pmdp); 1353 update_mmu_cache_pmd(vma, addr, pmdp);
1354 unlock_page(page);
1335out_unlock: 1355out_unlock:
1336 spin_unlock(&mm->page_table_lock); 1356 spin_unlock(&mm->page_table_lock);
1337 if (current_nid != -1) 1357
1338 task_numa_fault(current_nid, HPAGE_PMD_NR, false); 1358out:
1359 if (anon_vma)
1360 page_unlock_anon_vma_read(anon_vma);
1361
1362 if (page_nid != -1)
1363 task_numa_fault(page_nid, HPAGE_PMD_NR, migrated);
1364
1339 return 0; 1365 return 0;
1340} 1366}
1341 1367
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 72467914b856..72f9decb0104 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -81,8 +81,9 @@ restart:
81 * decrement nr_to_walk first so that we don't livelock if we 81 * decrement nr_to_walk first so that we don't livelock if we
82 * get stuck on large numbers of LRU_RETRY items 82 * get stuck on large numbers of LRU_RETRY items
83 */ 83 */
84 if (--(*nr_to_walk) == 0) 84 if (!*nr_to_walk)
85 break; 85 break;
86 --*nr_to_walk;
86 87
87 ret = isolate(item, &nlru->lock, cb_arg); 88 ret = isolate(item, &nlru->lock, cb_arg);
88 switch (ret) { 89 switch (ret) {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9c9c685e4ddc..665dcd7abfff 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -54,6 +54,7 @@
54#include <linux/page_cgroup.h> 54#include <linux/page_cgroup.h>
55#include <linux/cpu.h> 55#include <linux/cpu.h>
56#include <linux/oom.h> 56#include <linux/oom.h>
57#include <linux/lockdep.h>
57#include "internal.h" 58#include "internal.h"
58#include <net/sock.h> 59#include <net/sock.h>
59#include <net/ip.h> 60#include <net/ip.h>
@@ -2046,6 +2047,12 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
2046 return total; 2047 return total;
2047} 2048}
2048 2049
2050#ifdef CONFIG_LOCKDEP
2051static struct lockdep_map memcg_oom_lock_dep_map = {
2052 .name = "memcg_oom_lock",
2053};
2054#endif
2055
2049static DEFINE_SPINLOCK(memcg_oom_lock); 2056static DEFINE_SPINLOCK(memcg_oom_lock);
2050 2057
2051/* 2058/*
@@ -2083,7 +2090,8 @@ static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
2083 } 2090 }
2084 iter->oom_lock = false; 2091 iter->oom_lock = false;
2085 } 2092 }
2086 } 2093 } else
2094 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
2087 2095
2088 spin_unlock(&memcg_oom_lock); 2096 spin_unlock(&memcg_oom_lock);
2089 2097
@@ -2095,6 +2103,7 @@ static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
2095 struct mem_cgroup *iter; 2103 struct mem_cgroup *iter;
2096 2104
2097 spin_lock(&memcg_oom_lock); 2105 spin_lock(&memcg_oom_lock);
2106 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
2098 for_each_mem_cgroup_tree(iter, memcg) 2107 for_each_mem_cgroup_tree(iter, memcg)
2099 iter->oom_lock = false; 2108 iter->oom_lock = false;
2100 spin_unlock(&memcg_oom_lock); 2109 spin_unlock(&memcg_oom_lock);
@@ -2765,10 +2774,10 @@ done:
2765 *ptr = memcg; 2774 *ptr = memcg;
2766 return 0; 2775 return 0;
2767nomem: 2776nomem:
2768 *ptr = NULL; 2777 if (!(gfp_mask & __GFP_NOFAIL)) {
2769 if (gfp_mask & __GFP_NOFAIL) 2778 *ptr = NULL;
2770 return 0; 2779 return -ENOMEM;
2771 return -ENOMEM; 2780 }
2772bypass: 2781bypass:
2773 *ptr = root_mem_cgroup; 2782 *ptr = root_mem_cgroup;
2774 return -EINTR; 2783 return -EINTR;
@@ -3773,8 +3782,7 @@ void mem_cgroup_move_account_page_stat(struct mem_cgroup *from,
3773{ 3782{
3774 /* Update stat data for mem_cgroup */ 3783 /* Update stat data for mem_cgroup */
3775 preempt_disable(); 3784 preempt_disable();
3776 WARN_ON_ONCE(from->stat->count[idx] < nr_pages); 3785 __this_cpu_sub(from->stat->count[idx], nr_pages);
3777 __this_cpu_add(from->stat->count[idx], -nr_pages);
3778 __this_cpu_add(to->stat->count[idx], nr_pages); 3786 __this_cpu_add(to->stat->count[idx], nr_pages);
3779 preempt_enable(); 3787 preempt_enable();
3780} 3788}
@@ -4950,31 +4958,18 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
4950 } while (usage > 0); 4958 } while (usage > 0);
4951} 4959}
4952 4960
4953/*
4954 * This mainly exists for tests during the setting of set of use_hierarchy.
4955 * Since this is the very setting we are changing, the current hierarchy value
4956 * is meaningless
4957 */
4958static inline bool __memcg_has_children(struct mem_cgroup *memcg)
4959{
4960 struct cgroup_subsys_state *pos;
4961
4962 /* bounce at first found */
4963 css_for_each_child(pos, &memcg->css)
4964 return true;
4965 return false;
4966}
4967
4968/*
4969 * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
4970 * to be already dead (as in mem_cgroup_force_empty, for instance). This is
4971 * from mem_cgroup_count_children(), in the sense that we don't really care how
4972 * many children we have; we only need to know if we have any. It also counts
4973 * any memcg without hierarchy as infertile.
4974 */
4975static inline bool memcg_has_children(struct mem_cgroup *memcg) 4961static inline bool memcg_has_children(struct mem_cgroup *memcg)
4976{ 4962{
4977 return memcg->use_hierarchy && __memcg_has_children(memcg); 4963 lockdep_assert_held(&memcg_create_mutex);
4964 /*
4965 * The lock does not prevent addition or deletion to the list
4966 * of children, but it prevents a new child from being
4967 * initialized based on this parent in css_online(), so it's
4968 * enough to decide whether hierarchically inherited
4969 * attributes can still be changed or not.
4970 */
4971 return memcg->use_hierarchy &&
4972 !list_empty(&memcg->css.cgroup->children);
4978} 4973}
4979 4974
4980/* 4975/*
@@ -5054,7 +5049,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
5054 */ 5049 */
5055 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 5050 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
5056 (val == 1 || val == 0)) { 5051 (val == 1 || val == 0)) {
5057 if (!__memcg_has_children(memcg)) 5052 if (list_empty(&memcg->css.cgroup->children))
5058 memcg->use_hierarchy = val; 5053 memcg->use_hierarchy = val;
5059 else 5054 else
5060 retval = -EBUSY; 5055 retval = -EBUSY;
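The memcontrol.c hunks annotate the hand-rolled, hierarchical memcg OOM lock with a static struct lockdep_map, pairing mutex_acquire() on a successful trylock with mutex_release() on unlock so lockdep can track the pseudo-lock. A stripped-down sketch of the same annotation pattern for a generic flag-based lock follows; the names (flag_lock, guard) are illustrative and are not the memcg code:

	#ifdef CONFIG_LOCKDEP
	static struct lockdep_map flag_lock_dep_map = {
		.name = "flag_lock",
	};
	#endif

	static DEFINE_SPINLOCK(guard);
	static bool flag_locked;

	static bool flag_trylock(void)
	{
		bool taken;

		spin_lock(&guard);
		taken = !flag_locked;
		if (taken) {
			flag_locked = true;
			/* record the pseudo-lock as held (trylock semantics) */
			mutex_acquire(&flag_lock_dep_map, 0, 1, _RET_IP_);
		}
		spin_unlock(&guard);
		return taken;
	}

	static void flag_unlock(void)
	{
		spin_lock(&guard);
		mutex_release(&flag_lock_dep_map, 1, _RET_IP_);
		flag_locked = false;
		spin_unlock(&guard);
	}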
diff --git a/mm/memory.c b/mm/memory.c
index 1311f26497e6..d176154c243f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3521,12 +3521,12 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3521} 3521}
3522 3522
3523int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, 3523int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
3524 unsigned long addr, int current_nid) 3524 unsigned long addr, int page_nid)
3525{ 3525{
3526 get_page(page); 3526 get_page(page);
3527 3527
3528 count_vm_numa_event(NUMA_HINT_FAULTS); 3528 count_vm_numa_event(NUMA_HINT_FAULTS);
3529 if (current_nid == numa_node_id()) 3529 if (page_nid == numa_node_id())
3530 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 3530 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
3531 3531
3532 return mpol_misplaced(page, vma, addr); 3532 return mpol_misplaced(page, vma, addr);
@@ -3537,7 +3537,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3537{ 3537{
3538 struct page *page = NULL; 3538 struct page *page = NULL;
3539 spinlock_t *ptl; 3539 spinlock_t *ptl;
3540 int current_nid = -1; 3540 int page_nid = -1;
3541 int target_nid; 3541 int target_nid;
3542 bool migrated = false; 3542 bool migrated = false;
3543 3543
@@ -3567,15 +3567,10 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3567 return 0; 3567 return 0;
3568 } 3568 }
3569 3569
3570 current_nid = page_to_nid(page); 3570 page_nid = page_to_nid(page);
3571 target_nid = numa_migrate_prep(page, vma, addr, current_nid); 3571 target_nid = numa_migrate_prep(page, vma, addr, page_nid);
3572 pte_unmap_unlock(ptep, ptl); 3572 pte_unmap_unlock(ptep, ptl);
3573 if (target_nid == -1) { 3573 if (target_nid == -1) {
3574 /*
3575 * Account for the fault against the current node if it not
3576 * being replaced regardless of where the page is located.
3577 */
3578 current_nid = numa_node_id();
3579 put_page(page); 3574 put_page(page);
3580 goto out; 3575 goto out;
3581 } 3576 }
@@ -3583,11 +3578,11 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3583 /* Migrate to the requested node */ 3578 /* Migrate to the requested node */
3584 migrated = migrate_misplaced_page(page, target_nid); 3579 migrated = migrate_misplaced_page(page, target_nid);
3585 if (migrated) 3580 if (migrated)
3586 current_nid = target_nid; 3581 page_nid = target_nid;
3587 3582
3588out: 3583out:
3589 if (current_nid != -1) 3584 if (page_nid != -1)
3590 task_numa_fault(current_nid, 1, migrated); 3585 task_numa_fault(page_nid, 1, migrated);
3591 return 0; 3586 return 0;
3592} 3587}
3593 3588
@@ -3602,7 +3597,6 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3602 unsigned long offset; 3597 unsigned long offset;
3603 spinlock_t *ptl; 3598 spinlock_t *ptl;
3604 bool numa = false; 3599 bool numa = false;
3605 int local_nid = numa_node_id();
3606 3600
3607 spin_lock(&mm->page_table_lock); 3601 spin_lock(&mm->page_table_lock);
3608 pmd = *pmdp; 3602 pmd = *pmdp;
@@ -3625,9 +3619,10 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3625 for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) { 3619 for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
3626 pte_t pteval = *pte; 3620 pte_t pteval = *pte;
3627 struct page *page; 3621 struct page *page;
3628 int curr_nid = local_nid; 3622 int page_nid = -1;
3629 int target_nid; 3623 int target_nid;
3630 bool migrated; 3624 bool migrated = false;
3625
3631 if (!pte_present(pteval)) 3626 if (!pte_present(pteval))
3632 continue; 3627 continue;
3633 if (!pte_numa(pteval)) 3628 if (!pte_numa(pteval))
@@ -3649,25 +3644,19 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3649 if (unlikely(page_mapcount(page) != 1)) 3644 if (unlikely(page_mapcount(page) != 1))
3650 continue; 3645 continue;
3651 3646
3652 /* 3647 page_nid = page_to_nid(page);
3653 * Note that the NUMA fault is later accounted to either 3648 target_nid = numa_migrate_prep(page, vma, addr, page_nid);
3654 * the node that is currently running or where the page is 3649 pte_unmap_unlock(pte, ptl);
3655 * migrated to. 3650 if (target_nid != -1) {
3656 */ 3651 migrated = migrate_misplaced_page(page, target_nid);
3657 curr_nid = local_nid; 3652 if (migrated)
3658 target_nid = numa_migrate_prep(page, vma, addr, 3653 page_nid = target_nid;
3659 page_to_nid(page)); 3654 } else {
3660 if (target_nid == -1) {
3661 put_page(page); 3655 put_page(page);
3662 continue;
3663 } 3656 }
3664 3657
3665 /* Migrate to the requested node */ 3658 if (page_nid != -1)
3666 pte_unmap_unlock(pte, ptl); 3659 task_numa_fault(page_nid, 1, migrated);
3667 migrated = migrate_misplaced_page(page, target_nid);
3668 if (migrated)
3669 curr_nid = target_nid;
3670 task_numa_fault(curr_nid, 1, migrated);
3671 3660
3672 pte = pte_offset_map_lock(mm, pmdp, addr, &ptl); 3661 pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
3673 } 3662 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 7a7325ee1d08..c04692774e88 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1715,12 +1715,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1715 unlock_page(new_page); 1715 unlock_page(new_page);
1716 put_page(new_page); /* Free it */ 1716 put_page(new_page); /* Free it */
1717 1717
1718 unlock_page(page); 1718 /* Retake the callers reference and putback on LRU */
1719 get_page(page);
1719 putback_lru_page(page); 1720 putback_lru_page(page);
1720 1721 mod_zone_page_state(page_zone(page),
1721 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); 1722 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
1722 isolated = 0; 1723 goto out_fail;
1723 goto out;
1724 } 1724 }
1725 1725
1726 /* 1726 /*
@@ -1737,9 +1737,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1737 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1737 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1738 entry = pmd_mkhuge(entry); 1738 entry = pmd_mkhuge(entry);
1739 1739
1740 page_add_new_anon_rmap(new_page, vma, haddr); 1740 pmdp_clear_flush(vma, haddr, pmd);
1741
1742 set_pmd_at(mm, haddr, pmd, entry); 1741 set_pmd_at(mm, haddr, pmd, entry);
1742 page_add_new_anon_rmap(new_page, vma, haddr);
1743 update_mmu_cache_pmd(vma, address, &entry); 1743 update_mmu_cache_pmd(vma, address, &entry);
1744 page_remove_rmap(page); 1744 page_remove_rmap(page);
1745 /* 1745 /*
@@ -1758,7 +1758,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1758 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR); 1758 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
1759 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR); 1759 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
1760 1760
1761out:
1762 mod_zone_page_state(page_zone(page), 1761 mod_zone_page_state(page_zone(page),
1763 NR_ISOLATED_ANON + page_lru, 1762 NR_ISOLATED_ANON + page_lru,
1764 -HPAGE_PMD_NR); 1763 -HPAGE_PMD_NR);
@@ -1767,6 +1766,10 @@ out:
1767out_fail: 1766out_fail:
1768 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); 1767 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
1769out_dropref: 1768out_dropref:
1769 entry = pmd_mknonnuma(entry);
1770 set_pmd_at(mm, haddr, pmd, entry);
1771 update_mmu_cache_pmd(vma, address, &entry);
1772
1770 unlock_page(page); 1773 unlock_page(page);
1771 put_page(page); 1774 put_page(page);
1772 return 0; 1775 return 0;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a3af058f68e4..412ba2b7326a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -148,7 +148,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
148 split_huge_page_pmd(vma, addr, pmd); 148 split_huge_page_pmd(vma, addr, pmd);
149 else if (change_huge_pmd(vma, pmd, addr, newprot, 149 else if (change_huge_pmd(vma, pmd, addr, newprot,
150 prot_numa)) { 150 prot_numa)) {
151 pages += HPAGE_PMD_NR; 151 pages++;
152 continue; 152 continue;
153 } 153 }
154 /* fall through */ 154 /* fall through */
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 5da2cbcfdbb5..2beeabf502c5 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -242,7 +242,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
242 if (err) 242 if (err)
243 break; 243 break;
244 pgd++; 244 pgd++;
245 } while (addr = next, addr != end); 245 } while (addr = next, addr < end);
246 246
247 return err; 247 return err;
248} 248}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ca04163635da..e6b7fecb3af1 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -64,7 +64,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
64 br_flood_deliver(br, skb, false); 64 br_flood_deliver(br, skb, false);
65 goto out; 65 goto out;
66 } 66 }
67 if (br_multicast_rcv(br, NULL, skb)) { 67 if (br_multicast_rcv(br, NULL, skb, vid)) {
68 kfree_skb(skb); 68 kfree_skb(skb);
69 goto out; 69 goto out;
70 } 70 }
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index a2fd37ec35f7..7e73c32e205d 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
80 br_fdb_update(br, p, eth_hdr(skb)->h_source, vid); 80 br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
81 81
82 if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) && 82 if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
83 br_multicast_rcv(br, p, skb)) 83 br_multicast_rcv(br, p, skb, vid))
84 goto drop; 84 goto drop;
85 85
86 if (p->state == BR_STATE_LEARNING) 86 if (p->state == BR_STATE_LEARNING)
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 0513ef3ce667..4c214b2b88ef 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -947,7 +947,8 @@ void br_multicast_disable_port(struct net_bridge_port *port)
947 947
948static int br_ip4_multicast_igmp3_report(struct net_bridge *br, 948static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
949 struct net_bridge_port *port, 949 struct net_bridge_port *port,
950 struct sk_buff *skb) 950 struct sk_buff *skb,
951 u16 vid)
951{ 952{
952 struct igmpv3_report *ih; 953 struct igmpv3_report *ih;
953 struct igmpv3_grec *grec; 954 struct igmpv3_grec *grec;
@@ -957,12 +958,10 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
957 int type; 958 int type;
958 int err = 0; 959 int err = 0;
959 __be32 group; 960 __be32 group;
960 u16 vid = 0;
961 961
962 if (!pskb_may_pull(skb, sizeof(*ih))) 962 if (!pskb_may_pull(skb, sizeof(*ih)))
963 return -EINVAL; 963 return -EINVAL;
964 964
965 br_vlan_get_tag(skb, &vid);
966 ih = igmpv3_report_hdr(skb); 965 ih = igmpv3_report_hdr(skb);
967 num = ntohs(ih->ngrec); 966 num = ntohs(ih->ngrec);
968 len = sizeof(*ih); 967 len = sizeof(*ih);
@@ -1005,7 +1004,8 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1005#if IS_ENABLED(CONFIG_IPV6) 1004#if IS_ENABLED(CONFIG_IPV6)
1006static int br_ip6_multicast_mld2_report(struct net_bridge *br, 1005static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1007 struct net_bridge_port *port, 1006 struct net_bridge_port *port,
1008 struct sk_buff *skb) 1007 struct sk_buff *skb,
1008 u16 vid)
1009{ 1009{
1010 struct icmp6hdr *icmp6h; 1010 struct icmp6hdr *icmp6h;
1011 struct mld2_grec *grec; 1011 struct mld2_grec *grec;
@@ -1013,12 +1013,10 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1013 int len; 1013 int len;
1014 int num; 1014 int num;
1015 int err = 0; 1015 int err = 0;
1016 u16 vid = 0;
1017 1016
1018 if (!pskb_may_pull(skb, sizeof(*icmp6h))) 1017 if (!pskb_may_pull(skb, sizeof(*icmp6h)))
1019 return -EINVAL; 1018 return -EINVAL;
1020 1019
1021 br_vlan_get_tag(skb, &vid);
1022 icmp6h = icmp6_hdr(skb); 1020 icmp6h = icmp6_hdr(skb);
1023 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 1021 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
1024 len = sizeof(*icmp6h); 1022 len = sizeof(*icmp6h);
@@ -1141,7 +1139,8 @@ static void br_multicast_query_received(struct net_bridge *br,
1141 1139
1142static int br_ip4_multicast_query(struct net_bridge *br, 1140static int br_ip4_multicast_query(struct net_bridge *br,
1143 struct net_bridge_port *port, 1141 struct net_bridge_port *port,
1144 struct sk_buff *skb) 1142 struct sk_buff *skb,
1143 u16 vid)
1145{ 1144{
1146 const struct iphdr *iph = ip_hdr(skb); 1145 const struct iphdr *iph = ip_hdr(skb);
1147 struct igmphdr *ih = igmp_hdr(skb); 1146 struct igmphdr *ih = igmp_hdr(skb);
@@ -1153,7 +1152,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1153 unsigned long now = jiffies; 1152 unsigned long now = jiffies;
1154 __be32 group; 1153 __be32 group;
1155 int err = 0; 1154 int err = 0;
1156 u16 vid = 0;
1157 1155
1158 spin_lock(&br->multicast_lock); 1156 spin_lock(&br->multicast_lock);
1159 if (!netif_running(br->dev) || 1157 if (!netif_running(br->dev) ||
@@ -1189,7 +1187,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1189 if (!group) 1187 if (!group)
1190 goto out; 1188 goto out;
1191 1189
1192 br_vlan_get_tag(skb, &vid);
1193 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid); 1190 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
1194 if (!mp) 1191 if (!mp)
1195 goto out; 1192 goto out;
@@ -1219,7 +1216,8 @@ out:
1219#if IS_ENABLED(CONFIG_IPV6) 1216#if IS_ENABLED(CONFIG_IPV6)
1220static int br_ip6_multicast_query(struct net_bridge *br, 1217static int br_ip6_multicast_query(struct net_bridge *br,
1221 struct net_bridge_port *port, 1218 struct net_bridge_port *port,
1222 struct sk_buff *skb) 1219 struct sk_buff *skb,
1220 u16 vid)
1223{ 1221{
1224 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 1222 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1225 struct mld_msg *mld; 1223 struct mld_msg *mld;
@@ -1231,7 +1229,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1231 unsigned long now = jiffies; 1229 unsigned long now = jiffies;
1232 const struct in6_addr *group = NULL; 1230 const struct in6_addr *group = NULL;
1233 int err = 0; 1231 int err = 0;
1234 u16 vid = 0;
1235 1232
1236 spin_lock(&br->multicast_lock); 1233 spin_lock(&br->multicast_lock);
1237 if (!netif_running(br->dev) || 1234 if (!netif_running(br->dev) ||
@@ -1265,7 +1262,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1265 if (!group) 1262 if (!group)
1266 goto out; 1263 goto out;
1267 1264
1268 br_vlan_get_tag(skb, &vid);
1269 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid); 1265 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
1270 if (!mp) 1266 if (!mp)
1271 goto out; 1267 goto out;
@@ -1439,7 +1435,8 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1439 1435
1440static int br_multicast_ipv4_rcv(struct net_bridge *br, 1436static int br_multicast_ipv4_rcv(struct net_bridge *br,
1441 struct net_bridge_port *port, 1437 struct net_bridge_port *port,
1442 struct sk_buff *skb) 1438 struct sk_buff *skb,
1439 u16 vid)
1443{ 1440{
1444 struct sk_buff *skb2 = skb; 1441 struct sk_buff *skb2 = skb;
1445 const struct iphdr *iph; 1442 const struct iphdr *iph;
@@ -1447,7 +1444,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1447 unsigned int len; 1444 unsigned int len;
1448 unsigned int offset; 1445 unsigned int offset;
1449 int err; 1446 int err;
1450 u16 vid = 0;
1451 1447
1452 /* We treat OOM as packet loss for now. */ 1448 /* We treat OOM as packet loss for now. */
1453 if (!pskb_may_pull(skb, sizeof(*iph))) 1449 if (!pskb_may_pull(skb, sizeof(*iph)))
@@ -1508,7 +1504,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1508 1504
1509 err = 0; 1505 err = 0;
1510 1506
1511 br_vlan_get_tag(skb2, &vid);
1512 BR_INPUT_SKB_CB(skb)->igmp = 1; 1507 BR_INPUT_SKB_CB(skb)->igmp = 1;
1513 ih = igmp_hdr(skb2); 1508 ih = igmp_hdr(skb2);
1514 1509
@@ -1519,10 +1514,10 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1519 err = br_ip4_multicast_add_group(br, port, ih->group, vid); 1514 err = br_ip4_multicast_add_group(br, port, ih->group, vid);
1520 break; 1515 break;
1521 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1516 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1522 err = br_ip4_multicast_igmp3_report(br, port, skb2); 1517 err = br_ip4_multicast_igmp3_report(br, port, skb2, vid);
1523 break; 1518 break;
1524 case IGMP_HOST_MEMBERSHIP_QUERY: 1519 case IGMP_HOST_MEMBERSHIP_QUERY:
1525 err = br_ip4_multicast_query(br, port, skb2); 1520 err = br_ip4_multicast_query(br, port, skb2, vid);
1526 break; 1521 break;
1527 case IGMP_HOST_LEAVE_MESSAGE: 1522 case IGMP_HOST_LEAVE_MESSAGE:
1528 br_ip4_multicast_leave_group(br, port, ih->group, vid); 1523 br_ip4_multicast_leave_group(br, port, ih->group, vid);
@@ -1540,7 +1535,8 @@ err_out:
1540#if IS_ENABLED(CONFIG_IPV6) 1535#if IS_ENABLED(CONFIG_IPV6)
1541static int br_multicast_ipv6_rcv(struct net_bridge *br, 1536static int br_multicast_ipv6_rcv(struct net_bridge *br,
1542 struct net_bridge_port *port, 1537 struct net_bridge_port *port,
1543 struct sk_buff *skb) 1538 struct sk_buff *skb,
1539 u16 vid)
1544{ 1540{
1545 struct sk_buff *skb2; 1541 struct sk_buff *skb2;
1546 const struct ipv6hdr *ip6h; 1542 const struct ipv6hdr *ip6h;
@@ -1550,7 +1546,6 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1550 unsigned int len; 1546 unsigned int len;
1551 int offset; 1547 int offset;
1552 int err; 1548 int err;
1553 u16 vid = 0;
1554 1549
1555 if (!pskb_may_pull(skb, sizeof(*ip6h))) 1550 if (!pskb_may_pull(skb, sizeof(*ip6h)))
1556 return -EINVAL; 1551 return -EINVAL;
@@ -1640,7 +1635,6 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1640 1635
1641 err = 0; 1636 err = 0;
1642 1637
1643 br_vlan_get_tag(skb, &vid);
1644 BR_INPUT_SKB_CB(skb)->igmp = 1; 1638 BR_INPUT_SKB_CB(skb)->igmp = 1;
1645 1639
1646 switch (icmp6_type) { 1640 switch (icmp6_type) {
@@ -1657,10 +1651,10 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1657 break; 1651 break;
1658 } 1652 }
1659 case ICMPV6_MLD2_REPORT: 1653 case ICMPV6_MLD2_REPORT:
1660 err = br_ip6_multicast_mld2_report(br, port, skb2); 1654 err = br_ip6_multicast_mld2_report(br, port, skb2, vid);
1661 break; 1655 break;
1662 case ICMPV6_MGM_QUERY: 1656 case ICMPV6_MGM_QUERY:
1663 err = br_ip6_multicast_query(br, port, skb2); 1657 err = br_ip6_multicast_query(br, port, skb2, vid);
1664 break; 1658 break;
1665 case ICMPV6_MGM_REDUCTION: 1659 case ICMPV6_MGM_REDUCTION:
1666 { 1660 {
@@ -1681,7 +1675,7 @@ out:
1681#endif 1675#endif
1682 1676
1683int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, 1677int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1684 struct sk_buff *skb) 1678 struct sk_buff *skb, u16 vid)
1685{ 1679{
1686 BR_INPUT_SKB_CB(skb)->igmp = 0; 1680 BR_INPUT_SKB_CB(skb)->igmp = 0;
1687 BR_INPUT_SKB_CB(skb)->mrouters_only = 0; 1681 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
@@ -1691,10 +1685,10 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1691 1685
1692 switch (skb->protocol) { 1686 switch (skb->protocol) {
1693 case htons(ETH_P_IP): 1687 case htons(ETH_P_IP):
1694 return br_multicast_ipv4_rcv(br, port, skb); 1688 return br_multicast_ipv4_rcv(br, port, skb, vid);
1695#if IS_ENABLED(CONFIG_IPV6) 1689#if IS_ENABLED(CONFIG_IPV6)
1696 case htons(ETH_P_IPV6): 1690 case htons(ETH_P_IPV6):
1697 return br_multicast_ipv6_rcv(br, port, skb); 1691 return br_multicast_ipv6_rcv(br, port, skb, vid);
1698#endif 1692#endif
1699 } 1693 }
1700 1694
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d1ca6d956633..229d820bdf0b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -435,7 +435,7 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
435#ifdef CONFIG_BRIDGE_IGMP_SNOOPING 435#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
436extern unsigned int br_mdb_rehash_seq; 436extern unsigned int br_mdb_rehash_seq;
437int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, 437int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
438 struct sk_buff *skb); 438 struct sk_buff *skb, u16 vid);
439struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 439struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
440 struct sk_buff *skb, u16 vid); 440 struct sk_buff *skb, u16 vid);
441void br_multicast_add_port(struct net_bridge_port *port); 441void br_multicast_add_port(struct net_bridge_port *port);
@@ -504,7 +504,8 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
504#else 504#else
505static inline int br_multicast_rcv(struct net_bridge *br, 505static inline int br_multicast_rcv(struct net_bridge *br,
506 struct net_bridge_port *port, 506 struct net_bridge_port *port,
507 struct sk_buff *skb) 507 struct sk_buff *skb,
508 u16 vid)
508{ 509{
509 return 0; 510 return 0;
510} 511}
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 518093802d1d..7c470c371e14 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -181,6 +181,7 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
181 ub->qlen++; 181 ub->qlen++;
182 182
183 pm = nlmsg_data(nlh); 183 pm = nlmsg_data(nlh);
184 memset(pm, 0, sizeof(*pm));
184 185
185 /* Fill in the ulog data */ 186 /* Fill in the ulog data */
186 pm->version = EBT_ULOG_VERSION; 187 pm->version = EBT_ULOG_VERSION;
@@ -193,8 +194,6 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
193 pm->hook = hooknr; 194 pm->hook = hooknr;
194 if (uloginfo->prefix != NULL) 195 if (uloginfo->prefix != NULL)
195 strcpy(pm->prefix, uloginfo->prefix); 196 strcpy(pm->prefix, uloginfo->prefix);
196 else
197 *(pm->prefix) = '\0';
198 197
199 if (in) { 198 if (in) {
200 strcpy(pm->physindev, in->name); 199 strcpy(pm->physindev, in->name);
@@ -204,16 +203,14 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
204 strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name); 203 strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
205 else 204 else
206 strcpy(pm->indev, in->name); 205 strcpy(pm->indev, in->name);
207 } else 206 }
208 pm->indev[0] = pm->physindev[0] = '\0';
209 207
210 if (out) { 208 if (out) {
211 /* If out exists, then out is a bridge port */ 209 /* If out exists, then out is a bridge port */
212 strcpy(pm->physoutdev, out->name); 210 strcpy(pm->physoutdev, out->name);
213 /* rcu_read_lock()ed by nf_hook_slow */ 211 /* rcu_read_lock()ed by nf_hook_slow */
214 strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name); 212 strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
215 } else 213 }
216 pm->outdev[0] = pm->physoutdev[0] = '\0';
217 214
218 if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0) 215 if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
219 BUG(); 216 BUG();
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 5cac36e6ccd1..0242035192f1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -66,7 +66,7 @@ again:
66 struct iphdr _iph; 66 struct iphdr _iph;
67ip: 67ip:
68 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); 68 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
69 if (!iph) 69 if (!iph || iph->ihl < 5)
70 return false; 70 return false;
71 71
72 if (ip_is_fragment(iph)) 72 if (ip_is_fragment(iph))
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index fc75c9e461b8..8f971990677c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -636,8 +636,9 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
636 636
637 netpoll_send_skb(np, send_skb); 637 netpoll_send_skb(np, send_skb);
638 638
639 /* If there are several rx_hooks for the same address, 639 /* If there are several rx_skb_hooks for the same
640 we're fine by sending a single reply */ 640 * address we're fine by sending a single reply
641 */
641 break; 642 break;
642 } 643 }
643 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 644 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
@@ -719,8 +720,9 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
719 720
720 netpoll_send_skb(np, send_skb); 721 netpoll_send_skb(np, send_skb);
721 722
722 /* If there are several rx_hooks for the same address, 723 /* If there are several rx_skb_hooks for the same
723 we're fine by sending a single reply */ 724 * address, we're fine by sending a single reply
725 */
724 break; 726 break;
725 } 727 }
726 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 728 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
@@ -756,11 +758,12 @@ static bool pkt_is_ns(struct sk_buff *skb)
756 758
757int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) 759int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
758{ 760{
759 int proto, len, ulen; 761 int proto, len, ulen, data_len;
760 int hits = 0; 762 int hits = 0, offset;
761 const struct iphdr *iph; 763 const struct iphdr *iph;
762 struct udphdr *uh; 764 struct udphdr *uh;
763 struct netpoll *np, *tmp; 765 struct netpoll *np, *tmp;
766 uint16_t source;
764 767
765 if (list_empty(&npinfo->rx_np)) 768 if (list_empty(&npinfo->rx_np))
766 goto out; 769 goto out;
@@ -820,7 +823,10 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
820 823
821 len -= iph->ihl*4; 824 len -= iph->ihl*4;
822 uh = (struct udphdr *)(((char *)iph) + iph->ihl*4); 825 uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
826 offset = (unsigned char *)(uh + 1) - skb->data;
823 ulen = ntohs(uh->len); 827 ulen = ntohs(uh->len);
828 data_len = skb->len - offset;
829 source = ntohs(uh->source);
824 830
825 if (ulen != len) 831 if (ulen != len)
826 goto out; 832 goto out;
@@ -834,9 +840,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
834 if (np->local_port && np->local_port != ntohs(uh->dest)) 840 if (np->local_port && np->local_port != ntohs(uh->dest))
835 continue; 841 continue;
836 842
837 np->rx_hook(np, ntohs(uh->source), 843 np->rx_skb_hook(np, source, skb, offset, data_len);
838 (char *)(uh+1),
839 ulen - sizeof(struct udphdr));
840 hits++; 844 hits++;
841 } 845 }
842 } else { 846 } else {
@@ -859,7 +863,10 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
859 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 863 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
860 goto out; 864 goto out;
861 uh = udp_hdr(skb); 865 uh = udp_hdr(skb);
866 offset = (unsigned char *)(uh + 1) - skb->data;
862 ulen = ntohs(uh->len); 867 ulen = ntohs(uh->len);
868 data_len = skb->len - offset;
869 source = ntohs(uh->source);
863 if (ulen != skb->len) 870 if (ulen != skb->len)
864 goto out; 871 goto out;
865 if (udp6_csum_init(skb, uh, IPPROTO_UDP)) 872 if (udp6_csum_init(skb, uh, IPPROTO_UDP))
@@ -872,9 +879,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
872 if (np->local_port && np->local_port != ntohs(uh->dest)) 879 if (np->local_port && np->local_port != ntohs(uh->dest))
873 continue; 880 continue;
874 881
875 np->rx_hook(np, ntohs(uh->source), 882 np->rx_skb_hook(np, source, skb, offset, data_len);
876 (char *)(uh+1),
877 ulen - sizeof(struct udphdr));
878 hits++; 883 hits++;
879 } 884 }
880#endif 885#endif
@@ -1062,7 +1067,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
1062 1067
1063 npinfo->netpoll = np; 1068 npinfo->netpoll = np;
1064 1069
1065 if (np->rx_hook) { 1070 if (np->rx_skb_hook) {
1066 spin_lock_irqsave(&npinfo->rx_lock, flags); 1071 spin_lock_irqsave(&npinfo->rx_lock, flags);
1067 npinfo->rx_flags |= NETPOLL_RX_ENABLED; 1072 npinfo->rx_flags |= NETPOLL_RX_ENABLED;
1068 list_add_tail(&np->rx, &npinfo->rx_np); 1073 list_add_tail(&np->rx, &npinfo->rx_np);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 85a4f21aac1a..59da7cde0724 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -271,6 +271,11 @@ unsigned int arpt_do_table(struct sk_buff *skb,
271 local_bh_disable(); 271 local_bh_disable();
272 addend = xt_write_recseq_begin(); 272 addend = xt_write_recseq_begin();
273 private = table->private; 273 private = table->private;
274 /*
275 * Ensure we load private-> members after we've fetched the base
276 * pointer.
277 */
278 smp_read_barrier_depends();
274 table_base = private->entries[smp_processor_id()]; 279 table_base = private->entries[smp_processor_id()];
275 280
276 e = get_entry(table_base, private->hook_entry[hook]); 281 e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d23118d95ff9..718dfbd30cbe 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -327,6 +327,11 @@ ipt_do_table(struct sk_buff *skb,
327 addend = xt_write_recseq_begin(); 327 addend = xt_write_recseq_begin();
328 private = table->private; 328 private = table->private;
329 cpu = smp_processor_id(); 329 cpu = smp_processor_id();
330 /*
331 * Ensure we load private-> members after we've fetched the base
332 * pointer.
333 */
334 smp_read_barrier_depends();
330 table_base = private->entries[cpu]; 335 table_base = private->entries[cpu];
331 jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; 336 jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
332 stackptr = per_cpu_ptr(private->stackptr, cpu); 337 stackptr = per_cpu_ptr(private->stackptr, cpu);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index cbc22158af49..9cb993cd224b 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -220,6 +220,7 @@ static void ipt_ulog_packet(struct net *net,
220 ub->qlen++; 220 ub->qlen++;
221 221
222 pm = nlmsg_data(nlh); 222 pm = nlmsg_data(nlh);
223 memset(pm, 0, sizeof(*pm));
223 224
224 /* We might not have a timestamp, get one */ 225 /* We might not have a timestamp, get one */
225 if (skb->tstamp.tv64 == 0) 226 if (skb->tstamp.tv64 == 0)
@@ -238,8 +239,6 @@ static void ipt_ulog_packet(struct net *net,
238 } 239 }
239 else if (loginfo->prefix[0] != '\0') 240 else if (loginfo->prefix[0] != '\0')
240 strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix)); 241 strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
241 else
242 *(pm->prefix) = '\0';
243 242
244 if (in && in->hard_header_len > 0 && 243 if (in && in->hard_header_len > 0 &&
245 skb->mac_header != skb->network_header && 244 skb->mac_header != skb->network_header &&
@@ -251,13 +250,9 @@ static void ipt_ulog_packet(struct net *net,
251 250
252 if (in) 251 if (in)
253 strncpy(pm->indev_name, in->name, sizeof(pm->indev_name)); 252 strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
254 else
255 pm->indev_name[0] = '\0';
256 253
257 if (out) 254 if (out)
258 strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name)); 255 strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
259 else
260 pm->outdev_name[0] = '\0';
261 256
262 /* copy_len <= skb->len, so can't fail. */ 257 /* copy_len <= skb->len, so can't fail. */
263 if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0) 258 if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b935397c703c..63095b218b4a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2903,7 +2903,8 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
2903 * left edge of the send window. 2903 * left edge of the send window.
2904 * See draft-ietf-tcplw-high-performance-00, section 3.3. 2904 * See draft-ietf-tcplw-high-performance-00, section 3.3.
2905 */ 2905 */
2906 if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 2906 if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2907 flag & FLAG_ACKED)
2907 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; 2908 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
2908 2909
2909 if (seq_rtt < 0) 2910 if (seq_rtt < 0)
@@ -2918,14 +2919,19 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
2918} 2919}
2919 2920
2920/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */ 2921/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
2921static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) 2922static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
2922{ 2923{
2923 struct tcp_sock *tp = tcp_sk(sk); 2924 struct tcp_sock *tp = tcp_sk(sk);
2924 s32 seq_rtt = -1; 2925 s32 seq_rtt = -1;
2925 2926
2926 if (tp->lsndtime && !tp->total_retrans) 2927 if (synack_stamp && !tp->total_retrans)
2927 seq_rtt = tcp_time_stamp - tp->lsndtime; 2928 seq_rtt = tcp_time_stamp - synack_stamp;
2928 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1); 2929
2930 /* If the ACK acks both the SYNACK and the (Fast Open'd) data packets
2931 * sent in SYN_RECV, SYNACK RTT is the smooth RTT computed in tcp_ack()
2932 */
2933 if (!tp->srtt)
2934 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
2929} 2935}
2930 2936
2931static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) 2937static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
@@ -3028,6 +3034,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3028 s32 seq_rtt = -1; 3034 s32 seq_rtt = -1;
3029 s32 ca_seq_rtt = -1; 3035 s32 ca_seq_rtt = -1;
3030 ktime_t last_ackt = net_invalid_timestamp(); 3036 ktime_t last_ackt = net_invalid_timestamp();
3037 bool rtt_update;
3031 3038
3032 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { 3039 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
3033 struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 3040 struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
@@ -3104,14 +3111,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3104 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 3111 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
3105 flag |= FLAG_SACK_RENEGING; 3112 flag |= FLAG_SACK_RENEGING;
3106 3113
3107 if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) || 3114 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt);
3108 (flag & FLAG_ACKED))
3109 tcp_rearm_rto(sk);
3110 3115
3111 if (flag & FLAG_ACKED) { 3116 if (flag & FLAG_ACKED) {
3112 const struct tcp_congestion_ops *ca_ops 3117 const struct tcp_congestion_ops *ca_ops
3113 = inet_csk(sk)->icsk_ca_ops; 3118 = inet_csk(sk)->icsk_ca_ops;
3114 3119
3120 tcp_rearm_rto(sk);
3115 if (unlikely(icsk->icsk_mtup.probe_size && 3121 if (unlikely(icsk->icsk_mtup.probe_size &&
3116 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { 3122 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
3117 tcp_mtup_probe_success(sk); 3123 tcp_mtup_probe_success(sk);
@@ -3150,6 +3156,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3150 3156
3151 ca_ops->pkts_acked(sk, pkts_acked, rtt_us); 3157 ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
3152 } 3158 }
3159 } else if (skb && rtt_update && sack_rtt >= 0 &&
3160 sack_rtt > (s32)(now - TCP_SKB_CB(skb)->when)) {
3161 /* Do not re-arm RTO if the sack RTT is measured from data sent
3162 * after when the head was last (re)transmitted. Otherwise the
3163 * timeout may continue to extend in loss recovery.
3164 */
3165 tcp_rearm_rto(sk);
3153 } 3166 }
3154 3167
3155#if FASTRETRANS_DEBUG > 0 3168#if FASTRETRANS_DEBUG > 0
@@ -5626,6 +5639,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5626 struct request_sock *req; 5639 struct request_sock *req;
5627 int queued = 0; 5640 int queued = 0;
5628 bool acceptable; 5641 bool acceptable;
5642 u32 synack_stamp;
5629 5643
5630 tp->rx_opt.saw_tstamp = 0; 5644 tp->rx_opt.saw_tstamp = 0;
5631 5645
@@ -5708,9 +5722,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5708 * so release it. 5722 * so release it.
5709 */ 5723 */
5710 if (req) { 5724 if (req) {
5725 synack_stamp = tcp_rsk(req)->snt_synack;
5711 tp->total_retrans = req->num_retrans; 5726 tp->total_retrans = req->num_retrans;
5712 reqsk_fastopen_remove(sk, req, false); 5727 reqsk_fastopen_remove(sk, req, false);
5713 } else { 5728 } else {
5729 synack_stamp = tp->lsndtime;
5714 /* Make sure socket is routed, for correct metrics. */ 5730 /* Make sure socket is routed, for correct metrics. */
5715 icsk->icsk_af_ops->rebuild_header(sk); 5731 icsk->icsk_af_ops->rebuild_header(sk);
5716 tcp_init_congestion_control(sk); 5732 tcp_init_congestion_control(sk);
@@ -5733,7 +5749,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5733 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; 5749 tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
5734 tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; 5750 tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
5735 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); 5751 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
5736 tcp_synack_rtt_meas(sk, req); 5752 tcp_synack_rtt_meas(sk, synack_stamp);
5737 5753
5738 if (tp->rx_opt.tstamp_ok) 5754 if (tp->rx_opt.tstamp_ok)
5739 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 5755 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index a7a5583eab04..a2b68a108eae 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -18,6 +18,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
18 netdev_features_t features) 18 netdev_features_t features)
19{ 19{
20 struct sk_buff *segs = ERR_PTR(-EINVAL); 20 struct sk_buff *segs = ERR_PTR(-EINVAL);
21 unsigned int sum_truesize = 0;
21 struct tcphdr *th; 22 struct tcphdr *th;
22 unsigned int thlen; 23 unsigned int thlen;
23 unsigned int seq; 24 unsigned int seq;
@@ -104,13 +105,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
104 if (copy_destructor) { 105 if (copy_destructor) {
105 skb->destructor = gso_skb->destructor; 106 skb->destructor = gso_skb->destructor;
106 skb->sk = gso_skb->sk; 107 skb->sk = gso_skb->sk;
107 /* {tcp|sock}_wfree() use exact truesize accounting : 108 sum_truesize += skb->truesize;
108 * sum(skb->truesize) MUST be exactly be gso_skb->truesize
109 * So we account mss bytes of 'true size' for each segment.
110 * The last segment will contain the remaining.
111 */
112 skb->truesize = mss;
113 gso_skb->truesize -= mss;
114 } 109 }
115 skb = skb->next; 110 skb = skb->next;
116 th = tcp_hdr(skb); 111 th = tcp_hdr(skb);
@@ -127,7 +122,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
127 if (copy_destructor) { 122 if (copy_destructor) {
128 swap(gso_skb->sk, skb->sk); 123 swap(gso_skb->sk, skb->sk);
129 swap(gso_skb->destructor, skb->destructor); 124 swap(gso_skb->destructor, skb->destructor);
130 swap(gso_skb->truesize, skb->truesize); 125 sum_truesize += skb->truesize;
126 atomic_add(sum_truesize - gso_skb->truesize,
127 &skb->sk->sk_wmem_alloc);
131 } 128 }
132 129
133 delta = htonl(oldlen + (skb_tail_pointer(skb) - 130 delta = htonl(oldlen + (skb_tail_pointer(skb) -
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index ccde54248c8c..e1a63930a967 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -104,10 +104,14 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
104 const struct iphdr *iph = ip_hdr(skb); 104 const struct iphdr *iph = ip_hdr(skb);
105 u8 *xprth = skb_network_header(skb) + iph->ihl * 4; 105 u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
106 struct flowi4 *fl4 = &fl->u.ip4; 106 struct flowi4 *fl4 = &fl->u.ip4;
107 int oif = 0;
108
109 if (skb_dst(skb))
110 oif = skb_dst(skb)->dev->ifindex;
107 111
108 memset(fl4, 0, sizeof(struct flowi4)); 112 memset(fl4, 0, sizeof(struct flowi4));
109 fl4->flowi4_mark = skb->mark; 113 fl4->flowi4_mark = skb->mark;
110 fl4->flowi4_oif = skb_dst(skb)->dev->ifindex; 114 fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
111 115
112 if (!ip_is_fragment(iph)) { 116 if (!ip_is_fragment(iph)) {
113 switch (iph->protocol) { 117 switch (iph->protocol) {
@@ -236,7 +240,7 @@ static struct dst_ops xfrm4_dst_ops = {
236 .destroy = xfrm4_dst_destroy, 240 .destroy = xfrm4_dst_destroy,
237 .ifdown = xfrm4_dst_ifdown, 241 .ifdown = xfrm4_dst_ifdown,
238 .local_out = __ip_local_out, 242 .local_out = __ip_local_out,
239 .gc_thresh = 1024, 243 .gc_thresh = 32768,
240}; 244};
241 245
242static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { 246static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 44400c216dc6..710238f58aa9 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -349,6 +349,11 @@ ip6t_do_table(struct sk_buff *skb,
349 local_bh_disable(); 349 local_bh_disable();
350 addend = xt_write_recseq_begin(); 350 addend = xt_write_recseq_begin();
351 private = table->private; 351 private = table->private;
352 /*
353 * Ensure we load private-> members after we've fetched the base
354 * pointer.
355 */
356 smp_read_barrier_depends();
352 cpu = smp_processor_id(); 357 cpu = smp_processor_id();
353 table_base = private->entries[cpu]; 358 table_base = private->entries[cpu];
354 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; 359 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1ac0b6e17d95..fd399ac6c1f7 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1087,10 +1087,13 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1087 if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev))) 1087 if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
1088 return NULL; 1088 return NULL;
1089 1089
1090 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) 1090 if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
1091 return dst; 1091 return NULL;
1092 1092
1093 return NULL; 1093 if (rt6_check_expired(rt))
1094 return NULL;
1095
1096 return dst;
1094} 1097}
1095 1098
1096static struct dst_entry *ip6_negative_advice(struct dst_entry *dst) 1099static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 08ed2772b7aa..5f8e128c512d 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -135,10 +135,14 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
135 struct ipv6_opt_hdr *exthdr; 135 struct ipv6_opt_hdr *exthdr;
136 const unsigned char *nh = skb_network_header(skb); 136 const unsigned char *nh = skb_network_header(skb);
137 u8 nexthdr = nh[IP6CB(skb)->nhoff]; 137 u8 nexthdr = nh[IP6CB(skb)->nhoff];
138 int oif = 0;
139
140 if (skb_dst(skb))
141 oif = skb_dst(skb)->dev->ifindex;
138 142
139 memset(fl6, 0, sizeof(struct flowi6)); 143 memset(fl6, 0, sizeof(struct flowi6));
140 fl6->flowi6_mark = skb->mark; 144 fl6->flowi6_mark = skb->mark;
141 fl6->flowi6_oif = skb_dst(skb)->dev->ifindex; 145 fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
142 146
143 fl6->daddr = reverse ? hdr->saddr : hdr->daddr; 147 fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
144 fl6->saddr = reverse ? hdr->daddr : hdr->saddr; 148 fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
@@ -285,7 +289,7 @@ static struct dst_ops xfrm6_dst_ops = {
285 .destroy = xfrm6_dst_destroy, 289 .destroy = xfrm6_dst_destroy,
286 .ifdown = xfrm6_dst_ifdown, 290 .ifdown = xfrm6_dst_ifdown,
287 .local_out = __ip6_local_out, 291 .local_out = __ip6_local_out,
288 .gc_thresh = 1024, 292 .gc_thresh = 32768,
289}; 293};
290 294
291static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { 295static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8b03028cca69..227aa11e8409 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -845,8 +845,13 @@ xt_replace_table(struct xt_table *table,
845 return NULL; 845 return NULL;
846 } 846 }
847 847
848 table->private = newinfo;
849 newinfo->initial_entries = private->initial_entries; 848 newinfo->initial_entries = private->initial_entries;
849 /*
850 * Ensure contents of newinfo are visible before assigning to
851 * private.
852 */
853 smp_wmb();
854 table->private = newinfo;
850 855
851 /* 856 /*
852 * Even though table entries have now been swapped, other CPU's 857 * Even though table entries have now been swapped, other CPU's
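The xt_replace_table() hunk, together with the smp_read_barrier_depends() additions in arp_tables, ip_tables and ip6_tables above, forms the usual publish/consume pairing: the writer orders its initialisation of the new table info before publishing the pointer, and each reader orders its dependent loads after fetching that pointer. A generic sketch of the pairing, assuming the normal kernel barrier primitives are in scope and using illustrative names rather than the xt_table types:

	struct cfg {
		int threshold;
		int limit;
	};

	static struct cfg *live_cfg;		/* published pointer, read locklessly */

	/* Writer side: initialise fully, order the stores, then publish. */
	static void cfg_publish(struct cfg *newcfg)
	{
		newcfg->threshold = 10;
		newcfg->limit = 100;
		smp_wmb();			/* contents visible before the pointer */
		live_cfg = newcfg;
	}

	/* Reader side: fetch the pointer, order the dependent loads after it. */
	static int cfg_read_threshold(void)
	{
		struct cfg *cfg = live_cfg;

		smp_read_barrier_depends();	/* pairs with the writer's smp_wmb() */
		return cfg->threshold;
	}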
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 1e2fae32f81b..ed00fef58996 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -147,6 +147,7 @@ nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
147{ 147{
148 const struct xt_NFQ_info_v3 *info = par->targinfo; 148 const struct xt_NFQ_info_v3 *info = par->targinfo;
149 u32 queue = info->queuenum; 149 u32 queue = info->queuenum;
150 int ret;
150 151
151 if (info->queues_total > 1) { 152 if (info->queues_total > 1) {
152 if (info->flags & NFQ_FLAG_CPU_FANOUT) { 153 if (info->flags & NFQ_FLAG_CPU_FANOUT) {
@@ -157,7 +158,11 @@ nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
157 queue = nfqueue_hash(skb, par); 158 queue = nfqueue_hash(skb, par);
158 } 159 }
159 160
160 return NF_QUEUE_NR(queue); 161 ret = NF_QUEUE_NR(queue);
162 if (info->flags & NFQ_FLAG_BYPASS)
163 ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
164
165 return ret;
161} 166}
162 167
163static struct xt_target nfqueue_tg_reg[] __read_mostly = { 168static struct xt_target nfqueue_tg_reg[] __read_mostly = {
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index c3235675f359..5c2dab276109 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -65,8 +65,7 @@ void ovs_dp_notify_wq(struct work_struct *work)
65 continue; 65 continue;
66 66
67 netdev_vport = netdev_vport_priv(vport); 67 netdev_vport = netdev_vport_priv(vport);
68 if (netdev_vport->dev->reg_state == NETREG_UNREGISTERED || 68 if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH))
69 netdev_vport->dev->reg_state == NETREG_UNREGISTERING)
70 dp_detach_port_notify(vport); 69 dp_detach_port_notify(vport);
71 } 70 }
72 } 71 }
@@ -88,6 +87,10 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
88 return NOTIFY_DONE; 87 return NOTIFY_DONE;
89 88
90 if (event == NETDEV_UNREGISTER) { 89 if (event == NETDEV_UNREGISTER) {
90 /* upper_dev_unlink and decrement promisc immediately */
91 ovs_netdev_detach_dev(vport);
92
93 /* schedule vport destroy, dev_put and genl notification */
91 ovs_net = net_generic(dev_net(dev), ovs_net_id); 94 ovs_net = net_generic(dev_net(dev), ovs_net_id);
92 queue_work(system_wq, &ovs_net->dp_notify_work); 95 queue_work(system_wq, &ovs_net->dp_notify_work);
93 } 96 }
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 09d93c13cfd6..d21f77d875ba 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -150,15 +150,25 @@ static void free_port_rcu(struct rcu_head *rcu)
150 ovs_vport_free(vport_from_priv(netdev_vport)); 150 ovs_vport_free(vport_from_priv(netdev_vport));
151} 151}
152 152
153static void netdev_destroy(struct vport *vport) 153void ovs_netdev_detach_dev(struct vport *vport)
154{ 154{
155 struct netdev_vport *netdev_vport = netdev_vport_priv(vport); 155 struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
156 156
157 rtnl_lock(); 157 ASSERT_RTNL();
158 netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH; 158 netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
159 netdev_rx_handler_unregister(netdev_vport->dev); 159 netdev_rx_handler_unregister(netdev_vport->dev);
160 netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp)); 160 netdev_upper_dev_unlink(netdev_vport->dev,
161 netdev_master_upper_dev_get(netdev_vport->dev));
161 dev_set_promiscuity(netdev_vport->dev, -1); 162 dev_set_promiscuity(netdev_vport->dev, -1);
163}
164
165static void netdev_destroy(struct vport *vport)
166{
167 struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
168
169 rtnl_lock();
170 if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
171 ovs_netdev_detach_dev(vport);
162 rtnl_unlock(); 172 rtnl_unlock();
163 173
164 call_rcu(&netdev_vport->rcu, free_port_rcu); 174 call_rcu(&netdev_vport->rcu, free_port_rcu);
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index dd298b5c5cdb..8df01c1127e5 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -39,5 +39,6 @@ netdev_vport_priv(const struct vport *vport)
39} 39}
40 40
41const char *ovs_netdev_get_name(const struct vport *); 41const char *ovs_netdev_get_name(const struct vport *);
42void ovs_netdev_detach_dev(struct vport *);
42 43
43#endif /* vport_netdev.h */ 44#endif /* vport_netdev.h */
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index a9dfdda9ed1d..fdc041c57853 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -255,6 +255,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
255 f->socket_hash != sk->sk_hash)) { 255 f->socket_hash != sk->sk_hash)) {
256 f->credit = q->initial_quantum; 256 f->credit = q->initial_quantum;
257 f->socket_hash = sk->sk_hash; 257 f->socket_hash = sk->sk_hash;
258 f->time_next_packet = 0ULL;
258 } 259 }
259 return f; 260 return f;
260 } 261 }
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f6334aa19151..7567e6f1a920 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -279,7 +279,9 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
279 sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port)); 279 sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port));
280 rcu_read_lock(); 280 rcu_read_lock();
281 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 281 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
282 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC)) 282 if (!laddr->valid || laddr->state == SCTP_ADDR_DEL ||
283 (laddr->state != SCTP_ADDR_SRC &&
284 !asoc->src_out_of_asoc_ok))
283 continue; 285 continue;
284 286
285 /* Do not compare against v4 addrs */ 287 /* Do not compare against v4 addrs */
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 666c66842799..1a6eef39ab2f 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -860,7 +860,6 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
860 (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK)) 860 (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
861 return; 861 return;
862 862
863 BUG_ON(asoc->peer.primary_path == NULL);
864 sctp_unhash_established(asoc); 863 sctp_unhash_established(asoc);
865 sctp_association_free(asoc); 864 sctp_association_free(asoc);
866} 865}
diff --git a/net/x25/Kconfig b/net/x25/Kconfig
index c959312c45e3..e2fa133f9fba 100644
--- a/net/x25/Kconfig
+++ b/net/x25/Kconfig
@@ -16,8 +16,8 @@ config X25
16 if you want that) and the lower level data link layer protocol LAPB 16 if you want that) and the lower level data link layer protocol LAPB
17 (say Y to "LAPB Data Link Driver" below if you want that). 17 (say Y to "LAPB Data Link Driver" below if you want that).
18 18
19 You can read more about X.25 at <http://www.sangoma.com/x25.htm> and 19 You can read more about X.25 at <http://www.sangoma.com/tutorials/x25/> and
20 <http://www.cisco.com/univercd/cc/td/doc/product/software/ios11/cbook/cx25.htm>. 20 <http://docwiki.cisco.com/wiki/X.25>.
21 Information about X.25 for Linux is contained in the files 21 Information about X.25 for Linux is contained in the files
22 <file:Documentation/networking/x25.txt> and 22 <file:Documentation/networking/x25.txt> and
23 <file:Documentation/networking/x25-iface.txt>. 23 <file:Documentation/networking/x25-iface.txt>.
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index b943c7fc5ed2..ccfdc7115a83 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -141,14 +141,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
 	const int plen = skb->len;
 	int dlen = IPCOMP_SCRATCH_SIZE;
 	u8 *start = skb->data;
-	const int cpu = get_cpu();
-	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
-	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+	struct crypto_comp *tfm;
+	u8 *scratch;
 	int err;
 
 	local_bh_disable();
+	scratch = *this_cpu_ptr(ipcomp_scratches);
+	tfm = *this_cpu_ptr(ipcd->tfms);
 	err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
-	local_bh_enable();
 	if (err)
 		goto out;
 
@@ -158,13 +158,13 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
 	}
 
 	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
-	put_cpu();
+	local_bh_enable();
 
 	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
 	return 0;
 
 out:
-	put_cpu();
+	local_bh_enable();
 	return err;
 }
 
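The two xfrm_ipcomp.c hunks above trade get_cpu()/put_cpu() for local_bh_disable()/local_bh_enable(): disabling bottom halves both keeps the task on its CPU and excludes the softirq path that also uses the per-CPU scratch buffers. A rough sketch of that access pattern, with an illustrative per-CPU variable and an assumed do_compress() helper rather than the real ipcomp code:

/* Sketch only: per-CPU data dereferenced under local_bh_disable(). */
static DEFINE_PER_CPU(u8 *, example_scratch);

static int example_compress(const u8 *src, int len, u8 *dst, int *dlen)
{
	u8 *scratch;
	int err;

	local_bh_disable();			/* stay on this CPU, keep softirq users out */
	scratch = *this_cpu_ptr(&example_scratch);
	err = do_compress(src, len, scratch, dlen);	/* assumed helper */
	if (err) {
		local_bh_enable();
		return err;
	}
	memcpy(dst, scratch, *dlen);		/* copy out before re-enabling BHs */
	local_bh_enable();
	return 0;
}

As in the patch, the protected region stays closed until the data has been copied out of the per-CPU scratch area, and it is re-enabled on the error path as well.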
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 487ac6f37ca2..9a11f9f799f4 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -55,6 +55,7 @@ static struct sym_entry *table;
 static unsigned int table_size, table_cnt;
 static int all_symbols = 0;
 static char symbol_prefix_char = '\0';
+static unsigned long long kernel_start_addr = 0;
 
 int token_profit[0x10000];
 
@@ -65,7 +66,10 @@ unsigned char best_table_len[256];
 
 static void usage(void)
 {
-	fprintf(stderr, "Usage: kallsyms [--all-symbols] [--symbol-prefix=<prefix char>] < in.map > out.S\n");
+	fprintf(stderr, "Usage: kallsyms [--all-symbols] "
+			"[--symbol-prefix=<prefix char>] "
+			"[--page-offset=<CONFIG_PAGE_OFFSET>] "
+			"< in.map > out.S\n");
 	exit(1);
 }
 
@@ -194,6 +198,9 @@ static int symbol_valid(struct sym_entry *s)
 	int i;
 	int offset = 1;
 
+	if (s->addr < kernel_start_addr)
+		return 0;
+
 	/* skip prefix char */
 	if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char)
 		offset++;
@@ -646,6 +653,9 @@ int main(int argc, char **argv)
 			if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
 				p++;
 			symbol_prefix_char = *p;
+		} else if (strncmp(argv[i], "--page-offset=", 14) == 0) {
+			const char *p = &argv[i][14];
+			kernel_start_addr = strtoull(p, NULL, 16);
 		} else
 			usage();
 	}
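Taken together, the kallsyms.c hunks add a --page-offset=<hex> option and drop any symbol whose address falls below it. A minimal standalone sketch of that filter (illustrative names, not the kernel tool itself):

#include <stdio.h>
#include <stdlib.h>

static unsigned long long kernel_start_addr;	/* value parsed from --page-offset=<hex> */

/* Mirrors the new "s->addr < kernel_start_addr" test in symbol_valid(). */
static int addr_is_kept(unsigned long long addr)
{
	return addr >= kernel_start_addr;
}

int main(int argc, char **argv)
{
	if (argc > 1)
		kernel_start_addr = strtoull(argv[1], NULL, 16);

	printf("0xc0008000 kept: %d\n", addr_is_kept(0xc0008000ULL));
	printf("0x00008000 kept: %d\n", addr_is_kept(0x8000ULL));
	return 0;
}

The link-vmlinux.sh hunk that follows simply passes $CONFIG_PAGE_OFFSET as that value, so symbols with user-space addresses are excluded from the generated table.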
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 014994936b1c..32b10f53d0b4 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -82,6 +82,8 @@ kallsyms()
 		kallsymopt="${kallsymopt} --all-symbols"
 	fi
 
+	kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+
 	local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
 		      ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
 
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 17f45e8aa89c..e1e9e0c999fe 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -49,6 +49,8 @@ static struct snd_pcm *snd_pcm_get(struct snd_card *card, int device)
 	struct snd_pcm *pcm;
 
 	list_for_each_entry(pcm, &snd_pcm_devices, list) {
+		if (pcm->internal)
+			continue;
 		if (pcm->card == card && pcm->device == device)
 			return pcm;
 	}
@@ -60,6 +62,8 @@ static int snd_pcm_next(struct snd_card *card, int device)
 	struct snd_pcm *pcm;
 
 	list_for_each_entry(pcm, &snd_pcm_devices, list) {
+		if (pcm->internal)
+			continue;
 		if (pcm->card == card && pcm->device > device)
 			return pcm->device;
 		else if (pcm->card->number > card->number)
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 5b6c4e3c92ca..748c6a941963 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4864,8 +4864,8 @@ static void hda_power_work(struct work_struct *work)
 	spin_unlock(&codec->power_lock);
 
 	state = hda_call_codec_suspend(codec, true);
-	codec->pm_down_notified = 0;
-	if (!bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
+	if (!codec->pm_down_notified &&
+	    !bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
 		codec->pm_down_notified = 1;
 		hda_call_pm_notify(bus, false);
 	}
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 26ad4f0aade3..b7c89dff7066 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -4475,9 +4475,11 @@ int snd_hda_gen_build_controls(struct hda_codec *codec)
 					  true, &spec->vmaster_mute.sw_kctl);
 		if (err < 0)
 			return err;
-		if (spec->vmaster_mute.hook)
+		if (spec->vmaster_mute.hook) {
 			snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute,
 						 spec->vmaster_mute_enum);
+			snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
+		}
 	}
 
 	free_kctls(spec); /* no longer needed */
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 0cbdd87dde6d..2aa2f579b4d6 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -968,6 +968,15 @@ static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
 	}
 }
 
+static void ad1884_fixup_thinkpad(struct hda_codec *codec,
+				  const struct hda_fixup *fix, int action)
+{
+	struct ad198x_spec *spec = codec->spec;
+
+	if (action == HDA_FIXUP_ACT_PRE_PROBE)
+		spec->gen.keep_eapd_on = 1;
+}
+
 /* set magic COEFs for dmic */
 static const struct hda_verb ad1884_dmic_init_verbs[] = {
 	{0x01, AC_VERB_SET_COEF_INDEX, 0x13f7},
@@ -979,6 +988,7 @@ enum {
 	AD1884_FIXUP_AMP_OVERRIDE,
 	AD1884_FIXUP_HP_EAPD,
 	AD1884_FIXUP_DMIC_COEF,
+	AD1884_FIXUP_THINKPAD,
 	AD1884_FIXUP_HP_TOUCHSMART,
 };
 
@@ -997,6 +1007,12 @@ static const struct hda_fixup ad1884_fixups[] = {
 		.type = HDA_FIXUP_VERBS,
 		.v.verbs = ad1884_dmic_init_verbs,
 	},
+	[AD1884_FIXUP_THINKPAD] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = ad1884_fixup_thinkpad,
+		.chained = true,
+		.chain_id = AD1884_FIXUP_DMIC_COEF,
+	},
 	[AD1884_FIXUP_HP_TOUCHSMART] = {
 		.type = HDA_FIXUP_VERBS,
 		.v.verbs = ad1884_dmic_init_verbs,
@@ -1008,7 +1024,7 @@ static const struct hda_fixup ad1884_fixups[] = {
 static const struct snd_pci_quirk ad1884_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x2a82, "HP Touchsmart", AD1884_FIXUP_HP_TOUCHSMART),
 	SND_PCI_QUIRK_VENDOR(0x103c, "HP", AD1884_FIXUP_HP_EAPD),
-	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_DMIC_COEF),
+	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_THINKPAD),
 	{}
 };
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bf313bea7085..8ad554312b69 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4623,6 +4623,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
+	SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4),
 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
 	SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
 	SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 8b50e5958de5..01daf655e20b 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -530,6 +530,7 @@ static int hp_supply_event(struct snd_soc_dapm_widget *w,
 					    hubs->hp_startup_mode);
 			break;
 		}
+		break;
 
 	case SND_SOC_DAPM_PRE_PMD:
 		snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c17c14c394df..b2949aed1ac2 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1949,7 +1949,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
 			w->active ? "active" : "inactive");
 
 	list_for_each_entry(p, &w->sources, list_sink) {
-		if (p->connected && !p->connected(w, p->sink))
+		if (p->connected && !p->connected(w, p->source))
 			continue;
 
 		if (p->connect)
@@ -3495,6 +3495,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
 		if (!w) {
 			dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
 				dai->driver->playback.stream_name);
+			return -ENOMEM;
 		}
 
 		w->priv = dai;
@@ -3513,6 +3514,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
 		if (!w) {
 			dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
 				dai->driver->capture.stream_name);
+			return -ENOMEM;
 		}
 
 		w->priv = dai;
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index e297b74471b8..ca0d3d9f4bac 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -90,8 +90,20 @@ OPTIONS
 	Number of mmap data pages. Must be a power of two.
 
 -g::
+	Enables call-graph (stack chain/backtrace) recording.
+
 --call-graph::
-	Do call-graph (stack chain/backtrace) recording.
+	Setup and enable call-graph (stack chain/backtrace) recording,
+	implies -g.
+
+	Allows specifying "fp" (frame pointer) or "dwarf"
+	(DWARF's CFI - Call Frame Information) as the method to collect
+	the information used to show the call graphs.
+
+	In some systems, where binaries are build with gcc
+	--fomit-frame-pointer, using the "fp" method will produce bogus
+	call graphs, using "dwarf", if available (perf tools linked to
+	the libunwind library) should be used instead.
 
 -q::
 --quiet::
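With this split, -g becomes a plain enable switch while --call-graph selects the collection method. Two hypothetical invocations (the workload name is made up):

perf record -g -- ./myworkload                    # call graphs on, default "fp" method
perf record --call-graph dwarf -- ./myworkload    # DWARF CFI unwinding; implies -g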
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 58d6598a9686..6a118e71d003 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -140,20 +140,12 @@ Default is to monitor all CPUS.
 --asm-raw::
 	Show raw instruction encoding of assembly instructions.
 
--G [type,min,order]::
+-G::
+	Enables call-graph (stack chain/backtrace) recording.
+
 --call-graph::
-	Display call chains using type, min percent threshold and order.
-	type can be either:
-	- flat: single column, linear exposure of call chains.
-	- graph: use a graph tree, displaying absolute overhead rates.
-	- fractal: like graph, but displays relative rates. Each branch of
-	  the tree is considered as a new profiled object.
-
-	order can be either:
-	- callee: callee based call graph.
-	- caller: inverted caller based call graph.
-
-	Default: fractal,0.5,callee.
+	Setup and enable call-graph (stack chain/backtrace) recording,
+	implies -G.
 
 --ignore-callees=<regex>::
 	Ignore callees of the function(s) matching the given regex.
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 935d52216c89..fbc2888d6495 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -888,11 +888,18 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
 	while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
 		err = perf_evlist__parse_sample(kvm->evlist, event, &sample);
 		if (err) {
+			perf_evlist__mmap_consume(kvm->evlist, idx);
 			pr_err("Failed to parse sample\n");
 			return -1;
 		}
 
 		err = perf_session_queue_event(kvm->session, event, &sample, 0);
+		/*
+		 * FIXME: Here we can't consume the event, as perf_session_queue_event will
+		 * point to it, and it'll get possibly overwritten by the kernel.
+		 */
+		perf_evlist__mmap_consume(kvm->evlist, idx);
+
 		if (err) {
 			pr_err("Failed to enqueue sample: %d\n", err);
 			return -1;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index a41ac41546c9..d04651484640 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -712,21 +712,12 @@ static int get_stack_size(char *str, unsigned long *_size)
 }
 #endif /* LIBUNWIND_SUPPORT */
 
-int record_parse_callchain_opt(const struct option *opt,
-			       const char *arg, int unset)
+int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
 {
-	struct perf_record_opts *opts = opt->value;
 	char *tok, *name, *saveptr = NULL;
 	char *buf;
 	int ret = -1;
 
-	/* --no-call-graph */
-	if (unset)
-		return 0;
-
-	/* We specified default option if none is provided. */
-	BUG_ON(!arg);
-
 	/* We need buffer that we know we can write to. */
 	buf = malloc(strlen(arg) + 1);
 	if (!buf)
@@ -764,13 +755,9 @@ int record_parse_callchain_opt(const struct option *opt,
 				ret = get_stack_size(tok, &size);
 				opts->stack_dump_size = size;
 			}
-
-			if (!ret)
-				pr_debug("callchain: stack dump size %d\n",
-					 opts->stack_dump_size);
 #endif /* LIBUNWIND_SUPPORT */
 		} else {
-			pr_err("callchain: Unknown -g option "
+			pr_err("callchain: Unknown --call-graph option "
 			       "value: %s\n", arg);
 			break;
 		}
@@ -778,13 +765,52 @@ int record_parse_callchain_opt(const struct option *opt,
 	} while (0);
 
 	free(buf);
+	return ret;
+}
+
+static void callchain_debug(struct perf_record_opts *opts)
+{
+	pr_debug("callchain: type %d\n", opts->call_graph);
 
+	if (opts->call_graph == CALLCHAIN_DWARF)
+		pr_debug("callchain: stack dump size %d\n",
+			 opts->stack_dump_size);
+}
+
+int record_parse_callchain_opt(const struct option *opt,
+			       const char *arg,
+			       int unset)
+{
+	struct perf_record_opts *opts = opt->value;
+	int ret;
+
+	/* --no-call-graph */
+	if (unset) {
+		opts->call_graph = CALLCHAIN_NONE;
+		pr_debug("callchain: disabled\n");
+		return 0;
+	}
+
+	ret = record_parse_callchain(arg, opts);
 	if (!ret)
-		pr_debug("callchain: type %d\n", opts->call_graph);
+		callchain_debug(opts);
 
 	return ret;
 }
 
+int record_callchain_opt(const struct option *opt,
+			 const char *arg __maybe_unused,
+			 int unset __maybe_unused)
+{
+	struct perf_record_opts *opts = opt->value;
+
+	if (opts->call_graph == CALLCHAIN_NONE)
+		opts->call_graph = CALLCHAIN_FP;
+
+	callchain_debug(opts);
+	return 0;
+}
+
 static const char * const record_usage[] = {
 	"perf record [<options>] [<command>]",
 	"perf record [<options>] -- <command> [<options>]",
@@ -813,12 +839,12 @@ static struct perf_record record = {
 	},
 };
 
-#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "
+#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
 
 #ifdef LIBUNWIND_SUPPORT
-const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
+const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
 #else
-const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
+const char record_callchain_help[] = CALLCHAIN_HELP "fp";
 #endif
 
 /*
@@ -858,9 +884,12 @@ const struct option record_options[] = {
 		     "number of mmap data pages"),
 	OPT_BOOLEAN(0, "group", &record.opts.group,
 		    "put the counters into a counter group"),
-	OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
-			     "mode[,dump_size]", record_callchain_help,
-			     &record_parse_callchain_opt, "fp"),
+	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
+			   NULL, "enables call-graph recording" ,
+			   &record_callchain_opt),
+	OPT_CALLBACK(0, "call-graph", &record.opts,
+		     "mode[,dump_size]", record_callchain_help,
+		     &record_parse_callchain_opt),
 	OPT_INCR('v', "verbose", &verbose,
 		 "be more verbose (show counter open errors, etc)"),
 	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 212214162bb2..5a11f13e56f9 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -810,7 +810,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 		ret = perf_evlist__parse_sample(top->evlist, event, &sample);
 		if (ret) {
 			pr_err("Can't parse sample, err = %d\n", ret);
-			continue;
+			goto next_event;
 		}
 
 		evsel = perf_evlist__id2evsel(session->evlist, sample.id);
@@ -825,13 +825,13 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 		case PERF_RECORD_MISC_USER:
 			++top->us_samples;
 			if (top->hide_user_symbols)
-				continue;
+				goto next_event;
 			machine = &session->machines.host;
 			break;
 		case PERF_RECORD_MISC_KERNEL:
 			++top->kernel_samples;
 			if (top->hide_kernel_symbols)
-				continue;
+				goto next_event;
 			machine = &session->machines.host;
 			break;
 		case PERF_RECORD_MISC_GUEST_KERNEL:
@@ -847,7 +847,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 		 */
 		/* Fall thru */
 		default:
-			continue;
+			goto next_event;
 		}
 
 
@@ -859,6 +859,8 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 			machine__process_event(machine, event);
 		} else
 			++session->stats.nr_unknown_events;
+next_event:
+		perf_evlist__mmap_consume(top->evlist, idx);
 	}
 }
 
@@ -1016,16 +1018,16 @@ out_delete:
 }
 
 static int
-parse_callchain_opt(const struct option *opt, const char *arg, int unset)
+callchain_opt(const struct option *opt, const char *arg, int unset)
 {
-	/*
-	 * --no-call-graph
-	 */
-	if (unset)
-		return 0;
-
 	symbol_conf.use_callchain = true;
+	return record_callchain_opt(opt, arg, unset);
+}
 
+static int
+parse_callchain_opt(const struct option *opt, const char *arg, int unset)
+{
+	symbol_conf.use_callchain = true;
 	return record_parse_callchain_opt(opt, arg, unset);
 }
 
@@ -1106,9 +1108,12 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
 		   "sort by key(s): pid, comm, dso, symbol, parent, weight, local_weight"),
 	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
 		    "Show a column with the number of samples"),
-	OPT_CALLBACK_DEFAULT('G', "call-graph", &top.record_opts,
-			     "mode[,dump_size]", record_callchain_help,
-			     &parse_callchain_opt, "fp"),
+	OPT_CALLBACK_NOOPT('G', NULL, &top.record_opts,
+			   NULL, "enables call-graph recording",
+			   &callchain_opt),
+	OPT_CALLBACK(0, "call-graph", &top.record_opts,
+		     "mode[,dump_size]", record_callchain_help,
+		     &parse_callchain_opt),
 	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
 		   "ignore callees of these functions in call graphs",
 		   report_parse_ignore_callees_opt),
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 71aa3e35406b..99c8d9ad6729 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -987,7 +987,7 @@ again:
 			err = perf_evlist__parse_sample(evlist, event, &sample);
 			if (err) {
 				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
-				continue;
+				goto next_event;
 			}
 
 			if (trace->base_time == 0)
@@ -1001,18 +1001,20 @@ again:
 			evsel = perf_evlist__id2evsel(evlist, sample.id);
 			if (evsel == NULL) {
 				fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
-				continue;
+				goto next_event;
 			}
 
 			if (sample.raw_data == NULL) {
 				fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
 					perf_evsel__name(evsel), sample.tid,
 					sample.cpu, sample.raw_size);
-				continue;
+				goto next_event;
 			}
 
 			handler = evsel->handler.func;
 			handler(trace, evsel, &sample);
+next_event:
+			perf_evlist__mmap_consume(evlist, i);
 
 			if (done)
 				goto out_unmap_evlist;
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 6fb781d5586c..e3fedfa2906e 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -290,6 +290,7 @@ static int process_events(struct machine *machine, struct perf_evlist *evlist,
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
 			ret = process_event(machine, evlist, event, state);
+			perf_evlist__mmap_consume(evlist, i);
 			if (ret < 0)
 				return ret;
 		}
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index d444ea2c47d9..376c35608534 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -36,6 +36,7 @@ static int find_comm(struct perf_evlist *evlist, const char *comm)
 			    (pid_t)event->comm.tid == getpid() &&
 			    strcmp(event->comm.comm, comm) == 0)
 				found += 1;
+			perf_evlist__mmap_consume(evlist, i);
 		}
 	}
 	return found;
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index c4185b9aeb80..a7232c204eb9 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -122,6 +122,7 @@ int test__basic_mmap(void)
 			goto out_munmap;
 		}
 		nr_events[evsel->idx]++;
+		perf_evlist__mmap_consume(evlist, 0);
 	}
 
 	err = 0;
diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c
index fc5b9fca8b47..524b221b829b 100644
--- a/tools/perf/tests/open-syscall-tp-fields.c
+++ b/tools/perf/tests/open-syscall-tp-fields.c
@@ -77,8 +77,10 @@ int test__syscall_open_tp_fields(void)
 
 				++nr_events;
 
-				if (type != PERF_RECORD_SAMPLE)
+				if (type != PERF_RECORD_SAMPLE) {
+					perf_evlist__mmap_consume(evlist, i);
 					continue;
+				}
 
 				err = perf_evsel__parse_sample(evsel, event, &sample);
 				if (err) {
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index b8a7056519ac..7923b06ffc91 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -263,6 +263,8 @@ int test__PERF_RECORD(void)
 						 type);
 				++errs;
 			}
+
+			perf_evlist__mmap_consume(evlist, i);
 		}
 	}
 
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index 0ab61b1f408e..4ca1b938f6a6 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -122,7 +122,7 @@ int test__perf_time_to_tsc(void)
 			if (event->header.type != PERF_RECORD_COMM ||
 			    (pid_t)event->comm.pid != getpid() ||
 			    (pid_t)event->comm.tid != getpid())
-				continue;
+				goto next_event;
 
 			if (strcmp(event->comm.comm, comm1) == 0) {
 				CHECK__(perf_evsel__parse_sample(evsel, event,
@@ -134,6 +134,8 @@ int test__perf_time_to_tsc(void)
 							 &sample));
 				comm2_time = sample.time;
 			}
+next_event:
+			perf_evlist__mmap_consume(evlist, i);
 		}
 	}
 
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 2e41e2d32ccc..6e2b44ec0749 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -78,7 +78,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 		struct perf_sample sample;
 
 		if (event->header.type != PERF_RECORD_SAMPLE)
-			continue;
+			goto next_event;
 
 		err = perf_evlist__parse_sample(evlist, event, &sample);
 		if (err < 0) {
@@ -88,6 +88,8 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 
 		total_periods += sample.period;
 		nr_samples++;
+next_event:
+		perf_evlist__mmap_consume(evlist, 0);
 	}
 
 	if ((u64) nr_samples == total_periods) {
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 28fe5894b061..a3e64876e940 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -96,10 +96,10 @@ int test__task_exit(void)
 
 retry:
 	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
-		if (event->header.type != PERF_RECORD_EXIT)
-			continue;
+		if (event->header.type == PERF_RECORD_EXIT)
+			nr_exit++;
 
-		nr_exit++;
+		perf_evlist__mmap_consume(evlist, 0);
 	}
 
 	if (!exited || !nr_exit) {
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 194e2f42ff5d..6c152686e837 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -315,8 +315,7 @@ static inline void advance_hpp(struct perf_hpp *hpp, int inc)
 }
 
 static int hist_entry__period_snprintf(struct perf_hpp *hpp,
-				       struct hist_entry *he,
-				       bool color)
+				       struct hist_entry *he)
 {
 	const char *sep = symbol_conf.field_sep;
 	struct perf_hpp_fmt *fmt;
@@ -338,7 +337,7 @@ static int hist_entry__period_snprintf(struct perf_hpp *hpp,
 		} else
 			first = false;
 
-		if (color && fmt->color)
+		if (perf_hpp__use_color() && fmt->color)
 			ret = fmt->color(fmt, hpp, he);
 		else
 			ret = fmt->entry(fmt, hpp, he);
@@ -358,12 +357,11 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
 		.buf = bf,
 		.size = size,
 	};
-	bool color = !symbol_conf.field_sep;
 
 	if (size == 0 || size > bfsz)
 		size = hpp.size = bfsz;
 
-	ret = hist_entry__period_snprintf(&hpp, he, color);
+	ret = hist_entry__period_snprintf(&hpp, he);
 	hist_entry__sort_snprintf(he, bf + ret, size - ret, hists);
 
 	ret = fprintf(fp, "%s\n", bf);
@@ -482,6 +480,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
 
 print_entries:
 	linesz = hists__sort_list_width(hists) + 3 + 1;
+	linesz += perf_hpp__color_overhead();
 	line = malloc(linesz);
 	if (line == NULL) {
 		ret = -1;
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 2b585bc308cf..9e99060408ae 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -147,6 +147,9 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
 
 struct option;
 
+int record_parse_callchain(const char *arg, struct perf_record_opts *opts);
 int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
+int record_callchain_opt(const struct option *opt, const char *arg, int unset);
+
 extern const char record_callchain_help[];
 #endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 9b393e7dca6f..49096ea58a15 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -187,7 +187,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
 		return -1;
 	}
 
-	event->header.type = PERF_RECORD_MMAP2;
+	event->header.type = PERF_RECORD_MMAP;
 	/*
 	 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
 	 */
@@ -198,7 +198,6 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
 		char prot[5];
 		char execname[PATH_MAX];
 		char anonstr[] = "//anon";
-		unsigned int ino;
 		size_t size;
 		ssize_t n;
 
@@ -209,15 +208,12 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
 		strcpy(execname, "");
 
 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
-		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
-		       &event->mmap2.start, &event->mmap2.len, prot,
-		       &event->mmap2.pgoff, &event->mmap2.maj,
-		       &event->mmap2.min,
-		       &ino, execname);
-
-		event->mmap2.ino = (u64)ino;
+		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n",
+		       &event->mmap.start, &event->mmap.len, prot,
+		       &event->mmap.pgoff,
+		       execname);
 
-		if (n != 8)
+		if (n != 5)
 			continue;
 
 		if (prot[2] != 'x')
@@ -227,15 +223,15 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
 			strcpy(execname, anonstr);
 
 		size = strlen(execname) + 1;
-		memcpy(event->mmap2.filename, execname, size);
+		memcpy(event->mmap.filename, execname, size);
 		size = PERF_ALIGN(size, sizeof(u64));
-		event->mmap2.len -= event->mmap.start;
-		event->mmap2.header.size = (sizeof(event->mmap2) -
-					(sizeof(event->mmap2.filename) - size));
-		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
-		event->mmap2.header.size += machine->id_hdr_size;
-		event->mmap2.pid = tgid;
-		event->mmap2.tid = pid;
+		event->mmap.len -= event->mmap.start;
+		event->mmap.header.size = (sizeof(event->mmap) -
+					(sizeof(event->mmap.filename) - size));
+		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+		event->mmap.header.size += machine->id_hdr_size;
+		event->mmap.pid = tgid;
+		event->mmap.tid = pid;
 
 		if (process(tool, event, &synth_sample, machine) != 0) {
 			rc = -1;
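The narrowed sscanf() expects five conversions instead of eight because the device and inode columns of /proc/<pid>/maps are now consumed with assignment-suppressing specifiers. A small self-contained sketch of the same parse (userspace; it uses SCNx64 for scanning where the patch reuses PRIx64):

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	const char *bf = "00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat\n";
	uint64_t start, end, pgoff;
	char prot[5], execname[4096];
	int n;

	/* %*x and %*u read past the dev and inode fields without storing them. */
	n = sscanf(bf, "%" SCNx64 "-%" SCNx64 " %4s %" SCNx64 " %*x:%*x %*u %s\n",
		   &start, &end, prot, &pgoff, execname);

	printf("n=%d start=%#" PRIx64 " len=%#" PRIx64 " file=%s\n",
	       n, start, end - start, execname);
	return 0;
}

The second field is the mapping's end address, which is why the later context line subtracts event->mmap.start from event->mmap.len to turn it into a length.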
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index f9f77bee0b1b..e584cd30b0f2 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -545,12 +545,19 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 
 	md->prev = old;
 
-	if (!evlist->overwrite)
-		perf_mmap__write_tail(md, old);
-
 	return event;
 }
 
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+	if (!evlist->overwrite) {
+		struct perf_mmap *md = &evlist->mmap[idx];
+		unsigned int old = md->prev;
+
+		perf_mmap__write_tail(md, old);
+	}
+}
+
 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
 {
 	if (evlist->mmap[idx].base != NULL) {
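Every tool and test hunk in this series follows from this split: perf_evlist__mmap_read() no longer advances the tail pointer, so each caller has to pair it with perf_evlist__mmap_consume() on every path once it is finished with the event. A hedged sketch of the resulting consumer loop (handle_event() is an assumed placeholder and the include presumes perf's own source tree):

#include "util/evlist.h"	/* perf-internal header, not a system one */

static void handle_event(union perf_event *event);	/* assumed, defined elsewhere */

static void drain_all_mmaps(struct perf_evlist *evlist)
{
	union perf_event *event;
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			handle_event(event);
			/* Release the ring-buffer space only once nothing
			 * still points into the event. */
			perf_evlist__mmap_consume(evlist, i);
		}
	}
}

The builtin-kvm.c FIXME above shows the one subtlety: if the event is queued somewhere that keeps a pointer into the ring buffer, consuming it too early risks the kernel overwriting the data.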
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 880d7139d2fb..206d09339306 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -89,6 +89,8 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
 
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
 
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
+
 int perf_evlist__open(struct perf_evlist *evlist);
 void perf_evlist__close(struct perf_evlist *evlist);
 
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 0ce9febf1ba0..9f1ef9bee2d0 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -678,7 +678,6 @@ void perf_evsel__config(struct perf_evsel *evsel,
 		attr->sample_type |= PERF_SAMPLE_WEIGHT;
 
 	attr->mmap = track;
-	attr->mmap2 = track && !perf_missing_features.mmap2;
 	attr->comm = track;
 
 	/*
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 1329b6b6ffe6..ce8dc61ce2c3 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -5,6 +5,7 @@
 #include <pthread.h>
 #include "callchain.h"
 #include "header.h"
+#include "color.h"
 
 extern struct callchain_param callchain_param;
 
@@ -175,6 +176,18 @@ void perf_hpp__init(void);
 void perf_hpp__column_register(struct perf_hpp_fmt *format);
 void perf_hpp__column_enable(unsigned col);
 
+static inline size_t perf_hpp__use_color(void)
+{
+	return !symbol_conf.field_sep;
+}
+
+static inline size_t perf_hpp__color_overhead(void)
+{
+	return perf_hpp__use_color() ?
+	       (COLOR_MAXLEN + sizeof(PERF_COLOR_RESET)) * PERF_HPP__MAX_INDEX
+	       : 0;
+}
+
 struct perf_evlist;
 
 struct hist_browser_timer {
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index c09e0a9fdf4c..f0692737ebf1 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1357,10 +1357,10 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
 		goto post;
 	}
 
+	fname = dwarf_decl_file(&spdie);
 	if (addr == (unsigned long)baseaddr) {
 		/* Function entry - Relative line number is 0 */
 		lineno = baseline;
-		fname = dwarf_decl_file(&spdie);
 		goto post;
 	}
 
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 71b5412bbbb9..2ac4bc92bb1f 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -822,6 +822,8 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
 		PyObject *pyevent = pyrf_event__new(event);
 		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
 
+		perf_evlist__mmap_consume(evlist, cpu);
+
 		if (pyevent == NULL)
 			return PyErr_NoMemory();
 
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index a85e4ae5f3ac..c0c9795c4f02 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -282,7 +282,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
 
 	event = find_cache_event(evsel);
 	if (!event)
-		die("ug! no event found for type %" PRIu64, evsel->attr.config);
+		die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config);
 
 	pid = raw_field_value(event, "common_pid", data);
 
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index cc75a3cef388..95d91a0b23af 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -56,6 +56,17 @@ static void handler_call_die(const char *handler_name)
 		Py_FatalError("problem in Python trace event handler");
 }
 
+/*
+ * Insert val into into the dictionary and decrement the reference counter.
+ * This is necessary for dictionaries since PyDict_SetItemString() does not
+ * steal a reference, as opposed to PyTuple_SetItem().
+ */
+static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val)
+{
+	PyDict_SetItemString(dict, key, val);
+	Py_DECREF(val);
+}
+
 static void define_value(enum print_arg_type field_type,
 			 const char *ev_name,
 			 const char *field_name,
@@ -279,11 +290,11 @@ static void python_process_tracepoint(union perf_event *perf_event
 		PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
 		PyTuple_SetItem(t, n++, PyString_FromString(comm));
 	} else {
-		PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu));
-		PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s));
-		PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns));
-		PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid));
-		PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm));
+		pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(cpu));
+		pydict_set_item_string_decref(dict, "common_s", PyInt_FromLong(s));
+		pydict_set_item_string_decref(dict, "common_ns", PyInt_FromLong(ns));
+		pydict_set_item_string_decref(dict, "common_pid", PyInt_FromLong(pid));
+		pydict_set_item_string_decref(dict, "common_comm", PyString_FromString(comm));
 	}
 	for (field = event->format.fields; field; field = field->next) {
 		if (field->flags & FIELD_IS_STRING) {
@@ -313,7 +324,7 @@ static void python_process_tracepoint(union perf_event *perf_event
 		if (handler)
 			PyTuple_SetItem(t, n++, obj);
 		else
-			PyDict_SetItemString(dict, field->name, obj);
+			pydict_set_item_string_decref(dict, field->name, obj);
 
 	}
 	if (!handler)
@@ -370,21 +381,21 @@ static void python_process_general_event(union perf_event *perf_event
 	if (!handler || !PyCallable_Check(handler))
 		goto exit;
 
-	PyDict_SetItemString(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
-	PyDict_SetItemString(dict, "attr", PyString_FromStringAndSize(
+	pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
+	pydict_set_item_string_decref(dict, "attr", PyString_FromStringAndSize(
 			(const char *)&evsel->attr, sizeof(evsel->attr)));
-	PyDict_SetItemString(dict, "sample", PyString_FromStringAndSize(
+	pydict_set_item_string_decref(dict, "sample", PyString_FromStringAndSize(
 			(const char *)sample, sizeof(*sample)));
-	PyDict_SetItemString(dict, "raw_buf", PyString_FromStringAndSize(
+	pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize(
 			(const char *)sample->raw_data, sample->raw_size));
-	PyDict_SetItemString(dict, "comm",
+	pydict_set_item_string_decref(dict, "comm",
 			PyString_FromString(thread->comm));
 	if (al->map) {
-		PyDict_SetItemString(dict, "dso",
+		pydict_set_item_string_decref(dict, "dso",
 			PyString_FromString(al->map->dso->name));
 	}
 	if (al->sym) {
-		PyDict_SetItemString(dict, "symbol",
+		pydict_set_item_string_decref(dict, "symbol",
 			PyString_FromString(al->sym->name));
 	}
 
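The new helper exists because of CPython reference counting: PyDict_SetItemString() takes its own reference instead of stealing the caller's, so objects returned by constructors such as PyInt_FromLong() must be released explicitly or they leak on every call. A tiny hedged illustration against the Python 2 C API this script engine uses (set_long() is an invented name):

#include <Python.h>

static void set_long(PyObject *dict, const char *key, long v)
{
	PyObject *val = PyInt_FromLong(v);	/* new reference, count == 1      */

	PyDict_SetItemString(dict, key, val);	/* dict adds its own reference    */
	Py_DECREF(val);				/* drop ours; the dict now owns it */
}

PyTuple_SetItem(), by contrast, steals the reference, which is why the tuple branch in the hunks above needs no matching Py_DECREF().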
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a9dd682cf5e3..1cf9ccb01013 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3091,7 +3091,7 @@ static const struct file_operations *stat_fops[] = {
 
 static int kvm_init_debug(void)
 {
-	int r = -EFAULT;
+	int r = -EEXIST;
 	struct kvm_stats_debugfs_item *p;
 
 	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);