-rw-r--r--  Documentation/bcache.txt | 12
-rw-r--r--  Documentation/devices.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt | 2
-rw-r--r--  Documentation/kernel-parameters.txt | 3
-rw-r--r--  Documentation/m68k/kernel-options.txt | 2
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  arch/arm/boot/compressed/Makefile | 2
-rw-r--r--  arch/arm/boot/compressed/debug.S | 28
-rw-r--r--  arch/arm/boot/compressed/head-sa1100.S | 1
-rw-r--r--  arch/arm/boot/compressed/head-shark.S | 1
-rw-r--r--  arch/arm/boot/compressed/head.S | 5
-rw-r--r--  arch/arm/include/asm/percpu.h | 11
-rw-r--r--  arch/arm/kernel/topology.c | 2
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 2
-rw-r--r--  arch/mips/include/uapi/asm/kvm.h | 81
-rw-r--r--  arch/mips/kernel/ftrace.c | 4
-rw-r--r--  arch/mips/kernel/idle.c | 13
-rw-r--r--  arch/mips/kvm/kvm_mips.c | 83
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 17
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 16
-rw-r--r--  arch/powerpc/kernel/cputable.c | 8
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 28
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 90
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 4
-rw-r--r--  arch/powerpc/kernel/process.c | 3
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 5
-rw-r--r--  arch/powerpc/kvm/booke.c | 18
-rw-r--r--  arch/powerpc/kvm/e500_mmu.c | 5
-rw-r--r--  arch/powerpc/kvm/e500mc.c | 2
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 12
-rw-r--r--  arch/s390/include/asm/pgtable.h | 32
-rw-r--r--  arch/s390/kernel/dumpstack.c | 12
-rw-r--r--  arch/s390/kernel/irq.c | 64
-rw-r--r--  arch/s390/kernel/sclp.S | 2
-rw-r--r--  arch/s390/pci/pci.c | 33
-rw-r--r--  arch/x86/xen/smp.c | 8
-rw-r--r--  block/blk-core.c | 2
-rw-r--r--  crypto/Kconfig | 2
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 6
-rw-r--r--  drivers/base/regmap/regcache.c | 20
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 5
-rw-r--r--  drivers/block/cciss.c | 32
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 8
-rw-r--r--  drivers/block/nvme-core.c | 62
-rw-r--r--  drivers/block/nvme-scsi.c | 3
-rw-r--r--  drivers/block/pktcdvd.c | 3
-rw-r--r--  drivers/block/rbd.c | 33
-rw-r--r--  drivers/bluetooth/Kconfig | 4
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 28
-rw-r--r--  drivers/crypto/sahara.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 30
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 24
-rw-r--r--  drivers/hid/hid-multitouch.c | 11
-rw-r--r--  drivers/hwmon/adm1021.c | 58
-rw-r--r--  drivers/md/bcache/Kconfig | 1
-rw-r--r--  drivers/md/bcache/bcache.h | 2
-rw-r--r--  drivers/md/bcache/stats.c | 34
-rw-r--r--  drivers/md/bcache/super.c | 185
-rw-r--r--  drivers/md/bcache/writeback.c | 2
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/raid1.c | 38
-rw-r--r--  drivers/md/raid10.c | 29
-rw-r--r--  drivers/md/raid5.c | 6
-rw-r--r--  drivers/misc/sgi-gru/grufile.c | 1
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 4
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 5
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/team/team_mode_random.c | 2
-rw-r--r--  drivers/net/team/team_mode_roundrobin.c | 2
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/cdc_ether.c | 6
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/Makefile | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.h | 2
-rw-r--r--  drivers/net/wireless/b43/main.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/common.h | 6
-rw-r--r--  drivers/net/wireless/mwifiex/debugfs.c | 22
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 134
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.h | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/mac.c | 18
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.h | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 13
-rw-r--r--  drivers/net/wireless/rtlwifi/wifi.h | 4
-rw-r--r--  drivers/net/wireless/ti/wl12xx/scan.c | 2
-rw-r--r--  drivers/net/wireless/ti/wl12xx/wl12xx.h | 6
-rw-r--r--  drivers/net/wireless/ti/wl18xx/scan.c | 2
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c | 131
-rw-r--r--  drivers/rtc/rtc-cmos.c | 4
-rw-r--r--  drivers/rtc/rtc-tps6586x.c | 3
-rw-r--r--  drivers/rtc/rtc-twl.c | 1
-rw-r--r--  drivers/spi/spi-sh-hspi.c | 2
-rw-r--r--  drivers/spi/spi-topcliff-pch.c | 3
-rw-r--r--  drivers/spi/spi-xilinx.c | 74
-rw-r--r--  drivers/vhost/net.c | 29
-rw-r--r--  drivers/vhost/vhost.c | 8
-rw-r--r--  drivers/vhost/vhost.h | 1
-rw-r--r--  drivers/xen/tmem.c | 4
-rw-r--r--  fs/aio.c | 36
-rw-r--r--  fs/ceph/locks.c | 73
-rw-r--r--  fs/ceph/mds_client.c | 65
-rw-r--r--  fs/ceph/super.h | 9
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 1
-rw-r--r--  fs/ocfs2/namei.c | 4
-rw-r--r--  fs/proc/kmsg.c | 10
-rw-r--r--  include/asm-generic/kvm_para.h | 5
-rw-r--r--  include/linux/cpu.h | 4
-rw-r--r--  include/linux/filter.h | 1
-rw-r--r--  include/linux/if_team.h | 4
-rw-r--r--  include/linux/math64.h | 6
-rw-r--r--  include/linux/scatterlist.h | 3
-rw-r--r--  include/linux/swapops.h | 3
-rw-r--r--  include/linux/syslog.h | 4
-rw-r--r--  include/net/bluetooth/hci_core.h | 1
-rw-r--r--  include/net/bluetooth/mgmt.h | 1
-rw-r--r--  include/sound/soc-dapm.h | 3
-rw-r--r--  include/uapi/linux/kvm.h | 1
-rw-r--r--  kernel/audit.c | 2
-rw-r--r--  kernel/audit_tree.c | 1
-rw-r--r--  kernel/cpu.c | 55
-rw-r--r--  kernel/printk.c | 91
-rw-r--r--  kernel/softirq.c | 13
-rw-r--r--  kernel/sys.c | 29
-rw-r--r--  kernel/trace/trace.c | 8
-rw-r--r--  kernel/trace/trace.h | 2
-rw-r--r--  lib/mpi/mpicoder.c | 2
-rw-r--r--  mm/frontswap.c | 2
-rw-r--r--  mm/hugetlb.c | 2
-rw-r--r--  mm/memcontrol.c | 14
-rw-r--r--  mm/migrate.c | 23
-rw-r--r--  mm/page_alloc.c | 6
-rw-r--r--  mm/swap_state.c | 18
-rw-r--r--  mm/swapfile.c | 2
-rw-r--r--  net/9p/client.c | 55
-rw-r--r--  net/bluetooth/hci_core.c | 6
-rw-r--r--  net/bluetooth/l2cap_core.c | 70
-rw-r--r--  net/bluetooth/mgmt.c | 23
-rw-r--r--  net/bluetooth/smp.c | 4
-rw-r--r--  net/ceph/osd_client.c | 2
-rw-r--r--  net/core/filter.c | 2
-rw-r--r--  net/core/sock_diag.c | 9
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 1
-rw-r--r--  net/netfilter/nfnetlink_acct.c | 7
-rw-r--r--  net/netfilter/nfnetlink_cttimeout.c | 7
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c | 6
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 6
-rw-r--r--  net/netlink/af_netlink.c | 2
-rw-r--r--  net/sched/sch_api.c | 11
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  sound/soc/codecs/cs42l52.c | 6
-rw-r--r--  sound/soc/codecs/tlv320aic3x.c | 10
-rw-r--r--  sound/soc/codecs/wm5102.c | 3
-rw-r--r--  sound/soc/codecs/wm5110.c | 3
-rw-r--r--  sound/soc/codecs/wm8994.c | 3
-rw-r--r--  sound/soc/soc-dapm.c | 49
-rw-r--r--  sound/soc/soc-pcm.c | 13
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 2
164 files changed, 1647 insertions, 999 deletions
diff --git a/Documentation/bcache.txt b/Documentation/bcache.txt
index 77db8809bd96..b3a7e7d384f6 100644
--- a/Documentation/bcache.txt
+++ b/Documentation/bcache.txt
@@ -319,7 +319,10 @@ cache<0..n>
 	Symlink to each of the cache devices comprising this cache set.
 
 cache_available_percent
-	Percentage of cache device free.
+	Percentage of cache device which doesn't contain dirty data, and could
+	potentially be used for writeback. This doesn't mean this space isn't used
+	for clean cached data; the unused statistic (in priority_stats) is typically
+	much lower.
 
 clear_stats
 	Clears the statistics associated with this cache
@@ -423,8 +426,11 @@ nbuckets
 	Total buckets in this cache
 
 priority_stats
-	Statistics about how recently data in the cache has been accessed. This can
-	reveal your working set size.
+	Statistics about how recently data in the cache has been accessed.
+	This can reveal your working set size. Unused is the percentage of
+	the cache that doesn't contain any data. Metadata is bcache's
+	metadata overhead. Average is the average priority of cache buckets.
+	Next is a list of quantiles with the priority threshold of each.
 
 written
 	Sum of all data that has been written to the cache; comparison with
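These attributes live under sysfs. Purely as an illustration (the cache-set UUID and the exact sysfs path are assumptions, not taken from this patch), a userspace reader for cache_available_percent could look like the following sketch:

#include <stdio.h>

/* Hypothetical helper; path layout follows Documentation/bcache.txt. */
static int read_cache_available_percent(const char *cache_set_uuid)
{
	char path[256];
	int percent = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/fs/bcache/%s/cache_available_percent", cache_set_uuid);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &percent) != 1)
		percent = -1;
	fclose(f);
	return percent;
}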
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 08f01e79c41a..b9015912bca6 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -498,12 +498,8 @@ Your cooperation is appreciated.
 
 		Each device type has 5 bits (32 minors).
 
- 13 block	8-bit MFM/RLL/IDE controller
-		  0 = /dev/xda		First XT disk whole disk
-		 64 = /dev/xdb		Second XT disk whole disk
-
-		Partitions are handled in the same way as IDE disks
-		(see major number 3).
+ 13 block	Previously used for the XT disk (/dev/xdN)
+		Deleted in kernel v3.9.
 
  14 char	Open Sound System (OSS)
 		  0 = /dev/mixer	Mixer control
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
index 2a3feabd3b22..34c1505774bf 100644
--- a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
@@ -1,7 +1,7 @@
 Atmel AT91RM9200 Real Time Clock
 
 Required properties:
-- compatible: should be: "atmel,at91rm9200-rtc"
+- compatible: should be: "atmel,at91rm9200-rtc" or "atmel,at91sam9x5-rtc"
 - reg: physical base address of the controller and length of memory mapped
   region.
 - interrupts: rtc alarm/event interrupt
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 6e3b18a8afc6..2fe6e767b3d6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3351,9 +3351,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			plus one apbt timer for broadcast timer.
 			x86_mrst_timer=apbt_only | lapic_and_apbt
 
-	xd=		[HW,XT] Original XT pre-IDE (RLL encoded) disks.
-	xd_geo=		See header of drivers/block/xd.c.
-
 	xen_emul_unplug=	[HW,X86,XEN]
 			Unplug Xen emulated devices
 			Format: [unplug0,][unplug1]
diff --git a/Documentation/m68k/kernel-options.txt b/Documentation/m68k/kernel-options.txt
index 97d45f276fe6..eaf32a1fd0b1 100644
--- a/Documentation/m68k/kernel-options.txt
+++ b/Documentation/m68k/kernel-options.txt
@@ -80,8 +80,6 @@ Valid names are:
   /dev/sdd: -> 0x0830 (forth SCSI disk)
   /dev/sde: -> 0x0840 (fifth SCSI disk)
   /dev/fd : -> 0x0200 (floppy disk)
-  /dev/xda: -> 0x0c00 (first XT disk, unused in Linux/m68k)
-  /dev/xdb: -> 0x0c40 (second XT disk, unused in Linux/m68k)
 
 The name must be followed by a decimal number, that stands for the
 partition number. Internally, the value of the number is just
diff --git a/MAINTAINERS b/MAINTAINERS
index 250dc970c62d..5be702cc8449 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5766,7 +5766,7 @@ M: Matthew Wilcox <willy@linux.intel.com>
 L:	linux-nvme@lists.infradead.org
 T:	git git://git.infradead.org/users/willy/linux-nvme.git
 S:	Supported
-F:	drivers/block/nvme.c
+F:	drivers/block/nvme*
 F:	include/linux/nvme.h
 
 OMAP SUPPORT
@@ -7624,7 +7624,7 @@ F: drivers/clk/spear/
 SPI SUBSYSTEM
 M:	Mark Brown <broonie@kernel.org>
 M:	Grant Likely <grant.likely@linaro.org>
-L:	spi-devel-general@lists.sourceforge.net
+L:	linux-spi@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
 Q:	http://patchwork.kernel.org/project/spi-devel-general/list/
 S:	Maintained
@@ -9004,7 +9004,7 @@ S: Maintained
 F:	drivers/net/wireless/wl3501*
 
 WM97XX TOUCHSCREEN DRIVERS
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
 M:	Liam Girdwood <lrg@slimlogic.co.uk>
 L:	linux-input@vger.kernel.org
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-touch
@@ -9014,7 +9014,6 @@ F: drivers/input/touchscreen/*wm97*
 F:	include/linux/wm97xx.h
 
 WOLFSON MICROELECTRONICS DRIVERS
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
 L:	patches@opensource.wolfsonmicro.com
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-asoc
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 3580d57ea218..79e9bdbfc491 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -124,7 +124,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 
 ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
-asflags-y := -Wa,-march=all -DZIMAGE
+asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
 KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
index 6e8382d5b7a4..5392ee63338f 100644
--- a/arch/arm/boot/compressed/debug.S
+++ b/arch/arm/boot/compressed/debug.S
@@ -1,6 +1,8 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
+#ifndef CONFIG_DEBUG_SEMIHOSTING
+
 #include CONFIG_DEBUG_LL_INCLUDE
 
 ENTRY(putc)
@@ -10,3 +12,29 @@ ENTRY(putc)
 	busyuart r3, r1
 	mov	pc, lr
 ENDPROC(putc)
+
+#else
+
+ENTRY(putc)
+	adr	r1, 1f
+	ldmia	r1, {r2, r3}
+	add	r2, r2, r1
+	ldr	r1, [r2, r3]
+	strb	r0, [r1]
+	mov	r0, #0x03		@ SYS_WRITEC
+   ARM(	svc	#0x123456	)
+ THUMB(	svc	#0xab		)
+	mov	pc, lr
+	.align	2
+1:	.word	_GLOBAL_OFFSET_TABLE_ - .
+	.word	semi_writec_buf(GOT)
+ENDPROC(putc)
+
+	.bss
+	.global	semi_writec_buf
+	.type	semi_writec_buf, %object
+semi_writec_buf:
+	.space	4
+	.size	semi_writec_buf, 4
+
+#endif
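For reference, the semihosting path added above is the standard ARM semihosting SYS_WRITEC operation (operation code 0x03, ARM-mode trap immediate 0x123456, both visible in the patch). A hedged C-level sketch of the same call, purely illustrative and assuming GCC-style inline assembly:

/* Illustrative only: ARM-mode semihosting SYS_WRITEC, as the new putc does. */
static void semihost_writec(char c)
{
	register unsigned long op asm("r0") = 0x03;	/* SYS_WRITEC */
	register char *chp asm("r1") = &c;		/* pointer to the character */

	asm volatile("svc #0x123456"			/* ARM-mode semihosting trap */
		     : "+r" (op)
		     : "r" (chp)
		     : "memory");
}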
diff --git a/arch/arm/boot/compressed/head-sa1100.S b/arch/arm/boot/compressed/head-sa1100.S
index 6179d94dd5c6..3115e313d9f6 100644
--- a/arch/arm/boot/compressed/head-sa1100.S
+++ b/arch/arm/boot/compressed/head-sa1100.S
@@ -11,6 +11,7 @@
 #include <asm/mach-types.h>
 
 		.section	".start", "ax"
+		.arch	armv4
 
 __SA1100_start:
 
diff --git a/arch/arm/boot/compressed/head-shark.S b/arch/arm/boot/compressed/head-shark.S
index 089c560e07f1..92b56897ed64 100644
--- a/arch/arm/boot/compressed/head-shark.S
+++ b/arch/arm/boot/compressed/head-shark.S
@@ -18,6 +18,7 @@
 
 		.section	".start", "ax"
 
+		.arch	armv4
 		b	__beginning
 
 __ofw_data:	.long	0	@ the number of memory blocks
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index fe4d9c3ad761..032a8d987148 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -11,6 +11,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
+	.arch	armv7-a
 /*
  * Debugging stuff
  *
@@ -805,8 +806,8 @@ call_cache_fn: adr r12, proc_types
 		.align	2
 		.type	proc_types,#object
 proc_types:
-		.word	0x00000000		@ old ARM ID
-		.word	0x0000f000
+		.word	0x41000000		@ old ARM ID
+		.word	0xff00f000
 		mov	pc, lr
  THUMB(		nop				)
 		mov	pc, lr
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 968c0a14e0a3..209e6504922e 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off)
 static inline unsigned long __my_cpu_offset(void)
 {
 	unsigned long off;
-	/* Read TPIDRPRW */
-	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+	register unsigned long *sp asm ("sp");
+
+	/*
+	 * Read TPIDRPRW.
+	 * We want to allow caching the value, so avoid using volatile and
+	 * instead use a fake stack read to hazard against barrier().
+	 */
+	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+
 	return off;
 }
 #define __my_cpu_offset __my_cpu_offset()
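The point of the new constraint is that the TPIDRPRW read is not volatile, so the compiler may cache and reuse the result, while the dummy stack operand still orders it against a barrier()/"memory" clobber. A commented sketch of that trade-off, assuming GCC-style inline asm (names are illustrative, not kernel API):

/*
 * Two ways to read a per-CPU base register:
 *  - asm volatile(...): re-executed at every call, never cached;
 *  - plain asm with a fake memory input (as above): may be CSE'd, but a
 *    barrier() ("memory" clobber) invalidates the fake operand and forces
 *    a fresh read, which is exactly the ordering that is needed.
 */
static inline unsigned long my_cpu_offset_cached(void)
{
	unsigned long off;
	register unsigned long *sp asm("sp");

	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
	return off;
}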
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index f10316b4ecdc..c5a59546a256 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -13,6 +13,7 @@
 
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
 #include <linux/node.h>
@@ -200,6 +201,7 @@ static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
  * cpu topology table
  */
 struct cputopo_arm cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 820116067c10..516e6e9a5594 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -117,7 +117,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	if (! ((asid += ASID_INC) & ASID_MASK) ) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
-#ifdef CONFIG_VIRTUALIZATION
+#ifdef CONFIG_KVM
 		kvm_local_flush_tlb_all();	/* start new asid cycle */
 #else
 		local_flush_tlb_all();	/* start new asid cycle */
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
index 3f424f5217da..f09ff5ae2059 100644
--- a/arch/mips/include/uapi/asm/kvm.h
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -58,56 +58,53 @@ struct kvm_fpu {
  * bits[2..0] - Register 'sel' index.
  * bits[7..3] - Register 'rd' index.
  * bits[15..8] - Must be zero.
- * bits[63..16] - 1 -> CP0 registers.
+ * bits[31..16] - 1 -> CP0 registers.
+ * bits[51..32] - Must be zero.
+ * bits[63..52] - As per linux/kvm.h
  *
  * Other sets registers may be added in the future. Each set would
- * have its own identifier in bits[63..16].
- *
- * The addr field of struct kvm_one_reg must point to an aligned
- * 64-bit wide location. For registers that are narrower than
- * 64-bits, the value is stored in the low order bits of the location,
- * and sign extended to 64-bits.
+ * have its own identifier in bits[31..16].
  *
  * The registers defined in struct kvm_regs are also accessible, the
  * id values for these are below.
  */
 
-#define KVM_REG_MIPS_R0 0
-#define KVM_REG_MIPS_R1 1
-#define KVM_REG_MIPS_R2 2
-#define KVM_REG_MIPS_R3 3
-#define KVM_REG_MIPS_R4 4
-#define KVM_REG_MIPS_R5 5
-#define KVM_REG_MIPS_R6 6
-#define KVM_REG_MIPS_R7 7
-#define KVM_REG_MIPS_R8 8
-#define KVM_REG_MIPS_R9 9
-#define KVM_REG_MIPS_R10 10
-#define KVM_REG_MIPS_R11 11
-#define KVM_REG_MIPS_R12 12
-#define KVM_REG_MIPS_R13 13
-#define KVM_REG_MIPS_R14 14
-#define KVM_REG_MIPS_R15 15
-#define KVM_REG_MIPS_R16 16
-#define KVM_REG_MIPS_R17 17
-#define KVM_REG_MIPS_R18 18
-#define KVM_REG_MIPS_R19 19
-#define KVM_REG_MIPS_R20 20
-#define KVM_REG_MIPS_R21 21
-#define KVM_REG_MIPS_R22 22
-#define KVM_REG_MIPS_R23 23
-#define KVM_REG_MIPS_R24 24
-#define KVM_REG_MIPS_R25 25
-#define KVM_REG_MIPS_R26 26
-#define KVM_REG_MIPS_R27 27
-#define KVM_REG_MIPS_R28 28
-#define KVM_REG_MIPS_R29 29
-#define KVM_REG_MIPS_R30 30
-#define KVM_REG_MIPS_R31 31
+#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6)
+#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7)
+#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8)
+#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
+#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31)
 
-#define KVM_REG_MIPS_HI 32
-#define KVM_REG_MIPS_LO 33
-#define KVM_REG_MIPS_PC 34
+#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
 
 /*
  * KVM MIPS specific structures and definitions
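Under the new encoding each register id carries the architecture, the access size and the register index in one 64-bit value. A small, hedged sketch of building and decoding such ids in userspace, using only the KVM_REG_* constants from <linux/kvm.h>; the helper names are made up for illustration:

#include <linux/kvm.h>

/* Illustrative helpers; only the KVM_REG_* constants are real. */
static inline __u64 mips_core_reg_id(unsigned int n)
{
	/* arch and size live in bits[63..52], the register index in the low bits */
	return KVM_REG_MIPS | KVM_REG_SIZE_U64 | n;
}

static inline unsigned int one_reg_size_bytes(__u64 id)
{
	/* the size field encodes log2 of the register width in bytes */
	return 1u << ((id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT);
}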
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index cf5509f13dd5..dba90ec0dc38 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -25,12 +25,16 @@
 #define MCOUNT_OFFSET_INSNS 4
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
 /* Arch override because MIPS doesn't need to run this from stop_machine() */
 void arch_ftrace_update_code(int command)
 {
 	ftrace_modify_all_code(command);
 }
 
+#endif
+
 /*
  * Check if the address is in kernel space
  *
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 3b09b888afa9..0c655deeea4a 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -93,26 +93,27 @@ static void rm7k_wait_irqoff(void)
 }
 
 /*
- * The Au1xxx wait is available only if using 32khz counter or
- * external timer source, but specifically not CP0 Counter.
- * alchemy/common/time.c may override cpu_wait!
+ * Au1 'wait' is only useful when the 32kHz counter is used as timer,
+ * since coreclock (and the cp0 counter) stops upon executing it. Only an
+ * interrupt can wake it, so they must be enabled before entering idle modes.
 */
 static void au1k_wait(void)
 {
+	unsigned long c0status = read_c0_status() | 1;	/* irqs on */
+
 	__asm__(
 	"	.set	mips3			\n"
 	"	cache	0x14, 0(%0)		\n"
 	"	cache	0x14, 32(%0)		\n"
 	"	sync				\n"
-	"	nop				\n"
+	"	mtc0	%1, $12			\n"	/* wr c0status */
 	"	wait				\n"
 	"	nop				\n"
 	"	nop				\n"
 	"	nop				\n"
 	"	nop				\n"
 	"	.set	mips0			\n"
-	: : "r" (au1k_wait));
-	local_irq_enable();
+	: : "r" (au1k_wait), "r" (c0status));
 }
 
 static int __initdata nowait;
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index d934b017f479..dd203e59e6fd 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -485,29 +485,35 @@ kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return -ENOIOCTLCMD;
 }
 
-#define KVM_REG_MIPS_CP0_INDEX (0x10000 + 8 * 0 + 0)
-#define KVM_REG_MIPS_CP0_ENTRYLO0 (0x10000 + 8 * 2 + 0)
-#define KVM_REG_MIPS_CP0_ENTRYLO1 (0x10000 + 8 * 3 + 0)
-#define KVM_REG_MIPS_CP0_CONTEXT (0x10000 + 8 * 4 + 0)
-#define KVM_REG_MIPS_CP0_USERLOCAL (0x10000 + 8 * 4 + 2)
-#define KVM_REG_MIPS_CP0_PAGEMASK (0x10000 + 8 * 5 + 0)
-#define KVM_REG_MIPS_CP0_PAGEGRAIN (0x10000 + 8 * 5 + 1)
-#define KVM_REG_MIPS_CP0_WIRED (0x10000 + 8 * 6 + 0)
-#define KVM_REG_MIPS_CP0_HWRENA (0x10000 + 8 * 7 + 0)
-#define KVM_REG_MIPS_CP0_BADVADDR (0x10000 + 8 * 8 + 0)
-#define KVM_REG_MIPS_CP0_COUNT (0x10000 + 8 * 9 + 0)
-#define KVM_REG_MIPS_CP0_ENTRYHI (0x10000 + 8 * 10 + 0)
-#define KVM_REG_MIPS_CP0_COMPARE (0x10000 + 8 * 11 + 0)
-#define KVM_REG_MIPS_CP0_STATUS (0x10000 + 8 * 12 + 0)
-#define KVM_REG_MIPS_CP0_CAUSE (0x10000 + 8 * 13 + 0)
-#define KVM_REG_MIPS_CP0_EBASE (0x10000 + 8 * 15 + 1)
-#define KVM_REG_MIPS_CP0_CONFIG (0x10000 + 8 * 16 + 0)
-#define KVM_REG_MIPS_CP0_CONFIG1 (0x10000 + 8 * 16 + 1)
-#define KVM_REG_MIPS_CP0_CONFIG2 (0x10000 + 8 * 16 + 2)
-#define KVM_REG_MIPS_CP0_CONFIG3 (0x10000 + 8 * 16 + 3)
-#define KVM_REG_MIPS_CP0_CONFIG7 (0x10000 + 8 * 16 + 7)
-#define KVM_REG_MIPS_CP0_XCONTEXT (0x10000 + 8 * 20 + 0)
-#define KVM_REG_MIPS_CP0_ERROREPC (0x10000 + 8 * 30 + 0)
+#define MIPS_CP0_32(_R, _S) \
+	(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
+
+#define MIPS_CP0_64(_R, _S) \
+	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
+
+#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
+#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
+#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
+#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
+#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
+#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
+#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
+#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
+#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
+#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
+#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
+#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
+#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
+#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
+#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
+#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
+#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
+#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
+#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
+#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
 
 static u64 kvm_mips_get_one_regs[] = {
 	KVM_REG_MIPS_R0,
@@ -567,8 +573,6 @@ static u64 kvm_mips_get_one_regs[] = {
 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 			    const struct kvm_one_reg *reg)
 {
-	u64 __user *uaddr = (u64 __user *)(long)reg->addr;
-
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	s64 v;
 
@@ -631,18 +635,39 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 	default:
 		return -EINVAL;
 	}
-	return put_user(v, uaddr);
+	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+		return put_user(v, uaddr64);
+	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+		u32 v32 = (u32)v;
+		return put_user(v32, uaddr32);
+	} else {
+		return -EINVAL;
+	}
 }
 
 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 			    const struct kvm_one_reg *reg)
 {
-	u64 __user *uaddr = (u64 __user *)(long)reg->addr;
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	u64 v;
 
-	if (get_user(v, uaddr) != 0)
-		return -EFAULT;
+	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
+		if (get_user(v, uaddr64) != 0)
+			return -EFAULT;
+	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+		s32 v32;
+
+		if (get_user(v32, uaddr32) != 0)
+			return -EFAULT;
+		v = (s64)v32;
+	} else {
+		return -EINVAL;
+	}
 
 	switch (reg->id) {
 	case KVM_REG_MIPS_R0:
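On the userspace side these sized accessors pair with the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. A minimal, hedged example of fetching a 32-bit CP0 register using the id layout introduced above (error handling trimmed; the function name is illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: read CP0 Status (register 12, sel 0) from a vcpu fd. */
static int get_cp0_status(int vcpu_fd, uint32_t *status)
{
	uint32_t val = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * 12 + 0),
		.addr = (uintptr_t)&val,	/* kernel writes 4 bytes here */
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;
	*status = val;
	return 0;
}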
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 26807e5aff51..6f3887d884d2 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -176,6 +176,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_CFAR			LONG_ASM_CONST(0x0100000000000000)
 #define CPU_FTR_HAS_PPR			LONG_ASM_CONST(0x0200000000000000)
 #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -394,19 +395,20 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
 	    CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
-	    CPU_FTR_HVMODE)
+	    CPU_FTR_HVMODE | CPU_FTR_DABRX)
 #define CPU_FTRS_POWER5	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
-	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
 #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
-	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
+	    CPU_FTR_DABRX)
 #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -415,7 +417,7 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_DSCR | CPU_FTR_SAO  | CPU_FTR_ASYM_SMT | \
 	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
 	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \
-	    CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR)
+	    CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX)
 #define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -430,14 +432,15 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
-	    CPU_FTR_UNALIGNED_LD_STD)
+	    CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
 #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
-	    CPU_FTR_PURR | CPU_FTR_REAL_LE)
+	    CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
 #define CPU_FTRS_COMPATIBLE	(CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
-	    CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX)
+	    CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
+	    CPU_FTR_ICSWX | CPU_FTR_DABRX )
 
 #ifdef __powerpc64__
 #ifdef CONFIG_PPC_BOOK3E
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index b9dd382cb349..851bac7afa4b 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -54,8 +54,16 @@
 #define BOOKE_INTERRUPT_DEBUG 15
 
 /* E500 */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
-#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
+/*
+ * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
+ */
+#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
+	BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
 #define BOOKE_INTERRUPT_DOORBELL 36
@@ -67,10 +75,6 @@
 #define BOOKE_INTERRUPT_HV_SYSCALL 40
 #define BOOKE_INTERRUPT_HV_PRIV 41
 
-/* altivec */
-#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42
-#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43
-
 /* book3s */
 
 #define BOOK3S_INTERRUPT_SYSTEM_RESET	0x100
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 1f0937d7d4b5..2a45d0f04385 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -452,8 +452,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.mmu_features		= MMU_FTRS_POWER8,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
-		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.oprofile_cpu_type	= 0,
+		.oprofile_type		= PPC_OPROFILE_INVALID,
+		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
 		.cpu_setup		= __setup_cpu_power8,
 		.cpu_restore		= __restore_cpu_power8,
 		.platform		= "power8",
@@ -506,8 +506,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.dcache_bsize		= 128,
 		.num_pmcs		= 6,
 		.pmc_type		= PPC_PMC_IBM,
-		.oprofile_cpu_type	= 0,
-		.oprofile_type		= PPC_OPROFILE_POWER4,
+		.oprofile_cpu_type	= "ppc64/power8",
+		.oprofile_type		= PPC_OPROFILE_INVALID,
 		.cpu_setup		= __setup_cpu_power8,
 		.cpu_restore		= __restore_cpu_power8,
 		.platform		= "power8",
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 246b11c4fe7e..8741c854e03d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -465,20 +465,6 @@ BEGIN_FTR_SECTION
 	std	r0, THREAD_EBBHR(r3)
 	mfspr	r0, SPRN_EBBRR
 	std	r0, THREAD_EBBRR(r3)
-
-	/* PMU registers made user read/(write) by EBB */
-	mfspr	r0, SPRN_SIAR
-	std	r0, THREAD_SIAR(r3)
-	mfspr	r0, SPRN_SDAR
-	std	r0, THREAD_SDAR(r3)
-	mfspr	r0, SPRN_SIER
-	std	r0, THREAD_SIER(r3)
-	mfspr	r0, SPRN_MMCR0
-	std	r0, THREAD_MMCR0(r3)
-	mfspr	r0, SPRN_MMCR2
-	std	r0, THREAD_MMCR2(r3)
-	mfspr	r0, SPRN_MMCRA
-	std	r0, THREAD_MMCRA(r3)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 #endif
 
@@ -581,20 +567,6 @@ BEGIN_FTR_SECTION
 	ld	r0, THREAD_EBBRR(r4)
 	mtspr	SPRN_EBBRR, r0
 
-	/* PMU registers made user read/(write) by EBB */
-	ld	r0, THREAD_SIAR(r4)
-	mtspr	SPRN_SIAR, r0
-	ld	r0, THREAD_SDAR(r4)
-	mtspr	SPRN_SDAR, r0
-	ld	r0, THREAD_SIER(r4)
-	mtspr	SPRN_SIER, r0
-	ld	r0, THREAD_MMCR0(r4)
-	mtspr	SPRN_MMCR0, r0
-	ld	r0, THREAD_MMCR2(r4)
-	mtspr	SPRN_MMCR2, r0
-	ld	r0, THREAD_MMCRA(r4)
-	mtspr	SPRN_MMCRA, r0
-
 	ld	r0,THREAD_TAR(r4)
 	mtspr	SPRN_TAR,r0
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e6eba1bf61ad..e783453f910d 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -454,38 +454,14 @@ BEGIN_FTR_SECTION
 	xori	r10,r10,(MSR_FE0|MSR_FE1)
 	mtmsrd	r10
 	sync
-	fmr	0,0
-	fmr	1,1
-	fmr	2,2
-	fmr	3,3
-	fmr	4,4
-	fmr	5,5
-	fmr	6,6
-	fmr	7,7
-	fmr	8,8
-	fmr	9,9
-	fmr	10,10
-	fmr	11,11
-	fmr	12,12
-	fmr	13,13
-	fmr	14,14
-	fmr	15,15
-	fmr	16,16
-	fmr	17,17
-	fmr	18,18
-	fmr	19,19
-	fmr	20,20
-	fmr	21,21
-	fmr	22,22
-	fmr	23,23
-	fmr	24,24
-	fmr	25,25
-	fmr	26,26
-	fmr	27,27
-	fmr	28,28
-	fmr	29,29
-	fmr	30,30
-	fmr	31,31
+
+#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
+#define FMR4(n)  FMR2(n) ; FMR2(n+2)
+#define FMR8(n)  FMR4(n) ; FMR4(n+4)
+#define FMR16(n) FMR8(n) ; FMR8(n+8)
+#define FMR32(n) FMR16(n) ; FMR16(n+16)
+	FMR32(0)
+
 FTR_SECTION_ELSE
 /*
  * To denormalise we need to move a copy of the register to itself.
@@ -495,39 +471,25 @@ FTR_SECTION_ELSE
 	oris	r10,r10,MSR_VSX@h
 	mtmsrd	r10
 	sync
-	XVCPSGNDP(0,0,0)
-	XVCPSGNDP(1,1,1)
-	XVCPSGNDP(2,2,2)
-	XVCPSGNDP(3,3,3)
-	XVCPSGNDP(4,4,4)
-	XVCPSGNDP(5,5,5)
-	XVCPSGNDP(6,6,6)
-	XVCPSGNDP(7,7,7)
-	XVCPSGNDP(8,8,8)
-	XVCPSGNDP(9,9,9)
-	XVCPSGNDP(10,10,10)
-	XVCPSGNDP(11,11,11)
-	XVCPSGNDP(12,12,12)
-	XVCPSGNDP(13,13,13)
-	XVCPSGNDP(14,14,14)
-	XVCPSGNDP(15,15,15)
-	XVCPSGNDP(16,16,16)
-	XVCPSGNDP(17,17,17)
-	XVCPSGNDP(18,18,18)
-	XVCPSGNDP(19,19,19)
-	XVCPSGNDP(20,20,20)
-	XVCPSGNDP(21,21,21)
-	XVCPSGNDP(22,22,22)
-	XVCPSGNDP(23,23,23)
-	XVCPSGNDP(24,24,24)
-	XVCPSGNDP(25,25,25)
-	XVCPSGNDP(26,26,26)
-	XVCPSGNDP(27,27,27)
-	XVCPSGNDP(28,28,28)
-	XVCPSGNDP(29,29,29)
-	XVCPSGNDP(30,30,30)
-	XVCPSGNDP(31,31,31)
+
+#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
+#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
+#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
+#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
+#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
+	XVCPSGNDP32(0)
+
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+	b	denorm_done
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER8 we need to do that for all 64 VSX registers
+ */
+	XVCPSGNDP32(32)
+denorm_done:
 	mtspr	SPRN_HSRR0,r11
 	mtcrf	0x80,r9
 	ld	r9,PACA_EXGEN+EX_R9(r13)
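The FMR32()/XVCPSGNDP32() helpers above generate the 32 (or 64) register touches by doubling at each preprocessor level: five #defines expand to 32 statements. The same trick transplanted to plain C, purely as an illustration (none of these names exist in the kernel):

#include <stdio.h>

#define TOUCH1(op, n)  op(n)
#define TOUCH2(op, n)  TOUCH1(op, n);  TOUCH1(op, (n) + 1)
#define TOUCH4(op, n)  TOUCH2(op, n);  TOUCH2(op, (n) + 2)
#define TOUCH8(op, n)  TOUCH4(op, n);  TOUCH4(op, (n) + 4)
#define TOUCH16(op, n) TOUCH8(op, n);  TOUCH8(op, (n) + 8)
#define TOUCH32(op, n) TOUCH16(op, n); TOUCH16(op, (n) + 16)

#define PRINT_REG(n) printf("reg %d\n", (n))

int main(void)
{
	TOUCH32(PRINT_REG, 0);	/* expands to one printf per register 0..31 */
	return 0;
}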
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 7f2273cc3c7d..eabeec991016 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -827,6 +827,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
 	}
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 		struct resource *res = dev->resource + i;
+		struct pci_bus_region reg;
 		if (!res->flags)
 			continue;
 
@@ -835,8 +836,9 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
 		 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
 		 * since in that case, we don't want to re-assign anything
 		 */
+		pcibios_resource_to_bus(dev, &reg, res);
 		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
-		    (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
+		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
 			/* Only print message if not re-assigning */
 			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
 				pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a902723fdc69..b0f3e3f77e72 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -399,7 +399,8 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
 {
 	mtspr(SPRN_DABR, dabr);
-	mtspr(SPRN_DABRX, dabrx);
+	if (cpu_has_feature(CPU_FTR_DABRX))
+		mtspr(SPRN_DABRX, dabrx);
 	return 0;
 }
 #else
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5dd3ab469976..ed0385448148 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -441,6 +441,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	struct kvmppc_44x_tlbe *tlbe;
 	unsigned int gtlb_index;
+	int idx;
 
 	gtlb_index = kvmppc_get_gpr(vcpu, ra);
 	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
@@ -473,6 +474,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		return EMULATE_FAIL;
 	}
 
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	if (tlbe_is_host_safe(vcpu, tlbe)) {
 		gva_t eaddr;
 		gpa_t gpaddr;
@@ -489,6 +492,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
 	}
 
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
 	trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
 			     tlbe->word2);
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 1020119226db..5cd7ad0c1176 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -832,6 +832,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 {
 	int r = RESUME_HOST;
 	int s;
+	int idx;
+
+#ifdef CONFIG_PPC64
+	WARN_ON(local_paca->irq_happened != 0);
+#endif
+
+	/*
+	 * We enter with interrupts disabled in hardware, but
+	 * we need to call hard_irq_disable anyway to ensure that
+	 * the software state is kept in sync.
+	 */
+	hard_irq_disable();
 
 	/* update before a new last_exit_type is rewritten */
 	kvmppc_update_timing_stats(vcpu);
@@ -1053,6 +1065,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 		}
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
 		gfn = gpaddr >> PAGE_SHIFT;
 
@@ -1075,6 +1089,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_account_exit(vcpu, MMIO_EXITS);
 		}
 
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
 
@@ -1098,6 +1113,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
 		gfn = gpaddr >> PAGE_SHIFT;
 
@@ -1114,6 +1131,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
 		}
 
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
 
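All of the srcu_read_lock()/srcu_read_unlock() pairs added in this commit follow the same rule: a path that may end up touching the memslot array has to run inside an SRCU read-side section on kvm->srcu. Shown in isolation as a hedged sketch (the wrapper function is invented for illustration; srcu_read_lock/unlock and kvmppc_mmu_xlate are the real kernel symbols used above):

/* Sketch of the read-side pattern added by this patch. */
static gpa_t translate_under_srcu(struct kvm_vcpu *vcpu, int gtlb_index, gva_t eaddr)
{
	gpa_t gpaddr;
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);	/* memslots may be replaced under SRCU */
	gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return gpaddr;
}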
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index c41a5a96b558..6d6f153b6c1d 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -396,6 +396,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 	int tlbsel, esel;
 	int recal = 0;
+	int idx;
 
 	tlbsel = get_tlb_tlbsel(vcpu);
 	esel = get_tlb_esel(vcpu, tlbsel);
@@ -430,6 +431,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 		kvmppc_set_tlb1map_range(vcpu, gtlbe);
 	}
 
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
 		u64 eaddr = get_tlb_eaddr(gtlbe);
@@ -444,6 +447,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 		kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
 	}
 
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
 	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
 	return EMULATE_DONE;
 }
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 753cc99eff2b..19c8379575f7 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -177,8 +177,6 @@ int kvmppc_core_check_processor_compat(void)
 		r = 0;
 	else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
 		r = 0;
-	else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
-		r = 0;
 	else
 		r = -ENOTSUPP;
 
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 845c867444e6..29c6482890c8 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1758,7 +1758,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
 			}
 		}
 	}
-	if ((!found) && printk_ratelimit())
+	if (!found && !nmi && printk_ratelimit())
 		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
 
 	/*
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 19506f935737..b456b157d33d 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -83,7 +83,11 @@ static int pseries_eeh_init(void)
 	ibm_configure_pe		= rtas_token("ibm,configure-pe");
 	ibm_configure_bridge		= rtas_token("ibm,configure-bridge");
 
-	/* necessary sanity check */
+	/*
+	 * Necessary sanity check. We needn't check "get-config-addr-info"
+	 * and its variant since the old firmware probably support address
+	 * of domain/bus/slot/function for EEH RTAS operations.
+	 */
 	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
 		pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
 			__func__);
@@ -102,12 +106,6 @@ static int pseries_eeh_init(void)
 		pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
 			__func__);
 		return -EINVAL;
-	} else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
-		   ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
-		pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
-			"<ibm,get-config-addr-info> invalid\n",
-			__func__);
-		return -EINVAL;
 	} else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
 		   ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
 		pr_warning("%s: RTAS service <ibm,configure-pe> and "
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index ac01463038f1..e8b6e5b8932c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -623,7 +623,7 @@ static inline pgste_t pgste_get_lock(pte_t *ptep)
 		"	csg	%0,%1,%2\n"
 		"	jl	0b\n"
 		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
-		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
+		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
 #endif
 	return __pgste(new);
 }
@@ -635,11 +635,19 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
 		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
 		"	stg	%1,%0\n"
 		: "=Q" (ptep[PTRS_PER_PTE])
-		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
+		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
+		: "cc", "memory");
 	preempt_enable();
 #endif
 }
 
+static inline void pgste_set(pte_t *ptep, pgste_t pgste)
+{
+#ifdef CONFIG_PGSTE
+	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
+#endif
+}
+
 static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
@@ -704,17 +712,19 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 {
 #ifdef CONFIG_PGSTE
 	unsigned long address;
-	unsigned long okey, nkey;
+	unsigned long nkey;
 
 	if (pte_val(entry) & _PAGE_INVALID)
 		return;
+	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
 	address = pte_val(entry) & PAGE_MASK;
-	okey = nkey = page_get_storage_key(address);
-	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
-	/* Set page access key and fetch protection bit from pgste */
-	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
-	if (okey != nkey)
-		page_set_storage_key(address, nkey, 0);
+	/*
+	 * Set page access key and fetch protection bit from pgste.
+	 * The guest C/R information is still in the PGSTE, set real
+	 * key C/R to 0.
+	 */
+	nkey = (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
+	page_set_storage_key(address, nkey, 0);
 #endif
 }
 
@@ -1099,8 +1109,10 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 	if (!mm_exclusive(mm))
 		__ptep_ipte(address, ptep);
 
-	if (mm_has_pgste(mm))
+	if (mm_has_pgste(mm)) {
 		pgste = pgste_update_all(&pte, pgste);
+		pgste_set(ptep, pgste);
+	}
 	return pte;
 }
 
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 298297477257..87acc38f73c6 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -74,6 +74,8 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
 
 static void show_trace(struct task_struct *task, unsigned long *stack)
 {
+	const unsigned long frame_size =
+		STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
 	register unsigned long __r15 asm ("15");
 	unsigned long sp;
 
@@ -82,11 +84,13 @@ static void show_trace(struct task_struct *task, unsigned long *stack)
 		sp = task ? task->thread.ksp : __r15;
 	printk("Call Trace:\n");
 #ifdef CONFIG_CHECK_STACK
-	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
-			  S390_lowcore.panic_stack);
+	sp = __show_trace(sp,
+			  S390_lowcore.panic_stack + frame_size - 4096,
+			  S390_lowcore.panic_stack + frame_size);
 #endif
-	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
-			  S390_lowcore.async_stack);
+	sp = __show_trace(sp,
+			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+			  S390_lowcore.async_stack + frame_size);
 	if (task)
 		__show_trace(sp, (unsigned long) task_stack_page(task),
 			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index f7fb58903f6a..408e866ae548 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -311,3 +311,67 @@ void measurement_alert_subclass_unregister(void)
 	spin_unlock(&ma_subclass_lock);
 }
 EXPORT_SYMBOL(measurement_alert_subclass_unregister);
+
+void synchronize_irq(unsigned int irq)
+{
+	/*
+	 * Not needed, the handler is protected by a lock and IRQs that occur
+	 * after the handler is deleted are just NOPs.
+	 */
+}
+EXPORT_SYMBOL_GPL(synchronize_irq);
+
+#ifndef CONFIG_PCI
+
+/* Only PCI devices have dynamically-defined IRQ handlers */
+
+int request_irq(unsigned int irq, irq_handler_t handler,
+		unsigned long irqflags, const char *devname, void *dev_id)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(request_irq);
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+	WARN_ON(1);
+}
+EXPORT_SYMBOL_GPL(free_irq);
+
+void enable_irq(unsigned int irq)
+{
+	WARN_ON(1);
+}
+EXPORT_SYMBOL_GPL(enable_irq);
+
+void disable_irq(unsigned int irq)
+{
+	WARN_ON(1);
+}
+EXPORT_SYMBOL_GPL(disable_irq);
+
+#endif /* !CONFIG_PCI */
+
+void disable_irq_nosync(unsigned int irq)
+{
+	disable_irq(irq);
+}
+EXPORT_SYMBOL_GPL(disable_irq_nosync);
+
+unsigned long probe_irq_on(void)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(probe_irq_on);
+
+int probe_irq_off(unsigned long val)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(probe_irq_off);
+
+unsigned int probe_irq_mask(unsigned long val)
+{
+	return val;
+}
+EXPORT_SYMBOL_GPL(probe_irq_mask);
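
Note: the hunk above moves generic IRQ API stubs into common s390 code and hides the PCI-backed ones behind #ifndef CONFIG_PCI. The snippet below is only an illustrative sketch of that "provide a trivial fallback when a config option is off" pattern; the header, option and function names are invented, not part of this patch.

/* foo_irq.h - illustrative sketch only */
#ifdef CONFIG_FOO_IRQ
int foo_request_line(unsigned int line);	/* real implementation elsewhere */
#else
static inline int foo_request_line(unsigned int line)
{
	return -EINVAL;		/* no interrupt controller compiled in */
}
#endif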
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index b6506ee32a36..29bd7bec4176 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -225,7 +225,7 @@ _sclp_print:
 	ahi	%r2,1
 	ltr	%r0,%r0			# end of string?
 	jz	.LfinalizemtoS4
-	chi	%r0,0x15		# end of line (NL)?
+	chi	%r0,0x0a		# end of line (NL)?
 	jz	.LfinalizemtoS4
 	stc	%r0,0(%r6,%r7)		# copy to mto
 	la	%r11,0(%r6,%r7)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index e6f15b5d8b7d..f1e5be85d592 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -302,15 +302,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
 	return rc;
 }
 
-void synchronize_irq(unsigned int irq)
-{
-	/*
-	 * Not needed, the handler is protected by a lock and IRQs that occur
-	 * after the handler is deleted are just NOPs.
-	 */
-}
-EXPORT_SYMBOL_GPL(synchronize_irq);
-
 void enable_irq(unsigned int irq)
 {
 	struct msi_desc *msi = irq_get_msi_desc(irq);
@@ -327,30 +318,6 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL_GPL(disable_irq);
 
-void disable_irq_nosync(unsigned int irq)
-{
-	disable_irq(irq);
-}
-EXPORT_SYMBOL_GPL(disable_irq_nosync);
-
-unsigned long probe_irq_on(void)
-{
-	return 0;
-}
-EXPORT_SYMBOL_GPL(probe_irq_on);
-
-int probe_irq_off(unsigned long val)
-{
-	return 0;
-}
-EXPORT_SYMBOL_GPL(probe_irq_off);
-
-unsigned int probe_irq_mask(unsigned long val)
-{
-	return val;
-}
-EXPORT_SYMBOL_GPL(probe_irq_mask);
-
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index fb44426fe931..d99cae8147d1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/irq_work.h>
+#include <linux/tick.h>
 
 #include <asm/paravirt.h>
 #include <asm/desc.h>
@@ -447,6 +448,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
 	play_dead_common();
 	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
 	cpu_bringup();
+	/*
+	 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
+	 * clears certain data that the cpu_idle loop (which called us
+	 * and that we return from) expects. The only way to get that
+	 * data back is to call:
+	 */
+	tick_nohz_idle_enter();
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */
diff --git a/block/blk-core.c b/block/blk-core.c
index 33c33bc99ddd..d5745b5833c9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3164,7 +3164,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
3164 q->rpm_status = RPM_ACTIVE; 3164 q->rpm_status = RPM_ACTIVE;
3165 __blk_run_queue(q); 3165 __blk_run_queue(q);
3166 pm_runtime_mark_last_busy(q->dev); 3166 pm_runtime_mark_last_busy(q->dev);
3167 pm_runtime_autosuspend(q->dev); 3167 pm_request_autosuspend(q->dev);
3168 } else { 3168 } else {
3169 q->rpm_status = RPM_SUSPENDED; 3169 q->rpm_status = RPM_SUSPENDED;
3170 } 3170 }
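
Note: pm_request_autosuspend() only queues the autosuspend request asynchronously, whereas pm_runtime_autosuspend() may suspend the device synchronously, which is not wanted in a resume callback. A minimal sketch of the usual driver-side counterpart of this idiom follows; the driver name is hypothetical, the runtime-PM calls are the real API.

#include <linux/pm_runtime.h>

/* illustrative I/O-completion path for a driver using autosuspend */
static void mydrv_io_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* restart the inactivity timer */
	pm_runtime_put_autosuspend(dev);	/* drop the usage count; suspend later, asynchronously */
}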
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 622d8a48cbe9..bf8148e74e73 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -823,6 +823,7 @@ config CRYPTO_BLOWFISH_X86_64
823config CRYPTO_BLOWFISH_AVX2_X86_64 823config CRYPTO_BLOWFISH_AVX2_X86_64
824 tristate "Blowfish cipher algorithm (x86_64/AVX2)" 824 tristate "Blowfish cipher algorithm (x86_64/AVX2)"
825 depends on X86 && 64BIT 825 depends on X86 && 64BIT
826 depends on BROKEN
826 select CRYPTO_ALGAPI 827 select CRYPTO_ALGAPI
827 select CRYPTO_CRYPTD 828 select CRYPTO_CRYPTD
828 select CRYPTO_ABLK_HELPER_X86 829 select CRYPTO_ABLK_HELPER_X86
@@ -1299,6 +1300,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
1299config CRYPTO_TWOFISH_AVX2_X86_64 1300config CRYPTO_TWOFISH_AVX2_X86_64
1300 tristate "Twofish cipher algorithm (x86_64/AVX2)" 1301 tristate "Twofish cipher algorithm (x86_64/AVX2)"
1301 depends on X86 && 64BIT 1302 depends on X86 && 64BIT
1303 depends on BROKEN
1302 select CRYPTO_ALGAPI 1304 select CRYPTO_ALGAPI
1303 select CRYPTO_CRYPTD 1305 select CRYPTO_CRYPTD
1304 select CRYPTO_ABLK_HELPER_X86 1306 select CRYPTO_ABLK_HELPER_X86
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index aa0875f6f1b7..02f490bad30f 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -143,7 +143,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
143 int registers = 0; 143 int registers = 0;
144 int this_registers, average; 144 int this_registers, average;
145 145
146 map->lock(map); 146 map->lock(map->lock_arg);
147 147
148 mem_size = sizeof(*rbtree_ctx); 148 mem_size = sizeof(*rbtree_ctx);
149 mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long); 149 mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
@@ -170,7 +170,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
170 seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n", 170 seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
171 nodes, registers, average, mem_size); 171 nodes, registers, average, mem_size);
172 172
173 map->unlock(map); 173 map->unlock(map->lock_arg);
174 174
175 return 0; 175 return 0;
176} 176}
@@ -391,8 +391,6 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
391 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { 391 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
392 rbnode = rb_entry(node, struct regcache_rbtree_node, node); 392 rbnode = rb_entry(node, struct regcache_rbtree_node, node);
393 393
394 if (rbnode->base_reg < min)
395 continue;
396 if (rbnode->base_reg > max) 394 if (rbnode->base_reg > max)
397 break; 395 break;
398 if (rbnode->base_reg + rbnode->blklen < min) 396 if (rbnode->base_reg + rbnode->blklen < min)
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 75923f2396bd..507ee2da0f6e 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -270,7 +270,7 @@ int regcache_sync(struct regmap *map)
270 270
271 BUG_ON(!map->cache_ops || !map->cache_ops->sync); 271 BUG_ON(!map->cache_ops || !map->cache_ops->sync);
272 272
273 map->lock(map); 273 map->lock(map->lock_arg);
274 /* Remember the initial bypass state */ 274 /* Remember the initial bypass state */
275 bypass = map->cache_bypass; 275 bypass = map->cache_bypass;
276 dev_dbg(map->dev, "Syncing %s cache\n", 276 dev_dbg(map->dev, "Syncing %s cache\n",
@@ -306,7 +306,7 @@ out:
306 trace_regcache_sync(map->dev, name, "stop"); 306 trace_regcache_sync(map->dev, name, "stop");
307 /* Restore the bypass state */ 307 /* Restore the bypass state */
308 map->cache_bypass = bypass; 308 map->cache_bypass = bypass;
309 map->unlock(map); 309 map->unlock(map->lock_arg);
310 310
311 return ret; 311 return ret;
312} 312}
@@ -333,7 +333,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
333 333
334 BUG_ON(!map->cache_ops || !map->cache_ops->sync); 334 BUG_ON(!map->cache_ops || !map->cache_ops->sync);
335 335
336 map->lock(map); 336 map->lock(map->lock_arg);
337 337
338 /* Remember the initial bypass state */ 338 /* Remember the initial bypass state */
339 bypass = map->cache_bypass; 339 bypass = map->cache_bypass;
@@ -352,7 +352,7 @@ out:
352 trace_regcache_sync(map->dev, name, "stop region"); 352 trace_regcache_sync(map->dev, name, "stop region");
353 /* Restore the bypass state */ 353 /* Restore the bypass state */
354 map->cache_bypass = bypass; 354 map->cache_bypass = bypass;
355 map->unlock(map); 355 map->unlock(map->lock_arg);
356 356
357 return ret; 357 return ret;
358} 358}
@@ -372,11 +372,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region);
372 */ 372 */
373void regcache_cache_only(struct regmap *map, bool enable) 373void regcache_cache_only(struct regmap *map, bool enable)
374{ 374{
375 map->lock(map); 375 map->lock(map->lock_arg);
376 WARN_ON(map->cache_bypass && enable); 376 WARN_ON(map->cache_bypass && enable);
377 map->cache_only = enable; 377 map->cache_only = enable;
378 trace_regmap_cache_only(map->dev, enable); 378 trace_regmap_cache_only(map->dev, enable);
379 map->unlock(map); 379 map->unlock(map->lock_arg);
380} 380}
381EXPORT_SYMBOL_GPL(regcache_cache_only); 381EXPORT_SYMBOL_GPL(regcache_cache_only);
382 382
@@ -391,9 +391,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only);
391 */ 391 */
392void regcache_mark_dirty(struct regmap *map) 392void regcache_mark_dirty(struct regmap *map)
393{ 393{
394 map->lock(map); 394 map->lock(map->lock_arg);
395 map->cache_dirty = true; 395 map->cache_dirty = true;
396 map->unlock(map); 396 map->unlock(map->lock_arg);
397} 397}
398EXPORT_SYMBOL_GPL(regcache_mark_dirty); 398EXPORT_SYMBOL_GPL(regcache_mark_dirty);
399 399
@@ -410,11 +410,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
410 */ 410 */
411void regcache_cache_bypass(struct regmap *map, bool enable) 411void regcache_cache_bypass(struct regmap *map, bool enable)
412{ 412{
413 map->lock(map); 413 map->lock(map->lock_arg);
414 WARN_ON(map->cache_only && enable); 414 WARN_ON(map->cache_only && enable);
415 map->cache_bypass = enable; 415 map->cache_bypass = enable;
416 trace_regmap_cache_bypass(map->dev, enable); 416 trace_regmap_cache_bypass(map->dev, enable);
417 map->unlock(map); 417 map->unlock(map->lock_arg);
418} 418}
419EXPORT_SYMBOL_GPL(regcache_cache_bypass); 419EXPORT_SYMBOL_GPL(regcache_cache_bypass);
420 420
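
Note: the regcache entry points now pass map->lock_arg to the lock callbacks instead of the map itself, matching the way regmap_config lets a driver install custom locking with an opaque context. A hedged sketch of such a configuration follows; the driver structure and names are hypothetical, the regmap_config fields are the real ones.

#include <linux/mutex.h>
#include <linux/regmap.h>

struct mydrv {
	struct mutex io_lock;
};

static void mydrv_regmap_lock(void *ctx)
{
	struct mydrv *priv = ctx;

	mutex_lock(&priv->io_lock);
}

static void mydrv_regmap_unlock(void *ctx)
{
	struct mydrv *priv = ctx;

	mutex_unlock(&priv->io_lock);
}

static struct regmap_config mydrv_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.lock     = mydrv_regmap_lock,
	.unlock   = mydrv_regmap_unlock,
	/* .lock_arg is what the core later hands back as map->lock_arg;
	 * it would be set to the driver's private data before registration. */
};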
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 23b701f5fd2f..975719bc3450 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -265,6 +265,7 @@ static ssize_t regmap_map_write_file(struct file *file,
265 char *start = buf; 265 char *start = buf;
266 unsigned long reg, value; 266 unsigned long reg, value;
267 struct regmap *map = file->private_data; 267 struct regmap *map = file->private_data;
268 int ret;
268 269
269 buf_size = min(count, (sizeof(buf)-1)); 270 buf_size = min(count, (sizeof(buf)-1));
270 if (copy_from_user(buf, user_buf, buf_size)) 271 if (copy_from_user(buf, user_buf, buf_size))
@@ -282,7 +283,9 @@ static ssize_t regmap_map_write_file(struct file *file,
282 /* Userspace has been fiddling around behind the kernel's back */ 283 /* Userspace has been fiddling around behind the kernel's back */
283 add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE); 284 add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
284 285
285 regmap_write(map, reg, value); 286 ret = regmap_write(map, reg, value);
287 if (ret < 0)
288 return ret;
286 return buf_size; 289 return buf_size;
287} 290}
288#else 291#else
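
Note: the write handler now reports a regmap_write() failure instead of silently claiming success; a debugfs write is expected to return either a negative errno or the number of bytes it consumed. A minimal, hypothetical handler following that rule (the driver struct, register name and file wiring are invented for illustration):

static ssize_t mydrv_reg_write(struct file *file, const char __user *ubuf,
			       size_t count, loff_t *ppos)
{
	struct mydrv *priv = file->private_data;
	char buf[32];
	unsigned int val;
	int ret;

	if (count >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;
	buf[count] = '\0';

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;

	ret = regmap_write(priv->map, MYDRV_CTRL_REG, val);
	if (ret < 0)
		return ret;	/* surface the hardware/bus error to user space */

	return count;		/* everything consumed */
}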
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 6374dc103521..62b6c2cc80b5 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -168,8 +168,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
168static int cciss_open(struct block_device *bdev, fmode_t mode); 168static int cciss_open(struct block_device *bdev, fmode_t mode);
169static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode); 169static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
170static void cciss_release(struct gendisk *disk, fmode_t mode); 170static void cciss_release(struct gendisk *disk, fmode_t mode);
171static int do_ioctl(struct block_device *bdev, fmode_t mode,
172 unsigned int cmd, unsigned long arg);
173static int cciss_ioctl(struct block_device *bdev, fmode_t mode, 171static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
174 unsigned int cmd, unsigned long arg); 172 unsigned int cmd, unsigned long arg);
175static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); 173static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -235,7 +233,7 @@ static const struct block_device_operations cciss_fops = {
235 .owner = THIS_MODULE, 233 .owner = THIS_MODULE,
236 .open = cciss_unlocked_open, 234 .open = cciss_unlocked_open,
237 .release = cciss_release, 235 .release = cciss_release,
238 .ioctl = do_ioctl, 236 .ioctl = cciss_ioctl,
239 .getgeo = cciss_getgeo, 237 .getgeo = cciss_getgeo,
240#ifdef CONFIG_COMPAT 238#ifdef CONFIG_COMPAT
241 .compat_ioctl = cciss_compat_ioctl, 239 .compat_ioctl = cciss_compat_ioctl,
@@ -1143,16 +1141,6 @@ static void cciss_release(struct gendisk *disk, fmode_t mode)
1143 mutex_unlock(&cciss_mutex); 1141 mutex_unlock(&cciss_mutex);
1144} 1142}
1145 1143
1146static int do_ioctl(struct block_device *bdev, fmode_t mode,
1147 unsigned cmd, unsigned long arg)
1148{
1149 int ret;
1150 mutex_lock(&cciss_mutex);
1151 ret = cciss_ioctl(bdev, mode, cmd, arg);
1152 mutex_unlock(&cciss_mutex);
1153 return ret;
1154}
1155
1156#ifdef CONFIG_COMPAT 1144#ifdef CONFIG_COMPAT
1157 1145
1158static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, 1146static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
@@ -1179,7 +1167,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
1179 case CCISS_REGNEWD: 1167 case CCISS_REGNEWD:
1180 case CCISS_RESCANDISK: 1168 case CCISS_RESCANDISK:
1181 case CCISS_GETLUNINFO: 1169 case CCISS_GETLUNINFO:
1182 return do_ioctl(bdev, mode, cmd, arg); 1170 return cciss_ioctl(bdev, mode, cmd, arg);
1183 1171
1184 case CCISS_PASSTHRU32: 1172 case CCISS_PASSTHRU32:
1185 return cciss_ioctl32_passthru(bdev, mode, cmd, arg); 1173 return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
@@ -1219,7 +1207,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
1219 if (err) 1207 if (err)
1220 return -EFAULT; 1208 return -EFAULT;
1221 1209
1222 err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); 1210 err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
1223 if (err) 1211 if (err)
1224 return err; 1212 return err;
1225 err |= 1213 err |=
@@ -1261,7 +1249,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
1261 if (err) 1249 if (err)
1262 return -EFAULT; 1250 return -EFAULT;
1263 1251
1264 err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); 1252 err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
1265 if (err) 1253 if (err)
1266 return err; 1254 return err;
1267 err |= 1255 err |=
@@ -1311,11 +1299,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
1311static int cciss_getintinfo(ctlr_info_t *h, void __user *argp) 1299static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
1312{ 1300{
1313 cciss_coalint_struct intinfo; 1301 cciss_coalint_struct intinfo;
1302 unsigned long flags;
1314 1303
1315 if (!argp) 1304 if (!argp)
1316 return -EINVAL; 1305 return -EINVAL;
1306 spin_lock_irqsave(&h->lock, flags);
1317 intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay); 1307 intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
1318 intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount); 1308 intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
1309 spin_unlock_irqrestore(&h->lock, flags);
1319 if (copy_to_user 1310 if (copy_to_user
1320 (argp, &intinfo, sizeof(cciss_coalint_struct))) 1311 (argp, &intinfo, sizeof(cciss_coalint_struct)))
1321 return -EFAULT; 1312 return -EFAULT;
@@ -1356,12 +1347,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
1356static int cciss_getnodename(ctlr_info_t *h, void __user *argp) 1347static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
1357{ 1348{
1358 NodeName_type NodeName; 1349 NodeName_type NodeName;
1350 unsigned long flags;
1359 int i; 1351 int i;
1360 1352
1361 if (!argp) 1353 if (!argp)
1362 return -EINVAL; 1354 return -EINVAL;
1355 spin_lock_irqsave(&h->lock, flags);
1363 for (i = 0; i < 16; i++) 1356 for (i = 0; i < 16; i++)
1364 NodeName[i] = readb(&h->cfgtable->ServerName[i]); 1357 NodeName[i] = readb(&h->cfgtable->ServerName[i]);
1358 spin_unlock_irqrestore(&h->lock, flags);
1365 if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) 1359 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
1366 return -EFAULT; 1360 return -EFAULT;
1367 return 0; 1361 return 0;
@@ -1398,10 +1392,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
1398static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp) 1392static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
1399{ 1393{
1400 Heartbeat_type heartbeat; 1394 Heartbeat_type heartbeat;
1395 unsigned long flags;
1401 1396
1402 if (!argp) 1397 if (!argp)
1403 return -EINVAL; 1398 return -EINVAL;
1399 spin_lock_irqsave(&h->lock, flags);
1404 heartbeat = readl(&h->cfgtable->HeartBeat); 1400 heartbeat = readl(&h->cfgtable->HeartBeat);
1401 spin_unlock_irqrestore(&h->lock, flags);
1405 if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type))) 1402 if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
1406 return -EFAULT; 1403 return -EFAULT;
1407 return 0; 1404 return 0;
@@ -1410,10 +1407,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
1410static int cciss_getbustypes(ctlr_info_t *h, void __user *argp) 1407static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
1411{ 1408{
1412 BusTypes_type BusTypes; 1409 BusTypes_type BusTypes;
1410 unsigned long flags;
1413 1411
1414 if (!argp) 1412 if (!argp)
1415 return -EINVAL; 1413 return -EINVAL;
1414 spin_lock_irqsave(&h->lock, flags);
1416 BusTypes = readl(&h->cfgtable->BusTypes); 1415 BusTypes = readl(&h->cfgtable->BusTypes);
1416 spin_unlock_irqrestore(&h->lock, flags);
1417 if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type))) 1417 if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
1418 return -EFAULT; 1418 return -EFAULT;
1419 return 0; 1419 return 0;
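
Note: the ioctl helpers now take h->lock with interrupts disabled around the readl()/readb() sequences so a snapshot of the controller's config table cannot be torn by a concurrent update. The general shape of that pattern, reduced to a sketch with hypothetical structure and offsets:

struct ctrl {
	spinlock_t lock;
	void __iomem *regs;
};

/* read two related registers as one consistent snapshot */
static void ctrl_snapshot(struct ctrl *h, u32 *a, u32 *b)
{
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);	/* also excludes the IRQ handler */
	*a = readl(h->regs + 0x10);		/* hypothetical register offsets */
	*b = readl(h->regs + 0x14);
	spin_unlock_irqrestore(&h->lock, flags);
}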
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 847107ef0cce..20dd52a2f92f 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3002,7 +3002,8 @@ static int mtip_hw_debugfs_init(struct driver_data *dd)
3002 3002
3003static void mtip_hw_debugfs_exit(struct driver_data *dd) 3003static void mtip_hw_debugfs_exit(struct driver_data *dd)
3004{ 3004{
3005 debugfs_remove_recursive(dd->dfs_node); 3005 if (dd->dfs_node)
3006 debugfs_remove_recursive(dd->dfs_node);
3006} 3007}
3007 3008
3008 3009
@@ -3863,7 +3864,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3863 struct driver_data *dd = queue->queuedata; 3864 struct driver_data *dd = queue->queuedata;
3864 struct scatterlist *sg; 3865 struct scatterlist *sg;
3865 struct bio_vec *bvec; 3866 struct bio_vec *bvec;
3866 int nents = 0; 3867 int i, nents = 0;
3867 int tag = 0, unaligned = 0; 3868 int tag = 0, unaligned = 0;
3868 3869
3869 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { 3870 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3921,11 +3922,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3921 } 3922 }
3922 3923
3923 /* Create the scatter list for this bio. */ 3924 /* Create the scatter list for this bio. */
3924 bio_for_each_segment(bvec, bio, nents) { 3925 bio_for_each_segment(bvec, bio, i) {
3925 sg_set_page(&sg[nents], 3926 sg_set_page(&sg[nents],
3926 bvec->bv_page, 3927 bvec->bv_page,
3927 bvec->bv_len, 3928 bvec->bv_len,
3928 bvec->bv_offset); 3929 bvec->bv_offset);
3930 nents++;
3929 } 3931 }
3930 3932
3931 /* Issue the read/write. */ 3933 /* Issue the read/write. */
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 8efdfaa44a59..ce79a590b45b 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -629,7 +629,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
629 struct nvme_command *cmnd; 629 struct nvme_command *cmnd;
630 struct nvme_iod *iod; 630 struct nvme_iod *iod;
631 enum dma_data_direction dma_dir; 631 enum dma_data_direction dma_dir;
632 int cmdid, length, result = -ENOMEM; 632 int cmdid, length, result;
633 u16 control; 633 u16 control;
634 u32 dsmgmt; 634 u32 dsmgmt;
635 int psegs = bio_phys_segments(ns->queue, bio); 635 int psegs = bio_phys_segments(ns->queue, bio);
@@ -640,6 +640,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
640 return result; 640 return result;
641 } 641 }
642 642
643 result = -ENOMEM;
643 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); 644 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
644 if (!iod) 645 if (!iod)
645 goto nomem; 646 goto nomem;
@@ -977,6 +978,8 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
977 978
978 if (timeout && !time_after(now, info[cmdid].timeout)) 979 if (timeout && !time_after(now, info[cmdid].timeout))
979 continue; 980 continue;
981 if (info[cmdid].ctx == CMD_CTX_CANCELLED)
982 continue;
980 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); 983 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
981 ctx = cancel_cmdid(nvmeq, cmdid, &fn); 984 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
982 fn(nvmeq->dev, ctx, &cqe); 985 fn(nvmeq->dev, ctx, &cqe);
@@ -1206,7 +1209,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1206 1209
1207 if (addr & 3) 1210 if (addr & 3)
1208 return ERR_PTR(-EINVAL); 1211 return ERR_PTR(-EINVAL);
1209 if (!length) 1212 if (!length || length > INT_MAX - PAGE_SIZE)
1210 return ERR_PTR(-EINVAL); 1213 return ERR_PTR(-EINVAL);
1211 1214
1212 offset = offset_in_page(addr); 1215 offset = offset_in_page(addr);
@@ -1227,7 +1230,8 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1227 sg_init_table(sg, count); 1230 sg_init_table(sg, count);
1228 for (i = 0; i < count; i++) { 1231 for (i = 0; i < count; i++) {
1229 sg_set_page(&sg[i], pages[i], 1232 sg_set_page(&sg[i], pages[i],
1230 min_t(int, length, PAGE_SIZE - offset), offset); 1233 min_t(unsigned, length, PAGE_SIZE - offset),
1234 offset);
1231 length -= (PAGE_SIZE - offset); 1235 length -= (PAGE_SIZE - offset);
1232 offset = 0; 1236 offset = 0;
1233 } 1237 }
@@ -1435,7 +1439,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
1435 nvme_free_iod(dev, iod); 1439 nvme_free_iod(dev, iod);
1436 } 1440 }
1437 1441
1438 if (!status && copy_to_user(&ucmd->result, &cmd.result, 1442 if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
1439 sizeof(cmd.result))) 1443 sizeof(cmd.result)))
1440 status = -EFAULT; 1444 status = -EFAULT;
1441 1445
@@ -1633,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count)
1633 1637
1634static int nvme_setup_io_queues(struct nvme_dev *dev) 1638static int nvme_setup_io_queues(struct nvme_dev *dev)
1635{ 1639{
1636 int result, cpu, i, nr_io_queues, db_bar_size, q_depth; 1640 struct pci_dev *pdev = dev->pci_dev;
1641 int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;
1637 1642
1638 nr_io_queues = num_online_cpus(); 1643 nr_io_queues = num_online_cpus();
1639 result = set_queue_count(dev, nr_io_queues); 1644 result = set_queue_count(dev, nr_io_queues);
@@ -1642,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1642 if (result < nr_io_queues) 1647 if (result < nr_io_queues)
1643 nr_io_queues = result; 1648 nr_io_queues = result;
1644 1649
1650 q_count = nr_io_queues;
1645 /* Deregister the admin queue's interrupt */ 1651 /* Deregister the admin queue's interrupt */
1646 free_irq(dev->entry[0].vector, dev->queues[0]); 1652 free_irq(dev->entry[0].vector, dev->queues[0]);
1647 1653
1648 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); 1654 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
1649 if (db_bar_size > 8192) { 1655 if (db_bar_size > 8192) {
1650 iounmap(dev->bar); 1656 iounmap(dev->bar);
1651 dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0), 1657 dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
1652 db_bar_size);
1653 dev->dbs = ((void __iomem *)dev->bar) + 4096; 1658 dev->dbs = ((void __iomem *)dev->bar) + 4096;
1654 dev->queues[0]->q_db = dev->dbs; 1659 dev->queues[0]->q_db = dev->dbs;
1655 } 1660 }
@@ -1657,19 +1662,36 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1657 for (i = 0; i < nr_io_queues; i++) 1662 for (i = 0; i < nr_io_queues; i++)
1658 dev->entry[i].entry = i; 1663 dev->entry[i].entry = i;
1659 for (;;) { 1664 for (;;) {
1660 result = pci_enable_msix(dev->pci_dev, dev->entry, 1665 result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
1661 nr_io_queues);
1662 if (result == 0) { 1666 if (result == 0) {
1663 break; 1667 break;
1664 } else if (result > 0) { 1668 } else if (result > 0) {
1665 nr_io_queues = result; 1669 nr_io_queues = result;
1666 continue; 1670 continue;
1667 } else { 1671 } else {
1668 nr_io_queues = 1; 1672 nr_io_queues = 0;
1669 break; 1673 break;
1670 } 1674 }
1671 } 1675 }
1672 1676
1677 if (nr_io_queues == 0) {
1678 nr_io_queues = q_count;
1679 for (;;) {
1680 result = pci_enable_msi_block(pdev, nr_io_queues);
1681 if (result == 0) {
1682 for (i = 0; i < nr_io_queues; i++)
1683 dev->entry[i].vector = i + pdev->irq;
1684 break;
1685 } else if (result > 0) {
1686 nr_io_queues = result;
1687 continue;
1688 } else {
1689 nr_io_queues = 1;
1690 break;
1691 }
1692 }
1693 }
1694
1673 result = queue_request_irq(dev, dev->queues[0], "nvme admin"); 1695 result = queue_request_irq(dev, dev->queues[0], "nvme admin");
1674 /* XXX: handle failure here */ 1696 /* XXX: handle failure here */
1675 1697
@@ -1850,7 +1872,10 @@ static void nvme_free_dev(struct kref *kref)
1850{ 1872{
1851 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); 1873 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
1852 nvme_dev_remove(dev); 1874 nvme_dev_remove(dev);
1853 pci_disable_msix(dev->pci_dev); 1875 if (dev->pci_dev->msi_enabled)
1876 pci_disable_msi(dev->pci_dev);
1877 else if (dev->pci_dev->msix_enabled)
1878 pci_disable_msix(dev->pci_dev);
1854 iounmap(dev->bar); 1879 iounmap(dev->bar);
1855 nvme_release_instance(dev); 1880 nvme_release_instance(dev);
1856 nvme_release_prp_pools(dev); 1881 nvme_release_prp_pools(dev);
@@ -1923,8 +1948,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1923 INIT_LIST_HEAD(&dev->namespaces); 1948 INIT_LIST_HEAD(&dev->namespaces);
1924 dev->pci_dev = pdev; 1949 dev->pci_dev = pdev;
1925 pci_set_drvdata(pdev, dev); 1950 pci_set_drvdata(pdev, dev);
1926 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 1951
1927 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 1952 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
1953 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1954 else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
1955 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1956 else
1957 goto disable;
1958
1928 result = nvme_set_instance(dev); 1959 result = nvme_set_instance(dev);
1929 if (result) 1960 if (result)
1930 goto disable; 1961 goto disable;
@@ -1977,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1977 unmap: 2008 unmap:
1978 iounmap(dev->bar); 2009 iounmap(dev->bar);
1979 disable_msix: 2010 disable_msix:
1980 pci_disable_msix(pdev); 2011 if (dev->pci_dev->msi_enabled)
2012 pci_disable_msi(dev->pci_dev);
2013 else if (dev->pci_dev->msix_enabled)
2014 pci_disable_msix(dev->pci_dev);
1981 nvme_release_instance(dev); 2015 nvme_release_instance(dev);
1982 nvme_release_prp_pools(dev); 2016 nvme_release_prp_pools(dev);
1983 disable: 2017 disable:
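
Note: nvme_probe() now checks the return value of dma_set_mask() and falls back from a 64-bit to a 32-bit mask before giving up; ignoring the return value can leave the device claiming a mask the platform never accepted. The checked-fallback shape, reduced to its core (an illustrative probe excerpt, error label name assumed):

	/* try 64-bit DMA first, fall back to 32-bit, otherwise bail out */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	else
		goto err_disable;	/* no usable DMA configuration */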
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index fed54b039893..102de2f52b5c 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -44,7 +44,6 @@
44#include <linux/sched.h> 44#include <linux/sched.h>
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/types.h> 46#include <linux/types.h>
47#include <linux/version.h>
48#include <scsi/sg.h> 47#include <scsi/sg.h>
49#include <scsi/scsi.h> 48#include <scsi/scsi.h>
50 49
@@ -1654,7 +1653,7 @@ static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
1654 } 1653 }
1655} 1654}
1656 1655
1657static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, 1656static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1658 u8 *mode_page, u8 page_code) 1657 u8 *mode_page, u8 page_code)
1659{ 1658{
1660 int res = SNTI_TRANSLATION_SUCCESS; 1659 int res = SNTI_TRANSLATION_SUCCESS;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 3c08983e600a..f5d0ea11d9fd 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -83,7 +83,8 @@
83 83
84#define MAX_SPEED 0xffff 84#define MAX_SPEED 0xffff
85 85
86#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1)) 86#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
87 ~(sector_t)((pd)->settings.size - 1))
87 88
88static DEFINE_MUTEX(pktcdvd_mutex); 89static DEFINE_MUTEX(pktcdvd_mutex);
89static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; 90static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
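
Note: the zone size is a narrower unsigned field than sector_t, so without the cast ~(size - 1) is computed in 32 bits and then zero-extended, silently masking off the upper half of a 64-bit sector number. A small, self-contained user-space illustration of that promotion bug (values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sector = 0x100000123ULL;	/* a sector past 2 TB of 512-byte sectors */
	uint32_t size = 128;			/* zone size, power of two */

	/* ~(size - 1) is a 32-bit value; zero-extension drops the high bits */
	uint64_t wrong = sector & ~(size - 1);
	/* widening before the complement keeps the high bits set in the mask */
	uint64_t right = sector & ~(uint64_t)(size - 1);

	printf("wrong=%#llx right=%#llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}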
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d6d314027b5d..3063452e55da 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -519,8 +519,8 @@ static const struct block_device_operations rbd_bd_ops = {
519}; 519};
520 520
521/* 521/*
522 * Initialize an rbd client instance. 522 * Initialize an rbd client instance. Success or not, this function
523 * We own *ceph_opts. 523 * consumes ceph_opts.
524 */ 524 */
525static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts) 525static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
526{ 526{
@@ -675,7 +675,8 @@ static int parse_rbd_opts_token(char *c, void *private)
675 675
676/* 676/*
677 * Get a ceph client with specific addr and configuration, if one does 677 * Get a ceph client with specific addr and configuration, if one does
678 * not exist create it. 678 * not exist create it. Either way, ceph_opts is consumed by this
679 * function.
679 */ 680 */
680static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts) 681static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
681{ 682{
@@ -4697,8 +4698,10 @@ out:
4697 return ret; 4698 return ret;
4698} 4699}
4699 4700
4700/* Undo whatever state changes are made by v1 or v2 image probe */ 4701/*
4701 4702 * Undo whatever state changes are made by v1 or v2 header info
4703 * call.
4704 */
4702static void rbd_dev_unprobe(struct rbd_device *rbd_dev) 4705static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4703{ 4706{
4704 struct rbd_image_header *header; 4707 struct rbd_image_header *header;
@@ -4902,9 +4905,10 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
4902 int tmp; 4905 int tmp;
4903 4906
4904 /* 4907 /*
4905 * Get the id from the image id object. If it's not a 4908 * Get the id from the image id object. Unless there's an
4906 * format 2 image, we'll get ENOENT back, and we'll assume 4909 * error, rbd_dev->spec->image_id will be filled in with
4907 * it's a format 1 image. 4910 * a dynamically-allocated string, and rbd_dev->image_format
4911 * will be set to either 1 or 2.
4908 */ 4912 */
4909 ret = rbd_dev_image_id(rbd_dev); 4913 ret = rbd_dev_image_id(rbd_dev);
4910 if (ret) 4914 if (ret)
@@ -4992,7 +4996,6 @@ static ssize_t rbd_add(struct bus_type *bus,
4992 rc = PTR_ERR(rbdc); 4996 rc = PTR_ERR(rbdc);
4993 goto err_out_args; 4997 goto err_out_args;
4994 } 4998 }
4995 ceph_opts = NULL; /* rbd_dev client now owns this */
4996 4999
4997 /* pick the pool */ 5000 /* pick the pool */
4998 osdc = &rbdc->client->osdc; 5001 osdc = &rbdc->client->osdc;
@@ -5027,18 +5030,18 @@ static ssize_t rbd_add(struct bus_type *bus,
5027 rbd_dev->mapping.read_only = read_only; 5030 rbd_dev->mapping.read_only = read_only;
5028 5031
5029 rc = rbd_dev_device_setup(rbd_dev); 5032 rc = rbd_dev_device_setup(rbd_dev);
5030 if (!rc) 5033 if (rc) {
5031 return count; 5034 rbd_dev_image_release(rbd_dev);
5035 goto err_out_module;
5036 }
5037
5038 return count;
5032 5039
5033 rbd_dev_image_release(rbd_dev);
5034err_out_rbd_dev: 5040err_out_rbd_dev:
5035 rbd_dev_destroy(rbd_dev); 5041 rbd_dev_destroy(rbd_dev);
5036err_out_client: 5042err_out_client:
5037 rbd_put_client(rbdc); 5043 rbd_put_client(rbdc);
5038err_out_args: 5044err_out_args:
5039 if (ceph_opts)
5040 ceph_destroy_options(ceph_opts);
5041 kfree(rbd_opts);
5042 rbd_spec_put(spec); 5045 rbd_spec_put(spec);
5043err_out_module: 5046err_out_module:
5044 module_put(THIS_MODULE); 5047 module_put(THIS_MODULE);
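
Note: the comment updates pin down an ownership rule: the client-create/get helpers consume ceph_opts whether they succeed or fail, so the caller must not destroy it again on its own error path. A tiny sketch of the same "callee consumes on both paths" convention, with entirely hypothetical names:

/* Consumes 'opts' on success *and* on failure; the caller must not
 * touch it after this call returns. */
static struct client *client_get(struct options *opts)
{
	struct client *c = client_find(opts);

	if (c) {
		options_destroy(opts);	/* already have one: drop the caller's copy */
		return c;
	}
	return client_create(opts);	/* the create path takes ownership too */
}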
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index fdfd61a2d523..11a6104a1e4f 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -201,7 +201,7 @@ config BT_MRVL
201 The core driver to support Marvell Bluetooth devices. 201 The core driver to support Marvell Bluetooth devices.
202 202
203 This driver is required if you want to support 203 This driver is required if you want to support
204 Marvell Bluetooth devices, such as 8688/8787/8797. 204 Marvell Bluetooth devices, such as 8688/8787/8797/8897.
205 205
206 Say Y here to compile Marvell Bluetooth driver 206 Say Y here to compile Marvell Bluetooth driver
207 into the kernel or say M to compile it as module. 207 into the kernel or say M to compile it as module.
@@ -214,7 +214,7 @@ config BT_MRVL_SDIO
214 The driver for Marvell Bluetooth chipsets with SDIO interface. 214 The driver for Marvell Bluetooth chipsets with SDIO interface.
215 215
216 This driver is required if you want to use Marvell Bluetooth 216 This driver is required if you want to use Marvell Bluetooth
217 devices with SDIO interface. Currently SD8688/SD8787/SD8797 217 devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8897
218 chipsets are supported. 218 chipsets are supported.
219 219
220 Say Y here to compile support for Marvell BT-over-SDIO driver 220 Say Y here to compile support for Marvell BT-over-SDIO driver
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index c63488c54f4a..13693b7a0d5c 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -82,6 +82,23 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
82 .io_port_2 = 0x7a, 82 .io_port_2 = 0x7a,
83}; 83};
84 84
85static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
86 .cfg = 0x00,
87 .host_int_mask = 0x02,
88 .host_intstatus = 0x03,
89 .card_status = 0x50,
90 .sq_read_base_addr_a0 = 0x60,
91 .sq_read_base_addr_a1 = 0x61,
92 .card_revision = 0xbc,
93 .card_fw_status0 = 0xc0,
94 .card_fw_status1 = 0xc1,
95 .card_rx_len = 0xc2,
96 .card_rx_unit = 0xc3,
97 .io_port_0 = 0xd8,
98 .io_port_1 = 0xd9,
99 .io_port_2 = 0xda,
100};
101
85static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { 102static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
86 .helper = "mrvl/sd8688_helper.bin", 103 .helper = "mrvl/sd8688_helper.bin",
87 .firmware = "mrvl/sd8688.bin", 104 .firmware = "mrvl/sd8688.bin",
@@ -103,6 +120,13 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
103 .sd_blksz_fw_dl = 256, 120 .sd_blksz_fw_dl = 256,
104}; 121};
105 122
123static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
124 .helper = NULL,
125 .firmware = "mrvl/sd8897_uapsta.bin",
126 .reg = &btmrvl_reg_88xx,
127 .sd_blksz_fw_dl = 256,
128};
129
106static const struct sdio_device_id btmrvl_sdio_ids[] = { 130static const struct sdio_device_id btmrvl_sdio_ids[] = {
107 /* Marvell SD8688 Bluetooth device */ 131 /* Marvell SD8688 Bluetooth device */
108 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105), 132 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
@@ -116,6 +140,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
116 /* Marvell SD8797 Bluetooth device */ 140 /* Marvell SD8797 Bluetooth device */
117 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), 141 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
118 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, 142 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
143 /* Marvell SD8897 Bluetooth device */
144 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
145 .driver_data = (unsigned long) &btmrvl_sdio_sd8897 },
119 146
120 { } /* Terminating entry */ 147 { } /* Terminating entry */
121}; 148};
@@ -1194,3 +1221,4 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
1194MODULE_FIRMWARE("mrvl/sd8688.bin"); 1221MODULE_FIRMWARE("mrvl/sd8688.bin");
1195MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); 1222MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
1196MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); 1223MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
1224MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index a97bb6c1596c..c3dc1c04a5df 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -863,7 +863,7 @@ static struct of_device_id sahara_dt_ids[] = {
863 { .compatible = "fsl,imx27-sahara" }, 863 { .compatible = "fsl,imx27-sahara" },
864 { /* sentinel */ } 864 { /* sentinel */ }
865}; 865};
866MODULE_DEVICE_TABLE(platform, sahara_dt_ids); 866MODULE_DEVICE_TABLE(of, sahara_dt_ids);
867 867
868static int sahara_probe(struct platform_device *pdev) 868static int sahara_probe(struct platform_device *pdev)
869{ 869{
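
Note: MODULE_DEVICE_TABLE() wants the bus type that matches the table's element type, so an of_device_id table should be exported with "of"; that is what lets module autoloading pick up the OF modalias. Typical wiring, sketched with placeholder names (the driver, probe and remove functions are hypothetical):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int mydrv_probe(struct platform_device *pdev)
{
	return 0;	/* real setup would go here */
}

static int mydrv_remove(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id mydrv_dt_ids[] = {
	{ .compatible = "vendor,mydev" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mydrv_dt_ids);

static struct platform_driver mydrv_driver = {
	.probe	= mydrv_probe,
	.remove	= mydrv_remove,
	.driver	= {
		.name		= "mydrv",
		.owner		= THIS_MODULE,
		.of_match_table	= mydrv_dt_ids,
	},
};
module_platform_driver(mydrv_driver);

MODULE_LICENSE("GPL");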
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 3cfd0931fbfb..82430ad8ba62 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -1462,7 +1462,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1462 size_t addr = 0; 1462 size_t addr = 0;
1463 struct gtt_range *gt; 1463 struct gtt_range *gt;
1464 struct drm_gem_object *obj; 1464 struct drm_gem_object *obj;
1465 int ret; 1465 int ret = 0;
1466 1466
1467 /* if we want to turn of the cursor ignore width and height */ 1467 /* if we want to turn of the cursor ignore width and height */
1468 if (!handle) { 1468 if (!handle) {
@@ -1499,7 +1499,8 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1499 1499
1500 if (obj->size < width * height * 4) { 1500 if (obj->size < width * height * 4) {
1501 dev_dbg(dev->dev, "buffer is to small\n"); 1501 dev_dbg(dev->dev, "buffer is to small\n");
1502 return -ENOMEM; 1502 ret = -ENOMEM;
1503 goto unref_cursor;
1503 } 1504 }
1504 1505
1505 gt = container_of(obj, struct gtt_range, gem); 1506 gt = container_of(obj, struct gtt_range, gem);
@@ -1508,7 +1509,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1508 ret = psb_gtt_pin(gt); 1509 ret = psb_gtt_pin(gt);
1509 if (ret) { 1510 if (ret) {
1510 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); 1511 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
1511 return ret; 1512 goto unref_cursor;
1512 } 1513 }
1513 1514
1514 addr = gt->offset; /* Or resource.start ??? */ 1515 addr = gt->offset; /* Or resource.start ??? */
@@ -1532,9 +1533,14 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
1532 struct gtt_range, gem); 1533 struct gtt_range, gem);
1533 psb_gtt_unpin(gt); 1534 psb_gtt_unpin(gt);
1534 drm_gem_object_unreference(psb_intel_crtc->cursor_obj); 1535 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
1535 psb_intel_crtc->cursor_obj = obj;
1536 } 1536 }
1537 return 0; 1537
1538 psb_intel_crtc->cursor_obj = obj;
1539 return ret;
1540
1541unref_cursor:
1542 drm_gem_object_unreference(obj);
1543 return ret;
1538} 1544}
1539 1545
1540static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 1546static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -1750,6 +1756,19 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
1750 kfree(psb_intel_crtc); 1756 kfree(psb_intel_crtc);
1751} 1757}
1752 1758
1759static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
1760{
1761 struct gtt_range *gt;
1762 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1763
1764 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1765
1766 if (crtc->fb) {
1767 gt = to_psb_fb(crtc->fb)->gtt;
1768 psb_gtt_unpin(gt);
1769 }
1770}
1771
1753const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { 1772const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
1754 .dpms = cdv_intel_crtc_dpms, 1773 .dpms = cdv_intel_crtc_dpms,
1755 .mode_fixup = cdv_intel_crtc_mode_fixup, 1774 .mode_fixup = cdv_intel_crtc_mode_fixup,
@@ -1757,6 +1776,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
1757 .mode_set_base = cdv_intel_pipe_set_base, 1776 .mode_set_base = cdv_intel_pipe_set_base,
1758 .prepare = cdv_intel_crtc_prepare, 1777 .prepare = cdv_intel_crtc_prepare,
1759 .commit = cdv_intel_crtc_commit, 1778 .commit = cdv_intel_crtc_commit,
1779 .disable = cdv_intel_crtc_disable,
1760}; 1780};
1761 1781
1762const struct drm_crtc_funcs cdv_intel_crtc_funcs = { 1782const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
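
Note: both cursor_set error paths now funnel through a single unref_cursor label, so the GEM reference taken by the handle lookup is dropped exactly once whatever goes wrong. The canonical shape of that goto-unwind pattern, as a generic sketch with invented names:

static int set_resource(struct ctx *ctx, u32 handle)
{
	struct object *obj;
	int ret = 0;

	obj = object_lookup(ctx, handle);	/* takes a reference */
	if (!obj)
		return -ENOENT;

	if (obj->size < ctx->needed) {
		ret = -ENOMEM;
		goto unref;			/* error: drop the reference */
	}

	ret = object_pin(obj);
	if (ret)
		goto unref;

	ctx->current_obj = obj;			/* success: keep the reference */
	return 0;

unref:
	object_put(obj);
	return ret;
}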
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1534e220097a..8b1b6d923abe 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -121,8 +121,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
121 unsigned long address; 121 unsigned long address;
122 int ret; 122 int ret;
123 unsigned long pfn; 123 unsigned long pfn;
124 /* FIXME: assumes fb at stolen base which may not be true */ 124 unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
125 unsigned long phys_addr = (unsigned long)dev_priv->stolen_base; 125 psbfb->gtt->offset;
126 126
127 page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 127 page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
128 address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT); 128 address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6e8f42b61ff6..6666493789d1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -843,7 +843,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
843 struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt; 843 struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
844 struct drm_gem_object *obj; 844 struct drm_gem_object *obj;
845 void *tmp_dst, *tmp_src; 845 void *tmp_dst, *tmp_src;
846 int ret, i, cursor_pages; 846 int ret = 0, i, cursor_pages;
847 847
848 /* if we want to turn of the cursor ignore width and height */ 848 /* if we want to turn of the cursor ignore width and height */
849 if (!handle) { 849 if (!handle) {
@@ -880,7 +880,8 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
880 880
881 if (obj->size < width * height * 4) { 881 if (obj->size < width * height * 4) {
882 dev_dbg(dev->dev, "buffer is to small\n"); 882 dev_dbg(dev->dev, "buffer is to small\n");
883 return -ENOMEM; 883 ret = -ENOMEM;
884 goto unref_cursor;
884 } 885 }
885 886
886 gt = container_of(obj, struct gtt_range, gem); 887 gt = container_of(obj, struct gtt_range, gem);
@@ -889,13 +890,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
889 ret = psb_gtt_pin(gt); 890 ret = psb_gtt_pin(gt);
890 if (ret) { 891 if (ret) {
891 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); 892 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
892 return ret; 893 goto unref_cursor;
893 } 894 }
894 895
895 if (dev_priv->ops->cursor_needs_phys) { 896 if (dev_priv->ops->cursor_needs_phys) {
896 if (cursor_gt == NULL) { 897 if (cursor_gt == NULL) {
897 dev_err(dev->dev, "No hardware cursor mem available"); 898 dev_err(dev->dev, "No hardware cursor mem available");
898 return -ENOMEM; 899 ret = -ENOMEM;
900 goto unref_cursor;
899 } 901 }
900 902
901 /* Prevent overflow */ 903 /* Prevent overflow */
@@ -936,9 +938,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
936 struct gtt_range, gem); 938 struct gtt_range, gem);
937 psb_gtt_unpin(gt); 939 psb_gtt_unpin(gt);
938 drm_gem_object_unreference(psb_intel_crtc->cursor_obj); 940 drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
939 psb_intel_crtc->cursor_obj = obj;
940 } 941 }
941 return 0; 942
943 psb_intel_crtc->cursor_obj = obj;
944 return ret;
945
946unref_cursor:
947 drm_gem_object_unreference(obj);
948 return ret;
942} 949}
943 950
944static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 951static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -1150,6 +1157,19 @@ static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
1150 kfree(psb_intel_crtc); 1157 kfree(psb_intel_crtc);
1151} 1158}
1152 1159
1160static void psb_intel_crtc_disable(struct drm_crtc *crtc)
1161{
1162 struct gtt_range *gt;
1163 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1164
1165 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1166
1167 if (crtc->fb) {
1168 gt = to_psb_fb(crtc->fb)->gtt;
1169 psb_gtt_unpin(gt);
1170 }
1171}
1172
1153const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { 1173const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
1154 .dpms = psb_intel_crtc_dpms, 1174 .dpms = psb_intel_crtc_dpms,
1155 .mode_fixup = psb_intel_crtc_mode_fixup, 1175 .mode_fixup = psb_intel_crtc_mode_fixup,
@@ -1157,6 +1177,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
1157 .mode_set_base = psb_intel_pipe_set_base, 1177 .mode_set_base = psb_intel_pipe_set_base,
1158 .prepare = psb_intel_crtc_prepare, 1178 .prepare = psb_intel_crtc_prepare,
1159 .commit = psb_intel_crtc_commit, 1179 .commit = psb_intel_crtc_commit,
1180 .disable = psb_intel_crtc_disable,
1160}; 1181};
1161 1182
1162const struct drm_crtc_funcs psb_intel_crtc_funcs = { 1183const struct drm_crtc_funcs psb_intel_crtc_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 4c47b449b775..d4ea6c265ce1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1777,10 +1777,13 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1777 * arranged in priority order. 1777 * arranged in priority order.
1778 */ 1778 */
1779 intel_ddc_get_modes(connector, &intel_sdvo->ddc); 1779 intel_ddc_get_modes(connector, &intel_sdvo->ddc);
1780 if (list_empty(&connector->probed_modes) == false)
1781 goto end;
1782 1780
1783 /* Fetch modes from VBT */ 1781 /*
1782 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
1783 * SDVO->LVDS transcoders can't cope with the EDID mode. Since
1784 * drm_mode_probed_add adds the mode at the head of the list we add it
1785 * last.
1786 */
1784 if (dev_priv->sdvo_lvds_vbt_mode != NULL) { 1787 if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
1785 newmode = drm_mode_duplicate(connector->dev, 1788 newmode = drm_mode_duplicate(connector->dev,
1786 dev_priv->sdvo_lvds_vbt_mode); 1789 dev_priv->sdvo_lvds_vbt_mode);
@@ -1792,7 +1795,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1792 } 1795 }
1793 } 1796 }
1794 1797
1795end:
1796 list_for_each_entry(newmode, &connector->probed_modes, head) { 1798 list_for_each_entry(newmode, &connector->probed_modes, head) {
1797 if (newmode->type & DRM_MODE_TYPE_PREFERRED) { 1799 if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
1798 intel_sdvo->sdvo_lvds_fixed_mode = 1800 intel_sdvo->sdvo_lvds_fixed_mode =
@@ -2790,12 +2792,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2790 SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915; 2792 SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
2791 } 2793 }
2792 2794
2793 /* Only enable the hotplug irq if we need it, to work around noisy
2794 * hotplug lines.
2795 */
2796 if (intel_sdvo->hotplug_active)
2797 intel_encoder->hpd_pin = HPD_SDVO_B ? HPD_SDVO_B : HPD_SDVO_C;
2798
2799 intel_encoder->compute_config = intel_sdvo_compute_config; 2795 intel_encoder->compute_config = intel_sdvo_compute_config;
2800 intel_encoder->disable = intel_disable_sdvo; 2796 intel_encoder->disable = intel_disable_sdvo;
2801 intel_encoder->mode_set = intel_sdvo_mode_set; 2797 intel_encoder->mode_set = intel_sdvo_mode_set;
@@ -2814,6 +2810,14 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2814 goto err_output; 2810 goto err_output;
2815 } 2811 }
2816 2812
2813 /* Only enable the hotplug irq if we need it, to work around noisy
2814 * hotplug lines.
2815 */
2816 if (intel_sdvo->hotplug_active) {
2817 intel_encoder->hpd_pin =
2818 intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
2819 }
2820
2817 /* 2821 /*
2818 * Cloning SDVO with anything is often impossible, since the SDVO 2822 * Cloning SDVO with anything is often impossible, since the SDVO
2819 * encoder can request a special input timing mode. And even if that's 2823 * encoder can request a special input timing mode. And even if that's
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index dc3ae5c56f56..d39a5cede0b0 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -264,9 +264,12 @@ static struct mt_class mt_classes[] = {
 static void mt_free_input_name(struct hid_input *hi)
 {
 	struct hid_device *hdev = hi->report->device;
+	const char *name = hi->input->name;
 
-	if (hi->input->name != hdev->name)
-		kfree(hi->input->name);
+	if (name != hdev->name) {
+		hi->input->name = hdev->name;
+		kfree(name);
+	}
 }
 
 static ssize_t mt_show_quirks(struct device *dev,
@@ -1040,11 +1043,11 @@ static void mt_remove(struct hid_device *hdev)
 	struct hid_input *hi;
 
 	sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
-	hid_hw_stop(hdev);
-
 	list_for_each_entry(hi, &hdev->inputs, list)
 		mt_free_input_name(hi);
 
+	hid_hw_stop(hdev);
+
 	kfree(td);
 	hid_set_drvdata(hdev, NULL);
 }
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 7e76922a4ba9..f920619cd6da 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -331,26 +331,68 @@ static int adm1021_detect(struct i2c_client *client,
331 man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID); 331 man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID);
332 dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID); 332 dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID);
333 333
334 if (man_id < 0 || dev_id < 0)
335 return -ENODEV;
336
334 if (man_id == 0x4d && dev_id == 0x01) 337 if (man_id == 0x4d && dev_id == 0x01)
335 type_name = "max1617a"; 338 type_name = "max1617a";
336 else if (man_id == 0x41) { 339 else if (man_id == 0x41) {
337 if ((dev_id & 0xF0) == 0x30) 340 if ((dev_id & 0xF0) == 0x30)
338 type_name = "adm1023"; 341 type_name = "adm1023";
339 else 342 else if ((dev_id & 0xF0) == 0x00)
340 type_name = "adm1021"; 343 type_name = "adm1021";
344 else
345 return -ENODEV;
341 } else if (man_id == 0x49) 346 } else if (man_id == 0x49)
342 type_name = "thmc10"; 347 type_name = "thmc10";
343 else if (man_id == 0x23) 348 else if (man_id == 0x23)
344 type_name = "gl523sm"; 349 type_name = "gl523sm";
345 else if (man_id == 0x54) 350 else if (man_id == 0x54)
346 type_name = "mc1066"; 351 type_name = "mc1066";
347 /* LM84 Mfr ID in a different place, and it has more unused bits */ 352 else {
348 else if (conv_rate == 0x00 353 int lte, rte, lhi, rhi, llo, rlo;
349 && (config & 0x7F) == 0x00 354
350 && (status & 0xAB) == 0x00) 355 /* extra checks for LM84 and MAX1617 to avoid misdetections */
351 type_name = "lm84"; 356
352 else 357 llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0));
353 type_name = "max1617"; 358 rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1));
359
360 /* fail if any of the additional register reads failed */
361 if (llo < 0 || rlo < 0)
362 return -ENODEV;
363
364 lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0));
365 rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1));
366 lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0));
367 rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1));
368
369 /*
370 * Fail for negative temperatures and negative high limits.
371 * This check also catches read errors on the tested registers.
372 */
373 if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0)
374 return -ENODEV;
375
376 /* fail if all registers hold the same value */
377 if (lte == rte && lte == lhi && lte == rhi && lte == llo
378 && lte == rlo)
379 return -ENODEV;
380
381 /*
382 * LM84 Mfr ID is in a different place,
383 * and it has more unused bits.
384 */
385 if (conv_rate == 0x00
386 && (config & 0x7F) == 0x00
387 && (status & 0xAB) == 0x00) {
388 type_name = "lm84";
389 } else {
390 /* fail if low limits are larger than high limits */
391 if ((s8)llo > lhi || (s8)rlo > rhi)
392 return -ENODEV;
393 type_name = "max1617";
394 }
395 }
354 396
355 pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n", 397 pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n",
356 type_name, i2c_adapter_id(adapter), client->addr); 398 type_name, i2c_adapter_id(adapter), client->addr);
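
The adm1021 hunk above tightens detection: failed reads, negative temperatures or high limits, six identical register values, and low limits above high limits all bail out before the LM84/MAX1617 fallback is taken. The decision tree can be exercised in user space; the sketch below takes the register values as plain parameters and only mirrors the thresholds visible in the patch (it is not the driver):

#include <stdio.h>

static const char *classify(int man_id, int dev_id, int conv_rate,
                            int config, int status,
                            int lte, int rte, int lhi, int rhi,
                            int llo, int rlo)
{
    if (man_id < 0 || dev_id < 0)
        return NULL;        /* SMBus read failed */
    if (man_id == 0x4d && dev_id == 0x01)
        return "max1617a";
    if (man_id == 0x41) {
        if ((dev_id & 0xF0) == 0x30)
            return "adm1023";
        return (dev_id & 0xF0) == 0x00 ? "adm1021" : NULL;
    }
    if (man_id == 0x49)
        return "thmc10";
    if (man_id == 0x23)
        return "gl523sm";
    if (man_id == 0x54)
        return "mc1066";

    /* extra checks for LM84 and MAX1617 to avoid misdetection */
    if (llo < 0 || rlo < 0)
        return NULL;        /* additional register reads failed */
    if ((signed char)lte < 0 || (signed char)rte < 0 ||
        (signed char)lhi < 0 || (signed char)rhi < 0)
        return NULL;        /* negative temperature or high limit */
    if (lte == rte && lte == lhi && lte == rhi && lte == llo && lte == rlo)
        return NULL;        /* all registers identical: probably not a sensor */
    if (conv_rate == 0x00 && (config & 0x7F) == 0x00 && (status & 0xAB) == 0x00)
        return "lm84";
    if ((signed char)llo > lhi || (signed char)rlo > rhi)
        return NULL;        /* low limit above high limit */
    return "max1617";
}

int main(void)
{
    /* made-up register snapshot that should classify as a MAX1617 */
    printf("%s\n", classify(0x00, 0x00, 0x02, 0x00, 0x00,
                            0x28, 0x2a, 0x46, 0x46, 0x0a, 0x0a));
    return 0;
}
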
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 05c220d05e23..f950c9d29f3e 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -1,7 +1,6 @@
1 1
2config BCACHE 2config BCACHE
3 tristate "Block device as cache" 3 tristate "Block device as cache"
4 select CLOSURES
5 ---help--- 4 ---help---
6 Allows a block device to be used as cache for other devices; uses 5 Allows a block device to be used as cache for other devices; uses
7 a btree for indexing and the layout is optimized for SSDs. 6 a btree for indexing and the layout is optimized for SSDs.
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 340146d7c17f..d3e15b42a4ab 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -1241,7 +1241,7 @@ void bch_cache_set_stop(struct cache_set *);
1241struct cache_set *bch_cache_set_alloc(struct cache_sb *); 1241struct cache_set *bch_cache_set_alloc(struct cache_sb *);
1242void bch_btree_cache_free(struct cache_set *); 1242void bch_btree_cache_free(struct cache_set *);
1243int bch_btree_cache_alloc(struct cache_set *); 1243int bch_btree_cache_alloc(struct cache_set *);
1244void bch_writeback_init_cached_dev(struct cached_dev *); 1244void bch_cached_dev_writeback_init(struct cached_dev *);
1245void bch_moving_init_cache_set(struct cache_set *); 1245void bch_moving_init_cache_set(struct cache_set *);
1246 1246
1247void bch_cache_allocator_exit(struct cache *ca); 1247void bch_cache_allocator_exit(struct cache *ca);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 64e679449c2a..b8730e714d69 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -93,24 +93,6 @@ static struct attribute *bch_stats_files[] = {
93}; 93};
94static KTYPE(bch_stats); 94static KTYPE(bch_stats);
95 95
96static void scale_accounting(unsigned long data);
97
98void bch_cache_accounting_init(struct cache_accounting *acc,
99 struct closure *parent)
100{
101 kobject_init(&acc->total.kobj, &bch_stats_ktype);
102 kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
103 kobject_init(&acc->hour.kobj, &bch_stats_ktype);
104 kobject_init(&acc->day.kobj, &bch_stats_ktype);
105
106 closure_init(&acc->cl, parent);
107 init_timer(&acc->timer);
108 acc->timer.expires = jiffies + accounting_delay;
109 acc->timer.data = (unsigned long) acc;
110 acc->timer.function = scale_accounting;
111 add_timer(&acc->timer);
112}
113
114int bch_cache_accounting_add_kobjs(struct cache_accounting *acc, 96int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
115 struct kobject *parent) 97 struct kobject *parent)
116{ 98{
@@ -244,3 +226,19 @@ void bch_mark_sectors_bypassed(struct search *s, int sectors)
244 atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); 226 atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
245 atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed); 227 atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
246} 228}
229
230void bch_cache_accounting_init(struct cache_accounting *acc,
231 struct closure *parent)
232{
233 kobject_init(&acc->total.kobj, &bch_stats_ktype);
234 kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
235 kobject_init(&acc->hour.kobj, &bch_stats_ktype);
236 kobject_init(&acc->day.kobj, &bch_stats_ktype);
237
238 closure_init(&acc->cl, parent);
239 init_timer(&acc->timer);
240 acc->timer.expires = jiffies + accounting_delay;
241 acc->timer.data = (unsigned long) acc;
242 acc->timer.function = scale_accounting;
243 add_timer(&acc->timer);
244}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c8046bc4aa57..f88e2b653a3f 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -634,11 +634,10 @@ static int open_dev(struct block_device *b, fmode_t mode)
634 return 0; 634 return 0;
635} 635}
636 636
637static int release_dev(struct gendisk *b, fmode_t mode) 637static void release_dev(struct gendisk *b, fmode_t mode)
638{ 638{
639 struct bcache_device *d = b->private_data; 639 struct bcache_device *d = b->private_data;
640 closure_put(&d->cl); 640 closure_put(&d->cl);
641 return 0;
642} 641}
643 642
644static int ioctl_dev(struct block_device *b, fmode_t mode, 643static int ioctl_dev(struct block_device *b, fmode_t mode,
@@ -732,8 +731,7 @@ static void bcache_device_free(struct bcache_device *d)
732 731
733 if (d->c) 732 if (d->c)
734 bcache_device_detach(d); 733 bcache_device_detach(d);
735 734 if (d->disk && d->disk->flags & GENHD_FL_UP)
736 if (d->disk)
737 del_gendisk(d->disk); 735 del_gendisk(d->disk);
738 if (d->disk && d->disk->queue) 736 if (d->disk && d->disk->queue)
739 blk_cleanup_queue(d->disk->queue); 737 blk_cleanup_queue(d->disk->queue);
@@ -756,12 +754,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
756 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || 754 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
757 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, 755 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
758 sizeof(struct bio_vec) * BIO_MAX_PAGES)) || 756 sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
759 bio_split_pool_init(&d->bio_split_hook)) 757 bio_split_pool_init(&d->bio_split_hook) ||
760 758 !(d->disk = alloc_disk(1)) ||
761 return -ENOMEM; 759 !(q = blk_alloc_queue(GFP_KERNEL)))
762
763 d->disk = alloc_disk(1);
764 if (!d->disk)
765 return -ENOMEM; 760 return -ENOMEM;
766 761
767 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor); 762 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
@@ -771,10 +766,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
771 d->disk->fops = &bcache_ops; 766 d->disk->fops = &bcache_ops;
772 d->disk->private_data = d; 767 d->disk->private_data = d;
773 768
774 q = blk_alloc_queue(GFP_KERNEL);
775 if (!q)
776 return -ENOMEM;
777
778 blk_queue_make_request(q, NULL); 769 blk_queue_make_request(q, NULL);
779 d->disk->queue = q; 770 d->disk->queue = q;
780 q->queuedata = d; 771 q->queuedata = d;
@@ -999,14 +990,17 @@ static void cached_dev_free(struct closure *cl)
999 990
1000 mutex_lock(&bch_register_lock); 991 mutex_lock(&bch_register_lock);
1001 992
1002 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); 993 if (atomic_read(&dc->running))
994 bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
1003 bcache_device_free(&dc->disk); 995 bcache_device_free(&dc->disk);
1004 list_del(&dc->list); 996 list_del(&dc->list);
1005 997
1006 mutex_unlock(&bch_register_lock); 998 mutex_unlock(&bch_register_lock);
1007 999
1008 if (!IS_ERR_OR_NULL(dc->bdev)) { 1000 if (!IS_ERR_OR_NULL(dc->bdev)) {
1009 blk_sync_queue(bdev_get_queue(dc->bdev)); 1001 if (dc->bdev->bd_disk)
1002 blk_sync_queue(bdev_get_queue(dc->bdev));
1003
1010 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1004 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1011 } 1005 }
1012 1006
@@ -1028,73 +1022,67 @@ static void cached_dev_flush(struct closure *cl)
1028 1022
1029static int cached_dev_init(struct cached_dev *dc, unsigned block_size) 1023static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
1030{ 1024{
1031 int err; 1025 int ret;
1032 struct io *io; 1026 struct io *io;
1033 1027 struct request_queue *q = bdev_get_queue(dc->bdev);
1034 closure_init(&dc->disk.cl, NULL);
1035 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1036 1028
1037 __module_get(THIS_MODULE); 1029 __module_get(THIS_MODULE);
1038 INIT_LIST_HEAD(&dc->list); 1030 INIT_LIST_HEAD(&dc->list);
1031 closure_init(&dc->disk.cl, NULL);
1032 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1039 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); 1033 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
1040
1041 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1042
1043 err = bcache_device_init(&dc->disk, block_size);
1044 if (err)
1045 goto err;
1046
1047 spin_lock_init(&dc->io_lock);
1048 closure_init_unlocked(&dc->sb_write);
1049 INIT_WORK(&dc->detach, cached_dev_detach_finish); 1034 INIT_WORK(&dc->detach, cached_dev_detach_finish);
1035 closure_init_unlocked(&dc->sb_write);
1036 INIT_LIST_HEAD(&dc->io_lru);
1037 spin_lock_init(&dc->io_lock);
1038 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1050 1039
1051 dc->sequential_merge = true; 1040 dc->sequential_merge = true;
1052 dc->sequential_cutoff = 4 << 20; 1041 dc->sequential_cutoff = 4 << 20;
1053 1042
1054 INIT_LIST_HEAD(&dc->io_lru);
1055 dc->sb_bio.bi_max_vecs = 1;
1056 dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
1057
1058 for (io = dc->io; io < dc->io + RECENT_IO; io++) { 1043 for (io = dc->io; io < dc->io + RECENT_IO; io++) {
1059 list_add(&io->lru, &dc->io_lru); 1044 list_add(&io->lru, &dc->io_lru);
1060 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); 1045 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
1061 } 1046 }
1062 1047
1063 bch_writeback_init_cached_dev(dc); 1048 ret = bcache_device_init(&dc->disk, block_size);
1049 if (ret)
1050 return ret;
1051
1052 set_capacity(dc->disk.disk,
1053 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
1054
1055 dc->disk.disk->queue->backing_dev_info.ra_pages =
1056 max(dc->disk.disk->queue->backing_dev_info.ra_pages,
1057 q->backing_dev_info.ra_pages);
1058
1059 bch_cached_dev_request_init(dc);
1060 bch_cached_dev_writeback_init(dc);
1064 return 0; 1061 return 0;
1065err:
1066 bcache_device_stop(&dc->disk);
1067 return err;
1068} 1062}
1069 1063
1070/* Cached device - bcache superblock */ 1064/* Cached device - bcache superblock */
1071 1065
1072static const char *register_bdev(struct cache_sb *sb, struct page *sb_page, 1066static void register_bdev(struct cache_sb *sb, struct page *sb_page,
1073 struct block_device *bdev, 1067 struct block_device *bdev,
1074 struct cached_dev *dc) 1068 struct cached_dev *dc)
1075{ 1069{
1076 char name[BDEVNAME_SIZE]; 1070 char name[BDEVNAME_SIZE];
1077 const char *err = "cannot allocate memory"; 1071 const char *err = "cannot allocate memory";
1078 struct gendisk *g;
1079 struct cache_set *c; 1072 struct cache_set *c;
1080 1073
1081 if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
1082 return err;
1083
1084 memcpy(&dc->sb, sb, sizeof(struct cache_sb)); 1074 memcpy(&dc->sb, sb, sizeof(struct cache_sb));
1085 dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
1086 dc->bdev = bdev; 1075 dc->bdev = bdev;
1087 dc->bdev->bd_holder = dc; 1076 dc->bdev->bd_holder = dc;
1088 1077
1089 g = dc->disk.disk; 1078 bio_init(&dc->sb_bio);
1090 1079 dc->sb_bio.bi_max_vecs = 1;
1091 set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset); 1080 dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
1092 1081 dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
1093 g->queue->backing_dev_info.ra_pages = 1082 get_page(sb_page);
1094 max(g->queue->backing_dev_info.ra_pages,
1095 bdev->bd_queue->backing_dev_info.ra_pages);
1096 1083
1097 bch_cached_dev_request_init(dc); 1084 if (cached_dev_init(dc, sb->block_size << 9))
1085 goto err;
1098 1086
1099 err = "error creating kobject"; 1087 err = "error creating kobject";
1100 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, 1088 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
@@ -1103,6 +1091,8 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
1103 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) 1091 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
1104 goto err; 1092 goto err;
1105 1093
1094 pr_info("registered backing device %s", bdevname(bdev, name));
1095
1106 list_add(&dc->list, &uncached_devices); 1096 list_add(&dc->list, &uncached_devices);
1107 list_for_each_entry(c, &bch_cache_sets, list) 1097 list_for_each_entry(c, &bch_cache_sets, list)
1108 bch_cached_dev_attach(dc, c); 1098 bch_cached_dev_attach(dc, c);
@@ -1111,15 +1101,10 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
1111 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) 1101 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
1112 bch_cached_dev_run(dc); 1102 bch_cached_dev_run(dc);
1113 1103
1114 return NULL; 1104 return;
1115err: 1105err:
1116 kobject_put(&dc->disk.kobj);
1117 pr_notice("error opening %s: %s", bdevname(bdev, name), err); 1106 pr_notice("error opening %s: %s", bdevname(bdev, name), err);
1118 /* 1107 bcache_device_stop(&dc->disk);
1119 * Return NULL instead of an error because kobject_put() cleans
1120 * everything up
1121 */
1122 return NULL;
1123} 1108}
1124 1109
1125/* Flash only volumes */ 1110/* Flash only volumes */
@@ -1717,20 +1702,11 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
1717 size_t free; 1702 size_t free;
1718 struct bucket *b; 1703 struct bucket *b;
1719 1704
1720 if (!ca)
1721 return -ENOMEM;
1722
1723 __module_get(THIS_MODULE); 1705 __module_get(THIS_MODULE);
1724 kobject_init(&ca->kobj, &bch_cache_ktype); 1706 kobject_init(&ca->kobj, &bch_cache_ktype);
1725 1707
1726 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
1727
1728 INIT_LIST_HEAD(&ca->discards); 1708 INIT_LIST_HEAD(&ca->discards);
1729 1709
1730 bio_init(&ca->sb_bio);
1731 ca->sb_bio.bi_max_vecs = 1;
1732 ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
1733
1734 bio_init(&ca->journal.bio); 1710 bio_init(&ca->journal.bio);
1735 ca->journal.bio.bi_max_vecs = 8; 1711 ca->journal.bio.bi_max_vecs = 8;
1736 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; 1712 ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@@ -1742,18 +1718,17 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
1742 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || 1718 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
1743 !init_fifo(&ca->unused, free << 2, GFP_KERNEL) || 1719 !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
1744 !init_heap(&ca->heap, free << 3, GFP_KERNEL) || 1720 !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
1745 !(ca->buckets = vmalloc(sizeof(struct bucket) * 1721 !(ca->buckets = vzalloc(sizeof(struct bucket) *
1746 ca->sb.nbuckets)) || 1722 ca->sb.nbuckets)) ||
1747 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) * 1723 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
1748 2, GFP_KERNEL)) || 1724 2, GFP_KERNEL)) ||
1749 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) || 1725 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
1750 !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) || 1726 !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
1751 bio_split_pool_init(&ca->bio_split_hook)) 1727 bio_split_pool_init(&ca->bio_split_hook))
1752 goto err; 1728 return -ENOMEM;
1753 1729
1754 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); 1730 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
1755 1731
1756 memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket));
1757 for_each_bucket(b, ca) 1732 for_each_bucket(b, ca)
1758 atomic_set(&b->pin, 0); 1733 atomic_set(&b->pin, 0);
1759 1734
@@ -1766,22 +1741,28 @@ err:
1766 return -ENOMEM; 1741 return -ENOMEM;
1767} 1742}
1768 1743
1769static const char *register_cache(struct cache_sb *sb, struct page *sb_page, 1744static void register_cache(struct cache_sb *sb, struct page *sb_page,
1770 struct block_device *bdev, struct cache *ca) 1745 struct block_device *bdev, struct cache *ca)
1771{ 1746{
1772 char name[BDEVNAME_SIZE]; 1747 char name[BDEVNAME_SIZE];
1773 const char *err = "cannot allocate memory"; 1748 const char *err = "cannot allocate memory";
1774 1749
1775 if (cache_alloc(sb, ca) != 0) 1750 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
1776 return err;
1777
1778 ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
1779 ca->bdev = bdev; 1751 ca->bdev = bdev;
1780 ca->bdev->bd_holder = ca; 1752 ca->bdev->bd_holder = ca;
1781 1753
1754 bio_init(&ca->sb_bio);
1755 ca->sb_bio.bi_max_vecs = 1;
1756 ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
1757 ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
1758 get_page(sb_page);
1759
1782 if (blk_queue_discard(bdev_get_queue(ca->bdev))) 1760 if (blk_queue_discard(bdev_get_queue(ca->bdev)))
1783 ca->discard = CACHE_DISCARD(&ca->sb); 1761 ca->discard = CACHE_DISCARD(&ca->sb);
1784 1762
1763 if (cache_alloc(sb, ca) != 0)
1764 goto err;
1765
1785 err = "error creating kobject"; 1766 err = "error creating kobject";
1786 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) 1767 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
1787 goto err; 1768 goto err;
@@ -1791,15 +1772,10 @@ static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
1791 goto err; 1772 goto err;
1792 1773
1793 pr_info("registered cache device %s", bdevname(bdev, name)); 1774 pr_info("registered cache device %s", bdevname(bdev, name));
1794 1775 return;
1795 return NULL;
1796err: 1776err:
1777 pr_notice("error opening %s: %s", bdevname(bdev, name), err);
1797 kobject_put(&ca->kobj); 1778 kobject_put(&ca->kobj);
1798 pr_info("error opening %s: %s", bdevname(bdev, name), err);
1799 /* Return NULL instead of an error because kobject_put() cleans
1800 * everything up
1801 */
1802 return NULL;
1803} 1779}
1804 1780
1805/* Global interfaces/init */ 1781/* Global interfaces/init */
@@ -1833,12 +1809,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1833 bdev = blkdev_get_by_path(strim(path), 1809 bdev = blkdev_get_by_path(strim(path),
1834 FMODE_READ|FMODE_WRITE|FMODE_EXCL, 1810 FMODE_READ|FMODE_WRITE|FMODE_EXCL,
1835 sb); 1811 sb);
1836 if (bdev == ERR_PTR(-EBUSY)) 1812 if (IS_ERR(bdev)) {
1837 err = "device busy"; 1813 if (bdev == ERR_PTR(-EBUSY))
1838 1814 err = "device busy";
1839 if (IS_ERR(bdev) ||
1840 set_blocksize(bdev, 4096))
1841 goto err; 1815 goto err;
1816 }
1817
1818 err = "failed to set blocksize";
1819 if (set_blocksize(bdev, 4096))
1820 goto err_close;
1842 1821
1843 err = read_super(sb, bdev, &sb_page); 1822 err = read_super(sb, bdev, &sb_page);
1844 if (err) 1823 if (err)
@@ -1846,33 +1825,33 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
1846 1825
1847 if (SB_IS_BDEV(sb)) { 1826 if (SB_IS_BDEV(sb)) {
1848 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 1827 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1828 if (!dc)
1829 goto err_close;
1849 1830
1850 err = register_bdev(sb, sb_page, bdev, dc); 1831 register_bdev(sb, sb_page, bdev, dc);
1851 } else { 1832 } else {
1852 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); 1833 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
1834 if (!ca)
1835 goto err_close;
1853 1836
1854 err = register_cache(sb, sb_page, bdev, ca); 1837 register_cache(sb, sb_page, bdev, ca);
1855 } 1838 }
1856 1839out:
1857 if (err) { 1840 if (sb_page)
1858 /* register_(bdev|cache) will only return an error if they
1859 * didn't get far enough to create the kobject - if they did,
1860 * the kobject destructor will do this cleanup.
1861 */
1862 put_page(sb_page); 1841 put_page(sb_page);
1863err_close:
1864 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1865err:
1866 if (attr != &ksysfs_register_quiet)
1867 pr_info("error opening %s: %s", path, err);
1868 ret = -EINVAL;
1869 }
1870
1871 kfree(sb); 1842 kfree(sb);
1872 kfree(path); 1843 kfree(path);
1873 mutex_unlock(&bch_register_lock); 1844 mutex_unlock(&bch_register_lock);
1874 module_put(THIS_MODULE); 1845 module_put(THIS_MODULE);
1875 return ret; 1846 return ret;
1847
1848err_close:
1849 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1850err:
1851 if (attr != &ksysfs_register_quiet)
1852 pr_info("error opening %s: %s", path, err);
1853 ret = -EINVAL;
1854 goto out;
1876} 1855}
1877 1856
1878static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) 1857static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
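
register_bcache() above now funnels every failure through err_close/err labels that sit after the common "out" cleanup and jump back to it, so the superblock page and the path/sb buffers are released on exactly one path. A small stand-alone sketch of that label layout, with invented helper names standing in for the bcache calls:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *open_device(const char *path)
{
    return strcmp(path, "bad") != 0 ? malloc(1) : NULL;
}

static void close_device(void *dev) { free(dev); }
static int read_super_block(void *dev) { (void)dev; return 0; }

static int register_device(const char *path)
{
    int ret = 0;
    char *sb = malloc(64);      /* scratch buffer, always freed at "out" */
    void *dev;

    if (!sb)
        return -1;

    dev = open_device(path);
    if (!dev)
        goto err;               /* nothing but sb to clean up */

    if (read_super_block(dev) != 0)
        goto err_close;         /* device was opened, so close it too */

    printf("registered %s\n", path);
    close_device(dev);          /* the real code hands the device off instead */
out:
    free(sb);
    return ret;

err_close:
    close_device(dev);
err:
    fprintf(stderr, "error opening %s\n", path);
    ret = -1;
    goto out;
}

int main(void)
{
    register_device("good");
    register_device("bad");
    return 0;
}
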
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 93e7e31a4bd3..2714ed3991d1 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -375,7 +375,7 @@ err:
375 refill_dirty(cl); 375 refill_dirty(cl);
376} 376}
377 377
378void bch_writeback_init_cached_dev(struct cached_dev *dc) 378void bch_cached_dev_writeback_init(struct cached_dev *dc)
379{ 379{
380 closure_init_unlocked(&dc->writeback); 380 closure_init_unlocked(&dc->writeback);
381 init_rwsem(&dc->writeback_lock); 381 init_rwsem(&dc->writeback_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 681d1099a2d5..9b82377a833b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5268,8 +5268,8 @@ static void md_clean(struct mddev *mddev)
5268 5268
5269static void __md_stop_writes(struct mddev *mddev) 5269static void __md_stop_writes(struct mddev *mddev)
5270{ 5270{
5271 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5271 if (mddev->sync_thread) { 5272 if (mddev->sync_thread) {
5272 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5273 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5273 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5274 md_reap_sync_thread(mddev); 5274 md_reap_sync_thread(mddev);
5275 } 5275 }
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 55951182af73..6e17f8181c4b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -417,7 +417,17 @@ static void raid1_end_write_request(struct bio *bio, int error)
417 417
418 r1_bio->bios[mirror] = NULL; 418 r1_bio->bios[mirror] = NULL;
419 to_put = bio; 419 to_put = bio;
420 set_bit(R1BIO_Uptodate, &r1_bio->state); 420 /*
421 * Do not set R1BIO_Uptodate if the current device is
422 * rebuilding or Faulty. This is because we cannot use
423 * such device for properly reading the data back (we could
424 * potentially use it, if the current write would have fallen
425 * before rdev->recovery_offset, but for simplicity we don't
426 * check this here.
427 */
428 if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
429 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
430 set_bit(R1BIO_Uptodate, &r1_bio->state);
421 431
422 /* Maybe we can clear some bad blocks. */ 432 /* Maybe we can clear some bad blocks. */
423 if (is_badblock(conf->mirrors[mirror].rdev, 433 if (is_badblock(conf->mirrors[mirror].rdev,
@@ -870,17 +880,17 @@ static void allow_barrier(struct r1conf *conf)
870 wake_up(&conf->wait_barrier); 880 wake_up(&conf->wait_barrier);
871} 881}
872 882
873static void freeze_array(struct r1conf *conf) 883static void freeze_array(struct r1conf *conf, int extra)
874{ 884{
875 /* stop syncio and normal IO and wait for everything to 885 /* stop syncio and normal IO and wait for everything to
876 * go quiet. 886 * go quiet.
877 * We increment barrier and nr_waiting, and then 887 * We increment barrier and nr_waiting, and then
878 * wait until nr_pending match nr_queued+1 888 * wait until nr_pending match nr_queued+extra
879 * This is called in the context of one normal IO request 889 * This is called in the context of one normal IO request
880 * that has failed. Thus any sync request that might be pending 890 * that has failed. Thus any sync request that might be pending
881 * will be blocked by nr_pending, and we need to wait for 891 * will be blocked by nr_pending, and we need to wait for
882 * pending IO requests to complete or be queued for re-try. 892 * pending IO requests to complete or be queued for re-try.
883 * Thus the number queued (nr_queued) plus this request (1) 893 * Thus the number queued (nr_queued) plus this request (extra)
884 * must match the number of pending IOs (nr_pending) before 894 * must match the number of pending IOs (nr_pending) before
885 * we continue. 895 * we continue.
886 */ 896 */
@@ -888,7 +898,7 @@ static void freeze_array(struct r1conf *conf)
888 conf->barrier++; 898 conf->barrier++;
889 conf->nr_waiting++; 899 conf->nr_waiting++;
890 wait_event_lock_irq_cmd(conf->wait_barrier, 900 wait_event_lock_irq_cmd(conf->wait_barrier,
891 conf->nr_pending == conf->nr_queued+1, 901 conf->nr_pending == conf->nr_queued+extra,
892 conf->resync_lock, 902 conf->resync_lock,
893 flush_pending_writes(conf)); 903 flush_pending_writes(conf));
894 spin_unlock_irq(&conf->resync_lock); 904 spin_unlock_irq(&conf->resync_lock);
@@ -1544,8 +1554,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1544 * we wait for all outstanding requests to complete. 1554 * we wait for all outstanding requests to complete.
1545 */ 1555 */
1546 synchronize_sched(); 1556 synchronize_sched();
1547 raise_barrier(conf); 1557 freeze_array(conf, 0);
1548 lower_barrier(conf); 1558 unfreeze_array(conf);
1549 clear_bit(Unmerged, &rdev->flags); 1559 clear_bit(Unmerged, &rdev->flags);
1550 } 1560 }
1551 md_integrity_add_rdev(rdev, mddev); 1561 md_integrity_add_rdev(rdev, mddev);
@@ -1595,11 +1605,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1595 */ 1605 */
1596 struct md_rdev *repl = 1606 struct md_rdev *repl =
1597 conf->mirrors[conf->raid_disks + number].rdev; 1607 conf->mirrors[conf->raid_disks + number].rdev;
1598 raise_barrier(conf); 1608 freeze_array(conf, 0);
1599 clear_bit(Replacement, &repl->flags); 1609 clear_bit(Replacement, &repl->flags);
1600 p->rdev = repl; 1610 p->rdev = repl;
1601 conf->mirrors[conf->raid_disks + number].rdev = NULL; 1611 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1602 lower_barrier(conf); 1612 unfreeze_array(conf);
1603 clear_bit(WantReplacement, &rdev->flags); 1613 clear_bit(WantReplacement, &rdev->flags);
1604 } else 1614 } else
1605 clear_bit(WantReplacement, &rdev->flags); 1615 clear_bit(WantReplacement, &rdev->flags);
@@ -2195,7 +2205,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2195 * frozen 2205 * frozen
2196 */ 2206 */
2197 if (mddev->ro == 0) { 2207 if (mddev->ro == 0) {
2198 freeze_array(conf); 2208 freeze_array(conf, 1);
2199 fix_read_error(conf, r1_bio->read_disk, 2209 fix_read_error(conf, r1_bio->read_disk,
2200 r1_bio->sector, r1_bio->sectors); 2210 r1_bio->sector, r1_bio->sectors);
2201 unfreeze_array(conf); 2211 unfreeze_array(conf);
@@ -2780,8 +2790,8 @@ static int run(struct mddev *mddev)
2780 return PTR_ERR(conf); 2790 return PTR_ERR(conf);
2781 2791
2782 if (mddev->queue) 2792 if (mddev->queue)
2783 blk_queue_max_write_same_sectors(mddev->queue, 2793 blk_queue_max_write_same_sectors(mddev->queue, 0);
2784 mddev->chunk_sectors); 2794
2785 rdev_for_each(rdev, mddev) { 2795 rdev_for_each(rdev, mddev) {
2786 if (!mddev->gendisk) 2796 if (!mddev->gendisk)
2787 continue; 2797 continue;
@@ -2963,7 +2973,7 @@ static int raid1_reshape(struct mddev *mddev)
2963 return -ENOMEM; 2973 return -ENOMEM;
2964 } 2974 }
2965 2975
2966 raise_barrier(conf); 2976 freeze_array(conf, 0);
2967 2977
2968 /* ok, everything is stopped */ 2978 /* ok, everything is stopped */
2969 oldpool = conf->r1bio_pool; 2979 oldpool = conf->r1bio_pool;
@@ -2994,7 +3004,7 @@ static int raid1_reshape(struct mddev *mddev)
2994 conf->raid_disks = mddev->raid_disks = raid_disks; 3004 conf->raid_disks = mddev->raid_disks = raid_disks;
2995 mddev->delta_disks = 0; 3005 mddev->delta_disks = 0;
2996 3006
2997 lower_barrier(conf); 3007 unfreeze_array(conf);
2998 3008
2999 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3009 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3000 md_wakeup_thread(mddev->thread); 3010 md_wakeup_thread(mddev->thread);
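
The new "extra" argument to freeze_array() encodes how many requests the caller itself still holds while waiting for the array to quiesce: 0 from the add/remove-disk and reshape paths, 1 from handle_read_error(), which, as the hunk's comment explains, runs for a bio that is still counted in nr_pending. A tiny model of the wait condition (plain integers, no locking or wait queues):

#include <stdio.h>
#include <stdbool.h>

struct conf_model {
    int nr_pending;     /* requests issued and not yet completed */
    int nr_queued;      /* failed requests parked for retry */
};

/* the condition freeze_array(conf, extra) waits for */
static bool quiesced(const struct conf_model *c, int extra)
{
    return c->nr_pending == c->nr_queued + extra;
}

int main(void)
{
    struct conf_model idle = { .nr_pending = 0, .nr_queued = 0 };
    struct conf_model one_failed = { .nr_pending = 1, .nr_queued = 0 };

    /* raid1_add_disk()/raid1_remove_disk(): the caller holds no request */
    printf("extra=0, idle array:     %s\n",
           quiesced(&idle, 0) ? "frozen" : "waiting");

    /* handle_read_error(): the failed bio itself is still pending */
    printf("extra=1, one failed bio: %s\n",
           quiesced(&one_failed, 1) ? "frozen" : "waiting");
    printf("extra=0, one failed bio: %s\n",
           quiesced(&one_failed, 0) ? "frozen" : "would wait forever");
    return 0;
}
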
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 59d4daa5f4c7..6ddae2501b9a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -490,7 +490,17 @@ static void raid10_end_write_request(struct bio *bio, int error)
490 sector_t first_bad; 490 sector_t first_bad;
491 int bad_sectors; 491 int bad_sectors;
492 492
493 set_bit(R10BIO_Uptodate, &r10_bio->state); 493 /*
494 * Do not set R10BIO_Uptodate if the current device is
495 * rebuilding or Faulty. This is because we cannot use
496 * such device for properly reading the data back (we could
497 * potentially use it, if the current write would have fallen
498 * before rdev->recovery_offset, but for simplicity we don't
499 * check this here.
500 */
501 if (test_bit(In_sync, &rdev->flags) &&
502 !test_bit(Faulty, &rdev->flags))
503 set_bit(R10BIO_Uptodate, &r10_bio->state);
494 504
495 /* Maybe we can clear some bad blocks. */ 505 /* Maybe we can clear some bad blocks. */
496 if (is_badblock(rdev, 506 if (is_badblock(rdev,
@@ -1055,17 +1065,17 @@ static void allow_barrier(struct r10conf *conf)
1055 wake_up(&conf->wait_barrier); 1065 wake_up(&conf->wait_barrier);
1056} 1066}
1057 1067
1058static void freeze_array(struct r10conf *conf) 1068static void freeze_array(struct r10conf *conf, int extra)
1059{ 1069{
1060 /* stop syncio and normal IO and wait for everything to 1070 /* stop syncio and normal IO and wait for everything to
1061 * go quiet. 1071 * go quiet.
1062 * We increment barrier and nr_waiting, and then 1072 * We increment barrier and nr_waiting, and then
1063 * wait until nr_pending match nr_queued+1 1073 * wait until nr_pending match nr_queued+extra
1064 * This is called in the context of one normal IO request 1074 * This is called in the context of one normal IO request
1065 * that has failed. Thus any sync request that might be pending 1075 * that has failed. Thus any sync request that might be pending
1066 * will be blocked by nr_pending, and we need to wait for 1076 * will be blocked by nr_pending, and we need to wait for
1067 * pending IO requests to complete or be queued for re-try. 1077 * pending IO requests to complete or be queued for re-try.
1068 * Thus the number queued (nr_queued) plus this request (1) 1078 * Thus the number queued (nr_queued) plus this request (extra)
1069 * must match the number of pending IOs (nr_pending) before 1079 * must match the number of pending IOs (nr_pending) before
1070 * we continue. 1080 * we continue.
1071 */ 1081 */
@@ -1073,7 +1083,7 @@ static void freeze_array(struct r10conf *conf)
1073 conf->barrier++; 1083 conf->barrier++;
1074 conf->nr_waiting++; 1084 conf->nr_waiting++;
1075 wait_event_lock_irq_cmd(conf->wait_barrier, 1085 wait_event_lock_irq_cmd(conf->wait_barrier,
1076 conf->nr_pending == conf->nr_queued+1, 1086 conf->nr_pending == conf->nr_queued+extra,
1077 conf->resync_lock, 1087 conf->resync_lock,
1078 flush_pending_writes(conf)); 1088 flush_pending_writes(conf));
1079 1089
@@ -1837,8 +1847,8 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1837 * we wait for all outstanding requests to complete. 1847 * we wait for all outstanding requests to complete.
1838 */ 1848 */
1839 synchronize_sched(); 1849 synchronize_sched();
1840 raise_barrier(conf, 0); 1850 freeze_array(conf, 0);
1841 lower_barrier(conf); 1851 unfreeze_array(conf);
1842 clear_bit(Unmerged, &rdev->flags); 1852 clear_bit(Unmerged, &rdev->flags);
1843 } 1853 }
1844 md_integrity_add_rdev(rdev, mddev); 1854 md_integrity_add_rdev(rdev, mddev);
@@ -2612,7 +2622,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2612 r10_bio->devs[slot].bio = NULL; 2622 r10_bio->devs[slot].bio = NULL;
2613 2623
2614 if (mddev->ro == 0) { 2624 if (mddev->ro == 0) {
2615 freeze_array(conf); 2625 freeze_array(conf, 1);
2616 fix_read_error(conf, mddev, r10_bio); 2626 fix_read_error(conf, mddev, r10_bio);
2617 unfreeze_array(conf); 2627 unfreeze_array(conf);
2618 } else 2628 } else
@@ -3609,8 +3619,7 @@ static int run(struct mddev *mddev)
3609 if (mddev->queue) { 3619 if (mddev->queue) {
3610 blk_queue_max_discard_sectors(mddev->queue, 3620 blk_queue_max_discard_sectors(mddev->queue,
3611 mddev->chunk_sectors); 3621 mddev->chunk_sectors);
3612 blk_queue_max_write_same_sectors(mddev->queue, 3622 blk_queue_max_write_same_sectors(mddev->queue, 0);
3613 mddev->chunk_sectors);
3614 blk_queue_io_min(mddev->queue, chunk_size); 3623 blk_queue_io_min(mddev->queue, chunk_size);
3615 if (conf->geo.raid_disks % conf->geo.near_copies) 3624 if (conf->geo.raid_disks % conf->geo.near_copies)
3616 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); 3625 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9359828ffe26..05e4a105b9c7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -664,6 +664,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
664 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 664 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
665 bi->bi_rw |= REQ_FLUSH; 665 bi->bi_rw |= REQ_FLUSH;
666 666
667 bi->bi_vcnt = 1;
667 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 668 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
668 bi->bi_io_vec[0].bv_offset = 0; 669 bi->bi_io_vec[0].bv_offset = 0;
669 bi->bi_size = STRIPE_SIZE; 670 bi->bi_size = STRIPE_SIZE;
@@ -701,6 +702,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
701 else 702 else
702 rbi->bi_sector = (sh->sector 703 rbi->bi_sector = (sh->sector
703 + rrdev->data_offset); 704 + rrdev->data_offset);
705 rbi->bi_vcnt = 1;
704 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 706 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
705 rbi->bi_io_vec[0].bv_offset = 0; 707 rbi->bi_io_vec[0].bv_offset = 0;
706 rbi->bi_size = STRIPE_SIZE; 708 rbi->bi_size = STRIPE_SIZE;
@@ -5464,7 +5466,7 @@ static int run(struct mddev *mddev)
5464 if (mddev->major_version == 0 && 5466 if (mddev->major_version == 0 &&
5465 mddev->minor_version > 90) 5467 mddev->minor_version > 90)
5466 rdev->recovery_offset = reshape_offset; 5468 rdev->recovery_offset = reshape_offset;
5467 5469
5468 if (rdev->recovery_offset < reshape_offset) { 5470 if (rdev->recovery_offset < reshape_offset) {
5469 /* We need to check old and new layout */ 5471 /* We need to check old and new layout */
5470 if (!only_parity(rdev->raid_disk, 5472 if (!only_parity(rdev->raid_disk,
@@ -5587,6 +5589,8 @@ static int run(struct mddev *mddev)
5587 */ 5589 */
5588 mddev->queue->limits.discard_zeroes_data = 0; 5590 mddev->queue->limits.discard_zeroes_data = 0;
5589 5591
5592 blk_queue_max_write_same_sectors(mddev->queue, 0);
5593
5590 rdev_for_each(rdev, mddev) { 5594 rdev_for_each(rdev, mddev) {
5591 disk_stack_limits(mddev->gendisk, rdev->bdev, 5595 disk_stack_limits(mddev->gendisk, rdev->bdev,
5592 rdev->data_offset << 9); 5596 rdev->data_offset << 9);
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 44d273c5e19d..0535d1e0bc78 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -172,6 +172,7 @@ static long gru_get_config_info(unsigned long arg)
172 nodesperblade = 2; 172 nodesperblade = 2;
173 else 173 else
174 nodesperblade = 1; 174 nodesperblade = 1;
175 memset(&info, 0, sizeof(info));
175 info.cpus = num_online_cpus(); 176 info.cpus = num_online_cpus();
176 info.nodes = num_online_nodes(); 177 info.nodes = num_online_nodes();
177 info.blades = info.nodes / nodesperblade; 178 info.blades = info.nodes / nodesperblade;
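
The one-line grufile.c change zeroes the info structure before it is filled in and copied out, so struct padding and any fields the function never writes cannot leak stale stack contents. A user-space illustration of what those uninitialised bytes look like; the struct layout below is invented purely to create padding:

#include <stdio.h>
#include <string.h>

struct config_info {
    char cpus;          /* 1 byte, followed by 7 bytes of padding on LP64 */
    long nodes;
    long blades;
};

static void dump(const char *tag, const struct config_info *info)
{
    const unsigned char *p = (const unsigned char *)info;

    printf("%-18s", tag);
    for (size_t i = 0; i < sizeof(*info); i++)
        printf(" %02x", p[i]);
    printf("\n");
}

int main(void)
{
    unsigned char stale[sizeof(struct config_info)];
    struct config_info info;

    memset(stale, 0xaa, sizeof(stale));     /* pretend old stack data */
    memcpy(&info, stale, sizeof(info));     /* simulate an uninitialised local */

    info.cpus = 4;
    info.nodes = 2;
    info.blades = 1;
    dump("without memset:", &info);         /* padding still shows 0xaa */

    memset(&info, 0, sizeof(info));
    info.cpus = 4;
    info.nodes = 2;
    info.blades = 1;
    dump("with memset:", &info);
    return 0;
}
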
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 42e9dd05c936..b4479b5aaee4 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -897,8 +897,8 @@ static int sh_eth_check_reset(struct net_device *ndev)
897 mdelay(1); 897 mdelay(1);
898 cnt--; 898 cnt--;
899 } 899 }
900 if (cnt < 0) { 900 if (cnt <= 0) {
901 pr_err("Device reset fail\n"); 901 pr_err("Device reset failed\n");
902 ret = -ETIMEDOUT; 902 ret = -ETIMEDOUT;
903 } 903 }
904 return ret; 904 return ret;
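
The sh_eth change fixes an off-by-one in a countdown timeout: the loop decrements cnt inside the body and exits with cnt == 0 when the reset never completes, so the old "cnt < 0" test could never report the failure. A trivial stand-alone reproduction (device_ready() is a stub that always fails):

#include <stdio.h>
#include <stdbool.h>

static bool device_ready(void)
{
    return false;       /* pretend the reset bit never clears */
}

int main(void)
{
    int cnt = 100;

    while (cnt > 0) {
        if (device_ready())
            break;
        cnt--;          /* reaches 0, never goes negative */
    }

    printf("old check (cnt < 0):  %s\n", cnt < 0 ? "timeout reported" : "timeout missed");
    printf("new check (cnt <= 0): %s\n", cnt <= 0 ? "timeout reported" : "timeout missed");
    return 0;
}
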
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 12aec173564c..b2275d1b19b3 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -449,10 +449,9 @@ static int davinci_mdio_suspend(struct device *dev)
449 __raw_writel(ctrl, &data->regs->control); 449 __raw_writel(ctrl, &data->regs->control);
450 wait_for_idle(data); 450 wait_for_idle(data);
451 451
452 pm_runtime_put_sync(data->dev);
453
454 data->suspended = true; 452 data->suspended = true;
455 spin_unlock(&data->lock); 453 spin_unlock(&data->lock);
454 pm_runtime_put_sync(data->dev);
456 455
457 return 0; 456 return 0;
458} 457}
@@ -462,9 +461,9 @@ static int davinci_mdio_resume(struct device *dev)
462 struct davinci_mdio_data *data = dev_get_drvdata(dev); 461 struct davinci_mdio_data *data = dev_get_drvdata(dev);
463 u32 ctrl; 462 u32 ctrl;
464 463
465 spin_lock(&data->lock);
466 pm_runtime_get_sync(data->dev); 464 pm_runtime_get_sync(data->dev);
467 465
466 spin_lock(&data->lock);
468 /* restart the scan state machine */ 467 /* restart the scan state machine */
469 ctrl = __raw_readl(&data->regs->control); 468 ctrl = __raw_readl(&data->regs->control);
470 ctrl |= CONTROL_ENABLE; 469 ctrl |= CONTROL_ENABLE;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index d016a76ad44b..b3051052f3ad 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1092,8 +1092,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1092 } 1092 }
1093 1093
1094 port->index = -1; 1094 port->index = -1;
1095 team_port_enable(team, port);
1096 list_add_tail_rcu(&port->list, &team->port_list); 1095 list_add_tail_rcu(&port->list, &team->port_list);
1096 team_port_enable(team, port);
1097 __team_compute_features(team); 1097 __team_compute_features(team);
1098 __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); 1098 __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
1099 __team_options_change_check(team); 1099 __team_options_change_check(team);
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 5ca14d463ba7..7f032e211343 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -28,6 +28,8 @@ static bool rnd_transmit(struct team *team, struct sk_buff *skb)
28 28
29 port_index = random_N(team->en_port_count); 29 port_index = random_N(team->en_port_count);
30 port = team_get_port_by_index_rcu(team, port_index); 30 port = team_get_port_by_index_rcu(team, port_index);
31 if (unlikely(!port))
32 goto drop;
31 port = team_get_first_port_txable_rcu(team, port); 33 port = team_get_first_port_txable_rcu(team, port);
32 if (unlikely(!port)) 34 if (unlikely(!port))
33 goto drop; 35 goto drop;
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index d268e4de781b..472623f8ce3d 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -32,6 +32,8 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
32 32
33 port_index = rr_priv(team)->sent_packets++ % team->en_port_count; 33 port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
34 port = team_get_port_by_index_rcu(team, port_index); 34 port = team_get_port_by_index_rcu(team, port_index);
35 if (unlikely(!port))
36 goto drop;
35 port = team_get_first_port_txable_rcu(team, port); 37 port = team_get_first_port_txable_rcu(team, port);
36 if (unlikely(!port)) 38 if (unlikely(!port))
37 goto drop; 39 goto drop;
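
Both team transmit hunks add a NULL check after the index lookup: the enabled-port count is read separately from the port table, so the computed index can land on a slot that has just been vacated. A simplified model with a plain array standing in for the RCU-protected port list (all names invented):

#include <stdio.h>

#define MAX_PORTS 4

struct port { const char *name; };

static struct port port0 = { "eth0" };
static struct port port1 = { "eth1" };
/* slot 2 was just vacated by a port being removed */
static struct port *ports[MAX_PORTS] = { &port0, &port1, NULL, NULL };

static struct port *get_port_by_index(unsigned int index)
{
    return index < MAX_PORTS ? ports[index] : NULL;
}

static int transmit(unsigned int port_count, unsigned int pkt)
{
    unsigned int index = pkt % port_count;  /* count may already be stale */
    struct port *port = get_port_by_index(index);

    if (!port) {            /* the check the patch adds: drop, don't crash */
        printf("pkt %u dropped\n", pkt);
        return -1;
    }
    printf("pkt %u sent on %s\n", pkt, port->name);
    return 0;
}

int main(void)
{
    transmit(3, 2);     /* stale count of 3 selects the vacated slot */
    transmit(2, 1);     /* current count of 2 selects eth1 */
    return 0;
}
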
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 89776c592151..bfa9bb48e42d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -352,7 +352,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
352 u32 numqueues = 0; 352 u32 numqueues = 0;
353 353
354 rcu_read_lock(); 354 rcu_read_lock();
355 numqueues = tun->numqueues; 355 numqueues = ACCESS_ONCE(tun->numqueues);
356 356
357 txq = skb_get_rxhash(skb); 357 txq = skb_get_rxhash(skb);
358 if (txq) { 358 if (txq) {
@@ -2159,6 +2159,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
2159 set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags); 2159 set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
2160 INIT_LIST_HEAD(&tfile->next); 2160 INIT_LIST_HEAD(&tfile->next);
2161 2161
2162 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2163
2162 return 0; 2164 return 0;
2163} 2165}
2164 2166
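
The tun_select_queue() hunk reads tun->numqueues through ACCESS_ONCE(), presumably because the field can change while queues are being detached: the compiler is then forced to load it once, so the value that is tested and the value used as the modulo divisor cannot differ. A user-space approximation of the pattern, modelling the macro as the classic volatile cast:

#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned int numqueues = 4;  /* imagine this changes under us */

static unsigned int select_queue(unsigned int hash)
{
    unsigned int n = ACCESS_ONCE(numqueues);    /* one snapshot, reused below */

    if (n == 0)         /* no queues attached at the moment */
        return 0;
    return hash % n;    /* uses the same value that was just tested */
}

int main(void)
{
    printf("queue = %u\n", select_queue(12345u));
    return 0;
}
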
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 078795fe6e31..04ee044dde51 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -627,6 +627,12 @@ static const struct usb_device_id products [] = {
627 .driver_info = 0, 627 .driver_info = 0,
628}, 628},
629 629
630/* Huawei E1820 - handled by qmi_wwan */
631{
632 USB_DEVICE_INTERFACE_NUMBER(HUAWEI_VENDOR_ID, 0x14ac, 1),
633 .driver_info = 0,
634},
635
630/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */ 636/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
631#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE) 637#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
632{ 638{
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 86adfa0a912e..d095d0d3056b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -519,6 +519,7 @@ static const struct usb_device_id products[] = {
519 /* 3. Combined interface devices matching on interface number */ 519 /* 3. Combined interface devices matching on interface number */
520 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ 520 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
521 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 521 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
522 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
522 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, 523 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
523 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, 524 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
524 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, 525 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index f3dc124c60c7..3c2cbc9d6295 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -92,13 +92,17 @@ config ATH9K_MAC_DEBUG
92 This option enables collection of statistics for Rx/Tx status 92 This option enables collection of statistics for Rx/Tx status
93 data and some other MAC related statistics 93 data and some other MAC related statistics
94 94
95config ATH9K_RATE_CONTROL 95config ATH9K_LEGACY_RATE_CONTROL
96 bool "Atheros ath9k rate control" 96 bool "Atheros ath9k rate control"
97 depends on ATH9K 97 depends on ATH9K
98 default y 98 default n
99 ---help--- 99 ---help---
100 Say Y, if you want to use the ath9k specific rate control 100 Say Y, if you want to use the ath9k specific rate control
101 module instead of minstrel_ht. 101 module instead of minstrel_ht. Be warned that there are various
102 issues with the ath9k RC and minstrel is a more robust algorithm.
103 Note that even if this option is selected, "ath9k_rate_control"
104 has to be passed to mac80211 using the module parameter,
105 ieee80211_default_rc_algo.
102 106
103config ATH9K_HTC 107config ATH9K_HTC
104 tristate "Atheros HTC based wireless cards support" 108 tristate "Atheros HTC based wireless cards support"
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 2ad8f9474ba1..75ee9e7704ce 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -8,7 +8,7 @@ ath9k-y += beacon.o \
8 antenna.o 8 antenna.o
9 9
10ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o 10ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
11ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o 11ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
12ath9k-$(CONFIG_ATH9K_PCI) += pci.o 12ath9k-$(CONFIG_ATH9K_PCI) += pci.o
13ath9k-$(CONFIG_ATH9K_AHB) += ahb.o 13ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
14ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o 14ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index db5ffada2217..7546b9a7dcbf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -958,11 +958,11 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
958 {0x0000a074, 0x00000000}, 958 {0x0000a074, 0x00000000},
959 {0x0000a078, 0x00000000}, 959 {0x0000a078, 0x00000000},
960 {0x0000a07c, 0x00000000}, 960 {0x0000a07c, 0x00000000},
961 {0x0000a080, 0x1a1a1a1a}, 961 {0x0000a080, 0x22222229},
962 {0x0000a084, 0x1a1a1a1a}, 962 {0x0000a084, 0x1d1d1d1d},
963 {0x0000a088, 0x1a1a1a1a}, 963 {0x0000a088, 0x1d1d1d1d},
964 {0x0000a08c, 0x1a1a1a1a}, 964 {0x0000a08c, 0x1d1d1d1d},
965 {0x0000a090, 0x171a1a1a}, 965 {0x0000a090, 0x171d1d1d},
966 {0x0000a094, 0x11111717}, 966 {0x0000a094, 0x11111717},
967 {0x0000a098, 0x00030311}, 967 {0x0000a098, 0x00030311},
968 {0x0000a09c, 0x00000000}, 968 {0x0000a09c, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index aba415103f94..2ba494567777 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -787,8 +787,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
787 hw->wiphy->iface_combinations = if_comb; 787 hw->wiphy->iface_combinations = if_comb;
788 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); 788 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
789 789
790 if (AR_SREV_5416(sc->sc_ah)) 790 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
791 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
792 791
793 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 792 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
794 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 793 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
@@ -830,10 +829,6 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
830 sc->ant_rx = hw->wiphy->available_antennas_rx; 829 sc->ant_rx = hw->wiphy->available_antennas_rx;
831 sc->ant_tx = hw->wiphy->available_antennas_tx; 830 sc->ant_tx = hw->wiphy->available_antennas_tx;
832 831
833#ifdef CONFIG_ATH9K_RATE_CONTROL
834 hw->rate_control_algorithm = "ath9k_rate_control";
835#endif
836
837 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) 832 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
838 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 833 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
839 &sc->sbands[IEEE80211_BAND_2GHZ]; 834 &sc->sbands[IEEE80211_BAND_2GHZ];
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 267dbfcfaa96..b9a87383cb43 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -231,7 +231,7 @@ static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
231} 231}
232#endif 232#endif
233 233
234#ifdef CONFIG_ATH9K_RATE_CONTROL 234#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
235int ath_rate_control_register(void); 235int ath_rate_control_register(void);
236void ath_rate_control_unregister(void); 236void ath_rate_control_unregister(void);
237#else 237#else
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 6dd07e2ec595..a95b77ab360e 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2458,7 +2458,7 @@ static void b43_request_firmware(struct work_struct *work)
2458 for (i = 0; i < B43_NR_FWTYPES; i++) { 2458 for (i = 0; i < B43_NR_FWTYPES; i++) {
2459 errmsg = ctx->errors[i]; 2459 errmsg = ctx->errors[i];
2460 if (strlen(errmsg)) 2460 if (strlen(errmsg))
2461 b43err(dev->wl, errmsg); 2461 b43err(dev->wl, "%s", errmsg);
2462 } 2462 }
2463 b43_print_fw_helptext(dev->wl, 1); 2463 b43_print_fw_helptext(dev->wl, 1);
2464 goto out; 2464 goto out;
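
The b43 hunk is the classic format-string hardening: the collected firmware error strings are data, so they must be printed through a literal "%s" format rather than being used as the format themselves. A two-line user-space illustration with printf standing in for b43err(); the sample message is hypothetical:

#include <stdio.h>

int main(void)
{
    /* error text that happens to contain a conversion specifier */
    const char *errmsg = "Firmware file \"b43/ucode%d.fw\" not found";

    printf("%s\n", errmsg);     /* safe: errmsg is treated as plain data */
    /* printf(errmsg); would interpret %d and read a non-existent argument */
    return 0;
}
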
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index f8246f2d88f9..4caaf52986a4 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1832,16 +1832,16 @@ u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
1832__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon, 1832__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
1833 u32 beacon_interval); 1833 u32 beacon_interval);
1834 1834
1835#ifdef CONFIG_PM 1835#ifdef CONFIG_PM_SLEEP
1836extern const struct dev_pm_ops il_pm_ops; 1836extern const struct dev_pm_ops il_pm_ops;
1837 1837
1838#define IL_LEGACY_PM_OPS (&il_pm_ops) 1838#define IL_LEGACY_PM_OPS (&il_pm_ops)
1839 1839
1840#else /* !CONFIG_PM */ 1840#else /* !CONFIG_PM_SLEEP */
1841 1841
1842#define IL_LEGACY_PM_OPS NULL 1842#define IL_LEGACY_PM_OPS NULL
1843 1843
1844#endif /* !CONFIG_PM */ 1844#endif /* !CONFIG_PM_SLEEP */
1845 1845
1846/***************************************************** 1846/*****************************************************
1847* Error Handling Debugging 1847* Error Handling Debugging
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 753b5682d53f..a5f9875cfd6e 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -26,10 +26,17 @@
26static struct dentry *mwifiex_dfs_dir; 26static struct dentry *mwifiex_dfs_dir;
27 27
28static char *bss_modes[] = { 28static char *bss_modes[] = {
29 "Unknown", 29 "UNSPECIFIED",
30 "Ad-hoc", 30 "ADHOC",
31 "Managed", 31 "STATION",
32 "Auto" 32 "AP",
33 "AP_VLAN",
34 "WDS",
35 "MONITOR",
36 "MESH_POINT",
37 "P2P_CLIENT",
38 "P2P_GO",
39 "P2P_DEVICE",
33}; 40};
34 41
35/* size/addr for mwifiex_debug_info */ 42/* size/addr for mwifiex_debug_info */
@@ -200,7 +207,12 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
200 p += sprintf(p, "driver_version = %s", fmt); 207 p += sprintf(p, "driver_version = %s", fmt);
201 p += sprintf(p, "\nverext = %s", priv->version_str); 208 p += sprintf(p, "\nverext = %s", priv->version_str);
202 p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name); 209 p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);
203 p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]); 210
211 if (info.bss_mode >= ARRAY_SIZE(bss_modes))
212 p += sprintf(p, "bss_mode=\"%d\"\n", info.bss_mode);
213 else
214 p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
215
204 p += sprintf(p, "media_state=\"%s\"\n", 216 p += sprintf(p, "media_state=\"%s\"\n",
205 (!priv->media_connected ? "Disconnected" : "Connected")); 217 (!priv->media_connected ? "Disconnected" : "Connected"));
206 p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr); 218 p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr);
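
The mwifiex hunk grows the bss_modes[] table (the new entries follow the nl80211 interface-type names) and, more importantly, range-checks the firmware-reported mode before indexing it. The same guard in stand-alone form, using the usual ARRAY_SIZE idiom:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *bss_modes[] = {
    "UNSPECIFIED", "ADHOC", "STATION", "AP", "AP_VLAN", "WDS",
    "MONITOR", "MESH_POINT", "P2P_CLIENT", "P2P_GO", "P2P_DEVICE",
};

static void print_bss_mode(unsigned int mode)
{
    if (mode >= ARRAY_SIZE(bss_modes))
        printf("bss_mode=\"%u\"\n", mode);  /* unknown value: print it raw */
    else
        printf("bss_mode=\"%s\"\n", bss_modes[mode]);
}

int main(void)
{
    print_bss_mode(2);      /* STATION */
    print_bss_mode(42);     /* out of range: printed numerically, no OOB read */
    return 0;
}
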
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 999ffc12578b..c97e9d327331 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -764,6 +764,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
764 "can't alloc skb for rx\n"); 764 "can't alloc skb for rx\n");
765 goto done; 765 goto done;
766 } 766 }
767 kmemleak_not_leak(new_skb);
767 768
768 pci_unmap_single(rtlpci->pdev, 769 pci_unmap_single(rtlpci->pdev,
769 *((dma_addr_t *) skb->cb), 770 *((dma_addr_t *) skb->cb),
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 3d0498e69c8c..189ba124a8c6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1973,26 +1973,35 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1973 } 1973 }
1974} 1974}
1975 1975
1976void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, 1976static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
1977 struct ieee80211_sta *sta, 1977 struct ieee80211_sta *sta)
1978 u8 rssi_level)
1979{ 1978{
1980 struct rtl_priv *rtlpriv = rtl_priv(hw); 1979 struct rtl_priv *rtlpriv = rtl_priv(hw);
1981 struct rtl_phy *rtlphy = &(rtlpriv->phy); 1980 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1982 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1981 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1983 u32 ratr_value = (u32) mac->basic_rates; 1982 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1984 u8 *mcsrate = mac->mcs; 1983 u32 ratr_value;
1985 u8 ratr_index = 0; 1984 u8 ratr_index = 0;
1986 u8 nmode = mac->ht_enable; 1985 u8 nmode = mac->ht_enable;
1987 u8 mimo_ps = 1; 1986 u8 mimo_ps = IEEE80211_SMPS_OFF;
1988 u16 shortgi_rate = 0; 1987 u16 shortgi_rate;
1989 u32 tmp_ratr_value = 0; 1988 u32 tmp_ratr_value;
1990 u8 curtxbw_40mhz = mac->bw_40; 1989 u8 curtxbw_40mhz = mac->bw_40;
1991 u8 curshortgi_40mhz = mac->sgi_40; 1990 u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1992 u8 curshortgi_20mhz = mac->sgi_20; 1991 1 : 0;
1992 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1993 1 : 0;
1993 enum wireless_mode wirelessmode = mac->mode; 1994 enum wireless_mode wirelessmode = mac->mode;
1994 1995
1995 ratr_value |= ((*(u16 *) (mcsrate))) << 12; 1996 if (rtlhal->current_bandtype == BAND_ON_5G)
1997 ratr_value = sta->supp_rates[1] << 4;
1998 else
1999 ratr_value = sta->supp_rates[0];
2000 if (mac->opmode == NL80211_IFTYPE_ADHOC)
2001 ratr_value = 0xfff;
2002
2003 ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
2004 sta->ht_cap.mcs.rx_mask[0] << 12);
1996 switch (wirelessmode) { 2005 switch (wirelessmode) {
1997 case WIRELESS_MODE_B: 2006 case WIRELESS_MODE_B:
1998 if (ratr_value & 0x0000000c) 2007 if (ratr_value & 0x0000000c)
@@ -2006,7 +2015,7 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
2006 case WIRELESS_MODE_N_24G: 2015 case WIRELESS_MODE_N_24G:
2007 case WIRELESS_MODE_N_5G: 2016 case WIRELESS_MODE_N_5G:
2008 nmode = 1; 2017 nmode = 1;
2009 if (mimo_ps == 0) { 2018 if (mimo_ps == IEEE80211_SMPS_STATIC) {
2010 ratr_value &= 0x0007F005; 2019 ratr_value &= 0x0007F005;
2011 } else { 2020 } else {
2012 u32 ratr_mask; 2021 u32 ratr_mask;
@@ -2016,8 +2025,7 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
2016 ratr_mask = 0x000ff005; 2025 ratr_mask = 0x000ff005;
2017 else 2026 else
2018 ratr_mask = 0x0f0ff005; 2027 ratr_mask = 0x0f0ff005;
2019 if (curtxbw_40mhz) 2028
2020 ratr_mask |= 0x00000010;
2021 ratr_value &= ratr_mask; 2029 ratr_value &= ratr_mask;
2022 } 2030 }
2023 break; 2031 break;
@@ -2026,41 +2034,74 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
2026 ratr_value &= 0x000ff0ff; 2034 ratr_value &= 0x000ff0ff;
2027 else 2035 else
2028 ratr_value &= 0x0f0ff0ff; 2036 ratr_value &= 0x0f0ff0ff;
2037
2029 break; 2038 break;
2030 } 2039 }
2040
2031 ratr_value &= 0x0FFFFFFF; 2041 ratr_value &= 0x0FFFFFFF;
2032 if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || 2042
2033 (!curtxbw_40mhz && curshortgi_20mhz))) { 2043 if (nmode && ((curtxbw_40mhz &&
2044 curshortgi_40mhz) || (!curtxbw_40mhz &&
2045 curshortgi_20mhz))) {
2046
2034 ratr_value |= 0x10000000; 2047 ratr_value |= 0x10000000;
2035 tmp_ratr_value = (ratr_value >> 12); 2048 tmp_ratr_value = (ratr_value >> 12);
2049
2036 for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { 2050 for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
2037 if ((1 << shortgi_rate) & tmp_ratr_value) 2051 if ((1 << shortgi_rate) & tmp_ratr_value)
2038 break; 2052 break;
2039 } 2053 }
2054
2040 shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | 2055 shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
2041 (shortgi_rate << 4) | (shortgi_rate); 2056 (shortgi_rate << 4) | (shortgi_rate);
2042 } 2057 }
2058
2043 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); 2059 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
2060
2061 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
2062 rtl_read_dword(rtlpriv, REG_ARFR0));
2044} 2063}
2045 2064
2046void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) 2065static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
2066 struct ieee80211_sta *sta,
2067 u8 rssi_level)
2047{ 2068{
2048 struct rtl_priv *rtlpriv = rtl_priv(hw); 2069 struct rtl_priv *rtlpriv = rtl_priv(hw);
2049 struct rtl_phy *rtlphy = &(rtlpriv->phy); 2070 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2050 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 2071 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2051 u32 ratr_bitmap = (u32) mac->basic_rates; 2072 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2052 u8 *p_mcsrate = mac->mcs; 2073 struct rtl_sta_info *sta_entry = NULL;
2053 u8 ratr_index = 0; 2074 u32 ratr_bitmap;
2054 u8 curtxbw_40mhz = mac->bw_40; 2075 u8 ratr_index;
2055 u8 curshortgi_40mhz = mac->sgi_40; 2076 u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
2056 u8 curshortgi_20mhz = mac->sgi_20; 2077 u8 curshortgi_40mhz = curtxbw_40mhz &&
2057 enum wireless_mode wirelessmode = mac->mode; 2078 (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
2079 1 : 0;
2080 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
2081 1 : 0;
2082 enum wireless_mode wirelessmode = 0;
2058 bool shortgi = false; 2083 bool shortgi = false;
2059 u8 rate_mask[5]; 2084 u8 rate_mask[5];
2060 u8 macid = 0; 2085 u8 macid = 0;
2061 u8 mimops = 1; 2086 u8 mimo_ps = IEEE80211_SMPS_OFF;
2062 2087
2063 ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12); 2088 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
2089 wirelessmode = sta_entry->wireless_mode;
2090 if (mac->opmode == NL80211_IFTYPE_STATION ||
2091 mac->opmode == NL80211_IFTYPE_MESH_POINT)
2092 curtxbw_40mhz = mac->bw_40;
2093 else if (mac->opmode == NL80211_IFTYPE_AP ||
2094 mac->opmode == NL80211_IFTYPE_ADHOC)
2095 macid = sta->aid + 1;
2096
2097 if (rtlhal->current_bandtype == BAND_ON_5G)
2098 ratr_bitmap = sta->supp_rates[1] << 4;
2099 else
2100 ratr_bitmap = sta->supp_rates[0];
2101 if (mac->opmode == NL80211_IFTYPE_ADHOC)
2102 ratr_bitmap = 0xfff;
2103 ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
2104 sta->ht_cap.mcs.rx_mask[0] << 12);
2064 switch (wirelessmode) { 2105 switch (wirelessmode) {
2065 case WIRELESS_MODE_B: 2106 case WIRELESS_MODE_B:
2066 ratr_index = RATR_INX_WIRELESS_B; 2107 ratr_index = RATR_INX_WIRELESS_B;
@@ -2071,6 +2112,7 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2071 break; 2112 break;
2072 case WIRELESS_MODE_G: 2113 case WIRELESS_MODE_G:
2073 ratr_index = RATR_INX_WIRELESS_GB; 2114 ratr_index = RATR_INX_WIRELESS_GB;
2115
2074 if (rssi_level == 1) 2116 if (rssi_level == 1)
2075 ratr_bitmap &= 0x00000f00; 2117 ratr_bitmap &= 0x00000f00;
2076 else if (rssi_level == 2) 2118 else if (rssi_level == 2)
@@ -2085,7 +2127,8 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2085 case WIRELESS_MODE_N_24G: 2127 case WIRELESS_MODE_N_24G:
2086 case WIRELESS_MODE_N_5G: 2128 case WIRELESS_MODE_N_5G:
2087 ratr_index = RATR_INX_WIRELESS_NGB; 2129 ratr_index = RATR_INX_WIRELESS_NGB;
2088 if (mimops == 0) { 2130
2131 if (mimo_ps == IEEE80211_SMPS_STATIC) {
2089 if (rssi_level == 1) 2132 if (rssi_level == 1)
2090 ratr_bitmap &= 0x00070000; 2133 ratr_bitmap &= 0x00070000;
2091 else if (rssi_level == 2) 2134 else if (rssi_level == 2)
@@ -2128,8 +2171,10 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2128 } 2171 }
2129 } 2172 }
2130 } 2173 }
2174
2131 if ((curtxbw_40mhz && curshortgi_40mhz) || 2175 if ((curtxbw_40mhz && curshortgi_40mhz) ||
2132 (!curtxbw_40mhz && curshortgi_20mhz)) { 2176 (!curtxbw_40mhz && curshortgi_20mhz)) {
2177
2133 if (macid == 0) 2178 if (macid == 0)
2134 shortgi = true; 2179 shortgi = true;
2135 else if (macid == 1) 2180 else if (macid == 1)
@@ -2138,21 +2183,42 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2138 break; 2183 break;
2139 default: 2184 default:
2140 ratr_index = RATR_INX_WIRELESS_NGB; 2185 ratr_index = RATR_INX_WIRELESS_NGB;
2186
2141 if (rtlphy->rf_type == RF_1T2R) 2187 if (rtlphy->rf_type == RF_1T2R)
2142 ratr_bitmap &= 0x000ff0ff; 2188 ratr_bitmap &= 0x000ff0ff;
2143 else 2189 else
2144 ratr_bitmap &= 0x0f0ff0ff; 2190 ratr_bitmap &= 0x0f0ff0ff;
2145 break; 2191 break;
2146 } 2192 }
2147 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "ratr_bitmap :%x\n", 2193 sta_entry->ratr_index = ratr_index;
2148 ratr_bitmap); 2194
2149 *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) | 2195 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
2150 ratr_index << 28); 2196 "ratr_bitmap :%x\n", ratr_bitmap);
2197 *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
2198 (ratr_index << 28);
2151 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80; 2199 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
2152 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 2200 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
2153 "Rate_index:%x, ratr_val:%x, %5phC\n", 2201 "Rate_index:%x, ratr_val:%x, %5phC\n",
2154 ratr_index, ratr_bitmap, rate_mask); 2202 ratr_index, ratr_bitmap, rate_mask);
2155 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask); 2203 memcpy(rtlpriv->rate_mask, rate_mask, 5);
2204 /* rtl92c_fill_h2c_cmd() does USB I/O and will result in a
 2205 * "scheduling while atomic" error if called directly */
2206 schedule_work(&rtlpriv->works.fill_h2c_cmd);
2207
2208 if (macid != 0)
2209 sta_entry->ratr_index = ratr_index;
2210}
2211
2212void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
2213 struct ieee80211_sta *sta,
2214 u8 rssi_level)
2215{
2216 struct rtl_priv *rtlpriv = rtl_priv(hw);
2217
2218 if (rtlpriv->dm.useramask)
2219 rtl92cu_update_hal_rate_mask(hw, sta, rssi_level);
2220 else
2221 rtl92cu_update_hal_rate_table(hw, sta);
2156} 2222}
2157 2223
2158void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw) 2224void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
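
Note: the rate-table update above packs the legacy rates into the low 12 bits of ratr_value and MCS0-15 into bits 12-27, then (when short GI applies) scans downward for the highest supported MCS index and replicates it into every nibble of the short-GI field. A small standalone illustration of that selection step; the function name and sample value are illustrative, not driver symbols:

#include <stdint.h>
#include <stdio.h>

/* Pick the highest supported MCS index (0-15) from the rate bitmap and
 * replicate it into each nibble, mirroring the driver loop above. */
static uint16_t pick_shortgi_rate(uint32_t ratr_value)
{
	uint32_t mcs_bits = ratr_value >> 12;	/* MCS0-15 sit at bits 12-27 */
	uint16_t rate;

	for (rate = 15; rate > 0; rate--)
		if ((1u << rate) & mcs_bits)
			break;

	return (rate << 12) | (rate << 8) | (rate << 4) | rate;
}

int main(void)
{
	/* MCS0-7 supported (1T2R mask 0x000ff005) -> index 7 -> 0x7777 */
	printf("0x%04x\n", (unsigned)pick_shortgi_rate(0x000ff005));
	return 0;
}
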
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index f41a3aa4a26f..8e3ec1e25644 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -98,10 +98,6 @@ void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
98 u32 add_msr, u32 rm_msr); 98 u32 add_msr, u32 rm_msr);
99void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 99void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
100void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 100void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
101void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
102 struct ieee80211_sta *sta,
103 u8 rssi_level);
104void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
105 101
106void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw); 102void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
107bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid); 103bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 85b6bdb163c0..da4f587199ee 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -289,14 +289,30 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
289 macaddr = cam_const_broad; 289 macaddr = cam_const_broad;
290 entry_id = key_index; 290 entry_id = key_index;
291 } else { 291 } else {
292 if (mac->opmode == NL80211_IFTYPE_AP ||
293 mac->opmode == NL80211_IFTYPE_MESH_POINT) {
294 entry_id = rtl_cam_get_free_entry(hw,
295 p_macaddr);
296 if (entry_id >= TOTAL_CAM_ENTRY) {
297 RT_TRACE(rtlpriv, COMP_SEC,
298 DBG_EMERG,
299 "Can not find free hw security cam entry\n");
300 return;
301 }
302 } else {
303 entry_id = CAM_PAIRWISE_KEY_POSITION;
304 }
305
292 key_index = PAIRWISE_KEYIDX; 306 key_index = PAIRWISE_KEYIDX;
293 entry_id = CAM_PAIRWISE_KEY_POSITION;
294 is_pairwise = true; 307 is_pairwise = true;
295 } 308 }
296 } 309 }
297 if (rtlpriv->sec.key_len[key_index] == 0) { 310 if (rtlpriv->sec.key_len[key_index] == 0) {
298 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 311 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
299 "delete one entry\n"); 312 "delete one entry\n");
313 if (mac->opmode == NL80211_IFTYPE_AP ||
314 mac->opmode == NL80211_IFTYPE_MESH_POINT)
315 rtl_cam_del_entry(hw, p_macaddr);
300 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); 316 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
301 } else { 317 } else {
302 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, 318 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 938b1e670b93..826f085c29dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -106,8 +106,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
106 .update_interrupt_mask = rtl92cu_update_interrupt_mask, 106 .update_interrupt_mask = rtl92cu_update_interrupt_mask,
107 .get_hw_reg = rtl92cu_get_hw_reg, 107 .get_hw_reg = rtl92cu_get_hw_reg,
108 .set_hw_reg = rtl92cu_set_hw_reg, 108 .set_hw_reg = rtl92cu_set_hw_reg,
109 .update_rate_tbl = rtl92cu_update_hal_rate_table, 109 .update_rate_tbl = rtl92cu_update_hal_rate_tbl,
110 .update_rate_mask = rtl92cu_update_hal_rate_mask,
111 .fill_tx_desc = rtl92cu_tx_fill_desc, 110 .fill_tx_desc = rtl92cu_tx_fill_desc,
112 .fill_fake_txdesc = rtl92cu_fill_fake_txdesc, 111 .fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
113 .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc, 112 .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc,
@@ -137,6 +136,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
137 .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate, 136 .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate,
138 .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback, 137 .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
139 .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower, 138 .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
139 .fill_h2c_cmd = rtl92c_fill_h2c_cmd,
140}; 140};
141 141
142static struct rtl_mod_params rtl92cu_mod_params = { 142static struct rtl_mod_params rtl92cu_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
index a1310abd0d54..262e1e4c6e5b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -49,5 +49,8 @@ bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
49u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw, 49u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
50 enum radio_path rfpath, u32 regaddr, u32 bitmask); 50 enum radio_path rfpath, u32 regaddr, u32 bitmask);
51void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw); 51void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
52void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
53 struct ieee80211_sta *sta,
54 u8 rssi_level);
52 55
53#endif 56#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 76732b0cd221..a3532e077871 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -824,6 +824,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
824 824
 825 /* should be called after adapter start and interrupt enable. */ 825 /* should be called after adapter start and interrupt enable. */
826 set_hal_stop(rtlhal); 826 set_hal_stop(rtlhal);
827 cancel_work_sync(&rtlpriv->works.fill_h2c_cmd);
827 /* Enable software */ 828 /* Enable software */
828 SET_USB_STOP(rtlusb); 829 SET_USB_STOP(rtlusb);
829 rtl_usb_deinit(hw); 830 rtl_usb_deinit(hw);
@@ -1026,6 +1027,16 @@ static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1026 return false; 1027 return false;
1027} 1028}
1028 1029
1030static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work)
1031{
1032 struct rtl_works *rtlworks =
1033 container_of(work, struct rtl_works, fill_h2c_cmd);
1034 struct ieee80211_hw *hw = rtlworks->hw;
1035 struct rtl_priv *rtlpriv = rtl_priv(hw);
1036
1037 rtlpriv->cfg->ops->fill_h2c_cmd(hw, H2C_RA_MASK, 5, rtlpriv->rate_mask);
1038}
1039
1029static struct rtl_intf_ops rtl_usb_ops = { 1040static struct rtl_intf_ops rtl_usb_ops = {
1030 .adapter_start = rtl_usb_start, 1041 .adapter_start = rtl_usb_start,
1031 .adapter_stop = rtl_usb_stop, 1042 .adapter_stop = rtl_usb_stop,
@@ -1057,6 +1068,8 @@ int rtl_usb_probe(struct usb_interface *intf,
1057 1068
1058 /* this spin lock must be initialized early */ 1069 /* this spin lock must be initialized early */
1059 spin_lock_init(&rtlpriv->locks.usb_lock); 1070 spin_lock_init(&rtlpriv->locks.usb_lock);
1071 INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
1072 rtl_fill_h2c_cmd_work_callback);
1060 1073
1061 rtlpriv->usb_data_index = 0; 1074 rtlpriv->usb_data_index = 0;
1062 init_completion(&rtlpriv->firmware_loading_complete); 1075 init_completion(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 44328baa6389..cc03e7c87cbe 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1736,6 +1736,8 @@ struct rtl_hal_ops {
1736 void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw, 1736 void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw,
1737 bool mstate); 1737 bool mstate);
1738 void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw); 1738 void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
1739 void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
1740 u32 cmd_len, u8 *p_cmdbuffer);
1739}; 1741};
1740 1742
1741struct rtl_intf_ops { 1743struct rtl_intf_ops {
@@ -1869,6 +1871,7 @@ struct rtl_works {
1869 struct delayed_work fwevt_wq; 1871 struct delayed_work fwevt_wq;
1870 1872
1871 struct work_struct lps_change_work; 1873 struct work_struct lps_change_work;
1874 struct work_struct fill_h2c_cmd;
1872}; 1875};
1873 1876
1874struct rtl_debug { 1877struct rtl_debug {
@@ -2048,6 +2051,7 @@ struct rtl_priv {
2048 }; 2051 };
2049 }; 2052 };
2050 bool enter_ps; /* true when entering PS */ 2053 bool enter_ps; /* true when entering PS */
2054 u8 rate_mask[5];
2051 2055
2052 /*This must be the last item so 2056 /*This must be the last item so
2053 that it points to the data allocated 2057 that it points to the data allocated
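
Taken together, the hw.c, sw.c, usb.c and wifi.h changes above move the H2C rate-mask write out of atomic context: the caller stashes the 5-byte payload in rtl_priv and schedules a work item, whose callback performs the USB I/O from process context; the work is initialised at probe time and cancelled in the stop path. A condensed, hedged sketch of that plumbing with "my_"-prefixed placeholder names (only the workqueue API calls are real kernel interfaces):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_priv {
	u8 rate_mask[5];			/* payload consumed by the worker */
	struct work_struct fill_h2c_cmd;
};

/* Placeholder for the sleeping USB control transfer. */
static void my_usb_h2c_write(struct my_priv *priv, const u8 *buf, size_t len);

static void my_fill_h2c_work(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv, fill_h2c_cmd);

	/* Process context: USB I/O may sleep here. */
	my_usb_h2c_write(priv, priv->rate_mask, sizeof(priv->rate_mask));
}

/* May be called from atomic context (e.g. a rate-update callback). */
static void my_update_rate_mask(struct my_priv *priv, const u8 mask[5])
{
	memcpy(priv->rate_mask, mask, sizeof(priv->rate_mask));
	schedule_work(&priv->fill_h2c_cmd);
}

static void my_init(struct my_priv *priv)
{
	/* Initialise before anything can schedule the work. */
	INIT_WORK(&priv->fill_h2c_cmd, my_fill_h2c_work);
}

static void my_stop(struct my_priv *priv)
{
	/* Wait out a pending/running worker before the device goes away. */
	cancel_work_sync(&priv->fill_h2c_cmd);
}
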
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index affdb3ec6225..4a0bbb13806b 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -310,7 +310,7 @@ static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd,
310 memcpy(cmd->channels_2, cmd_channels->channels_2, 310 memcpy(cmd->channels_2, cmd_channels->channels_2,
311 sizeof(cmd->channels_2)); 311 sizeof(cmd->channels_2));
312 memcpy(cmd->channels_5, cmd_channels->channels_5, 312 memcpy(cmd->channels_5, cmd_channels->channels_5,
313 sizeof(cmd->channels_2)); 313 sizeof(cmd->channels_5));
314 /* channels_4 are not supported, so no need to copy them */ 314 /* channels_4 are not supported, so no need to copy them */
315} 315}
316 316
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 222d03540200..9e5484a73667 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -36,12 +36,12 @@
36#define WL127X_IFTYPE_SR_VER 3 36#define WL127X_IFTYPE_SR_VER 3
37#define WL127X_MAJOR_SR_VER 10 37#define WL127X_MAJOR_SR_VER 10
38#define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE 38#define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
39#define WL127X_MINOR_SR_VER 115 39#define WL127X_MINOR_SR_VER 133
40/* minimum multi-role FW version for wl127x */ 40/* minimum multi-role FW version for wl127x */
41#define WL127X_IFTYPE_MR_VER 5 41#define WL127X_IFTYPE_MR_VER 5
42#define WL127X_MAJOR_MR_VER 7 42#define WL127X_MAJOR_MR_VER 7
43#define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE 43#define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE
44#define WL127X_MINOR_MR_VER 115 44#define WL127X_MINOR_MR_VER 42
45 45
46/* FW chip version for wl128x */ 46/* FW chip version for wl128x */
47#define WL128X_CHIP_VER 7 47#define WL128X_CHIP_VER 7
@@ -49,7 +49,7 @@
49#define WL128X_IFTYPE_SR_VER 3 49#define WL128X_IFTYPE_SR_VER 3
50#define WL128X_MAJOR_SR_VER 10 50#define WL128X_MAJOR_SR_VER 10
51#define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE 51#define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
52#define WL128X_MINOR_SR_VER 115 52#define WL128X_MINOR_SR_VER 133
53/* minimum multi-role FW version for wl128x */ 53/* minimum multi-role FW version for wl128x */
54#define WL128X_IFTYPE_MR_VER 5 54#define WL128X_IFTYPE_MR_VER 5
55#define WL128X_MAJOR_MR_VER 7 55#define WL128X_MAJOR_MR_VER 7
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index 09d944505ac0..2b642f8c9266 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -34,7 +34,7 @@ static void wl18xx_adjust_channels(struct wl18xx_cmd_scan_params *cmd,
34 memcpy(cmd->channels_2, cmd_channels->channels_2, 34 memcpy(cmd->channels_2, cmd_channels->channels_2,
35 sizeof(cmd->channels_2)); 35 sizeof(cmd->channels_2));
36 memcpy(cmd->channels_5, cmd_channels->channels_5, 36 memcpy(cmd->channels_5, cmd_channels->channels_5,
37 sizeof(cmd->channels_2)); 37 sizeof(cmd->channels_5));
38 /* channels_4 are not supported, so no need to copy them */ 38 /* channels_4 are not supported, so no need to copy them */
39} 39}
40 40
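
Both scan.c hunks above (wl12xx and wl18xx) fix the same copy-and-paste slip: the 5 GHz channel copy was bounded by sizeof() of the 2.4 GHz array. The safe habit is to size the copy by the destination actually being filled; a generic illustration with an invented structure:

#include <string.h>

struct chan_cmd {
	unsigned char channels_2[14];	/* 2.4 GHz */
	unsigned char channels_5[32];	/* 5 GHz */
};

static void adjust_channels(struct chan_cmd *cmd, const struct chan_cmd *src)
{
	memcpy(cmd->channels_2, src->channels_2, sizeof(cmd->channels_2));
	/* The buggy form reused sizeof(cmd->channels_2) below, silently
	 * truncating the 5 GHz copy. */
	memcpy(cmd->channels_5, src->channels_5, sizeof(cmd->channels_5));
}
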
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 0eab77b22340..f296f3f7db9b 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -25,6 +25,7 @@
25#include <linux/rtc.h> 25#include <linux/rtc.h>
26#include <linux/bcd.h> 26#include <linux/bcd.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/spinlock.h>
28#include <linux/ioctl.h> 29#include <linux/ioctl.h>
29#include <linux/completion.h> 30#include <linux/completion.h>
30#include <linux/io.h> 31#include <linux/io.h>
@@ -42,10 +43,65 @@
42 43
43#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */ 44#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
44 45
46struct at91_rtc_config {
47 bool use_shadow_imr;
48};
49
50static const struct at91_rtc_config *at91_rtc_config;
45static DECLARE_COMPLETION(at91_rtc_updated); 51static DECLARE_COMPLETION(at91_rtc_updated);
46static unsigned int at91_alarm_year = AT91_RTC_EPOCH; 52static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
47static void __iomem *at91_rtc_regs; 53static void __iomem *at91_rtc_regs;
48static int irq; 54static int irq;
55static DEFINE_SPINLOCK(at91_rtc_lock);
56static u32 at91_rtc_shadow_imr;
57
58static void at91_rtc_write_ier(u32 mask)
59{
60 unsigned long flags;
61
62 spin_lock_irqsave(&at91_rtc_lock, flags);
63 at91_rtc_shadow_imr |= mask;
64 at91_rtc_write(AT91_RTC_IER, mask);
65 spin_unlock_irqrestore(&at91_rtc_lock, flags);
66}
67
68static void at91_rtc_write_idr(u32 mask)
69{
70 unsigned long flags;
71
72 spin_lock_irqsave(&at91_rtc_lock, flags);
73 at91_rtc_write(AT91_RTC_IDR, mask);
74 /*
75 * Register read back (of any RTC-register) needed to make sure
76 * IDR-register write has reached the peripheral before updating
77 * shadow mask.
78 *
79 * Note that there is still a possibility that the mask is updated
80 * before interrupts have actually been disabled in hardware. The only
 81 * way to be certain would be to poll the IMR-register, which is
82 * the very register we are trying to emulate. The register read back
83 * is a reasonable heuristic.
84 */
85 at91_rtc_read(AT91_RTC_SR);
86 at91_rtc_shadow_imr &= ~mask;
87 spin_unlock_irqrestore(&at91_rtc_lock, flags);
88}
89
90static u32 at91_rtc_read_imr(void)
91{
92 unsigned long flags;
93 u32 mask;
94
95 if (at91_rtc_config->use_shadow_imr) {
96 spin_lock_irqsave(&at91_rtc_lock, flags);
97 mask = at91_rtc_shadow_imr;
98 spin_unlock_irqrestore(&at91_rtc_lock, flags);
99 } else {
100 mask = at91_rtc_read(AT91_RTC_IMR);
101 }
102
103 return mask;
104}
49 105
50/* 106/*
51 * Decode time/date into rtc_time structure 107 * Decode time/date into rtc_time structure
@@ -110,9 +166,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
110 cr = at91_rtc_read(AT91_RTC_CR); 166 cr = at91_rtc_read(AT91_RTC_CR);
111 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); 167 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
112 168
113 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); 169 at91_rtc_write_ier(AT91_RTC_ACKUPD);
114 wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ 170 wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
115 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); 171 at91_rtc_write_idr(AT91_RTC_ACKUPD);
116 172
117 at91_rtc_write(AT91_RTC_TIMR, 173 at91_rtc_write(AT91_RTC_TIMR,
118 bin2bcd(tm->tm_sec) << 0 174 bin2bcd(tm->tm_sec) << 0
@@ -144,7 +200,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
144 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); 200 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
145 tm->tm_year = at91_alarm_year - 1900; 201 tm->tm_year = at91_alarm_year - 1900;
146 202
147 alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) 203 alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM)
148 ? 1 : 0; 204 ? 1 : 0;
149 205
150 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 206 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -169,7 +225,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
169 tm.tm_min = alrm->time.tm_min; 225 tm.tm_min = alrm->time.tm_min;
170 tm.tm_sec = alrm->time.tm_sec; 226 tm.tm_sec = alrm->time.tm_sec;
171 227
172 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); 228 at91_rtc_write_idr(AT91_RTC_ALARM);
173 at91_rtc_write(AT91_RTC_TIMALR, 229 at91_rtc_write(AT91_RTC_TIMALR,
174 bin2bcd(tm.tm_sec) << 0 230 bin2bcd(tm.tm_sec) << 0
175 | bin2bcd(tm.tm_min) << 8 231 | bin2bcd(tm.tm_min) << 8
@@ -182,7 +238,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
182 238
183 if (alrm->enabled) { 239 if (alrm->enabled) {
184 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); 240 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
185 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); 241 at91_rtc_write_ier(AT91_RTC_ALARM);
186 } 242 }
187 243
188 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 244 dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -198,9 +254,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
198 254
199 if (enabled) { 255 if (enabled) {
200 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); 256 at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
201 at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); 257 at91_rtc_write_ier(AT91_RTC_ALARM);
202 } else 258 } else
203 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); 259 at91_rtc_write_idr(AT91_RTC_ALARM);
204 260
205 return 0; 261 return 0;
206} 262}
@@ -209,7 +265,7 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
209 */ 265 */
210static int at91_rtc_proc(struct device *dev, struct seq_file *seq) 266static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
211{ 267{
212 unsigned long imr = at91_rtc_read(AT91_RTC_IMR); 268 unsigned long imr = at91_rtc_read_imr();
213 269
214 seq_printf(seq, "update_IRQ\t: %s\n", 270 seq_printf(seq, "update_IRQ\t: %s\n",
215 (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); 271 (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
@@ -229,7 +285,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
229 unsigned int rtsr; 285 unsigned int rtsr;
230 unsigned long events = 0; 286 unsigned long events = 0;
231 287
232 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR); 288 rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
233 if (rtsr) { /* this interrupt is shared! Is it ours? */ 289 if (rtsr) { /* this interrupt is shared! Is it ours? */
234 if (rtsr & AT91_RTC_ALARM) 290 if (rtsr & AT91_RTC_ALARM)
235 events |= (RTC_AF | RTC_IRQF); 291 events |= (RTC_AF | RTC_IRQF);
@@ -250,6 +306,43 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
250 return IRQ_NONE; /* not handled */ 306 return IRQ_NONE; /* not handled */
251} 307}
252 308
309static const struct at91_rtc_config at91rm9200_config = {
310};
311
312static const struct at91_rtc_config at91sam9x5_config = {
313 .use_shadow_imr = true,
314};
315
316#ifdef CONFIG_OF
317static const struct of_device_id at91_rtc_dt_ids[] = {
318 {
319 .compatible = "atmel,at91rm9200-rtc",
320 .data = &at91rm9200_config,
321 }, {
322 .compatible = "atmel,at91sam9x5-rtc",
323 .data = &at91sam9x5_config,
324 }, {
325 /* sentinel */
326 }
327};
328MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
329#endif
330
331static const struct at91_rtc_config *
332at91_rtc_get_config(struct platform_device *pdev)
333{
334 const struct of_device_id *match;
335
336 if (pdev->dev.of_node) {
337 match = of_match_node(at91_rtc_dt_ids, pdev->dev.of_node);
338 if (!match)
339 return NULL;
340 return (const struct at91_rtc_config *)match->data;
341 }
342
343 return &at91rm9200_config;
344}
345
253static const struct rtc_class_ops at91_rtc_ops = { 346static const struct rtc_class_ops at91_rtc_ops = {
254 .read_time = at91_rtc_readtime, 347 .read_time = at91_rtc_readtime,
255 .set_time = at91_rtc_settime, 348 .set_time = at91_rtc_settime,
@@ -268,6 +361,10 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
268 struct resource *regs; 361 struct resource *regs;
269 int ret = 0; 362 int ret = 0;
270 363
364 at91_rtc_config = at91_rtc_get_config(pdev);
365 if (!at91_rtc_config)
366 return -ENODEV;
367
271 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 368 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
272 if (!regs) { 369 if (!regs) {
273 dev_err(&pdev->dev, "no mmio resource defined\n"); 370 dev_err(&pdev->dev, "no mmio resource defined\n");
@@ -290,7 +387,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
290 at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */ 387 at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */
291 388
292 /* Disable all interrupts */ 389 /* Disable all interrupts */
293 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | 390 at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
294 AT91_RTC_SECEV | AT91_RTC_TIMEV | 391 AT91_RTC_SECEV | AT91_RTC_TIMEV |
295 AT91_RTC_CALEV); 392 AT91_RTC_CALEV);
296 393
@@ -335,7 +432,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
335 struct rtc_device *rtc = platform_get_drvdata(pdev); 432 struct rtc_device *rtc = platform_get_drvdata(pdev);
336 433
337 /* Disable all interrupts */ 434 /* Disable all interrupts */
338 at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | 435 at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
339 AT91_RTC_SECEV | AT91_RTC_TIMEV | 436 AT91_RTC_SECEV | AT91_RTC_TIMEV |
340 AT91_RTC_CALEV); 437 AT91_RTC_CALEV);
341 free_irq(irq, pdev); 438 free_irq(irq, pdev);
@@ -358,13 +455,13 @@ static int at91_rtc_suspend(struct device *dev)
358 /* this IRQ is shared with DBGU and other hardware which isn't 455 /* this IRQ is shared with DBGU and other hardware which isn't
359 * necessarily doing PM like we are... 456 * necessarily doing PM like we are...
360 */ 457 */
361 at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) 458 at91_rtc_imr = at91_rtc_read_imr()
362 & (AT91_RTC_ALARM|AT91_RTC_SECEV); 459 & (AT91_RTC_ALARM|AT91_RTC_SECEV);
363 if (at91_rtc_imr) { 460 if (at91_rtc_imr) {
364 if (device_may_wakeup(dev)) 461 if (device_may_wakeup(dev))
365 enable_irq_wake(irq); 462 enable_irq_wake(irq);
366 else 463 else
367 at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); 464 at91_rtc_write_idr(at91_rtc_imr);
368 } 465 }
369 return 0; 466 return 0;
370} 467}
@@ -375,7 +472,7 @@ static int at91_rtc_resume(struct device *dev)
375 if (device_may_wakeup(dev)) 472 if (device_may_wakeup(dev))
376 disable_irq_wake(irq); 473 disable_irq_wake(irq);
377 else 474 else
378 at91_rtc_write(AT91_RTC_IER, at91_rtc_imr); 475 at91_rtc_write_ier(at91_rtc_imr);
379 } 476 }
380 return 0; 477 return 0;
381} 478}
@@ -383,12 +480,6 @@ static int at91_rtc_resume(struct device *dev)
383 480
384static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume); 481static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
385 482
386static const struct of_device_id at91_rtc_dt_ids[] = {
387 { .compatible = "atmel,at91rm9200-rtc" },
388 { /* sentinel */ }
389};
390MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
391
392static struct platform_driver at91_rtc_driver = { 483static struct platform_driver at91_rtc_driver = {
393 .remove = __exit_p(at91_rtc_remove), 484 .remove = __exit_p(at91_rtc_remove),
394 .driver = { 485 .driver = {
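
The at91rm9200 RTC change above works around chips flagged with use_shadow_imr, whose interrupt-mask register cannot be read back reliably: every enable/disable is mirrored in a spinlock-protected shadow word, and readers consult the shadow copy when the per-chip config asks for it. A minimal sketch of that pattern; the register names and hw_write()/hw_read() accessors are placeholders, not the driver's real symbols:

#include <linux/spinlock.h>
#include <linux/types.h>

enum { REG_IER, REG_IDR, REG_SR };		/* placeholder register offsets */

static void hw_write(int reg, u32 val) { }	/* placeholder MMIO write */
static u32 hw_read(int reg) { return 0; }	/* placeholder MMIO read */

static DEFINE_SPINLOCK(imr_lock);
static u32 shadow_imr;

static void irq_enable(u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&imr_lock, flags);
	shadow_imr |= mask;
	hw_write(REG_IER, mask);
	spin_unlock_irqrestore(&imr_lock, flags);
}

static void irq_disable(u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&imr_lock, flags);
	hw_write(REG_IDR, mask);
	hw_read(REG_SR);	/* read back so the disable reaches the IP first */
	shadow_imr &= ~mask;
	spin_unlock_irqrestore(&imr_lock, flags);
}

static u32 irq_mask_read(void)
{
	unsigned long flags;
	u32 mask;

	spin_lock_irqsave(&imr_lock, flags);
	mask = shadow_imr;	/* what IMR would report if it were readable */
	spin_unlock_irqrestore(&imr_lock, flags);
	return mask;
}
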
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index cc5bea9c4b1c..f1cb706445c7 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -854,6 +854,9 @@ static int cmos_resume(struct device *dev)
854 } 854 }
855 855
856 spin_lock_irq(&rtc_lock); 856 spin_lock_irq(&rtc_lock);
857 if (device_may_wakeup(dev))
858 hpet_rtc_timer_init();
859
857 do { 860 do {
858 CMOS_WRITE(tmp, RTC_CONTROL); 861 CMOS_WRITE(tmp, RTC_CONTROL);
859 hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK); 862 hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK);
@@ -869,7 +872,6 @@ static int cmos_resume(struct device *dev)
869 rtc_update_irq(cmos->rtc, 1, mask); 872 rtc_update_irq(cmos->rtc, 1, mask);
870 tmp &= ~RTC_AIE; 873 tmp &= ~RTC_AIE;
871 hpet_mask_rtc_irq_bit(RTC_AIE); 874 hpet_mask_rtc_irq_bit(RTC_AIE);
872 hpet_rtc_timer_init();
873 } while (mask & RTC_AIE); 875 } while (mask & RTC_AIE);
874 spin_unlock_irq(&rtc_lock); 876 spin_unlock_irq(&rtc_lock);
875 } 877 }
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 459c2ffc95a6..426901cef14f 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -273,6 +273,8 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
273 return ret; 273 return ret;
274 } 274 }
275 275
276 device_init_wakeup(&pdev->dev, 1);
277
276 platform_set_drvdata(pdev, rtc); 278 platform_set_drvdata(pdev, rtc);
277 rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev), 279 rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev),
278 &tps6586x_rtc_ops, THIS_MODULE); 280 &tps6586x_rtc_ops, THIS_MODULE);
@@ -292,7 +294,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
292 goto fail_rtc_register; 294 goto fail_rtc_register;
293 } 295 }
294 disable_irq(rtc->irq); 296 disable_irq(rtc->irq);
295 device_set_wakeup_capable(&pdev->dev, 1);
296 return 0; 297 return 0;
297 298
298fail_rtc_register: 299fail_rtc_register:
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 8751a5240c99..b2eab34f38d9 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -524,6 +524,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
524 } 524 }
525 525
526 platform_set_drvdata(pdev, rtc); 526 platform_set_drvdata(pdev, rtc);
527 device_init_wakeup(&pdev->dev, 1);
527 return 0; 528 return 0;
528 529
529out2: 530out2:
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 60cfae51c713..eab593eaaafa 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -89,7 +89,7 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
89 if ((mask & hspi_read(hspi, SPSR)) == val) 89 if ((mask & hspi_read(hspi, SPSR)) == val)
90 return 0; 90 return 0;
91 91
92 msleep(20); 92 udelay(10);
93 } 93 }
94 94
95 dev_err(hspi->dev, "timeout\n"); 95 dev_err(hspi->dev, "timeout\n");
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 35f60bd252dd..637d728fbeb5 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1487,7 +1487,7 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
1487 return 0; 1487 return 0;
1488 1488
1489err_spi_register_master: 1489err_spi_register_master:
1490 free_irq(board_dat->pdev->irq, board_dat); 1490 free_irq(board_dat->pdev->irq, data);
1491err_request_irq: 1491err_request_irq:
1492 pch_spi_free_resources(board_dat, data); 1492 pch_spi_free_resources(board_dat, data);
1493err_spi_get_resources: 1493err_spi_get_resources:
@@ -1667,6 +1667,7 @@ static int pch_spi_probe(struct pci_dev *pdev,
1667 pd_dev = platform_device_alloc("pch-spi", i); 1667 pd_dev = platform_device_alloc("pch-spi", i);
1668 if (!pd_dev) { 1668 if (!pd_dev) {
1669 dev_err(&pdev->dev, "platform_device_alloc failed\n"); 1669 dev_err(&pdev->dev, "platform_device_alloc failed\n");
1670 retval = -ENOMEM;
1670 goto err_platform_device; 1671 goto err_platform_device;
1671 } 1672 }
1672 pd_dev_save->pd_save[i] = pd_dev; 1673 pd_dev_save->pd_save[i] = pd_dev;
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index e1d769607425..34d18dcfa0db 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -267,7 +267,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
267{ 267{
268 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); 268 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
269 u32 ipif_ier; 269 u32 ipif_ier;
270 u16 cr;
271 270
272 /* We get here with transmitter inhibited */ 271 /* We get here with transmitter inhibited */
273 272
@@ -276,7 +275,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
276 xspi->remaining_bytes = t->len; 275 xspi->remaining_bytes = t->len;
277 INIT_COMPLETION(xspi->done); 276 INIT_COMPLETION(xspi->done);
278 277
279 xilinx_spi_fill_tx_fifo(xspi);
280 278
281 /* Enable the transmit empty interrupt, which we use to determine 279 /* Enable the transmit empty interrupt, which we use to determine
282 * progress on the transmission. 280 * progress on the transmission.
@@ -285,12 +283,41 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
285 xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY, 283 xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
286 xspi->regs + XIPIF_V123B_IIER_OFFSET); 284 xspi->regs + XIPIF_V123B_IIER_OFFSET);
287 285
288 /* Start the transfer by not inhibiting the transmitter any longer */ 286 for (;;) {
289 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & 287 u16 cr;
290 ~XSPI_CR_TRANS_INHIBIT; 288 u8 sr;
291 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); 289
290 xilinx_spi_fill_tx_fifo(xspi);
291
292 /* Start the transfer by not inhibiting the transmitter any
293 * longer
294 */
295 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
296 ~XSPI_CR_TRANS_INHIBIT;
297 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
298
299 wait_for_completion(&xspi->done);
300
301 /* A transmit has just completed. Process received data and
302 * check for more data to transmit. Always inhibit the
303 * transmitter while the Isr refills the transmit register/FIFO,
304 * or make sure it is stopped if we're done.
305 */
306 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
307 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
308 xspi->regs + XSPI_CR_OFFSET);
309
310 /* Read out all the data from the Rx FIFO */
311 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
312 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
313 xspi->rx_fn(xspi);
314 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
315 }
292 316
293 wait_for_completion(&xspi->done); 317 /* See if there is more data to send */
318 if (!xspi->remaining_bytes > 0)
319 break;
320 }
294 321
295 /* Disable the transmit empty interrupt */ 322 /* Disable the transmit empty interrupt */
296 xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); 323 xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
@@ -314,38 +341,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
314 xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET); 341 xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
315 342
316 if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */ 343 if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
317 u16 cr; 344 complete(&xspi->done);
318 u8 sr;
319
320 /* A transmit has just completed. Process received data and
321 * check for more data to transmit. Always inhibit the
322 * transmitter while the Isr refills the transmit register/FIFO,
323 * or make sure it is stopped if we're done.
324 */
325 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
326 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
327 xspi->regs + XSPI_CR_OFFSET);
328
329 /* Read out all the data from the Rx FIFO */
330 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
331 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
332 xspi->rx_fn(xspi);
333 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
334 }
335
336 /* See if there is more data to send */
337 if (xspi->remaining_bytes > 0) {
338 xilinx_spi_fill_tx_fifo(xspi);
339 /* Start the transfer by not inhibiting the
340 * transmitter any longer
341 */
342 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
343 } else {
344 /* No more data to send.
345 * Indicate the transfer is completed.
346 */
347 complete(&xspi->done);
348 }
349 } 345 }
350 346
351 return IRQ_HANDLED; 347 return IRQ_HANDLED;
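
The spi-xilinx rework above thins the interrupt handler down to a complete() and moves the FIFO refill and drain into the transfer function, which now loops fill/start/wait/drain until nothing remains to send. A schematic version of that control flow; the fill/start/stop/drain helpers are placeholders for the driver's register accesses and are assumed to update remaining_bytes:

#include <linux/completion.h>
#include <linux/interrupt.h>

struct xspi_sketch {
	struct completion done;
	int remaining_bytes;
};

static void fill_tx_fifo(struct xspi_sketch *x);	/* decrements remaining_bytes */
static void start_transfer(struct xspi_sketch *x);	/* un-inhibit the transmitter */
static void stop_transfer(struct xspi_sketch *x);	/* inhibit while touching FIFOs */
static void drain_rx_fifo(struct xspi_sketch *x);	/* placeholder */

static irqreturn_t xspi_sketch_irq(int irq, void *dev_id)
{
	struct xspi_sketch *x = dev_id;

	complete(&x->done);	/* all FIFO handling now happens outside the ISR */
	return IRQ_HANDLED;
}

static void xspi_sketch_txrx(struct xspi_sketch *x)
{
	while (x->remaining_bytes > 0) {
		fill_tx_fifo(x);
		start_transfer(x);
		wait_for_completion(&x->done);	/* TX-empty interrupt fired */
		stop_transfer(x);
		drain_rx_fifo(x);
	}
}
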
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2b51e2336aa2..f80d3dd41d8c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -155,14 +155,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
155 155
156static void vhost_net_clear_ubuf_info(struct vhost_net *n) 156static void vhost_net_clear_ubuf_info(struct vhost_net *n)
157{ 157{
158
159 bool zcopy;
160 int i; 158 int i;
161 159
162 for (i = 0; i < n->dev.nvqs; ++i) { 160 for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
163 zcopy = vhost_net_zcopy_mask & (0x1 << i); 161 kfree(n->vqs[i].ubuf_info);
164 if (zcopy) 162 n->vqs[i].ubuf_info = NULL;
165 kfree(n->vqs[i].ubuf_info);
166 } 163 }
167} 164}
168 165
@@ -171,7 +168,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
171 bool zcopy; 168 bool zcopy;
172 int i; 169 int i;
173 170
174 for (i = 0; i < n->dev.nvqs; ++i) { 171 for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
175 zcopy = vhost_net_zcopy_mask & (0x1 << i); 172 zcopy = vhost_net_zcopy_mask & (0x1 << i);
176 if (!zcopy) 173 if (!zcopy)
177 continue; 174 continue;
@@ -183,12 +180,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
183 return 0; 180 return 0;
184 181
185err: 182err:
186 while (i--) { 183 vhost_net_clear_ubuf_info(n);
187 zcopy = vhost_net_zcopy_mask & (0x1 << i);
188 if (!zcopy)
189 continue;
190 kfree(n->vqs[i].ubuf_info);
191 }
192 return -ENOMEM; 184 return -ENOMEM;
193} 185}
194 186
@@ -196,12 +188,12 @@ void vhost_net_vq_reset(struct vhost_net *n)
196{ 188{
197 int i; 189 int i;
198 190
191 vhost_net_clear_ubuf_info(n);
192
199 for (i = 0; i < VHOST_NET_VQ_MAX; i++) { 193 for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
200 n->vqs[i].done_idx = 0; 194 n->vqs[i].done_idx = 0;
201 n->vqs[i].upend_idx = 0; 195 n->vqs[i].upend_idx = 0;
202 n->vqs[i].ubufs = NULL; 196 n->vqs[i].ubufs = NULL;
203 kfree(n->vqs[i].ubuf_info);
204 n->vqs[i].ubuf_info = NULL;
205 n->vqs[i].vhost_hlen = 0; 197 n->vqs[i].vhost_hlen = 0;
206 n->vqs[i].sock_hlen = 0; 198 n->vqs[i].sock_hlen = 0;
207 } 199 }
@@ -436,7 +428,8 @@ static void handle_tx(struct vhost_net *net)
436 kref_get(&ubufs->kref); 428 kref_get(&ubufs->kref);
437 } 429 }
438 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; 430 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
439 } 431 } else
432 msg.msg_control = NULL;
440 /* TODO: Check specific error and bomb out unless ENOBUFS? */ 433 /* TODO: Check specific error and bomb out unless ENOBUFS? */
441 err = sock->ops->sendmsg(NULL, sock, &msg, len); 434 err = sock->ops->sendmsg(NULL, sock, &msg, len);
442 if (unlikely(err < 0)) { 435 if (unlikely(err < 0)) {
@@ -1053,6 +1046,10 @@ static long vhost_net_set_owner(struct vhost_net *n)
1053 int r; 1046 int r;
1054 1047
1055 mutex_lock(&n->dev.mutex); 1048 mutex_lock(&n->dev.mutex);
1049 if (vhost_dev_has_owner(&n->dev)) {
1050 r = -EBUSY;
1051 goto out;
1052 }
1056 r = vhost_net_set_ubuf_info(n); 1053 r = vhost_net_set_ubuf_info(n);
1057 if (r) 1054 if (r)
1058 goto out; 1055 goto out;
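
The vhost-net hunks above collapse the hand-rolled error unwinding into a single vhost_net_clear_ubuf_info() that frees and NULLs every queue's buffer, relying on kfree(NULL) being a no-op so the helper is safe to call from both the allocation error path and vhost_net_vq_reset(). The shape of that pattern, with invented names and sizes:

#include <linux/slab.h>
#include <linux/errno.h>

#define MY_NVQS 2

struct my_vq {
	void *ubuf_info;
};

static void my_clear_ubuf_info(struct my_vq *vqs)
{
	int i;

	for (i = 0; i < MY_NVQS; i++) {
		kfree(vqs[i].ubuf_info);	/* kfree(NULL) is a no-op */
		vqs[i].ubuf_info = NULL;	/* keeps the helper idempotent */
	}
}

static int my_set_ubuf_info(struct my_vq *vqs)
{
	int i;

	for (i = 0; i < MY_NVQS; i++) {
		vqs[i].ubuf_info = kzalloc(64, GFP_KERNEL);
		if (!vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	my_clear_ubuf_info(vqs);	/* frees whatever was allocated so far */
	return -ENOMEM;
}
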
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index beee7f5787e6..60aa5ad09a2f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -344,13 +344,19 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
344} 344}
345 345
346/* Caller should have device mutex */ 346/* Caller should have device mutex */
347bool vhost_dev_has_owner(struct vhost_dev *dev)
348{
349 return dev->mm;
350}
351
352/* Caller should have device mutex */
347long vhost_dev_set_owner(struct vhost_dev *dev) 353long vhost_dev_set_owner(struct vhost_dev *dev)
348{ 354{
349 struct task_struct *worker; 355 struct task_struct *worker;
350 int err; 356 int err;
351 357
352 /* Is there an owner already? */ 358 /* Is there an owner already? */
353 if (dev->mm) { 359 if (vhost_dev_has_owner(dev)) {
354 err = -EBUSY; 360 err = -EBUSY;
355 goto err_mm; 361 goto err_mm;
356 } 362 }
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index a7ad63592987..64adcf99ff33 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -133,6 +133,7 @@ struct vhost_dev {
133 133
134long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs); 134long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
135long vhost_dev_set_owner(struct vhost_dev *dev); 135long vhost_dev_set_owner(struct vhost_dev *dev);
136bool vhost_dev_has_owner(struct vhost_dev *dev);
136long vhost_dev_check_owner(struct vhost_dev *); 137long vhost_dev_check_owner(struct vhost_dev *);
137struct vhost_memory *vhost_dev_reset_owner_prepare(void); 138struct vhost_memory *vhost_dev_reset_owner_prepare(void);
138void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *); 139void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index cc072c66c766..0f0493c63371 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -379,10 +379,10 @@ static int xen_tmem_init(void)
379#ifdef CONFIG_FRONTSWAP 379#ifdef CONFIG_FRONTSWAP
380 if (tmem_enabled && frontswap) { 380 if (tmem_enabled && frontswap) {
381 char *s = ""; 381 char *s = "";
382 struct frontswap_ops *old_ops = 382 struct frontswap_ops *old_ops;
383 frontswap_register_ops(&tmem_frontswap_ops);
384 383
385 tmem_frontswap_poolid = -1; 384 tmem_frontswap_poolid = -1;
385 old_ops = frontswap_register_ops(&tmem_frontswap_ops);
386 if (IS_ERR(old_ops) || old_ops) { 386 if (IS_ERR(old_ops) || old_ops) {
387 if (IS_ERR(old_ops)) 387 if (IS_ERR(old_ops))
388 return PTR_ERR(old_ops); 388 return PTR_ERR(old_ops);
diff --git a/fs/aio.c b/fs/aio.c
index 7fe5bdee1630..2bbcacf74d0c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -141,9 +141,6 @@ static void aio_free_ring(struct kioctx *ctx)
141 for (i = 0; i < ctx->nr_pages; i++) 141 for (i = 0; i < ctx->nr_pages; i++)
142 put_page(ctx->ring_pages[i]); 142 put_page(ctx->ring_pages[i]);
143 143
144 if (ctx->mmap_size)
145 vm_munmap(ctx->mmap_base, ctx->mmap_size);
146
147 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) 144 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
148 kfree(ctx->ring_pages); 145 kfree(ctx->ring_pages);
149} 146}
@@ -322,11 +319,6 @@ static void free_ioctx(struct kioctx *ctx)
322 319
323 aio_free_ring(ctx); 320 aio_free_ring(ctx);
324 321
325 spin_lock(&aio_nr_lock);
326 BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
327 aio_nr -= ctx->max_reqs;
328 spin_unlock(&aio_nr_lock);
329
330 pr_debug("freeing %p\n", ctx); 322 pr_debug("freeing %p\n", ctx);
331 323
332 /* 324 /*
@@ -435,17 +427,24 @@ static void kill_ioctx(struct kioctx *ctx)
435{ 427{
436 if (!atomic_xchg(&ctx->dead, 1)) { 428 if (!atomic_xchg(&ctx->dead, 1)) {
437 hlist_del_rcu(&ctx->list); 429 hlist_del_rcu(&ctx->list);
438 /* Between hlist_del_rcu() and dropping the initial ref */
439 synchronize_rcu();
440 430
441 /* 431 /*
442 * We can't punt to workqueue here because put_ioctx() -> 432 * It'd be more correct to do this in free_ioctx(), after all
443 * free_ioctx() will unmap the ringbuffer, and that has to be 433 * the outstanding kiocbs have finished - but by then io_destroy
444 * done in the original process's context. kill_ioctx_rcu/work() 434 * has already returned, so io_setup() could potentially return
445 * exist for exit_aio(), as in that path free_ioctx() won't do 435 * -EAGAIN with no ioctxs actually in use (as far as userspace
446 * the unmap. 436 * could tell).
447 */ 437 */
448 kill_ioctx_work(&ctx->rcu_work); 438 spin_lock(&aio_nr_lock);
439 BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
440 aio_nr -= ctx->max_reqs;
441 spin_unlock(&aio_nr_lock);
442
443 if (ctx->mmap_size)
444 vm_munmap(ctx->mmap_base, ctx->mmap_size);
445
446 /* Between hlist_del_rcu() and dropping the initial ref */
447 call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
449 } 448 }
450} 449}
451 450
@@ -495,10 +494,7 @@ void exit_aio(struct mm_struct *mm)
495 */ 494 */
496 ctx->mmap_size = 0; 495 ctx->mmap_size = 0;
497 496
498 if (!atomic_xchg(&ctx->dead, 1)) { 497 kill_ioctx(ctx);
499 hlist_del_rcu(&ctx->list);
500 call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
501 }
502 } 498 }
503} 499}
504 500
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 202dd3d68be0..ebbf680378e2 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -191,27 +191,23 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
191} 191}
192 192
193/** 193/**
194 * Encode the flock and fcntl locks for the given inode into the pagelist. 194 * Encode the flock and fcntl locks for the given inode into the ceph_filelock
195 * Format is: #fcntl locks, sequential fcntl locks, #flock locks, 195 * array. Must be called with lock_flocks() already held.
196 * sequential flock locks. 196 * If we encounter more of a specific lock type than expected, return -ENOSPC.
197 * Must be called with lock_flocks() already held.
198 * If we encounter more of a specific lock type than expected,
199 * we return the value 1.
200 */ 197 */
201int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, 198int ceph_encode_locks_to_buffer(struct inode *inode,
202 int num_fcntl_locks, int num_flock_locks) 199 struct ceph_filelock *flocks,
200 int num_fcntl_locks, int num_flock_locks)
203{ 201{
204 struct file_lock *lock; 202 struct file_lock *lock;
205 struct ceph_filelock cephlock;
206 int err = 0; 203 int err = 0;
207 int seen_fcntl = 0; 204 int seen_fcntl = 0;
208 int seen_flock = 0; 205 int seen_flock = 0;
206 int l = 0;
209 207
210 dout("encoding %d flock and %d fcntl locks", num_flock_locks, 208 dout("encoding %d flock and %d fcntl locks", num_flock_locks,
211 num_fcntl_locks); 209 num_fcntl_locks);
212 err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32)); 210
213 if (err)
214 goto fail;
215 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { 211 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
216 if (lock->fl_flags & FL_POSIX) { 212 if (lock->fl_flags & FL_POSIX) {
217 ++seen_fcntl; 213 ++seen_fcntl;
@@ -219,19 +215,12 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
219 err = -ENOSPC; 215 err = -ENOSPC;
220 goto fail; 216 goto fail;
221 } 217 }
222 err = lock_to_ceph_filelock(lock, &cephlock); 218 err = lock_to_ceph_filelock(lock, &flocks[l]);
223 if (err) 219 if (err)
224 goto fail; 220 goto fail;
225 err = ceph_pagelist_append(pagelist, &cephlock, 221 ++l;
226 sizeof(struct ceph_filelock));
227 } 222 }
228 if (err)
229 goto fail;
230 } 223 }
231
232 err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32));
233 if (err)
234 goto fail;
235 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { 224 for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
236 if (lock->fl_flags & FL_FLOCK) { 225 if (lock->fl_flags & FL_FLOCK) {
237 ++seen_flock; 226 ++seen_flock;
@@ -239,19 +228,51 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
239 err = -ENOSPC; 228 err = -ENOSPC;
240 goto fail; 229 goto fail;
241 } 230 }
242 err = lock_to_ceph_filelock(lock, &cephlock); 231 err = lock_to_ceph_filelock(lock, &flocks[l]);
243 if (err) 232 if (err)
244 goto fail; 233 goto fail;
245 err = ceph_pagelist_append(pagelist, &cephlock, 234 ++l;
246 sizeof(struct ceph_filelock));
247 } 235 }
248 if (err)
249 goto fail;
250 } 236 }
251fail: 237fail:
252 return err; 238 return err;
253} 239}
254 240
241/**
242 * Copy the encoded flock and fcntl locks into the pagelist.
243 * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
244 * sequential flock locks.
245 * Returns zero on success.
246 */
247int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
248 struct ceph_pagelist *pagelist,
249 int num_fcntl_locks, int num_flock_locks)
250{
251 int err = 0;
252 __le32 nlocks;
253
254 nlocks = cpu_to_le32(num_fcntl_locks);
255 err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
256 if (err)
257 goto out_fail;
258
259 err = ceph_pagelist_append(pagelist, flocks,
260 num_fcntl_locks * sizeof(*flocks));
261 if (err)
262 goto out_fail;
263
264 nlocks = cpu_to_le32(num_flock_locks);
265 err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
266 if (err)
267 goto out_fail;
268
269 err = ceph_pagelist_append(pagelist,
270 &flocks[num_fcntl_locks],
271 num_flock_locks * sizeof(*flocks));
272out_fail:
273 return err;
274}
275
255/* 276/*
256 * Given a pointer to a lock, convert it to a ceph filelock 277 * Given a pointer to a lock, convert it to a ceph filelock
257 */ 278 */
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 4f22671a5bd4..4d2920304be8 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2478,39 +2478,44 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2478 2478
2479 if (recon_state->flock) { 2479 if (recon_state->flock) {
2480 int num_fcntl_locks, num_flock_locks; 2480 int num_fcntl_locks, num_flock_locks;
2481 struct ceph_pagelist_cursor trunc_point; 2481 struct ceph_filelock *flocks;
2482 2482
2483 ceph_pagelist_set_cursor(pagelist, &trunc_point); 2483encode_again:
2484 do { 2484 lock_flocks();
2485 lock_flocks(); 2485 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
2486 ceph_count_locks(inode, &num_fcntl_locks, 2486 unlock_flocks();
2487 &num_flock_locks); 2487 flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
2488 rec.v2.flock_len = (2*sizeof(u32) + 2488 sizeof(struct ceph_filelock), GFP_NOFS);
2489 (num_fcntl_locks+num_flock_locks) * 2489 if (!flocks) {
2490 sizeof(struct ceph_filelock)); 2490 err = -ENOMEM;
2491 unlock_flocks(); 2491 goto out_free;
2492 2492 }
2493 /* pre-alloc pagelist */ 2493 lock_flocks();
2494 ceph_pagelist_truncate(pagelist, &trunc_point); 2494 err = ceph_encode_locks_to_buffer(inode, flocks,
2495 err = ceph_pagelist_append(pagelist, &rec, reclen); 2495 num_fcntl_locks,
2496 if (!err) 2496 num_flock_locks);
2497 err = ceph_pagelist_reserve(pagelist, 2497 unlock_flocks();
2498 rec.v2.flock_len); 2498 if (err) {
2499 2499 kfree(flocks);
2500 /* encode locks */ 2500 if (err == -ENOSPC)
2501 if (!err) { 2501 goto encode_again;
2502 lock_flocks(); 2502 goto out_free;
2503 err = ceph_encode_locks(inode, 2503 }
2504 pagelist, 2504 /*
2505 num_fcntl_locks, 2505 * number of encoded locks is stable, so copy to pagelist
2506 num_flock_locks); 2506 */
2507 unlock_flocks(); 2507 rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
2508 } 2508 (num_fcntl_locks+num_flock_locks) *
2509 } while (err == -ENOSPC); 2509 sizeof(struct ceph_filelock));
2510 err = ceph_pagelist_append(pagelist, &rec, reclen);
2511 if (!err)
2512 err = ceph_locks_to_pagelist(flocks, pagelist,
2513 num_fcntl_locks,
2514 num_flock_locks);
2515 kfree(flocks);
2510 } else { 2516 } else {
2511 err = ceph_pagelist_append(pagelist, &rec, reclen); 2517 err = ceph_pagelist_append(pagelist, &rec, reclen);
2512 } 2518 }
2513
2514out_free: 2519out_free:
2515 kfree(path); 2520 kfree(path);
2516out_dput: 2521out_dput:
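
encode_caps_cb() above replaces the pagelist truncate-and-retry loop with a simpler shape: count the locks under lock_flocks(), drop the lock to allocate a flat ceph_filelock array, re-take it to encode into the buffer, and start over if the counts went stale (-ENOSPC); only the stable buffer is then appended to the pagelist. A compressed sketch of that retry discipline; lock_all(), count_locks() and encode_locks() are stand-ins for the ceph helpers, and struct lk stands in for ceph_filelock:

#include <linux/slab.h>
#include <linux/errno.h>

struct lk {
	unsigned char raw[32];			/* stand-in for ceph_filelock */
};

static void lock_all(void);			/* stand-in for lock_flocks() */
static void unlock_all(void);
static int count_locks(void);
static int encode_locks(struct lk *buf, int n);	/* -ENOSPC if n is now too small */

static int snapshot_locks(struct lk **out, int *nlocks)
{
	struct lk *buf;
	int n, err;

again:
	lock_all();
	n = count_locks();
	unlock_all();

	buf = kmalloc(n * sizeof(*buf), GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	lock_all();
	err = encode_locks(buf, n);
	unlock_all();

	if (err == -ENOSPC) {		/* the lock list grew while unlocked */
		kfree(buf);
		goto again;
	}
	if (err) {
		kfree(buf);
		return err;
	}

	*out = buf;
	*nlocks = n;
	return 0;
}
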
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 8696be2ff679..7ccfdb4aea2e 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -822,8 +822,13 @@ extern const struct export_operations ceph_export_ops;
822extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl); 822extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
823extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl); 823extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
824extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num); 824extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num);
825extern int ceph_encode_locks(struct inode *i, struct ceph_pagelist *p, 825extern int ceph_encode_locks_to_buffer(struct inode *inode,
826 int p_locks, int f_locks); 826 struct ceph_filelock *flocks,
827 int num_fcntl_locks,
828 int num_flock_locks);
829extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
830 struct ceph_pagelist *pagelist,
831 int num_fcntl_locks, int num_flock_locks);
827extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c); 832extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c);
828 833
829/* debugfs.c */ 834/* debugfs.c */
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index b3fdd1a323d6..e68588e6b1e8 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1408,6 +1408,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
1408 mres->lockname_len, mres->lockname); 1408 mres->lockname_len, mres->lockname);
1409 ret = -EFAULT; 1409 ret = -EFAULT;
1410 spin_unlock(&res->spinlock); 1410 spin_unlock(&res->spinlock);
1411 dlm_lockres_put(res);
1411 goto leave; 1412 goto leave;
1412 } 1413 }
1413 res->state |= DLM_LOCK_RES_MIGRATING; 1414 res->state |= DLM_LOCK_RES_MIGRATING;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 04ee1b57c243..b4a5cdf9dbc5 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -947,7 +947,7 @@ leave:
947 ocfs2_free_dir_lookup_result(&orphan_insert); 947 ocfs2_free_dir_lookup_result(&orphan_insert);
948 ocfs2_free_dir_lookup_result(&lookup); 948 ocfs2_free_dir_lookup_result(&lookup);
949 949
950 if (status) 950 if (status && (status != -ENOTEMPTY))
951 mlog_errno(status); 951 mlog_errno(status);
952 952
953 return status; 953 return status;
@@ -2216,7 +2216,7 @@ out:
2216 2216
2217 brelse(orphan_dir_bh); 2217 brelse(orphan_dir_bh);
2218 2218
2219 return 0; 2219 return ret;
2220} 2220}
2221 2221
2222int ocfs2_create_inode_in_orphan(struct inode *dir, 2222int ocfs2_create_inode_in_orphan(struct inode *dir,
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index bd4b5a740ff1..bdfabdaefdce 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -21,12 +21,12 @@ extern wait_queue_head_t log_wait;
21 21
22static int kmsg_open(struct inode * inode, struct file * file) 22static int kmsg_open(struct inode * inode, struct file * file)
23{ 23{
24 return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE); 24 return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_PROC);
25} 25}
26 26
27static int kmsg_release(struct inode * inode, struct file * file) 27static int kmsg_release(struct inode * inode, struct file * file)
28{ 28{
29 (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE); 29 (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_PROC);
30 return 0; 30 return 0;
31} 31}
32 32
@@ -34,15 +34,15 @@ static ssize_t kmsg_read(struct file *file, char __user *buf,
34 size_t count, loff_t *ppos) 34 size_t count, loff_t *ppos)
35{ 35{
36 if ((file->f_flags & O_NONBLOCK) && 36 if ((file->f_flags & O_NONBLOCK) &&
37 !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) 37 !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
38 return -EAGAIN; 38 return -EAGAIN;
39 return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE); 39 return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_PROC);
40} 40}
41 41
42static unsigned int kmsg_poll(struct file *file, poll_table *wait) 42static unsigned int kmsg_poll(struct file *file, poll_table *wait)
43{ 43{
44 poll_wait(file, &log_wait, wait); 44 poll_wait(file, &log_wait, wait);
45 if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) 45 if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
46 return POLLIN | POLLRDNORM; 46 return POLLIN | POLLRDNORM;
47 return 0; 47 return 0;
48} 48}
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
index 9d96605f160a..fa25becbdcaf 100644
--- a/include/asm-generic/kvm_para.h
+++ b/include/asm-generic/kvm_para.h
@@ -18,4 +18,9 @@ static inline unsigned int kvm_arch_para_features(void)
18 return 0; 18 return 0;
19} 19}
20 20
21static inline bool kvm_para_available(void)
22{
23 return false;
24}
25
21#endif 26#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c6f6e0839b61..9f3c7e81270a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -175,6 +175,8 @@ extern struct bus_type cpu_subsys;
175 175
176extern void get_online_cpus(void); 176extern void get_online_cpus(void);
177extern void put_online_cpus(void); 177extern void put_online_cpus(void);
178extern void cpu_hotplug_disable(void);
179extern void cpu_hotplug_enable(void);
178#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) 180#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
179#define register_hotcpu_notifier(nb) register_cpu_notifier(nb) 181#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
180#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) 182#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
@@ -198,6 +200,8 @@ static inline void cpu_hotplug_driver_unlock(void)
198 200
199#define get_online_cpus() do { } while (0) 201#define get_online_cpus() do { } while (0)
200#define put_online_cpus() do { } while (0) 202#define put_online_cpus() do { } while (0)
203#define cpu_hotplug_disable() do { } while (0)
204#define cpu_hotplug_enable() do { } while (0)
201#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) 205#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
202/* These aren't inline functions due to a GCC bug. */ 206/* These aren't inline functions due to a GCC bug. */
203#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) 207#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
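
The header pairs real declarations of cpu_hotplug_disable()/cpu_hotplug_enable() under CONFIG_HOTPLUG_CPU with do-nothing macros for builds without it, so callers such as the reboot path later in this series need no #ifdef of their own. A minimal userspace sketch of that compile-time stub pattern; HAVE_HOTPLUG and the helper names are illustrative, not kernel symbols.

#include <stdio.h>

/* Flip this to mimic building with or without CONFIG_HOTPLUG_CPU. */
#define HAVE_HOTPLUG 1

#if HAVE_HOTPLUG
static int hotplug_disabled;            /* stands in for cpu_hotplug_disabled */

static void hotplug_disable(void)
{
	hotplug_disabled = 1;
	printf("hotplug disabled (flag=%d)\n", hotplug_disabled);
}

static void hotplug_enable(void)
{
	hotplug_disabled = 0;
	printf("hotplug enabled (flag=%d)\n", hotplug_disabled);
}
#else
/* Empty stubs keep every caller identical when the feature is compiled out. */
#define hotplug_disable() do { } while (0)
#define hotplug_enable()  do { } while (0)
#endif

int main(void)
{
	hotplug_disable();              /* callers never need their own #ifdef */
	hotplug_enable();
	return 0;
}
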
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c050dcc322a4..f65f5a69db8f 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -46,6 +46,7 @@ extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
46extern int sk_detach_filter(struct sock *sk); 46extern int sk_detach_filter(struct sock *sk);
47extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen); 47extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
48extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len); 48extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
49extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
49 50
50#ifdef CONFIG_BPF_JIT 51#ifdef CONFIG_BPF_JIT
51#include <stdarg.h> 52#include <stdarg.h>
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 4474557904f6..16fae6436d0e 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -249,12 +249,12 @@ team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
249 return port; 249 return port;
250 cur = port; 250 cur = port;
251 list_for_each_entry_continue_rcu(cur, &team->port_list, list) 251 list_for_each_entry_continue_rcu(cur, &team->port_list, list)
252 if (team_port_txable(port)) 252 if (team_port_txable(cur))
253 return cur; 253 return cur;
254 list_for_each_entry_rcu(cur, &team->port_list, list) { 254 list_for_each_entry_rcu(cur, &team->port_list, list) {
255 if (cur == port) 255 if (cur == port)
256 break; 256 break;
257 if (team_port_txable(port)) 257 if (team_port_txable(cur))
258 return cur; 258 return cur;
259 } 259 }
260 return NULL; 260 return NULL;
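
The fix tests team_port_txable(cur) rather than the starting port, so the walk actually inspects each candidate while continuing from the current port and wrapping around. A small userspace sketch of the same scan over an array; struct port and next_txable() are invented names, not the team driver API.

#include <stdio.h>

struct port { int id; int txable; };

/* Return the next transmit-capable port after 'start', wrapping around,
 * or NULL if none qualifies.  The bug fixed above was testing the start
 * port's flag instead of cur's inside both loops. */
static struct port *next_txable(struct port *ports, int n, int start)
{
	int i;

	for (i = start + 1; i < n; i++) {
		struct port *cur = &ports[i];

		if (cur->txable)           /* must test cur, not ports[start] */
			return cur;
	}
	for (i = 0; i < start; i++) {
		struct port *cur = &ports[i];

		if (cur->txable)
			return cur;
	}
	return NULL;
}

int main(void)
{
	struct port p[] = { {0, 0}, {1, 0}, {2, 1}, {3, 0} };
	struct port *next = next_txable(p, 4, 3);

	printf("next txable port: %d\n", next ? next->id : -1);
	return 0;
}
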
diff --git a/include/linux/math64.h b/include/linux/math64.h
index b8ba85544721..2913b86eb12a 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -6,7 +6,8 @@
6 6
7#if BITS_PER_LONG == 64 7#if BITS_PER_LONG == 64
8 8
9#define div64_long(x,y) div64_s64((x),(y)) 9#define div64_long(x, y) div64_s64((x), (y))
10#define div64_ul(x, y) div64_u64((x), (y))
10 11
11/** 12/**
12 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder 13 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
@@ -47,7 +48,8 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
47 48
48#elif BITS_PER_LONG == 32 49#elif BITS_PER_LONG == 32
49 50
50#define div64_long(x,y) div_s64((x),(y)) 51#define div64_long(x, y) div_s64((x), (y))
52#define div64_ul(x, y) div_u64((x), (y))
51 53
52#ifndef div_u64_rem 54#ifndef div_u64_rem
53static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) 55static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
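
div64_ul() gives callers a divide for unsigned long operands that is a plain 64-bit division on 64-bit builds and routes through div_u64() on 32-bit ones. A rough userspace approximation of that dispatch; my_div64_ul and my_div_u64 are stand-ins, not the kernel macros.

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#if ULONG_MAX == 0xffffffffffffffffUL
/* 64-bit build: unsigned long is 64 bits, divide directly. */
#define my_div64_ul(x, y) ((unsigned long)((x) / (y)))
#else
/* 32-bit build: the kernel would funnel this through div_u64(). */
static inline uint64_t my_div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}
#define my_div64_ul(x, y) ((unsigned long)my_div_u64((x), (y)))
#endif

int main(void)
{
	unsigned long bytes = 123456789UL, chunks = 4096UL;

	printf("%lu\n", my_div64_ul(bytes, chunks));
	return 0;
}
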
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 5951e3f38878..26806775b11b 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -111,6 +111,9 @@ static inline struct page *sg_page(struct scatterlist *sg)
111static inline void sg_set_buf(struct scatterlist *sg, const void *buf, 111static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
112 unsigned int buflen) 112 unsigned int buflen)
113{ 113{
114#ifdef CONFIG_DEBUG_SG
115 BUG_ON(!virt_addr_valid(buf));
116#endif
114 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); 117 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
115} 118}
116 119
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 47ead515c811..c5fd30d2a415 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -137,6 +137,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
137 137
138extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 138extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
139 unsigned long address); 139 unsigned long address);
140extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
140#else 141#else
141 142
142#define make_migration_entry(page, write) swp_entry(0, 0) 143#define make_migration_entry(page, write) swp_entry(0, 0)
@@ -148,6 +149,8 @@ static inline int is_migration_entry(swp_entry_t swp)
148static inline void make_migration_entry_read(swp_entry_t *entryp) { } 149static inline void make_migration_entry_read(swp_entry_t *entryp) { }
149static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 150static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
150 unsigned long address) { } 151 unsigned long address) { }
152static inline void migration_entry_wait_huge(struct mm_struct *mm,
153 pte_t *pte) { }
151static inline int is_write_migration_entry(swp_entry_t entry) 154static inline int is_write_migration_entry(swp_entry_t entry)
152{ 155{
153 return 0; 156 return 0;
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index 38911391a139..98a3153c0f96 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -44,8 +44,8 @@
44/* Return size of the log buffer */ 44/* Return size of the log buffer */
45#define SYSLOG_ACTION_SIZE_BUFFER 10 45#define SYSLOG_ACTION_SIZE_BUFFER 10
46 46
47#define SYSLOG_FROM_CALL 0 47#define SYSLOG_FROM_READER 0
48#define SYSLOG_FROM_FILE 1 48#define SYSLOG_FROM_PROC 1
49 49
50int do_syslog(int type, char __user *buf, int count, bool from_file); 50int do_syslog(int type, char __user *buf, int count, bool from_file);
51 51
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 35a57cd1704c..7cb6d360d147 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1117,6 +1117,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
1117int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len); 1117int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
1118int mgmt_index_added(struct hci_dev *hdev); 1118int mgmt_index_added(struct hci_dev *hdev);
1119int mgmt_index_removed(struct hci_dev *hdev); 1119int mgmt_index_removed(struct hci_dev *hdev);
1120int mgmt_set_powered_failed(struct hci_dev *hdev, int err);
1120int mgmt_powered(struct hci_dev *hdev, u8 powered); 1121int mgmt_powered(struct hci_dev *hdev, u8 powered);
1121int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable); 1122int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
1122int mgmt_connectable(struct hci_dev *hdev, u8 connectable); 1123int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 22980a7c3873..9944c3e68c5d 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -42,6 +42,7 @@
42#define MGMT_STATUS_NOT_POWERED 0x0f 42#define MGMT_STATUS_NOT_POWERED 0x0f
43#define MGMT_STATUS_CANCELLED 0x10 43#define MGMT_STATUS_CANCELLED 0x10
44#define MGMT_STATUS_INVALID_INDEX 0x11 44#define MGMT_STATUS_INVALID_INDEX 0x11
45#define MGMT_STATUS_RFKILLED 0x12
45 46
46struct mgmt_hdr { 47struct mgmt_hdr {
47 __le16 opcode; 48 __le16 opcode;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index d4609029f014..385c6329a967 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -450,7 +450,8 @@ enum snd_soc_dapm_type {
450 snd_soc_dapm_aif_in, /* audio interface input */ 450 snd_soc_dapm_aif_in, /* audio interface input */
451 snd_soc_dapm_aif_out, /* audio interface output */ 451 snd_soc_dapm_aif_out, /* audio interface output */
452 snd_soc_dapm_siggen, /* signal generator */ 452 snd_soc_dapm_siggen, /* signal generator */
453 snd_soc_dapm_dai, /* link to DAI structure */ 453 snd_soc_dapm_dai_in, /* link to DAI structure */
454 snd_soc_dapm_dai_out,
454 snd_soc_dapm_dai_link, /* link between two DAI structures */ 455 snd_soc_dapm_dai_link, /* link between two DAI structures */
455}; 456};
456 457
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a5c86fc34a37..d88c8ee00c8b 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -783,6 +783,7 @@ struct kvm_dirty_tlb {
783#define KVM_REG_IA64 0x3000000000000000ULL 783#define KVM_REG_IA64 0x3000000000000000ULL
784#define KVM_REG_ARM 0x4000000000000000ULL 784#define KVM_REG_ARM 0x4000000000000000ULL
785#define KVM_REG_S390 0x5000000000000000ULL 785#define KVM_REG_S390 0x5000000000000000ULL
786#define KVM_REG_MIPS 0x7000000000000000ULL
786 787
787#define KVM_REG_SIZE_SHIFT 52 788#define KVM_REG_SIZE_SHIFT 52
788#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL 789#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
diff --git a/kernel/audit.c b/kernel/audit.c
index 21c7fa615bd3..91e53d04b6a9 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1056,7 +1056,7 @@ static inline void audit_get_stamp(struct audit_context *ctx,
1056static void wait_for_auditd(unsigned long sleep_time) 1056static void wait_for_auditd(unsigned long sleep_time)
1057{ 1057{
1058 DECLARE_WAITQUEUE(wait, current); 1058 DECLARE_WAITQUEUE(wait, current);
1059 set_current_state(TASK_INTERRUPTIBLE); 1059 set_current_state(TASK_UNINTERRUPTIBLE);
1060 add_wait_queue(&audit_backlog_wait, &wait); 1060 add_wait_queue(&audit_backlog_wait, &wait);
1061 1061
1062 if (audit_backlog_limit && 1062 if (audit_backlog_limit &&
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index a291aa23fb3f..43c307dc9453 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -658,6 +658,7 @@ int audit_add_tree_rule(struct audit_krule *rule)
658 struct vfsmount *mnt; 658 struct vfsmount *mnt;
659 int err; 659 int err;
660 660
661 rule->tree = NULL;
661 list_for_each_entry(tree, &tree_list, list) { 662 list_for_each_entry(tree, &tree_list, list) {
662 if (!strcmp(seed->pathname, tree->pathname)) { 663 if (!strcmp(seed->pathname, tree->pathname)) {
663 put_tree(seed); 664 put_tree(seed);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b5e4ab2d427e..198a38883e64 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -133,6 +133,27 @@ static void cpu_hotplug_done(void)
133 mutex_unlock(&cpu_hotplug.lock); 133 mutex_unlock(&cpu_hotplug.lock);
134} 134}
135 135
136/*
137 * Wait for currently running CPU hotplug operations to complete (if any) and
138 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
139 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
140 * hotplug path before performing hotplug operations. So acquiring that lock
141 * guarantees mutual exclusion from any currently running hotplug operations.
142 */
143void cpu_hotplug_disable(void)
144{
145 cpu_maps_update_begin();
146 cpu_hotplug_disabled = 1;
147 cpu_maps_update_done();
148}
149
150void cpu_hotplug_enable(void)
151{
152 cpu_maps_update_begin();
153 cpu_hotplug_disabled = 0;
154 cpu_maps_update_done();
155}
156
136#else /* #if CONFIG_HOTPLUG_CPU */ 157#else /* #if CONFIG_HOTPLUG_CPU */
137static void cpu_hotplug_begin(void) {} 158static void cpu_hotplug_begin(void) {}
138static void cpu_hotplug_done(void) {} 159static void cpu_hotplug_done(void) {}
@@ -541,36 +562,6 @@ static int __init alloc_frozen_cpus(void)
541core_initcall(alloc_frozen_cpus); 562core_initcall(alloc_frozen_cpus);
542 563
543/* 564/*
544 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
545 * hotplug when tasks are about to be frozen. Also, don't allow the freezer
546 * to continue until any currently running CPU hotplug operation gets
547 * completed.
548 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
549 * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
550 * CPU hotplug path and released only after it is complete. Thus, we
551 * (and hence the freezer) will block here until any currently running CPU
552 * hotplug operation gets completed.
553 */
554void cpu_hotplug_disable_before_freeze(void)
555{
556 cpu_maps_update_begin();
557 cpu_hotplug_disabled = 1;
558 cpu_maps_update_done();
559}
560
561
562/*
563 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
564 * disabled while beginning to freeze tasks).
565 */
566void cpu_hotplug_enable_after_thaw(void)
567{
568 cpu_maps_update_begin();
569 cpu_hotplug_disabled = 0;
570 cpu_maps_update_done();
571}
572
573/*
574 * When callbacks for CPU hotplug notifications are being executed, we must 565 * When callbacks for CPU hotplug notifications are being executed, we must
575 * ensure that the state of the system with respect to the tasks being frozen 566 * ensure that the state of the system with respect to the tasks being frozen
576 * or not, as reported by the notification, remains unchanged *throughout the 567 * or not, as reported by the notification, remains unchanged *throughout the
@@ -589,12 +580,12 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
589 580
590 case PM_SUSPEND_PREPARE: 581 case PM_SUSPEND_PREPARE:
591 case PM_HIBERNATION_PREPARE: 582 case PM_HIBERNATION_PREPARE:
592 cpu_hotplug_disable_before_freeze(); 583 cpu_hotplug_disable();
593 break; 584 break;
594 585
595 case PM_POST_SUSPEND: 586 case PM_POST_SUSPEND:
596 case PM_POST_HIBERNATION: 587 case PM_POST_HIBERNATION:
597 cpu_hotplug_enable_after_thaw(); 588 cpu_hotplug_enable();
598 break; 589 break;
599 590
600 default: 591 default:
diff --git a/kernel/printk.c b/kernel/printk.c
index fa36e1494420..8212c1aef125 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -363,6 +363,53 @@ static void log_store(int facility, int level,
363 log_next_seq++; 363 log_next_seq++;
364} 364}
365 365
366#ifdef CONFIG_SECURITY_DMESG_RESTRICT
367int dmesg_restrict = 1;
368#else
369int dmesg_restrict;
370#endif
371
372static int syslog_action_restricted(int type)
373{
374 if (dmesg_restrict)
375 return 1;
376 /*
377 * Unless restricted, we allow "read all" and "get buffer size"
378 * for everybody.
379 */
380 return type != SYSLOG_ACTION_READ_ALL &&
381 type != SYSLOG_ACTION_SIZE_BUFFER;
382}
383
384static int check_syslog_permissions(int type, bool from_file)
385{
386 /*
387 * If this is from /proc/kmsg and we've already opened it, then we've
388 * already done the capabilities checks at open time.
389 */
390 if (from_file && type != SYSLOG_ACTION_OPEN)
391 return 0;
392
393 if (syslog_action_restricted(type)) {
394 if (capable(CAP_SYSLOG))
395 return 0;
396 /*
397 * For historical reasons, accept CAP_SYS_ADMIN too, with
398 * a warning.
399 */
400 if (capable(CAP_SYS_ADMIN)) {
401 pr_warn_once("%s (%d): Attempt to access syslog with "
402 "CAP_SYS_ADMIN but no CAP_SYSLOG "
403 "(deprecated).\n",
404 current->comm, task_pid_nr(current));
405 return 0;
406 }
407 return -EPERM;
408 }
409 return security_syslog(type);
410}
411
412
366/* /dev/kmsg - userspace message inject/listen interface */ 413/* /dev/kmsg - userspace message inject/listen interface */
367struct devkmsg_user { 414struct devkmsg_user {
368 u64 seq; 415 u64 seq;
@@ -620,7 +667,8 @@ static int devkmsg_open(struct inode *inode, struct file *file)
620 if ((file->f_flags & O_ACCMODE) == O_WRONLY) 667 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
621 return 0; 668 return 0;
622 669
623 err = security_syslog(SYSLOG_ACTION_READ_ALL); 670 err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
671 SYSLOG_FROM_READER);
624 if (err) 672 if (err)
625 return err; 673 return err;
626 674
@@ -813,45 +861,6 @@ static inline void boot_delay_msec(int level)
813} 861}
814#endif 862#endif
815 863
816#ifdef CONFIG_SECURITY_DMESG_RESTRICT
817int dmesg_restrict = 1;
818#else
819int dmesg_restrict;
820#endif
821
822static int syslog_action_restricted(int type)
823{
824 if (dmesg_restrict)
825 return 1;
826 /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
827 return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
828}
829
830static int check_syslog_permissions(int type, bool from_file)
831{
832 /*
833 * If this is from /proc/kmsg and we've already opened it, then we've
834 * already done the capabilities checks at open time.
835 */
836 if (from_file && type != SYSLOG_ACTION_OPEN)
837 return 0;
838
839 if (syslog_action_restricted(type)) {
840 if (capable(CAP_SYSLOG))
841 return 0;
842 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
843 if (capable(CAP_SYS_ADMIN)) {
844 printk_once(KERN_WARNING "%s (%d): "
845 "Attempt to access syslog with CAP_SYS_ADMIN "
846 "but no CAP_SYSLOG (deprecated).\n",
847 current->comm, task_pid_nr(current));
848 return 0;
849 }
850 return -EPERM;
851 }
852 return 0;
853}
854
855#if defined(CONFIG_PRINTK_TIME) 864#if defined(CONFIG_PRINTK_TIME)
856static bool printk_time = 1; 865static bool printk_time = 1;
857#else 866#else
@@ -1249,7 +1258,7 @@ out:
1249 1258
1250SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) 1259SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1251{ 1260{
1252 return do_syslog(type, buf, len, SYSLOG_FROM_CALL); 1261 return do_syslog(type, buf, len, SYSLOG_FROM_READER);
1253} 1262}
1254 1263
1255/* 1264/*
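
check_syslog_permissions() and syslog_action_restricted() move above the /dev/kmsg code so devkmsg_open() can reuse them; the policy is unchanged: /proc/kmsg readers were checked at open time, CAP_SYSLOG always passes, CAP_SYS_ADMIN passes with a deprecation warning, everything else is refused (or handed to the LSM hook when unrestricted). A stand-alone sketch of that decision order with stubbed capability flags; every name here is illustrative, not the printk code.

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

enum { ACT_OPEN, ACT_READ_ALL, ACT_SIZE_BUFFER, ACT_READ_CLEAR };

static bool restricted = true;                    /* dmesg_restrict analogue */
static bool has_cap_syslog, has_cap_sys_admin;    /* stubbed capable() results */

static bool action_restricted(int type)
{
	if (restricted)
		return true;
	/* Unless restricted, "read all" and "size" are open to everybody. */
	return type != ACT_READ_ALL && type != ACT_SIZE_BUFFER;
}

static int check_permissions(int type, bool from_proc)
{
	if (from_proc && type != ACT_OPEN)
		return 0;       /* /proc/kmsg was already checked at open time */
	if (!action_restricted(type))
		return 0;       /* the kernel defers to security_syslog() here */
	if (has_cap_syslog)
		return 0;
	if (has_cap_sys_admin) {
		fprintf(stderr, "CAP_SYS_ADMIN without CAP_SYSLOG (deprecated)\n");
		return 0;
	}
	return -EPERM;
}

int main(void)
{
	printf("read+clear without caps: %d\n",
	       check_permissions(ACT_READ_CLEAR, false));
	return 0;
}
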
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b5197dcb0dad..3d6833f125d3 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -195,8 +195,12 @@ void local_bh_enable_ip(unsigned long ip)
195EXPORT_SYMBOL(local_bh_enable_ip); 195EXPORT_SYMBOL(local_bh_enable_ip);
196 196
197/* 197/*
198 * We restart softirq processing for at most 2 ms, 198 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
199 * and if need_resched() is not set. 199 * but break the loop if need_resched() is set or after 2 ms.
200 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
201 * certain cases, such as stop_machine(), jiffies may cease to
202 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
203 * well to make sure we eventually return from this method.
200 * 204 *
201 * These limits have been established via experimentation. 205 * These limits have been established via experimentation.
202 * The two things to balance are latency against fairness - 206
@@ -204,6 +208,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
204 * should not be able to lock up the box. 208 * should not be able to lock up the box.
205 */ 209 */
206#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2) 210#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
211#define MAX_SOFTIRQ_RESTART 10
207 212
208asmlinkage void __do_softirq(void) 213asmlinkage void __do_softirq(void)
209{ 214{
@@ -212,6 +217,7 @@ asmlinkage void __do_softirq(void)
212 unsigned long end = jiffies + MAX_SOFTIRQ_TIME; 217 unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
213 int cpu; 218 int cpu;
214 unsigned long old_flags = current->flags; 219 unsigned long old_flags = current->flags;
220 int max_restart = MAX_SOFTIRQ_RESTART;
215 221
216 /* 222 /*
217 * Mask out PF_MEMALLOC as current task context is borrowed for the 223
@@ -265,7 +271,8 @@ restart:
265 271
266 pending = local_softirq_pending(); 272 pending = local_softirq_pending();
267 if (pending) { 273 if (pending) {
268 if (time_before(jiffies, end) && !need_resched()) 274 if (time_before(jiffies, end) && !need_resched() &&
275 --max_restart)
269 goto restart; 276 goto restart;
270 277
271 wakeup_softirqd(); 278 wakeup_softirqd();
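
The restart loop is now bounded by both the 2 ms budget and a fixed restart count, because jiffies can stop advancing (for example under stop_machine()) and the time check alone would then never fire. A minimal userspace loop with the same two exit conditions; the clock source and the limits are illustrative only.

#include <stdio.h>
#include <time.h>

#define MAX_TIME_NS   2000000LL   /* ~2 ms, like MAX_SOFTIRQ_TIME */
#define MAX_RESTART   10          /* like MAX_SOFTIRQ_RESTART */

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	long long end = now_ns() + MAX_TIME_NS;
	int max_restart = MAX_RESTART;
	int pending = 1, rounds = 0;

	while (pending) {
		rounds++;                       /* stand-in for one softirq pass */
		pending = rounds < 1000;        /* pretend work keeps arriving */

		/* Restart only while BOTH limits hold; either one alone can
		 * stall if its clock or counter stops moving. */
		if (!(now_ns() < end && --max_restart))
			break;
	}
	printf("stopped after %d rounds\n", rounds);
	return 0;
}
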
diff --git a/kernel/sys.c b/kernel/sys.c
index b95d3c72ba21..2bbd9a73b54c 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -362,6 +362,29 @@ int unregister_reboot_notifier(struct notifier_block *nb)
362} 362}
363EXPORT_SYMBOL(unregister_reboot_notifier); 363EXPORT_SYMBOL(unregister_reboot_notifier);
364 364
365/* Add backwards compatibility for stable trees. */
366#ifndef PF_NO_SETAFFINITY
367#define PF_NO_SETAFFINITY PF_THREAD_BOUND
368#endif
369
370static void migrate_to_reboot_cpu(void)
371{
372 /* The boot cpu is always logical cpu 0 */
373 int cpu = 0;
374
375 cpu_hotplug_disable();
376
377 /* Make certain the cpu I'm about to reboot on is online */
378 if (!cpu_online(cpu))
379 cpu = cpumask_first(cpu_online_mask);
380
381 /* Prevent races with other tasks migrating this task */
382 current->flags |= PF_NO_SETAFFINITY;
383
384 /* Make certain I only run on the appropriate processor */
385 set_cpus_allowed_ptr(current, cpumask_of(cpu));
386}
387
365/** 388/**
366 * kernel_restart - reboot the system 389 * kernel_restart - reboot the system
367 * @cmd: pointer to buffer containing command to execute for restart 390 * @cmd: pointer to buffer containing command to execute for restart
@@ -373,7 +396,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
373void kernel_restart(char *cmd) 396void kernel_restart(char *cmd)
374{ 397{
375 kernel_restart_prepare(cmd); 398 kernel_restart_prepare(cmd);
376 disable_nonboot_cpus(); 399 migrate_to_reboot_cpu();
377 syscore_shutdown(); 400 syscore_shutdown();
378 if (!cmd) 401 if (!cmd)
379 printk(KERN_EMERG "Restarting system.\n"); 402 printk(KERN_EMERG "Restarting system.\n");
@@ -400,7 +423,7 @@ static void kernel_shutdown_prepare(enum system_states state)
400void kernel_halt(void) 423void kernel_halt(void)
401{ 424{
402 kernel_shutdown_prepare(SYSTEM_HALT); 425 kernel_shutdown_prepare(SYSTEM_HALT);
403 disable_nonboot_cpus(); 426 migrate_to_reboot_cpu();
404 syscore_shutdown(); 427 syscore_shutdown();
405 printk(KERN_EMERG "System halted.\n"); 428 printk(KERN_EMERG "System halted.\n");
406 kmsg_dump(KMSG_DUMP_HALT); 429 kmsg_dump(KMSG_DUMP_HALT);
@@ -419,7 +442,7 @@ void kernel_power_off(void)
419 kernel_shutdown_prepare(SYSTEM_POWER_OFF); 442 kernel_shutdown_prepare(SYSTEM_POWER_OFF);
420 if (pm_power_off_prepare) 443 if (pm_power_off_prepare)
421 pm_power_off_prepare(); 444 pm_power_off_prepare();
422 disable_nonboot_cpus(); 445 migrate_to_reboot_cpu();
423 syscore_shutdown(); 446 syscore_shutdown();
424 printk(KERN_EMERG "Power down.\n"); 447 printk(KERN_EMERG "Power down.\n");
425 kmsg_dump(KMSG_DUMP_POWEROFF); 448 kmsg_dump(KMSG_DUMP_POWEROFF);
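
Rather than offlining every non-boot CPU, the reboot path now disables hotplug, picks an online CPU (preferring CPU 0), and pins the current task to it. A rough userspace analogue of the pinning step using sched_setaffinity(); the hotplug-disable and PF_NO_SETAFFINITY parts have no userspace equivalent and are omitted here.

#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>
#include <unistd.h>

int main(void)
{
	int cpu = 0;                     /* prefer the boot CPU */
	cpu_set_t set;

	/* If CPU 0 were offline we would pick another online CPU here;
	 * sysconf() only reports how many are online, so this is a sketch. */
	if (sysconf(_SC_NPROCESSORS_ONLN) < 1)
		return 1;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_setaffinity");
		return 1;
	}
	printf("now pinned to CPU %d\n", cpu);
	return 0;
}
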
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1a41023a1f88..e71a8be4a6ee 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -652,8 +652,6 @@ static struct {
652 ARCH_TRACE_CLOCKS 652 ARCH_TRACE_CLOCKS
653}; 653};
654 654
655int trace_clock_id;
656
657/* 655/*
658 * trace_parser_get_init - gets the buffer for trace parser 656 * trace_parser_get_init - gets the buffer for trace parser
659 */ 657 */
@@ -2826,7 +2824,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2826 iter->iter_flags |= TRACE_FILE_ANNOTATE; 2824 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2827 2825
2828 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 2826 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2829 if (trace_clocks[trace_clock_id].in_ns) 2827 if (trace_clocks[tr->clock_id].in_ns)
2830 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 2828 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2831 2829
2832 /* stop the trace while dumping if we are not opening "snapshot" */ 2830 /* stop the trace while dumping if we are not opening "snapshot" */
@@ -3825,7 +3823,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
3825 iter->iter_flags |= TRACE_FILE_LAT_FMT; 3823 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3826 3824
3827 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 3825 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3828 if (trace_clocks[trace_clock_id].in_ns) 3826 if (trace_clocks[tr->clock_id].in_ns)
3829 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 3827 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3830 3828
3831 iter->cpu_file = tc->cpu; 3829 iter->cpu_file = tc->cpu;
@@ -5095,7 +5093,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
5095 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); 5093 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5096 trace_seq_printf(s, "bytes: %ld\n", cnt); 5094 trace_seq_printf(s, "bytes: %ld\n", cnt);
5097 5095
5098 if (trace_clocks[trace_clock_id].in_ns) { 5096 if (trace_clocks[tr->clock_id].in_ns) {
5099 /* local or global for trace_clock */ 5097 /* local or global for trace_clock */
5100 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 5098 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5101 usec_rem = do_div(t, USEC_PER_SEC); 5099 usec_rem = do_div(t, USEC_PER_SEC);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 711ca7d3e7f1..20572ed88c5c 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -700,8 +700,6 @@ enum print_line_t print_trace_line(struct trace_iterator *iter);
700 700
701extern unsigned long trace_flags; 701extern unsigned long trace_flags;
702 702
703extern int trace_clock_id;
704
705/* Standard output formatting function used for function return traces */ 703/* Standard output formatting function used for function return traces */
706#ifdef CONFIG_FUNCTION_GRAPH_TRACER 704#ifdef CONFIG_FUNCTION_GRAPH_TRACER
707 705
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 5f9c44cdf1f5..4cc6442733f4 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -37,7 +37,7 @@ MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
37 mpi_limb_t a; 37 mpi_limb_t a;
38 MPI val = NULL; 38 MPI val = NULL;
39 39
40 while (nbytes >= 0 && buffer[0] == 0) { 40 while (nbytes > 0 && buffer[0] == 0) {
41 buffer++; 41 buffer++;
42 nbytes--; 42 nbytes--;
43 } 43 }
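
Changing the condition from nbytes >= 0 to nbytes > 0 stops the loop from reading buffer[0] one byte past the end of an all-zero input. A tiny sketch of the corrected skip-leading-zeros loop in plain C; it is not the MPI code itself.

#include <stdio.h>
#include <stddef.h>

/* Skip leading zero bytes; return how many significant bytes remain.
 * The condition must be nbytes > 0: testing buffer[0] once nbytes hits
 * zero would read past the end of an all-zero buffer. */
static size_t skip_leading_zeros(const unsigned char **bufp, size_t nbytes)
{
	const unsigned char *buffer = *bufp;

	while (nbytes > 0 && buffer[0] == 0) {
		buffer++;
		nbytes--;
	}
	*bufp = buffer;
	return nbytes;
}

int main(void)
{
	unsigned char raw[] = { 0x00, 0x00, 0x12, 0x34 };
	const unsigned char *p = raw;
	size_t left = skip_leading_zeros(&p, sizeof(raw));

	printf("%zu significant bytes, first = 0x%02x\n", left, p[0]);
	return 0;
}
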
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 538367ef1372..1b24bdcb3197 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -319,7 +319,7 @@ void __frontswap_invalidate_area(unsigned type)
319 return; 319 return;
320 frontswap_ops->invalidate_area(type); 320 frontswap_ops->invalidate_area(type);
321 atomic_set(&sis->frontswap_pages, 0); 321 atomic_set(&sis->frontswap_pages, 0);
322 memset(sis->frontswap_map, 0, sis->max / sizeof(long)); 322 bitmap_zero(sis->frontswap_map, sis->max);
323 } 323 }
324 clear_bit(type, need_init); 324 clear_bit(type, need_init);
325} 325}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f8feeeca6686..e2bfbf73a551 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2839,7 +2839,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2839 if (ptep) { 2839 if (ptep) {
2840 entry = huge_ptep_get(ptep); 2840 entry = huge_ptep_get(ptep);
2841 if (unlikely(is_hugetlb_entry_migration(entry))) { 2841 if (unlikely(is_hugetlb_entry_migration(entry))) {
2842 migration_entry_wait(mm, (pmd_t *)ptep, address); 2842 migration_entry_wait_huge(mm, ptep);
2843 return 0; 2843 return 0;
2844 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 2844 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2845 return VM_FAULT_HWPOISON_LARGE | 2845 return VM_FAULT_HWPOISON_LARGE |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 010d6c14129a..194721839cf5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1199,7 +1199,6 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1199 1199
1200 mz = mem_cgroup_zoneinfo(root, nid, zid); 1200 mz = mem_cgroup_zoneinfo(root, nid, zid);
1201 iter = &mz->reclaim_iter[reclaim->priority]; 1201 iter = &mz->reclaim_iter[reclaim->priority];
1202 last_visited = iter->last_visited;
1203 if (prev && reclaim->generation != iter->generation) { 1202 if (prev && reclaim->generation != iter->generation) {
1204 iter->last_visited = NULL; 1203 iter->last_visited = NULL;
1205 goto out_unlock; 1204 goto out_unlock;
@@ -1218,13 +1217,12 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1218 * is alive. 1217 * is alive.
1219 */ 1218 */
1220 dead_count = atomic_read(&root->dead_count); 1219 dead_count = atomic_read(&root->dead_count);
1221 smp_rmb(); 1220 if (dead_count == iter->last_dead_count) {
1222 last_visited = iter->last_visited; 1221 smp_rmb();
1223 if (last_visited) { 1222 last_visited = iter->last_visited;
1224 if ((dead_count != iter->last_dead_count) || 1223 if (last_visited &&
1225 !css_tryget(&last_visited->css)) { 1224 !css_tryget(&last_visited->css))
1226 last_visited = NULL; 1225 last_visited = NULL;
1227 }
1228 } 1226 }
1229 } 1227 }
1230 1228
@@ -3141,8 +3139,6 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
3141 return -ENOMEM; 3139 return -ENOMEM;
3142 } 3140 }
3143 3141
3144 INIT_WORK(&s->memcg_params->destroy,
3145 kmem_cache_destroy_work_func);
3146 s->memcg_params->is_root_cache = true; 3142 s->memcg_params->is_root_cache = true;
3147 3143
3148 /* 3144 /*
diff --git a/mm/migrate.c b/mm/migrate.c
index b1f57501de9c..6f0c24438bba 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -200,15 +200,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
200 * get to the page and wait until migration is finished. 200 * get to the page and wait until migration is finished.
201 * When we return from this function the fault will be retried. 201 * When we return from this function the fault will be retried.
202 */ 202 */
203void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 203static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
204 unsigned long address) 204 spinlock_t *ptl)
205{ 205{
206 pte_t *ptep, pte; 206 pte_t pte;
207 spinlock_t *ptl;
208 swp_entry_t entry; 207 swp_entry_t entry;
209 struct page *page; 208 struct page *page;
210 209
211 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); 210 spin_lock(ptl);
212 pte = *ptep; 211 pte = *ptep;
213 if (!is_swap_pte(pte)) 212 if (!is_swap_pte(pte))
214 goto out; 213 goto out;
@@ -236,6 +235,20 @@ out:
236 pte_unmap_unlock(ptep, ptl); 235 pte_unmap_unlock(ptep, ptl);
237} 236}
238 237
238void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
239 unsigned long address)
240{
241 spinlock_t *ptl = pte_lockptr(mm, pmd);
242 pte_t *ptep = pte_offset_map(pmd, address);
243 __migration_entry_wait(mm, ptep, ptl);
244}
245
246void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
247{
248 spinlock_t *ptl = &(mm)->page_table_lock;
249 __migration_entry_wait(mm, pte, ptl);
250}
251
239#ifdef CONFIG_BLOCK 252#ifdef CONFIG_BLOCK
240/* Returns true if all buffers are successfully locked */ 253/* Returns true if all buffers are successfully locked */
241static bool buffer_migrate_lock_buffers(struct buffer_head *head, 254static bool buffer_migrate_lock_buffers(struct buffer_head *head,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 378a15bcd649..c3edb624fccf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1628,6 +1628,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1628 long min = mark; 1628 long min = mark;
1629 long lowmem_reserve = z->lowmem_reserve[classzone_idx]; 1629 long lowmem_reserve = z->lowmem_reserve[classzone_idx];
1630 int o; 1630 int o;
1631 long free_cma = 0;
1631 1632
1632 free_pages -= (1 << order) - 1; 1633 free_pages -= (1 << order) - 1;
1633 if (alloc_flags & ALLOC_HIGH) 1634 if (alloc_flags & ALLOC_HIGH)
@@ -1637,9 +1638,10 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1637#ifdef CONFIG_CMA 1638#ifdef CONFIG_CMA
1638 /* If allocation can't use CMA areas don't use free CMA pages */ 1639 /* If allocation can't use CMA areas don't use free CMA pages */
1639 if (!(alloc_flags & ALLOC_CMA)) 1640 if (!(alloc_flags & ALLOC_CMA))
1640 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES); 1641 free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
1641#endif 1642#endif
1642 if (free_pages <= min + lowmem_reserve) 1643
1644 if (free_pages - free_cma <= min + lowmem_reserve)
1643 return false; 1645 return false;
1644 for (o = 0; o < order; o++) { 1646 for (o = 0; o < order; o++) {
1645 /* At the next order, this order's pages become unavailable */ 1647 /* At the next order, this order's pages become unavailable */
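
The watermark check now leaves free_pages untouched and subtracts the unusable CMA pages only in the comparison, so the per-order loop that follows still works on the real free count. A compact sketch of just that comparison; the names and numbers are invented.

#include <stdio.h>
#include <stdbool.h>

/* Watermark check: CMA pages the allocation cannot use are excluded from
 * the comparison only, not from free_pages itself. */
static bool watermark_ok(long free_pages, long free_cma,
			 long min, long lowmem_reserve)
{
	return free_pages - free_cma > min + lowmem_reserve;
}

int main(void)
{
	long free_pages = 1000, free_cma = 300;
	long min = 500, lowmem_reserve = 250;

	/* 1000 - 300 = 700 <= 750, so this allocation should be refused. */
	printf("ok=%d\n", watermark_ok(free_pages, free_cma, min, lowmem_reserve));
	return 0;
}
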
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b3d40dcf3624..f24ab0dff554 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -336,8 +336,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
336 * Swap entry may have been freed since our caller observed it. 336 * Swap entry may have been freed since our caller observed it.
337 */ 337 */
338 err = swapcache_prepare(entry); 338 err = swapcache_prepare(entry);
339 if (err == -EEXIST) { /* seems racy */ 339 if (err == -EEXIST) {
340 radix_tree_preload_end(); 340 radix_tree_preload_end();
341 /*
342 * We might race against get_swap_page() and stumble
343 * across a SWAP_HAS_CACHE swap_map entry whose page
344 * has not been brought into the swapcache yet, while
345 * the other end is scheduled away waiting on discard
346 * I/O completion at scan_swap_map().
347 *
348 * In order to avoid turning this transitory state
349 * into a permanent loop around this -EEXIST case
350 * if !CONFIG_PREEMPT and the I/O completion happens
351 * to be waiting on the CPU waitqueue where we are now
352 * busy looping, we just conditionally invoke the
353 * scheduler here, if there are some more important
354 * tasks to run.
355 */
356 cond_resched();
341 continue; 357 continue;
342 } 358 }
343 if (err) { /* swp entry is obsolete ? */ 359 if (err) { /* swp entry is obsolete ? */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6c340d908b27..746af55b8455 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2116,7 +2116,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2116 } 2116 }
2117 /* frontswap enabled? set up bit-per-page map for frontswap */ 2117 /* frontswap enabled? set up bit-per-page map for frontswap */
2118 if (frontswap_enabled) 2118 if (frontswap_enabled)
2119 frontswap_map = vzalloc(maxpages / sizeof(long)); 2119 frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
2120 2120
2121 if (p->bdev) { 2121 if (p->bdev) {
2122 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { 2122 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
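
The frontswap map holds one bit per page, so its byte size is BITS_TO_LONGS(maxpages) * sizeof(long), i.e. the bit count rounded up to whole longs; the old maxpages / sizeof(long) expression is not the size of such a bitmap. A userspace sketch of the sizing arithmetic; MY_BITS_TO_LONGS is a local redefinition for illustration, not the kernel macro.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>

/* One bit per page: round the bit count up to whole longs, then convert
 * to bytes.  Mirrors BITS_TO_LONGS(nbits) * sizeof(long). */
#define MY_BITS_TO_LONGS(nbits) \
	(((nbits) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

int main(void)
{
	unsigned long maxpages = 100003;        /* deliberately not a round number */
	size_t nlongs = MY_BITS_TO_LONGS(maxpages);
	unsigned long *map = calloc(nlongs, sizeof(long));   /* vzalloc analogue */

	if (!map)
		return 1;

	printf("pages=%lu  old expression=%zu bytes  bitmap needs=%zu bytes\n",
	       maxpages,
	       (size_t)(maxpages / sizeof(long)),
	       nlongs * sizeof(long));

	/* Clearing must also be expressed in bits: the frontswap side of the
	 * fix switches to bitmap_zero(map, maxpages) instead of a raw memset. */
	memset(map, 0, nlongs * sizeof(long));
	free(map);
	return 0;
}
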
diff --git a/net/9p/client.c b/net/9p/client.c
index 8eb75425e6e6..addc116cecf0 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -562,36 +562,19 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
562 562
563 if (!p9_is_proto_dotl(c)) { 563 if (!p9_is_proto_dotl(c)) {
564 /* Error is reported in string format */ 564 /* Error is reported in string format */
565 uint16_t len; 565 int len;
566 /* 7 = header size for RERROR, 2 is the size of string len; */ 566 /* 7 = header size for RERROR; */
567 int inline_len = in_hdrlen - (7 + 2); 567 int inline_len = in_hdrlen - 7;
568 568
569 /* Read the size of error string */ 569 len = req->rc->size - req->rc->offset;
570 err = p9pdu_readf(req->rc, c->proto_version, "w", &len); 570 if (len > (P9_ZC_HDR_SZ - 7)) {
571 if (err) 571 err = -EFAULT;
572 goto out_err;
573
574 ename = kmalloc(len + 1, GFP_NOFS);
575 if (!ename) {
576 err = -ENOMEM;
577 goto out_err; 572 goto out_err;
578 } 573 }
579 if (len <= inline_len) {
580 /* We have error in protocol buffer itself */
581 if (pdu_read(req->rc, ename, len)) {
582 err = -EFAULT;
583 goto out_free;
584 574
585 } 575 ename = &req->rc->sdata[req->rc->offset];
586 } else { 576 if (len > inline_len) {
587 /* 577 /* We have error in external buffer */
588 * Part of the data is in user space buffer.
589 */
590 if (pdu_read(req->rc, ename, inline_len)) {
591 err = -EFAULT;
592 goto out_free;
593
594 }
595 if (kern_buf) { 578 if (kern_buf) {
596 memcpy(ename + inline_len, uidata, 579 memcpy(ename + inline_len, uidata,
597 len - inline_len); 580 len - inline_len);
@@ -600,19 +583,19 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
600 uidata, len - inline_len); 583 uidata, len - inline_len);
601 if (err) { 584 if (err) {
602 err = -EFAULT; 585 err = -EFAULT;
603 goto out_free; 586 goto out_err;
604 } 587 }
605 } 588 }
606 } 589 }
607 ename[len] = 0; 590 ename = NULL;
608 if (p9_is_proto_dotu(c)) { 591 err = p9pdu_readf(req->rc, c->proto_version, "s?d",
609 /* For dotu we also have error code */ 592 &ename, &ecode);
610 err = p9pdu_readf(req->rc, 593 if (err)
611 c->proto_version, "d", &ecode); 594 goto out_err;
612 if (err) 595
613 goto out_free; 596 if (p9_is_proto_dotu(c))
614 err = -ecode; 597 err = -ecode;
615 } 598
616 if (!err || !IS_ERR_VALUE(err)) { 599 if (!err || !IS_ERR_VALUE(err)) {
617 err = p9_errstr2errno(ename, strlen(ename)); 600 err = p9_errstr2errno(ename, strlen(ename));
618 601
@@ -628,8 +611,6 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
628 } 611 }
629 return err; 612 return err;
630 613
631out_free:
632 kfree(ename);
633out_err: 614out_err:
634 p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err); 615 p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
635 return err; 616 return err;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 33843c5c4939..d817c932d634 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1555,11 +1555,15 @@ static const struct rfkill_ops hci_rfkill_ops = {
1555static void hci_power_on(struct work_struct *work) 1555static void hci_power_on(struct work_struct *work)
1556{ 1556{
1557 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); 1557 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1558 int err;
1558 1559
1559 BT_DBG("%s", hdev->name); 1560 BT_DBG("%s", hdev->name);
1560 1561
1561 if (hci_dev_open(hdev->id) < 0) 1562 err = hci_dev_open(hdev->id);
1563 if (err < 0) {
1564 mgmt_set_powered_failed(hdev, err);
1562 return; 1565 return;
1566 }
1563 1567
1564 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1568 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1565 queue_delayed_work(hdev->req_workqueue, &hdev->power_off, 1569 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a76d1ac0321b..24bee07ee4ce 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3677,10 +3677,14 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3677} 3677}
3678 3678
3679static inline int l2cap_command_rej(struct l2cap_conn *conn, 3679static inline int l2cap_command_rej(struct l2cap_conn *conn,
3680 struct l2cap_cmd_hdr *cmd, u8 *data) 3680 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3681 u8 *data)
3681{ 3682{
3682 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; 3683 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3683 3684
3685 if (cmd_len < sizeof(*rej))
3686 return -EPROTO;
3687
3684 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD) 3688 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3685 return 0; 3689 return 0;
3686 3690
@@ -3829,11 +3833,14 @@ sendresp:
3829} 3833}
3830 3834
3831static int l2cap_connect_req(struct l2cap_conn *conn, 3835static int l2cap_connect_req(struct l2cap_conn *conn,
3832 struct l2cap_cmd_hdr *cmd, u8 *data) 3836 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3833{ 3837{
3834 struct hci_dev *hdev = conn->hcon->hdev; 3838 struct hci_dev *hdev = conn->hcon->hdev;
3835 struct hci_conn *hcon = conn->hcon; 3839 struct hci_conn *hcon = conn->hcon;
3836 3840
3841 if (cmd_len < sizeof(struct l2cap_conn_req))
3842 return -EPROTO;
3843
3837 hci_dev_lock(hdev); 3844 hci_dev_lock(hdev);
3838 if (test_bit(HCI_MGMT, &hdev->dev_flags) && 3845 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3839 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags)) 3846 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
@@ -3847,7 +3854,8 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
3847} 3854}
3848 3855
3849static int l2cap_connect_create_rsp(struct l2cap_conn *conn, 3856static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3850 struct l2cap_cmd_hdr *cmd, u8 *data) 3857 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3858 u8 *data)
3851{ 3859{
3852 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; 3860 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3853 u16 scid, dcid, result, status; 3861 u16 scid, dcid, result, status;
@@ -3855,6 +3863,9 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3855 u8 req[128]; 3863 u8 req[128];
3856 int err; 3864 int err;
3857 3865
3866 if (cmd_len < sizeof(*rsp))
3867 return -EPROTO;
3868
3858 scid = __le16_to_cpu(rsp->scid); 3869 scid = __le16_to_cpu(rsp->scid);
3859 dcid = __le16_to_cpu(rsp->dcid); 3870 dcid = __le16_to_cpu(rsp->dcid);
3860 result = __le16_to_cpu(rsp->result); 3871 result = __le16_to_cpu(rsp->result);
@@ -3952,6 +3963,9 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
3952 struct l2cap_chan *chan; 3963 struct l2cap_chan *chan;
3953 int len, err = 0; 3964 int len, err = 0;
3954 3965
3966 if (cmd_len < sizeof(*req))
3967 return -EPROTO;
3968
3955 dcid = __le16_to_cpu(req->dcid); 3969 dcid = __le16_to_cpu(req->dcid);
3956 flags = __le16_to_cpu(req->flags); 3970 flags = __le16_to_cpu(req->flags);
3957 3971
@@ -3975,7 +3989,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
3975 3989
3976 /* Reject if config buffer is too small. */ 3990 /* Reject if config buffer is too small. */
3977 len = cmd_len - sizeof(*req); 3991 len = cmd_len - sizeof(*req);
3978 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) { 3992 if (chan->conf_len + len > sizeof(chan->conf_req)) {
3979 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3993 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3980 l2cap_build_conf_rsp(chan, rsp, 3994 l2cap_build_conf_rsp(chan, rsp,
3981 L2CAP_CONF_REJECT, flags), rsp); 3995 L2CAP_CONF_REJECT, flags), rsp);
@@ -4053,14 +4067,18 @@ unlock:
4053} 4067}
4054 4068
4055static inline int l2cap_config_rsp(struct l2cap_conn *conn, 4069static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4056 struct l2cap_cmd_hdr *cmd, u8 *data) 4070 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4071 u8 *data)
4057{ 4072{
4058 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; 4073 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4059 u16 scid, flags, result; 4074 u16 scid, flags, result;
4060 struct l2cap_chan *chan; 4075 struct l2cap_chan *chan;
4061 int len = le16_to_cpu(cmd->len) - sizeof(*rsp); 4076 int len = cmd_len - sizeof(*rsp);
4062 int err = 0; 4077 int err = 0;
4063 4078
4079 if (cmd_len < sizeof(*rsp))
4080 return -EPROTO;
4081
4064 scid = __le16_to_cpu(rsp->scid); 4082 scid = __le16_to_cpu(rsp->scid);
4065 flags = __le16_to_cpu(rsp->flags); 4083 flags = __le16_to_cpu(rsp->flags);
4066 result = __le16_to_cpu(rsp->result); 4084 result = __le16_to_cpu(rsp->result);
@@ -4161,7 +4179,8 @@ done:
4161} 4179}
4162 4180
4163static inline int l2cap_disconnect_req(struct l2cap_conn *conn, 4181static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4164 struct l2cap_cmd_hdr *cmd, u8 *data) 4182 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4183 u8 *data)
4165{ 4184{
4166 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; 4185 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4167 struct l2cap_disconn_rsp rsp; 4186 struct l2cap_disconn_rsp rsp;
@@ -4169,6 +4188,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4169 struct l2cap_chan *chan; 4188 struct l2cap_chan *chan;
4170 struct sock *sk; 4189 struct sock *sk;
4171 4190
4191 if (cmd_len != sizeof(*req))
4192 return -EPROTO;
4193
4172 scid = __le16_to_cpu(req->scid); 4194 scid = __le16_to_cpu(req->scid);
4173 dcid = __le16_to_cpu(req->dcid); 4195 dcid = __le16_to_cpu(req->dcid);
4174 4196
@@ -4208,12 +4230,16 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4208} 4230}
4209 4231
4210static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, 4232static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4211 struct l2cap_cmd_hdr *cmd, u8 *data) 4233 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4234 u8 *data)
4212{ 4235{
4213 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; 4236 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4214 u16 dcid, scid; 4237 u16 dcid, scid;
4215 struct l2cap_chan *chan; 4238 struct l2cap_chan *chan;
4216 4239
4240 if (cmd_len != sizeof(*rsp))
4241 return -EPROTO;
4242
4217 scid = __le16_to_cpu(rsp->scid); 4243 scid = __le16_to_cpu(rsp->scid);
4218 dcid = __le16_to_cpu(rsp->dcid); 4244 dcid = __le16_to_cpu(rsp->dcid);
4219 4245
@@ -4243,11 +4269,15 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4243} 4269}
4244 4270
4245static inline int l2cap_information_req(struct l2cap_conn *conn, 4271static inline int l2cap_information_req(struct l2cap_conn *conn,
4246 struct l2cap_cmd_hdr *cmd, u8 *data) 4272 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4273 u8 *data)
4247{ 4274{
4248 struct l2cap_info_req *req = (struct l2cap_info_req *) data; 4275 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4249 u16 type; 4276 u16 type;
4250 4277
4278 if (cmd_len != sizeof(*req))
4279 return -EPROTO;
4280
4251 type = __le16_to_cpu(req->type); 4281 type = __le16_to_cpu(req->type);
4252 4282
4253 BT_DBG("type 0x%4.4x", type); 4283 BT_DBG("type 0x%4.4x", type);
@@ -4294,11 +4324,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
4294} 4324}
4295 4325
4296static inline int l2cap_information_rsp(struct l2cap_conn *conn, 4326static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4297 struct l2cap_cmd_hdr *cmd, u8 *data) 4327 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4328 u8 *data)
4298{ 4329{
4299 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; 4330 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4300 u16 type, result; 4331 u16 type, result;
4301 4332
4333 if (cmd_len != sizeof(*rsp))
4334 return -EPROTO;
4335
4302 type = __le16_to_cpu(rsp->type); 4336 type = __le16_to_cpu(rsp->type);
4303 result = __le16_to_cpu(rsp->result); 4337 result = __le16_to_cpu(rsp->result);
4304 4338
@@ -5164,16 +5198,16 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5164 5198
5165 switch (cmd->code) { 5199 switch (cmd->code) {
5166 case L2CAP_COMMAND_REJ: 5200 case L2CAP_COMMAND_REJ:
5167 l2cap_command_rej(conn, cmd, data); 5201 l2cap_command_rej(conn, cmd, cmd_len, data);
5168 break; 5202 break;
5169 5203
5170 case L2CAP_CONN_REQ: 5204 case L2CAP_CONN_REQ:
5171 err = l2cap_connect_req(conn, cmd, data); 5205 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5172 break; 5206 break;
5173 5207
5174 case L2CAP_CONN_RSP: 5208 case L2CAP_CONN_RSP:
5175 case L2CAP_CREATE_CHAN_RSP: 5209 case L2CAP_CREATE_CHAN_RSP:
5176 err = l2cap_connect_create_rsp(conn, cmd, data); 5210 err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5177 break; 5211 break;
5178 5212
5179 case L2CAP_CONF_REQ: 5213 case L2CAP_CONF_REQ:
@@ -5181,15 +5215,15 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5181 break; 5215 break;
5182 5216
5183 case L2CAP_CONF_RSP: 5217 case L2CAP_CONF_RSP:
5184 err = l2cap_config_rsp(conn, cmd, data); 5218 err = l2cap_config_rsp(conn, cmd, cmd_len, data);
5185 break; 5219 break;
5186 5220
5187 case L2CAP_DISCONN_REQ: 5221 case L2CAP_DISCONN_REQ:
5188 err = l2cap_disconnect_req(conn, cmd, data); 5222 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5189 break; 5223 break;
5190 5224
5191 case L2CAP_DISCONN_RSP: 5225 case L2CAP_DISCONN_RSP:
5192 err = l2cap_disconnect_rsp(conn, cmd, data); 5226 err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5193 break; 5227 break;
5194 5228
5195 case L2CAP_ECHO_REQ: 5229 case L2CAP_ECHO_REQ:
@@ -5200,11 +5234,11 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5200 break; 5234 break;
5201 5235
5202 case L2CAP_INFO_REQ: 5236 case L2CAP_INFO_REQ:
5203 err = l2cap_information_req(conn, cmd, data); 5237 err = l2cap_information_req(conn, cmd, cmd_len, data);
5204 break; 5238 break;
5205 5239
5206 case L2CAP_INFO_RSP: 5240 case L2CAP_INFO_RSP:
5207 err = l2cap_information_rsp(conn, cmd, data); 5241 err = l2cap_information_rsp(conn, cmd, cmd_len, data);
5208 break; 5242 break;
5209 5243
5210 case L2CAP_CREATE_CHAN_REQ: 5244 case L2CAP_CREATE_CHAN_REQ:
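
Each signalling handler now takes cmd_len and returns -EPROTO when the payload is shorter than the structure it is about to interpret, before any field is read. A generic sketch of that validate-before-cast pattern for a toy parser; struct conn_req and handle_conn_req() are invented, not L2CAP definitions.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>

struct conn_req {                 /* illustrative wire format */
	uint16_t psm;
	uint16_t scid;
} __attribute__((packed));

/* Validate the advertised payload length before interpreting it: a short
 * packet must not let us read fields that were never received. */
static int handle_conn_req(const uint8_t *data, uint16_t cmd_len)
{
	struct conn_req req;

	if (cmd_len < sizeof(req))
		return -EPROTO;

	memcpy(&req, data, sizeof(req));      /* also avoids alignment issues */
	printf("psm=%u scid=%u\n", req.psm, req.scid);
	return 0;
}

int main(void)
{
	uint8_t short_pkt[2] = { 0x01, 0x00 };
	uint8_t good_pkt[4]  = { 0x01, 0x00, 0x40, 0x00 };

	printf("short: %d\n", handle_conn_req(short_pkt, sizeof(short_pkt)));
	printf("good:  %d\n", handle_conn_req(good_pkt, sizeof(good_pkt)));
	return 0;
}
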
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 35fef22703e9..f8ecbc70293d 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2700,7 +2700,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2700 break; 2700 break;
2701 2701
2702 case DISCOV_TYPE_LE: 2702 case DISCOV_TYPE_LE:
2703 if (!lmp_host_le_capable(hdev)) { 2703 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2704 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, 2704 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2705 MGMT_STATUS_NOT_SUPPORTED); 2705 MGMT_STATUS_NOT_SUPPORTED);
2706 mgmt_pending_remove(cmd); 2706 mgmt_pending_remove(cmd);
@@ -3418,6 +3418,27 @@ new_settings:
3418 return err; 3418 return err;
3419} 3419}
3420 3420
3421int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
3422{
3423 struct pending_cmd *cmd;
3424 u8 status;
3425
3426 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
3427 if (!cmd)
3428 return -ENOENT;
3429
3430 if (err == -ERFKILL)
3431 status = MGMT_STATUS_RFKILLED;
3432 else
3433 status = MGMT_STATUS_FAILED;
3434
3435 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
3436
3437 mgmt_pending_remove(cmd);
3438
3439 return err;
3440}
3441
3421int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) 3442int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3422{ 3443{
3423 struct cmd_lookup match = { NULL, hdev }; 3444 struct cmd_lookup match = { NULL, hdev };
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index b2296d3857a0..b5562abdd6e0 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -770,7 +770,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
770 770
771 BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level); 771 BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
772 772
773 if (!lmp_host_le_capable(hcon->hdev)) 773 if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
774 return 1; 774 return 1;
775 775
776 if (sec_level == BT_SECURITY_LOW) 776 if (sec_level == BT_SECURITY_LOW)
@@ -851,7 +851,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
851 __u8 reason; 851 __u8 reason;
852 int err = 0; 852 int err = 0;
853 853
854 if (!lmp_host_le_capable(conn->hcon->hdev)) { 854 if (!test_bit(HCI_LE_ENABLED, &conn->hcon->hdev->dev_flags)) {
855 err = -ENOTSUPP; 855 err = -ENOTSUPP;
856 reason = SMP_PAIRING_NOTSUPP; 856 reason = SMP_PAIRING_NOTSUPP;
857 goto done; 857 goto done;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index d5953b87918c..3a246a6cab47 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1675,13 +1675,13 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1675 __register_request(osdc, req); 1675 __register_request(osdc, req);
1676 __unregister_linger_request(osdc, req); 1676 __unregister_linger_request(osdc, req);
1677 } 1677 }
1678 reset_changed_osds(osdc);
1678 mutex_unlock(&osdc->request_mutex); 1679 mutex_unlock(&osdc->request_mutex);
1679 1680
1680 if (needmap) { 1681 if (needmap) {
1681 dout("%d requests for down osds, need new map\n", needmap); 1682 dout("%d requests for down osds, need new map\n", needmap);
1682 ceph_monc_request_next_osdmap(&osdc->client->monc); 1683 ceph_monc_request_next_osdmap(&osdc->client->monc);
1683 } 1684 }
1684 reset_changed_osds(osdc);
1685} 1685}
1686 1686
1687 1687
diff --git a/net/core/filter.c b/net/core/filter.c
index dad2a178f9f8..6438f29ff266 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -778,7 +778,7 @@ int sk_detach_filter(struct sock *sk)
778} 778}
779EXPORT_SYMBOL_GPL(sk_detach_filter); 779EXPORT_SYMBOL_GPL(sk_detach_filter);
780 780
781static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to) 781void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
782{ 782{
783 static const u16 decodes[] = { 783 static const u16 decodes[] = {
784 [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K, 784 [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index d5bef0b0f639..a0e9cf6379de 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -73,8 +73,13 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
73 goto out; 73 goto out;
74 } 74 }
75 75
76 if (filter) 76 if (filter) {
77 memcpy(nla_data(attr), filter->insns, len); 77 struct sock_filter *fb = (struct sock_filter *)nla_data(attr);
78 int i;
79
80 for (i = 0; i < filter->len; i++, fb++)
81 sk_decode_filter(&filter->insns[i], fb);
82 }
78 83
79out: 84out:
80 rcu_read_unlock(); 85 rcu_read_unlock();
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 5b142fb16480..9e6c2a075a4c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2542,6 +2542,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
2542 struct ip_vs_dest *dest; 2542 struct ip_vs_dest *dest;
2543 struct ip_vs_dest_entry entry; 2543 struct ip_vs_dest_entry entry;
2544 2544
2545 memset(&entry, 0, sizeof(entry));
2545 list_for_each_entry(dest, &svc->destinations, n_list) { 2546 list_for_each_entry(dest, &svc->destinations, n_list) {
2546 if (count >= get->num_dests) 2547 if (count >= get->num_dests)
2547 break; 2548 break;
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index dc3fd5d44464..c7b6d466a662 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -149,9 +149,12 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
149 149
150 rcu_read_lock(); 150 rcu_read_lock();
151 list_for_each_entry_rcu(cur, &nfnl_acct_list, head) { 151 list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
152 if (last && cur != last) 152 if (last) {
153 continue; 153 if (cur != last)
154 continue;
154 155
156 last = NULL;
157 }
155 if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid, 158 if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
156 cb->nlh->nlmsg_seq, 159 cb->nlh->nlmsg_seq,
157 NFNL_MSG_TYPE(cb->nlh->nlmsg_type), 160 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
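
The dump resumes from a saved cursor; clearing last once the saved entry is reached stops the loop from skipping everything after it. A small sketch of that resume-from-cursor iteration over an array; it mirrors the control flow only, not the netlink API.

#include <stdio.h>
#include <stddef.h>

/* Emit entries starting from the saved cursor (or from the beginning when
 * the cursor is NULL).  Forgetting to clear 'last' after matching it is
 * exactly the bug fixed above: every later entry would be skipped too. */
static void dump_resume(const char **items, size_t n, const char *last)
{
	size_t i;

	for (i = 0; i < n; i++) {
		const char *cur = items[i];

		if (last) {
			if (cur != last)
				continue;      /* still before the cursor */
			last = NULL;           /* found it; stop skipping */
		}
		printf("emit %s\n", cur);
	}
}

int main(void)
{
	const char *items[] = { "a", "b", "c", "d" };

	dump_resume(items, 4, items[2]);       /* resumes at "c" */
	return 0;
}
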
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 701c88a20fea..65074dfb9383 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -220,9 +220,12 @@ ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(cur, &cttimeout_list, head) {
-		if (last && cur != last)
-			continue;
+		if (last) {
+			if (cur != last)
+				continue;
 
+			last = NULL;
+		}
 		if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid,
 					   cb->nlh->nlmsg_seq,
 					   NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 4e27fa035814..5352b2d2d5bf 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -637,9 +637,6 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 	if (queue->copy_mode == NFQNL_COPY_NONE)
 		return -EINVAL;
 
-	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(entry->skb))
-		return __nfqnl_enqueue_packet(net, queue, entry);
-
 	skb = entry->skb;
 
 	switch (entry->pf) {
@@ -651,6 +648,9 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 		break;
 	}
 
+	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
+		return __nfqnl_enqueue_packet(net, queue, entry);
+
 	nf_bridge_adjust_skb_data(skb);
 	segs = skb_gso_segment(skb, 0);
 	/* Does not use PTR_ERR to limit the number of error codes that can be
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index a75240f0d42b..afaebc766933 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -125,6 +125,12 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 
 	skb_put(skb, TCPOLEN_MSS);
 
+	/* RFC 879 states that the default MSS is 536 without specific
+	 * knowledge that the destination host is prepared to accept larger.
+	 * Since no MSS was provided, we MUST NOT set a value > 536.
+	 */
+	newmss = min(newmss, (u16)536);
+
 	opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
 	memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
 
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d0b3dd60d386..57ee84d21470 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -371,7 +371,7 @@ static int netlink_mmap(struct file *file, struct socket *sock,
 	err = 0;
 out:
 	mutex_unlock(&nlk->pg_vec_lock);
-	return 0;
+	return err;
 }
 
 static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2b935e7cfe7b..281c1bded1f6 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -291,17 +291,18 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
 {
 	struct qdisc_rate_table *rtab;
 
+	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
+	    nla_len(tab) != TC_RTAB_SIZE)
+		return NULL;
+
 	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
-		if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
+		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
+		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
 			rtab->refcnt++;
 			return rtab;
 		}
 	}
 
-	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
-	    nla_len(tab) != TC_RTAB_SIZE)
-		return NULL;
-
 	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
 	if (rtab) {
 		rtab->rate = *r;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f631c5ff4dbf..6abb1caf9836 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4003,6 +4003,12 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
 
 	/* Release our hold on the endpoint. */
 	sp = sctp_sk(sk);
+	/* This could happen during socket init, thus we bail out
+	 * early, since the rest of the below is not setup either.
+	 */
+	if (sp->ep == NULL)
+		return;
+
 	if (sp->do_auto_asconf) {
 		sp->do_auto_asconf = 0;
 		list_del(&sp->auto_asconf_list);
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 030f53c96ec0..987f728718c5 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -193,6 +193,8 @@ static DECLARE_TLV_DB_SCALE(mic_tlv, 1600, 100, 0);
 
 static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
 
+static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
+
 static const unsigned int limiter_tlv[] = {
 	TLV_DB_RANGE_HEAD(2),
 	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
@@ -260,7 +262,7 @@ static const char * const hp_gain_num_text[] = {
 };
 
 static const struct soc_enum hp_gain_enum =
-	SOC_ENUM_SINGLE(CS42L52_PB_CTL1, 4,
+	SOC_ENUM_SINGLE(CS42L52_PB_CTL1, 5,
 			ARRAY_SIZE(hp_gain_num_text), hp_gain_num_text);
 
 static const char * const beep_pitch_text[] = {
265 267
266static const char * const beep_pitch_text[] = { 268static const char * const beep_pitch_text[] = {
@@ -441,7 +443,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
 
 	SOC_DOUBLE_R_SX_TLV("PCM Mixer Volume",
 			CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL,
-			0, 0x7f, 0x19, hl_tlv),
+			0, 0x7f, 0x19, mix_tlv),
 	SOC_DOUBLE_R("PCM Mixer Switch",
 			CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL, 7, 1, 1),
 
447 449
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 65d09d60b7c6..1514bf845e4b 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -187,14 +187,14 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
 
 		break;
 	}
-
-	if (found)
-		snd_soc_dapm_sync(widget->dapm);
 	}
 
-	ret = snd_soc_update_bits(widget->codec, reg, val_mask, val);
-
 	mutex_unlock(&widget->codec->mutex);
+
+	if (found)
+		snd_soc_dapm_sync(widget->dapm);
+
+	ret = snd_soc_update_bits_locked(widget->codec, reg, val_mask, val);
 	return ret;
 }
 
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index e895d3939eef..100fdadda56a 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1120,7 +1120,8 @@ SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0,
 ARIZONA_DSP_WIDGETS(DSP1, "DSP1"),
 
 SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
-		       ARIZONA_AEC_LOOPBACK_ENA, 0, &wm5102_aec_loopback_mux),
+		       ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0,
+		       &wm5102_aec_loopback_mux),
 
 SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
 		   ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index ba38f0679662..88ad7db52dde 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -503,7 +503,8 @@ SND_SOC_DAPM_PGA("ASRC2R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2R_ENA_SHIFT, 0,
 		 NULL, 0),
 
 SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
-		       ARIZONA_AEC_LOOPBACK_ENA, 0, &wm5110_aec_loopback_mux),
+		       ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0,
+		       &wm5110_aec_loopback_mux),
 
 SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
 		     ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index dfd997aaadfc..29e95f93d482 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3836,12 +3836,13 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
 				ret);
 		} else if (!(ret & WM1811_JACKDET_LVL)) {
 			dev_dbg(codec->dev, "Ignoring removed jack\n");
-			return IRQ_HANDLED;
+			goto out;
 		}
 	} else if (!(reg & WM8958_MICD_STS)) {
 		snd_soc_jack_report(wm8994->micdet[0].jack, 0,
 				    SND_JACK_MECHANICAL | SND_JACK_HEADSET |
 				    wm8994->btn_mask);
+		wm8994->mic_detecting = true;
 		goto out;
 	}
 
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index a80c883bb8be..c7051c457b75 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -55,7 +55,8 @@ static int dapm_up_seq[] = {
 	[snd_soc_dapm_clock_supply] = 1,
 	[snd_soc_dapm_micbias] = 2,
 	[snd_soc_dapm_dai_link] = 2,
-	[snd_soc_dapm_dai] = 3,
+	[snd_soc_dapm_dai_in] = 3,
+	[snd_soc_dapm_dai_out] = 3,
 	[snd_soc_dapm_aif_in] = 3,
 	[snd_soc_dapm_aif_out] = 3,
 	[snd_soc_dapm_mic] = 4,
@@ -92,7 +93,8 @@ static int dapm_down_seq[] = {
 	[snd_soc_dapm_value_mux] = 9,
 	[snd_soc_dapm_aif_in] = 10,
 	[snd_soc_dapm_aif_out] = 10,
-	[snd_soc_dapm_dai] = 10,
+	[snd_soc_dapm_dai_in] = 10,
+	[snd_soc_dapm_dai_out] = 10,
 	[snd_soc_dapm_dai_link] = 11,
 	[snd_soc_dapm_clock_supply] = 12,
 	[snd_soc_dapm_regulator_supply] = 12,
@@ -419,7 +421,8 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
 	case snd_soc_dapm_clock_supply:
 	case snd_soc_dapm_aif_in:
 	case snd_soc_dapm_aif_out:
-	case snd_soc_dapm_dai:
+	case snd_soc_dapm_dai_in:
+	case snd_soc_dapm_dai_out:
 	case snd_soc_dapm_hp:
 	case snd_soc_dapm_mic:
 	case snd_soc_dapm_spk:
@@ -820,7 +823,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
 	switch (widget->id) {
 	case snd_soc_dapm_adc:
 	case snd_soc_dapm_aif_out:
-	case snd_soc_dapm_dai:
+	case snd_soc_dapm_dai_out:
 		if (widget->active) {
 			widget->outputs = snd_soc_dapm_suspend_check(widget);
 			return widget->outputs;
@@ -916,7 +919,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
 	switch (widget->id) {
 	case snd_soc_dapm_dac:
 	case snd_soc_dapm_aif_in:
-	case snd_soc_dapm_dai:
+	case snd_soc_dapm_dai_in:
 		if (widget->active) {
 			widget->inputs = snd_soc_dapm_suspend_check(widget);
 			return widget->inputs;
@@ -1135,16 +1138,6 @@ static int dapm_generic_check_power(struct snd_soc_dapm_widget *w)
 	return out != 0 && in != 0;
 }
 
-static int dapm_dai_check_power(struct snd_soc_dapm_widget *w)
-{
-	DAPM_UPDATE_STAT(w, power_checks);
-
-	if (w->active)
-		return w->active;
-
-	return dapm_generic_check_power(w);
-}
-
 /* Check to see if an ADC has power */
 static int dapm_adc_check_power(struct snd_soc_dapm_widget *w)
 {
@@ -2318,7 +2311,8 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
 	case snd_soc_dapm_clock_supply:
 	case snd_soc_dapm_aif_in:
 	case snd_soc_dapm_aif_out:
-	case snd_soc_dapm_dai:
+	case snd_soc_dapm_dai_in:
+	case snd_soc_dapm_dai_out:
 	case snd_soc_dapm_dai_link:
 		list_add(&path->list, &dapm->card->paths);
 		list_add(&path->list_sink, &wsink->sources);
@@ -3129,10 +3123,12 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
 		break;
 	case snd_soc_dapm_adc:
 	case snd_soc_dapm_aif_out:
+	case snd_soc_dapm_dai_out:
 		w->power_check = dapm_adc_check_power;
 		break;
 	case snd_soc_dapm_dac:
 	case snd_soc_dapm_aif_in:
+	case snd_soc_dapm_dai_in:
 		w->power_check = dapm_dac_check_power;
 		break;
 	case snd_soc_dapm_pga:
@@ -3152,9 +3148,6 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
 	case snd_soc_dapm_clock_supply:
 		w->power_check = dapm_supply_check_power;
 		break;
-	case snd_soc_dapm_dai:
-		w->power_check = dapm_dai_check_power;
-		break;
 	default:
 		w->power_check = dapm_always_on_check_power;
 		break;
@@ -3375,7 +3368,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
 	template.reg = SND_SOC_NOPM;
 
 	if (dai->driver->playback.stream_name) {
-		template.id = snd_soc_dapm_dai;
+		template.id = snd_soc_dapm_dai_in;
 		template.name = dai->driver->playback.stream_name;
 		template.sname = dai->driver->playback.stream_name;
 
@@ -3393,7 +3386,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
 	}
 
 	if (dai->driver->capture.stream_name) {
-		template.id = snd_soc_dapm_dai;
+		template.id = snd_soc_dapm_dai_out;
 		template.name = dai->driver->capture.stream_name;
 		template.sname = dai->driver->capture.stream_name;
 
@@ -3423,8 +3416,13 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
 
 	/* For each DAI widget... */
 	list_for_each_entry(dai_w, &card->widgets, list) {
-		if (dai_w->id != snd_soc_dapm_dai)
+		switch (dai_w->id) {
+		case snd_soc_dapm_dai_in:
+		case snd_soc_dapm_dai_out:
+			break;
+		default:
 			continue;
+		}
 
 		dai = dai_w->priv;
 
@@ -3433,8 +3431,13 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
 			if (w->dapm != dai_w->dapm)
 				continue;
 
-			if (w->id == snd_soc_dapm_dai)
+			switch (w->id) {
+			case snd_soc_dapm_dai_in:
+			case snd_soc_dapm_dai_out:
 				continue;
+			default:
+				break;
+			}
 
 			if (!w->sname)
 				continue;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 73bb8eefa491..ccb6be4d658d 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -928,8 +928,13 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
 	/* Create any new FE <--> BE connections */
 	for (i = 0; i < list->num_widgets; i++) {
 
-		if (list->widgets[i]->id != snd_soc_dapm_dai)
+		switch (list->widgets[i]->id) {
+		case snd_soc_dapm_dai_in:
+		case snd_soc_dapm_dai_out:
+			break;
+		default:
 			continue;
+		}
 
 		/* is there a valid BE rtd for this widget */
 		be = dpcm_get_be(card, list->widgets[i], stream);
@@ -2011,9 +2016,11 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
 		if (cpu_dai->driver->capture.channels_min)
 			capture = 1;
 	} else {
-		if (codec_dai->driver->playback.channels_min)
+		if (codec_dai->driver->playback.channels_min &&
+		    cpu_dai->driver->playback.channels_min)
 			playback = 1;
-		if (codec_dai->driver->capture.channels_min)
+		if (codec_dai->driver->capture.channels_min &&
+		    cpu_dai->driver->capture.channels_min)
 			capture = 1;
 	}
 
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 9e9d34871195..fe702076ca46 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2191,7 +2191,7 @@ int initialize_counters(int cpu_id)
 
 void allocate_output_buffer()
 {
-	output_buffer = calloc(1, (1 + topo.num_cpus) * 128);
+	output_buffer = calloc(1, (1 + topo.num_cpus) * 256);
 	outp = output_buffer;
 	if (outp == NULL) {
 		perror("calloc");