author     David S. Miller <davem@davemloft.net>  2011-10-07 13:38:43 -0400
committer  David S. Miller <davem@davemloft.net>  2011-10-07 13:38:43 -0400
commit     88c5100c28b02c4b2b2c6f6fafbbd76d90f698b9
tree       08c4399e0341f7eb0ccb24e15f2cab687275c2a4
parent     8083f0fc969d9b5353061a7a6f963405057e26b1
parent     3ee72ca99288f1de95ec9c570e43f531c8799f06
Merge branch 'master' of github.com:davem330/net
Conflicts: net/batman-adv/soft-interface.c
-rw-r--r--  Documentation/hwmon/coretemp | 14
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 4
-rw-r--r--  Documentation/networking/scaling.txt | 12
-rw-r--r--  Documentation/vm/transhuge.txt | 7
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/Kconfig | 14
-rw-r--r--  arch/arm/include/asm/futex.h | 34
-rw-r--r--  arch/arm/include/asm/unistd.h | 4
-rw-r--r--  arch/arm/kernel/smp_scu.c | 10
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 15
-rw-r--r--  arch/arm/mach-exynos4/clock.c | 6
-rw-r--r--  arch/arm/mach-s3c2443/clock.c | 2
-rw-r--r--  arch/arm/mach-s5pv210/clock.c | 6
-rw-r--r--  arch/arm/mm/cache-v7.S | 20
-rw-r--r--  arch/arm/mm/dma-mapping.c | 2
-rw-r--r--  arch/arm/plat-s5p/irq-gpioint.c | 9
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 14
-rw-r--r--  arch/s390/include/asm/elf.h | 3
-rw-r--r--  arch/s390/include/asm/pgtable.h | 2
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 3
-rw-r--r--  arch/s390/kernel/entry64.S | 6
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 5
-rw-r--r--  arch/s390/mm/pgtable.c | 17
-rw-r--r--  arch/sparc/include/asm/spitfire.h | 2
-rw-r--r--  arch/sparc/include/asm/xor_64.h | 4
-rw-r--r--  arch/sparc/kernel/cpu.c | 12
-rw-r--r--  arch/sparc/kernel/cpumap.c | 2
-rw-r--r--  arch/sparc/kernel/head_64.S | 25
-rw-r--r--  arch/sparc/kernel/process_32.c | 3
-rw-r--r--  arch/sparc/kernel/process_64.c | 3
-rw-r--r--  arch/sparc/kernel/setup_32.c | 2
-rw-r--r--  arch/sparc/kernel/setup_64.c | 18
-rw-r--r--  arch/sparc/mm/init_64.c | 5
-rw-r--r--  arch/x86/kernel/rtc.c | 23
-rw-r--r--  arch/x86/kvm/emulate.c | 2
-rw-r--r--  arch/x86/kvm/mmu.c | 3
-rw-r--r--  arch/x86/pci/acpi.c | 11
-rw-r--r--  arch/x86/platform/mrst/vrtc.c | 9
-rw-r--r--  block/blk-core.c | 13
-rw-r--r--  block/blk-sysfs.c | 5
-rw-r--r--  drivers/base/power/clock_ops.c | 75
-rw-r--r--  drivers/char/tpm/Kconfig | 1
-rw-r--r--  drivers/char/tpm/tpm.c | 9
-rw-r--r--  drivers/char/tpm/tpm_nsc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 88
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 51
-rw-r--r--  drivers/hwmon/coretemp.c | 221
-rw-r--r--  drivers/hwmon/ds620.c | 2
-rw-r--r--  drivers/hwmon/w83791d.c | 4
-rw-r--r--  drivers/ide/ide-disk.c | 7
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 10
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 7
-rw-r--r--  drivers/md/dm-crypt.c | 2
-rw-r--r--  drivers/md/dm-flakey.c | 4
-rw-r--r--  drivers/md/dm-raid.c | 2
-rw-r--r--  drivers/md/dm-table.c | 32
-rw-r--r--  drivers/md/md.c | 22
-rw-r--r--  drivers/md/md.h | 2
-rw-r--r--  drivers/md/multipath.c | 3
-rw-r--r--  drivers/md/raid1.c | 3
-rw-r--r--  drivers/md/raid10.c | 5
-rw-r--r--  drivers/md/raid5.c | 6
-rw-r--r--  drivers/media/video/omap/omap_vout.c | 13
-rw-r--r--  drivers/media/video/omap3isp/ispccdc.c | 1
-rw-r--r--  drivers/media/video/uvc/uvc_driver.c | 2
-rw-r--r--  drivers/media/video/uvc/uvc_entity.c | 2
-rw-r--r--  drivers/media/video/uvc/uvc_video.c | 10
-rw-r--r--  drivers/media/video/uvc/uvcvideo.h | 2
-rw-r--r--  drivers/media/video/v4l2-dev.c | 11
-rw-r--r--  drivers/media/video/v4l2-device.c | 2
-rw-r--r--  drivers/mfd/jz4740-adc.c | 2
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d.c | 14
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 3
-rw-r--r--  drivers/net/bonding/bond_alb.c | 3
-rw-r--r--  drivers/net/bonding/bond_main.c | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 23
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/l2t.c | 15
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/l2t.h | 16
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 4
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 56
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/phy/dp83640.c | 4
-rw-r--r--  drivers/net/xen-netback/interface.c | 4
-rw-r--r--  drivers/pci/pci.c | 6
-rw-r--r--  drivers/pci/probe.c | 14
-rw-r--r--  drivers/s390/cio/cio.c | 8
-rw-r--r--  drivers/scsi/3w-9xxx.c | 2
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 2
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 9
-rw-r--r--  drivers/spi/spi-fsl-spi.c | 3
-rw-r--r--  drivers/spi/spi-imx.c | 4
-rw-r--r--  drivers/spi/spi-topcliff-pch.c | 93
-rw-r--r--  drivers/zorro/zorro.c | 7
-rw-r--r--  fs/btrfs/file.c | 24
-rw-r--r--  fs/namei.c | 8
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/nfs/super.c | 2
-rw-r--r--  fs/quota/quota.c | 2
-rw-r--r--  fs/stat.c | 2
-rw-r--r--  include/linux/device-mapper.h | 5
-rw-r--r--  include/linux/irqdomain.h | 1
-rw-r--r--  include/linux/kvm.h | 1
-rw-r--r--  include/linux/namei.h | 3
-rw-r--r--  include/linux/pci.h | 3
-rw-r--r--  include/linux/ptp_classify.h | 13
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/trace/events/writeback.h | 10
-rw-r--r--  init/main.c | 4
-rw-r--r--  kernel/irq/irqdomain.c | 6
-rw-r--r--  kernel/posix-cpu-timers.c | 5
-rw-r--r--  kernel/ptrace.c | 23
-rw-r--r--  kernel/resource.c | 7
-rw-r--r--  kernel/sched.c | 26
-rw-r--r--  kernel/sched_rt.c | 4
-rw-r--r--  net/batman-adv/soft-interface.c | 10
-rw-r--r--  net/bridge/br_device.c | 3
-rw-r--r--  net/can/bcm.c | 53
-rw-r--r--  net/ceph/ceph_common.c | 1
-rw-r--r--  net/ceph/messenger.c | 1
-rw-r--r--  net/ceph/osd_client.c | 4
-rw-r--r--  net/ceph/osdmap.c | 84
-rw-r--r--  net/ipv4/tcp_input.c | 4
-rw-r--r--  net/ipv4/tcp_ipv4.c | 11
-rw-r--r--  net/ipv6/ip6mr.c | 8
-rw-r--r--  net/ipv6/route.c | 4
-rw-r--r--  net/ipv6/tcp_ipv6.c | 11
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 2
-rw-r--r--  net/packet/af_packet.c | 5
-rw-r--r--  net/rds/iw_rdma.c | 13
-rw-r--r--  net/xfrm/xfrm_policy.c | 10
-rw-r--r--  sound/pci/fm801.c | 15
-rw-r--r--  sound/pci/hda/hda_intel.c | 9
-rw-r--r--  sound/pci/hda/patch_realtek.c | 8
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 1
-rw-r--r--  sound/soc/blackfin/bf5xx-ad73311.c | 2
-rw-r--r--  sound/soc/codecs/ssm2602.c | 3
-rw-r--r--  sound/soc/codecs/wm8753.c | 4
-rw-r--r--  sound/soc/codecs/wm8962.c | 26
-rw-r--r--  sound/soc/omap/mcpdm.c | 2
-rw-r--r--  sound/soc/omap/mcpdm.h | 2
-rw-r--r--  sound/soc/omap/omap-mcbsp.c | 6
-rw-r--r--  sound/soc/pxa/zylonite.c | 8
-rw-r--r--  sound/soc/soc-core.c | 18
-rw-r--r--  sound/usb/card.c | 7
-rw-r--r--  tools/perf/Makefile | 9
-rw-r--r--  tools/perf/builtin-record.c | 3
-rw-r--r--  tools/perf/builtin-test.c | 2
-rw-r--r--  tools/perf/builtin-top.c | 9
-rw-r--r--  tools/perf/util/event.c | 5
-rw-r--r--  tools/perf/util/event.h | 2
-rw-r--r--  tools/perf/util/evlist.c | 13
-rw-r--r--  tools/perf/util/evlist.h | 1
-rw-r--r--  tools/perf/util/evsel.c | 57
-rw-r--r--  tools/perf/util/probe-finder.c | 2
-rw-r--r--  tools/perf/util/python.c | 2
-rw-r--r--  tools/perf/util/session.h | 3
-rw-r--r--  tools/perf/util/sort.c | 10
-rw-r--r--  tools/perf/util/symbol.c | 153
178 files changed, 1255 insertions(+), 1000 deletions(-)
diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp
index fa8776ab9b18..84d46c0c71a3 100644
--- a/Documentation/hwmon/coretemp
+++ b/Documentation/hwmon/coretemp
@@ -35,13 +35,6 @@ the Out-Of-Spec bit. Following table summarizes the exported sysfs files:
35All Sysfs entries are named with their core_id (represented here by 'X'). 35All Sysfs entries are named with their core_id (represented here by 'X').
36tempX_input - Core temperature (in millidegrees Celsius). 36tempX_input - Core temperature (in millidegrees Celsius).
37tempX_max - All cooling devices should be turned on (on Core2). 37tempX_max - All cooling devices should be turned on (on Core2).
38 Initialized with IA32_THERM_INTERRUPT. When the CPU
39 temperature reaches this temperature, an interrupt is
40 generated and tempX_max_alarm is set.
41tempX_max_hyst - If the CPU temperature falls below than temperature,
42 an interrupt is generated and tempX_max_alarm is reset.
43tempX_max_alarm - Set if the temperature reaches or exceeds tempX_max.
44 Reset if the temperature drops to or below tempX_max_hyst.
45tempX_crit - Maximum junction temperature (in millidegrees Celsius). 38tempX_crit - Maximum junction temperature (in millidegrees Celsius).
46tempX_crit_alarm - Set when Out-of-spec bit is set, never clears. 39tempX_crit_alarm - Set when Out-of-spec bit is set, never clears.
47 Correct CPU operation is no longer guaranteed. 40 Correct CPU operation is no longer guaranteed.
@@ -49,9 +42,10 @@ tempX_label - Contains string "Core X", where X is processor
49 number. For Package temp, this will be "Physical id Y", 42 number. For Package temp, this will be "Physical id Y",
50 where Y is the package number. 43 where Y is the package number.
51 44
52The TjMax temperature is set to 85 degrees C if undocumented model specific 45On CPU models which support it, TjMax is read from a model-specific register.
53register (UMSR) 0xee has bit 30 set. If not the TjMax is 100 degrees C as 46On other models, it is set to an arbitrary value based on weak heuristics.
54(sometimes) documented in processor datasheet. 47If these heuristics don't work for you, you can pass the correct TjMax value
48as a module parameter (tjmax).
55 49
56Appendix A. Known TjMax lists (TBD): 50Appendix A. Known TjMax lists (TBD):
57Some information comes from ark.intel.com 51Some information comes from ark.intel.com
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 98c8d4229f0a..cb7f3148035d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1042,7 +1042,7 @@ conf/interface/*:
1042 The functional behaviour for certain settings is different 1042 The functional behaviour for certain settings is different
1043 depending on whether local forwarding is enabled or not. 1043 depending on whether local forwarding is enabled or not.
1044 1044
1045accept_ra - BOOLEAN 1045accept_ra - INTEGER
1046 Accept Router Advertisements; autoconfigure using them. 1046 Accept Router Advertisements; autoconfigure using them.
1047 1047
1048 It also determines whether or not to transmit Router 1048 It also determines whether or not to transmit Router
@@ -1111,7 +1111,7 @@ dad_transmits - INTEGER
1111 The amount of Duplicate Address Detection probes to send. 1111 The amount of Duplicate Address Detection probes to send.
1112 Default: 1 1112 Default: 1
1113 1113
1114forwarding - BOOLEAN 1114forwarding - INTEGER
1115 Configure interface-specific Host/Router behaviour. 1115 Configure interface-specific Host/Router behaviour.
1116 1116
1117 Note: It is recommended to have the same setting on all 1117 Note: It is recommended to have the same setting on all
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 58fd7414e6c0..fe67b5c79f0f 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number
27of logical flows. Packets for each flow are steered to a separate receive 27of logical flows. Packets for each flow are steered to a separate receive
28queue, which in turn can be processed by separate CPUs. This mechanism is 28queue, which in turn can be processed by separate CPUs. This mechanism is
29generally known as “Receive-side Scaling” (RSS). The goal of RSS and 29generally known as “Receive-side Scaling” (RSS). The goal of RSS and
30the other scaling techniques to increase performance uniformly. 30the other scaling techniques is to increase performance uniformly.
31Multi-queue distribution can also be used for traffic prioritization, but 31Multi-queue distribution can also be used for traffic prioritization, but
32that is not the focus of these techniques. 32that is not the focus of these techniques.
33 33
@@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the
186same CPU. Indeed, with many flows and few CPUs, it is very likely that 186same CPU. Indeed, with many flows and few CPUs, it is very likely that
187a single application thread handles flows with many different flow hashes. 187a single application thread handles flows with many different flow hashes.
188 188
189rps_sock_table is a global flow table that contains the *desired* CPU for 189rps_sock_flow_table is a global flow table that contains the *desired* CPU
190flows: the CPU that is currently processing the flow in userspace. Each 190for flows: the CPU that is currently processing the flow in userspace.
191table value is a CPU index that is updated during calls to recvmsg and 191Each table value is a CPU index that is updated during calls to recvmsg
192sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage() 192and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
193and tcp_splice_read()). 193and tcp_splice_read()).
194 194
195When the scheduler moves a thread to a new CPU while it has outstanding 195When the scheduler moves a thread to a new CPU while it has outstanding
@@ -243,7 +243,7 @@ configured. The number of entries in the global flow table is set through:
243 243
244The number of entries in the per-queue flow table are set through: 244The number of entries in the per-queue flow table are set through:
245 245
246 /sys/class/net/<dev>/queues/tx-<n>/rps_flow_cnt 246 /sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt
247 247
248== Suggested Configuration 248== Suggested Configuration
249 249
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index 0924aaca3302..29bdf62aac09 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -123,10 +123,11 @@ be automatically shutdown if it's set to "never".
123khugepaged runs usually at low frequency so while one may not want to 123khugepaged runs usually at low frequency so while one may not want to
124invoke defrag algorithms synchronously during the page faults, it 124invoke defrag algorithms synchronously during the page faults, it
125should be worth invoking defrag at least in khugepaged. However it's 125should be worth invoking defrag at least in khugepaged. However it's
126also possible to disable defrag in khugepaged: 126also possible to disable defrag in khugepaged by writing 0 or enable
127defrag in khugepaged by writing 1:
127 128
128echo yes >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag 129echo 0 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
129echo no >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag 130echo 1 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
130 131
131You can also control how many pages khugepaged should scan at each 132You can also control how many pages khugepaged should scan at each
132pass: 133pass:
diff --git a/MAINTAINERS b/MAINTAINERS
index 65ca7eae6276..aac56f9bf88a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6382,7 +6382,6 @@ S: Supported
6382F: arch/arm/mach-tegra 6382F: arch/arm/mach-tegra
6383 6383
6384TEHUTI ETHERNET DRIVER 6384TEHUTI ETHERNET DRIVER
6385M: Alexander Indenbaum <baum@tehutinetworks.net>
6386M: Andy Gospodarek <andy@greyhouse.net> 6385M: Andy Gospodarek <andy@greyhouse.net>
6387L: netdev@vger.kernel.org 6386L: netdev@vger.kernel.org
6388S: Supported 6387S: Supported
diff --git a/Makefile b/Makefile
index 733dcba61f34..31f967c31e7f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 1 2PATCHLEVEL = 1
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc7 4EXTRAVERSION = -rc9
5NAME = "Divemaster Edition" 5NAME = "Divemaster Edition"
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3269576dbfa8..3146ed3f6eca 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1283,6 +1283,20 @@ config ARM_ERRATA_364296
1283 processor into full low interrupt latency mode. ARM11MPCore 1283 processor into full low interrupt latency mode. ARM11MPCore
1284 is not affected. 1284 is not affected.
1285 1285
1286config ARM_ERRATA_764369
1287 bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
1288 depends on CPU_V7 && SMP
1289 help
1290 This option enables the workaround for erratum 764369
1291 affecting Cortex-A9 MPCore with two or more processors (all
1292 current revisions). Under certain timing circumstances, a data
1293 cache line maintenance operation by MVA targeting an Inner
1294 Shareable memory region may fail to proceed up to either the
1295 Point of Coherency or to the Point of Unification of the
1296 system. This workaround adds a DSB instruction before the
1297 relevant cache maintenance functions and sets a specific bit
1298 in the diagnostic control register of the SCU.
1299
1286endmenu 1300endmenu
1287 1301
1288source "arch/arm/common/Kconfig" 1302source "arch/arm/common/Kconfig"
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 8c73900da9ed..253cc86318bf 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -25,17 +25,17 @@
25 25
26#ifdef CONFIG_SMP 26#ifdef CONFIG_SMP
27 27
28#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 28#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
29 smp_mb(); \ 29 smp_mb(); \
30 __asm__ __volatile__( \ 30 __asm__ __volatile__( \
31 "1: ldrex %1, [%2]\n" \ 31 "1: ldrex %1, [%3]\n" \
32 " " insn "\n" \ 32 " " insn "\n" \
33 "2: strex %1, %0, [%2]\n" \ 33 "2: strex %2, %0, [%3]\n" \
34 " teq %1, #0\n" \ 34 " teq %2, #0\n" \
35 " bne 1b\n" \ 35 " bne 1b\n" \
36 " mov %0, #0\n" \ 36 " mov %0, #0\n" \
37 __futex_atomic_ex_table("%4") \ 37 __futex_atomic_ex_table("%5") \
38 : "=&r" (ret), "=&r" (oldval) \ 38 : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
39 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ 39 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
40 : "cc", "memory") 40 : "cc", "memory")
41 41
@@ -73,14 +73,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
73#include <linux/preempt.h> 73#include <linux/preempt.h>
74#include <asm/domain.h> 74#include <asm/domain.h>
75 75
76#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 76#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
77 __asm__ __volatile__( \ 77 __asm__ __volatile__( \
78 "1: " T(ldr) " %1, [%2]\n" \ 78 "1: " T(ldr) " %1, [%3]\n" \
79 " " insn "\n" \ 79 " " insn "\n" \
80 "2: " T(str) " %0, [%2]\n" \ 80 "2: " T(str) " %0, [%3]\n" \
81 " mov %0, #0\n" \ 81 " mov %0, #0\n" \
82 __futex_atomic_ex_table("%4") \ 82 __futex_atomic_ex_table("%5") \
83 : "=&r" (ret), "=&r" (oldval) \ 83 : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
84 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ 84 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
85 : "cc", "memory") 85 : "cc", "memory")
86 86
@@ -117,7 +117,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
117 int cmp = (encoded_op >> 24) & 15; 117 int cmp = (encoded_op >> 24) & 15;
118 int oparg = (encoded_op << 8) >> 20; 118 int oparg = (encoded_op << 8) >> 20;
119 int cmparg = (encoded_op << 20) >> 20; 119 int cmparg = (encoded_op << 20) >> 20;
120 int oldval = 0, ret; 120 int oldval = 0, ret, tmp;
121 121
122 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) 122 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
123 oparg = 1 << oparg; 123 oparg = 1 << oparg;
@@ -129,19 +129,19 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
129 129
130 switch (op) { 130 switch (op) {
131 case FUTEX_OP_SET: 131 case FUTEX_OP_SET:
132 __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg); 132 __futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg);
133 break; 133 break;
134 case FUTEX_OP_ADD: 134 case FUTEX_OP_ADD:
135 __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg); 135 __futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
136 break; 136 break;
137 case FUTEX_OP_OR: 137 case FUTEX_OP_OR:
138 __futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg); 138 __futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
139 break; 139 break;
140 case FUTEX_OP_ANDN: 140 case FUTEX_OP_ANDN:
141 __futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg); 141 __futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);
142 break; 142 break;
143 case FUTEX_OP_XOR: 143 case FUTEX_OP_XOR:
144 __futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg); 144 __futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
145 break; 145 break;
146 default: 146 default:
147 ret = -ENOSYS; 147 ret = -ENOSYS;
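The hunk above gives the strex status its own scratch operand ("tmp") so it no longer clobbers the value that ldrex loaded into oldval, which is what the caller must report as the old value. A standalone C11-atomics analogy -- illustrative only, not the kernel's ARM assembly -- of why the observed old value and the did-the-store-succeed flag need to live in separate variables:

/*
 * Userspace sketch, not kernel code: "stored" plays the role of the new
 * "tmp" operand, while "oldval" keeps the value observed before the update.
 */
#include <stdatomic.h>
#include <stdio.h>

static int fetch_and_add(atomic_int *uaddr, int oparg)
{
	int oldval = atomic_load(uaddr);
	_Bool stored;                       /* separate status, like "tmp" */

	do {
		/* on failure, oldval is refreshed with the current value */
		stored = atomic_compare_exchange_weak(uaddr, &oldval,
						      oldval + oparg);
	} while (!stored);                  /* retry until the store sticks */

	return oldval;                      /* caller still sees the old value */
}

int main(void)
{
	atomic_int v = 5;
	int old = fetch_and_add(&v, 3);

	printf("old=%d new=%d\n", old, atomic_load(&v));
	return 0;
}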
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 2c04ed5efeb5..c60a2944f95b 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -478,8 +478,8 @@
478/* 478/*
479 * Unimplemented (or alternatively implemented) syscalls 479 * Unimplemented (or alternatively implemented) syscalls
480 */ 480 */
481#define __IGNORE_fadvise64_64 1 481#define __IGNORE_fadvise64_64
482#define __IGNORE_migrate_pages 1 482#define __IGNORE_migrate_pages
483 483
484#endif /* __KERNEL__ */ 484#endif /* __KERNEL__ */
485#endif /* __ASM_ARM_UNISTD_H */ 485#endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 79ed5e7f204a..7fcddb75c877 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -13,6 +13,7 @@
13 13
14#include <asm/smp_scu.h> 14#include <asm/smp_scu.h>
15#include <asm/cacheflush.h> 15#include <asm/cacheflush.h>
16#include <asm/cputype.h>
16 17
17#define SCU_CTRL 0x00 18#define SCU_CTRL 0x00
18#define SCU_CONFIG 0x04 19#define SCU_CONFIG 0x04
@@ -37,6 +38,15 @@ void __init scu_enable(void __iomem *scu_base)
37{ 38{
38 u32 scu_ctrl; 39 u32 scu_ctrl;
39 40
41#ifdef CONFIG_ARM_ERRATA_764369
42 /* Cortex-A9 only */
43 if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
44 scu_ctrl = __raw_readl(scu_base + 0x30);
45 if (!(scu_ctrl & 1))
46 __raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
47 }
48#endif
49
40 scu_ctrl = __raw_readl(scu_base + SCU_CTRL); 50 scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
41 /* already enabled? */ 51 /* already enabled? */
42 if (scu_ctrl & 1) 52 if (scu_ctrl & 1)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index bf977f8514f6..4e66f62b8d41 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -23,8 +23,10 @@
23 23
24#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK) 24#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
25#define ARM_EXIT_KEEP(x) x 25#define ARM_EXIT_KEEP(x) x
26#define ARM_EXIT_DISCARD(x)
26#else 27#else
27#define ARM_EXIT_KEEP(x) 28#define ARM_EXIT_KEEP(x)
29#define ARM_EXIT_DISCARD(x) x
28#endif 30#endif
29 31
30OUTPUT_ARCH(arm) 32OUTPUT_ARCH(arm)
@@ -39,6 +41,11 @@ jiffies = jiffies_64 + 4;
39SECTIONS 41SECTIONS
40{ 42{
41 /* 43 /*
44 * XXX: The linker does not define how output sections are
45 * assigned to input sections when there are multiple statements
46 * matching the same input section name. There is no documented
47 * order of matching.
48 *
42 * unwind exit sections must be discarded before the rest of the 49 * unwind exit sections must be discarded before the rest of the
43 * unwind sections get included. 50 * unwind sections get included.
44 */ 51 */
@@ -47,6 +54,9 @@ SECTIONS
47 *(.ARM.extab.exit.text) 54 *(.ARM.extab.exit.text)
48 ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) 55 ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
49 ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) 56 ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
57 ARM_EXIT_DISCARD(EXIT_TEXT)
58 ARM_EXIT_DISCARD(EXIT_DATA)
59 EXIT_CALL
50#ifndef CONFIG_HOTPLUG 60#ifndef CONFIG_HOTPLUG
51 *(.ARM.exidx.devexit.text) 61 *(.ARM.exidx.devexit.text)
52 *(.ARM.extab.devexit.text) 62 *(.ARM.extab.devexit.text)
@@ -58,6 +68,8 @@ SECTIONS
58#ifndef CONFIG_SMP_ON_UP 68#ifndef CONFIG_SMP_ON_UP
59 *(.alt.smp.init) 69 *(.alt.smp.init)
60#endif 70#endif
71 *(.discard)
72 *(.discard.*)
61 } 73 }
62 74
63#ifdef CONFIG_XIP_KERNEL 75#ifdef CONFIG_XIP_KERNEL
@@ -279,9 +291,6 @@ SECTIONS
279 291
280 STABS_DEBUG 292 STABS_DEBUG
281 .comment 0 : { *(.comment) } 293 .comment 0 : { *(.comment) }
282
283 /* Default discards */
284 DISCARDS
285} 294}
286 295
287/* 296/*
diff --git a/arch/arm/mach-exynos4/clock.c b/arch/arm/mach-exynos4/clock.c
index 79d6cd0c8e7b..86964d2e9e1b 100644
--- a/arch/arm/mach-exynos4/clock.c
+++ b/arch/arm/mach-exynos4/clock.c
@@ -899,8 +899,7 @@ static struct clksrc_clk clksrcs[] = {
899 .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 28, .size = 4 }, 899 .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 28, .size = 4 },
900 }, { 900 }, {
901 .clk = { 901 .clk = {
902 .name = "sclk_cam", 902 .name = "sclk_cam0",
903 .devname = "exynos4-fimc.0",
904 .enable = exynos4_clksrc_mask_cam_ctrl, 903 .enable = exynos4_clksrc_mask_cam_ctrl,
905 .ctrlbit = (1 << 16), 904 .ctrlbit = (1 << 16),
906 }, 905 },
@@ -909,8 +908,7 @@ static struct clksrc_clk clksrcs[] = {
909 .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 16, .size = 4 }, 908 .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 16, .size = 4 },
910 }, { 909 }, {
911 .clk = { 910 .clk = {
912 .name = "sclk_cam", 911 .name = "sclk_cam1",
913 .devname = "exynos4-fimc.1",
914 .enable = exynos4_clksrc_mask_cam_ctrl, 912 .enable = exynos4_clksrc_mask_cam_ctrl,
915 .ctrlbit = (1 << 20), 913 .ctrlbit = (1 << 20),
916 }, 914 },
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index a1a7176675b9..38058af48972 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -128,7 +128,7 @@ static int s3c2443_armclk_setrate(struct clk *clk, unsigned long rate)
128 unsigned long clkcon0; 128 unsigned long clkcon0;
129 129
130 clkcon0 = __raw_readl(S3C2443_CLKDIV0); 130 clkcon0 = __raw_readl(S3C2443_CLKDIV0);
131 clkcon0 &= S3C2443_CLKDIV0_ARMDIV_MASK; 131 clkcon0 &= ~S3C2443_CLKDIV0_ARMDIV_MASK;
132 clkcon0 |= val << S3C2443_CLKDIV0_ARMDIV_SHIFT; 132 clkcon0 |= val << S3C2443_CLKDIV0_ARMDIV_SHIFT;
133 __raw_writel(clkcon0, S3C2443_CLKDIV0); 133 __raw_writel(clkcon0, S3C2443_CLKDIV0);
134 } 134 }
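The one-character fix above restores the usual read-modify-write pattern for a register field: clear the field with the inverted mask before OR-ing in the new divider, otherwise stale bits of the old value survive. A generic plain-C sketch of that pattern, using made-up field names rather than the real S3C2443 register layout:

#include <stdint.h>
#include <stdio.h>

#define ARMDIV_SHIFT  9                          /* hypothetical field layout */
#define ARMDIV_MASK   (0xfU << ARMDIV_SHIFT)

static uint32_t set_armdiv(uint32_t clkdiv0, uint32_t val)
{
	clkdiv0 &= ~ARMDIV_MASK;                 /* the bug did "&= ARMDIV_MASK" */
	clkdiv0 |= (val << ARMDIV_SHIFT) & ARMDIV_MASK;
	return clkdiv0;
}

int main(void)
{
	/* old register value is all ones; only the divider field changes */
	printf("0x%08x\n", (unsigned)set_armdiv(0xffffffffu, 0x3));
	return 0;
}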
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index 52a8e607bcc2..f5f8fa89679c 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -815,8 +815,7 @@ static struct clksrc_clk clksrcs[] = {
815 .reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 }, 815 .reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 },
816 }, { 816 }, {
817 .clk = { 817 .clk = {
818 .name = "sclk_cam", 818 .name = "sclk_cam0",
819 .devname = "s5pv210-fimc.0",
820 .enable = s5pv210_clk_mask0_ctrl, 819 .enable = s5pv210_clk_mask0_ctrl,
821 .ctrlbit = (1 << 3), 820 .ctrlbit = (1 << 3),
822 }, 821 },
@@ -825,8 +824,7 @@ static struct clksrc_clk clksrcs[] = {
825 .reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 }, 824 .reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 },
826 }, { 825 }, {
827 .clk = { 826 .clk = {
828 .name = "sclk_cam", 827 .name = "sclk_cam1",
829 .devname = "s5pv210-fimc.1",
830 .enable = s5pv210_clk_mask0_ctrl, 828 .enable = s5pv210_clk_mask0_ctrl,
831 .ctrlbit = (1 << 4), 829 .ctrlbit = (1 << 4),
832 }, 830 },
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 3b24bfa3b828..07c4bc8ea0a4 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -174,6 +174,10 @@ ENTRY(v7_coherent_user_range)
174 dcache_line_size r2, r3 174 dcache_line_size r2, r3
175 sub r3, r2, #1 175 sub r3, r2, #1
176 bic r12, r0, r3 176 bic r12, r0, r3
177#ifdef CONFIG_ARM_ERRATA_764369
178 ALT_SMP(W(dsb))
179 ALT_UP(W(nop))
180#endif
1771: 1811:
178 USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification 182 USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification
179 add r12, r12, r2 183 add r12, r12, r2
@@ -223,6 +227,10 @@ ENTRY(v7_flush_kern_dcache_area)
223 add r1, r0, r1 227 add r1, r0, r1
224 sub r3, r2, #1 228 sub r3, r2, #1
225 bic r0, r0, r3 229 bic r0, r0, r3
230#ifdef CONFIG_ARM_ERRATA_764369
231 ALT_SMP(W(dsb))
232 ALT_UP(W(nop))
233#endif
2261: 2341:
227 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line 235 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
228 add r0, r0, r2 236 add r0, r0, r2
@@ -247,6 +255,10 @@ v7_dma_inv_range:
247 sub r3, r2, #1 255 sub r3, r2, #1
248 tst r0, r3 256 tst r0, r3
249 bic r0, r0, r3 257 bic r0, r0, r3
258#ifdef CONFIG_ARM_ERRATA_764369
259 ALT_SMP(W(dsb))
260 ALT_UP(W(nop))
261#endif
250 mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line 262 mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
251 263
252 tst r1, r3 264 tst r1, r3
@@ -270,6 +282,10 @@ v7_dma_clean_range:
270 dcache_line_size r2, r3 282 dcache_line_size r2, r3
271 sub r3, r2, #1 283 sub r3, r2, #1
272 bic r0, r0, r3 284 bic r0, r0, r3
285#ifdef CONFIG_ARM_ERRATA_764369
286 ALT_SMP(W(dsb))
287 ALT_UP(W(nop))
288#endif
2731: 2891:
274 mcr p15, 0, r0, c7, c10, 1 @ clean D / U line 290 mcr p15, 0, r0, c7, c10, 1 @ clean D / U line
275 add r0, r0, r2 291 add r0, r0, r2
@@ -288,6 +304,10 @@ ENTRY(v7_dma_flush_range)
288 dcache_line_size r2, r3 304 dcache_line_size r2, r3
289 sub r3, r2, #1 305 sub r3, r2, #1
290 bic r0, r0, r3 306 bic r0, r0, r3
307#ifdef CONFIG_ARM_ERRATA_764369
308 ALT_SMP(W(dsb))
309 ALT_UP(W(nop))
310#endif
2911: 3111:
292 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line 312 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
293 add r0, r0, r2 313 add r0, r0, r2
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0a0a1e7c20d2..c3ff82f92d9c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -324,6 +324,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
324 324
325 if (addr) 325 if (addr)
326 *handle = pfn_to_dma(dev, page_to_pfn(page)); 326 *handle = pfn_to_dma(dev, page_to_pfn(page));
327 else
328 __dma_free_buffer(page, size);
327 329
328 return addr; 330 return addr;
329} 331}
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index f71078ef6bb5..f88216d23991 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -114,17 +114,18 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
114{ 114{
115 static int used_gpioint_groups = 0; 115 static int used_gpioint_groups = 0;
116 int group = chip->group; 116 int group = chip->group;
117 struct s5p_gpioint_bank *bank = NULL; 117 struct s5p_gpioint_bank *b, *bank = NULL;
118 struct irq_chip_generic *gc; 118 struct irq_chip_generic *gc;
119 struct irq_chip_type *ct; 119 struct irq_chip_type *ct;
120 120
121 if (used_gpioint_groups >= S5P_GPIOINT_GROUP_COUNT) 121 if (used_gpioint_groups >= S5P_GPIOINT_GROUP_COUNT)
122 return -ENOMEM; 122 return -ENOMEM;
123 123
124 list_for_each_entry(bank, &banks, list) { 124 list_for_each_entry(b, &banks, list) {
125 if (group >= bank->start && 125 if (group >= b->start && group < b->start + b->nr_groups) {
126 group < bank->start + bank->nr_groups) 126 bank = b;
127 break; 127 break;
128 }
128 } 129 }
129 if (!bank) 130 if (!bank)
130 return -EINVAL; 131 return -EINVAL;
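The fix above stops using the list_for_each_entry() cursor itself as the search result: when that loop runs to completion without a break, the cursor does not point at a real entry, so the old "if (!bank)" check could never fire. The match is now recorded in a separate variable. A plain-C sketch of the same lookup idiom, simplified and without the kernel's list helpers:

#include <stddef.h>
#include <stdio.h>

struct bank {
	int start;
	int nr_groups;
	struct bank *next;
};

static struct bank *find_bank(struct bank *head, int group)
{
	struct bank *b, *found = NULL;

	for (b = head; b != NULL; b = b->next) {
		if (group >= b->start && group < b->start + b->nr_groups) {
			found = b;               /* remember the hit explicitly */
			break;
		}
	}
	return found;                            /* NULL means "no bank covers it" */
}

int main(void)
{
	struct bank b1 = { .start = 0, .nr_groups = 8, .next = NULL };
	struct bank b0 = { .start = 8, .nr_groups = 8, .next = &b1 };

	printf("group 3:  %s\n", find_bank(&b0, 3)  ? "found" : "not found");
	printf("group 99: %s\n", find_bank(&b0, 99) ? "found" : "not found");
	return 0;
}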
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 5cc83851ad06..31a7d3a7ce25 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -561,6 +561,20 @@ static struct pci_ops u4_pcie_pci_ops =
561 .write = u4_pcie_write_config, 561 .write = u4_pcie_write_config,
562}; 562};
563 563
564static void __devinit pmac_pci_fixup_u4_of_node(struct pci_dev *dev)
565{
566 /* Apple's device-tree "hides" the root complex virtual P2P bridge
567 * on U4. However, Linux sees it, causing the PCI <-> OF matching
568 * code to fail to properly match devices below it. This works around
569 * it by setting the node of the bridge to point to the PHB node,
570 * which is not entirely correct but fixes the matching code and
571 * doesn't break anything else. It's also the simplest possible fix.
572 */
573 if (dev->dev.of_node == NULL)
574 dev->dev.of_node = pcibios_get_phb_of_node(dev->bus);
575}
576DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x5b, pmac_pci_fixup_u4_of_node);
577
564#endif /* CONFIG_PPC64 */ 578#endif /* CONFIG_PPC64 */
565 579
566#ifdef CONFIG_PPC32 580#ifdef CONFIG_PPC32
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 64b61bf72e93..547f1a6a35d4 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -188,7 +188,8 @@ extern char elf_platform[];
188#define SET_PERSONALITY(ex) \ 188#define SET_PERSONALITY(ex) \
189do { \ 189do { \
190 if (personality(current->personality) != PER_LINUX32) \ 190 if (personality(current->personality) != PER_LINUX32) \
191 set_personality(PER_LINUX); \ 191 set_personality(PER_LINUX | \
192 (current->personality & ~PER_MASK)); \
192 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ 193 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
193 set_thread_flag(TIF_31BIT); \ 194 set_thread_flag(TIF_31BIT); \
194 else \ 195 else \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 519eb5f187ef..c0cb794bb365 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -658,12 +658,14 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
658 * struct gmap_struct - guest address space 658 * struct gmap_struct - guest address space
659 * @mm: pointer to the parent mm_struct 659 * @mm: pointer to the parent mm_struct
660 * @table: pointer to the page directory 660 * @table: pointer to the page directory
661 * @asce: address space control element for gmap page table
661 * @crst_list: list of all crst tables used in the guest address space 662 * @crst_list: list of all crst tables used in the guest address space
662 */ 663 */
663struct gmap { 664struct gmap {
664 struct list_head list; 665 struct list_head list;
665 struct mm_struct *mm; 666 struct mm_struct *mm;
666 unsigned long *table; 667 unsigned long *table;
668 unsigned long asce;
667 struct list_head crst_list; 669 struct list_head crst_list;
668}; 670};
669 671
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 532fd4322156..2b45591e1582 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -10,6 +10,7 @@
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <asm/vdso.h> 11#include <asm/vdso.h>
12#include <asm/sigp.h> 12#include <asm/sigp.h>
13#include <asm/pgtable.h>
13 14
14/* 15/*
15 * Make sure that the compiler is new enough. We want a compiler that 16 * Make sure that the compiler is new enough. We want a compiler that
@@ -126,6 +127,7 @@ int main(void)
126 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); 127 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
127 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); 128 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
128 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); 129 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
130 DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
129 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); 131 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
130 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); 132 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
131 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); 133 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
@@ -151,6 +153,7 @@ int main(void)
151 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 153 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
152 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 154 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
153 DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp)); 155 DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
156 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
154#endif /* CONFIG_32BIT */ 157#endif /* CONFIG_32BIT */
155 return 0; 158 return 0;
156} 159}
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 5f729d627cef..713da0760538 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -1076,6 +1076,11 @@ sie_loop:
1076 lg %r14,__LC_THREAD_INFO # pointer thread_info struct 1076 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
1077 tm __TI_flags+7(%r14),_TIF_EXIT_SIE 1077 tm __TI_flags+7(%r14),_TIF_EXIT_SIE
1078 jnz sie_exit 1078 jnz sie_exit
1079 lg %r14,__LC_GMAP # get gmap pointer
1080 ltgr %r14,%r14
1081 jz sie_gmap
1082 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
1083sie_gmap:
1079 lg %r14,__SF_EMPTY(%r15) # get control block pointer 1084 lg %r14,__SF_EMPTY(%r15) # get control block pointer
1080 SPP __SF_EMPTY(%r15) # set guest id 1085 SPP __SF_EMPTY(%r15) # set guest id
1081 sie 0(%r14) 1086 sie 0(%r14)
@@ -1083,6 +1088,7 @@ sie_done:
1083 SPP __LC_CMF_HPP # set host id 1088 SPP __LC_CMF_HPP # set host id
1084 lg %r14,__LC_THREAD_INFO # pointer thread_info struct 1089 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
1085sie_exit: 1090sie_exit:
1091 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1086 ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) 1092 ni __TI_flags+6(%r14),255-(_TIF_SIE>>8)
1087 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area 1093 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
1088 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 1094 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f17296e4fc89..dc2b580e27bc 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -123,6 +123,7 @@ int kvm_dev_ioctl_check_extension(long ext)
123 123
124 switch (ext) { 124 switch (ext) {
125 case KVM_CAP_S390_PSW: 125 case KVM_CAP_S390_PSW:
126 case KVM_CAP_S390_GMAP:
126 r = 1; 127 r = 1;
127 break; 128 break;
128 default: 129 default:
@@ -263,10 +264,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
263 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK; 264 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
264 restore_fp_regs(&vcpu->arch.guest_fpregs); 265 restore_fp_regs(&vcpu->arch.guest_fpregs);
265 restore_access_regs(vcpu->arch.guest_acrs); 266 restore_access_regs(vcpu->arch.guest_acrs);
267 gmap_enable(vcpu->arch.gmap);
266} 268}
267 269
268void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 270void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
269{ 271{
272 gmap_disable(vcpu->arch.gmap);
270 save_fp_regs(&vcpu->arch.guest_fpregs); 273 save_fp_regs(&vcpu->arch.guest_fpregs);
271 save_access_regs(vcpu->arch.guest_acrs); 274 save_access_regs(vcpu->arch.guest_acrs);
272 restore_fp_regs(&vcpu->arch.host_fpregs); 275 restore_fp_regs(&vcpu->arch.host_fpregs);
@@ -461,7 +464,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
461 local_irq_disable(); 464 local_irq_disable();
462 kvm_guest_enter(); 465 kvm_guest_enter();
463 local_irq_enable(); 466 local_irq_enable();
464 gmap_enable(vcpu->arch.gmap);
465 VCPU_EVENT(vcpu, 6, "entering sie flags %x", 467 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
466 atomic_read(&vcpu->arch.sie_block->cpuflags)); 468 atomic_read(&vcpu->arch.sie_block->cpuflags));
467 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) { 469 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -470,7 +472,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
470 } 472 }
471 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 473 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
472 vcpu->arch.sie_block->icptcode); 474 vcpu->arch.sie_block->icptcode);
473 gmap_disable(vcpu->arch.gmap);
474 local_irq_disable(); 475 local_irq_disable();
475 kvm_guest_exit(); 476 kvm_guest_exit();
476 local_irq_enable(); 477 local_irq_enable();
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4d1f2bce87b3..5d56c2b95b14 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -160,6 +160,8 @@ struct gmap *gmap_alloc(struct mm_struct *mm)
160 table = (unsigned long *) page_to_phys(page); 160 table = (unsigned long *) page_to_phys(page);
161 crst_table_init(table, _REGION1_ENTRY_EMPTY); 161 crst_table_init(table, _REGION1_ENTRY_EMPTY);
162 gmap->table = table; 162 gmap->table = table;
163 gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
164 _ASCE_USER_BITS | __pa(table);
163 list_add(&gmap->list, &mm->context.gmap_list); 165 list_add(&gmap->list, &mm->context.gmap_list);
164 return gmap; 166 return gmap;
165 167
@@ -240,10 +242,6 @@ EXPORT_SYMBOL_GPL(gmap_free);
240 */ 242 */
241void gmap_enable(struct gmap *gmap) 243void gmap_enable(struct gmap *gmap)
242{ 244{
243 /* Load primary space page table origin. */
244 S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
245 _ASCE_USER_BITS | __pa(gmap->table);
246 asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
247 S390_lowcore.gmap = (unsigned long) gmap; 245 S390_lowcore.gmap = (unsigned long) gmap;
248} 246}
249EXPORT_SYMBOL_GPL(gmap_enable); 247EXPORT_SYMBOL_GPL(gmap_enable);
@@ -254,10 +252,6 @@ EXPORT_SYMBOL_GPL(gmap_enable);
254 */ 252 */
255void gmap_disable(struct gmap *gmap) 253void gmap_disable(struct gmap *gmap)
256{ 254{
257 /* Load primary space page table origin. */
258 S390_lowcore.user_asce =
259 gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
260 asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
261 S390_lowcore.gmap = 0UL; 255 S390_lowcore.gmap = 0UL;
262} 256}
263EXPORT_SYMBOL_GPL(gmap_disable); 257EXPORT_SYMBOL_GPL(gmap_disable);
@@ -309,15 +303,15 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
309 /* Walk the guest addr space page table */ 303 /* Walk the guest addr space page table */
310 table = gmap->table + (((to + off) >> 53) & 0x7ff); 304 table = gmap->table + (((to + off) >> 53) & 0x7ff);
311 if (*table & _REGION_ENTRY_INV) 305 if (*table & _REGION_ENTRY_INV)
312 return 0; 306 goto out;
313 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 307 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
314 table = table + (((to + off) >> 42) & 0x7ff); 308 table = table + (((to + off) >> 42) & 0x7ff);
315 if (*table & _REGION_ENTRY_INV) 309 if (*table & _REGION_ENTRY_INV)
316 return 0; 310 goto out;
317 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 311 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
318 table = table + (((to + off) >> 31) & 0x7ff); 312 table = table + (((to + off) >> 31) & 0x7ff);
319 if (*table & _REGION_ENTRY_INV) 313 if (*table & _REGION_ENTRY_INV)
320 return 0; 314 goto out;
321 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 315 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
322 table = table + (((to + off) >> 20) & 0x7ff); 316 table = table + (((to + off) >> 20) & 0x7ff);
323 317
@@ -325,6 +319,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
325 flush |= gmap_unlink_segment(gmap, table); 319 flush |= gmap_unlink_segment(gmap, table);
326 *table = _SEGMENT_ENTRY_INV; 320 *table = _SEGMENT_ENTRY_INV;
327 } 321 }
322out:
328 up_read(&gmap->mm->mmap_sem); 323 up_read(&gmap->mm->mmap_sem);
329 if (flush) 324 if (flush)
330 gmap_flush_tlb(gmap); 325 gmap_flush_tlb(gmap);
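The gmap_unmap_segment() hunk above turns the early "return 0" statements into "goto out" so the mmap_sem read lock taken earlier in the function is always dropped, whichever path bails out of the page-table walk. A minimal userspace sketch of that single-exit cleanup pattern, using a pthreads lock in place of mmap_sem (not the s390 code itself):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t maplock = PTHREAD_RWLOCK_INITIALIZER;
static const int table[4] = { 10, 20, 30, 40 };

static int lookup(int idx)
{
	int ret = -1;

	pthread_rwlock_rdlock(&maplock);
	if (idx < 0 || idx >= 4)
		goto out;                        /* early exit still unlocks below */
	ret = table[idx];
out:
	pthread_rwlock_unlock(&maplock);
	return ret;
}

int main(void)
{
	printf("%d %d\n", lookup(2), lookup(9));
	return 0;
}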
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index 55a17c6efeb8..d06a26601753 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -43,6 +43,8 @@
43#define SUN4V_CHIP_NIAGARA1 0x01 43#define SUN4V_CHIP_NIAGARA1 0x01
44#define SUN4V_CHIP_NIAGARA2 0x02 44#define SUN4V_CHIP_NIAGARA2 0x02
45#define SUN4V_CHIP_NIAGARA3 0x03 45#define SUN4V_CHIP_NIAGARA3 0x03
46#define SUN4V_CHIP_NIAGARA4 0x04
47#define SUN4V_CHIP_NIAGARA5 0x05
46#define SUN4V_CHIP_UNKNOWN 0xff 48#define SUN4V_CHIP_UNKNOWN 0xff
47 49
48#ifndef __ASSEMBLY__ 50#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/xor_64.h b/arch/sparc/include/asm/xor_64.h
index 9ed6ff679ab7..ee8edc68423e 100644
--- a/arch/sparc/include/asm/xor_64.h
+++ b/arch/sparc/include/asm/xor_64.h
@@ -66,6 +66,8 @@ static struct xor_block_template xor_block_niagara = {
66 ((tlb_type == hypervisor && \ 66 ((tlb_type == hypervisor && \
67 (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \ 67 (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \
68 sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \ 68 sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \
69 sun4v_chip_type == SUN4V_CHIP_NIAGARA3)) ? \ 69 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || \
70 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || \
71 sun4v_chip_type == SUN4V_CHIP_NIAGARA5)) ? \
70 &xor_block_niagara : \ 72 &xor_block_niagara : \
71 &xor_block_VIS) 73 &xor_block_VIS)
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 9810fd881058..ba9b1cec4e6b 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -481,6 +481,18 @@ static void __init sun4v_cpu_probe(void)
481 sparc_pmu_type = "niagara3"; 481 sparc_pmu_type = "niagara3";
482 break; 482 break;
483 483
484 case SUN4V_CHIP_NIAGARA4:
485 sparc_cpu_type = "UltraSparc T4 (Niagara4)";
486 sparc_fpu_type = "UltraSparc T4 integrated FPU";
487 sparc_pmu_type = "niagara4";
488 break;
489
490 case SUN4V_CHIP_NIAGARA5:
491 sparc_cpu_type = "UltraSparc T5 (Niagara5)";
492 sparc_fpu_type = "UltraSparc T5 integrated FPU";
493 sparc_pmu_type = "niagara5";
494 break;
495
484 default: 496 default:
485 printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", 497 printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
486 prom_cpu_compatible); 498 prom_cpu_compatible);
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index 4197e8d62d4c..9323eafccb93 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -325,6 +325,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
325 case SUN4V_CHIP_NIAGARA1: 325 case SUN4V_CHIP_NIAGARA1:
326 case SUN4V_CHIP_NIAGARA2: 326 case SUN4V_CHIP_NIAGARA2:
327 case SUN4V_CHIP_NIAGARA3: 327 case SUN4V_CHIP_NIAGARA3:
328 case SUN4V_CHIP_NIAGARA4:
329 case SUN4V_CHIP_NIAGARA5:
328 rover_inc_table = niagara_iterate_method; 330 rover_inc_table = niagara_iterate_method;
329 break; 331 break;
330 default: 332 default:
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 0eac1b2fc53d..0d810c2f1d00 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -133,7 +133,7 @@ prom_sun4v_name:
133prom_niagara_prefix: 133prom_niagara_prefix:
134 .asciz "SUNW,UltraSPARC-T" 134 .asciz "SUNW,UltraSPARC-T"
135prom_sparc_prefix: 135prom_sparc_prefix:
136 .asciz "SPARC-T" 136 .asciz "SPARC-"
137 .align 4 137 .align 4
138prom_root_compatible: 138prom_root_compatible:
139 .skip 64 139 .skip 64
@@ -396,7 +396,7 @@ sun4v_chip_type:
396 or %g1, %lo(prom_cpu_compatible), %g1 396 or %g1, %lo(prom_cpu_compatible), %g1
397 sethi %hi(prom_sparc_prefix), %g7 397 sethi %hi(prom_sparc_prefix), %g7
398 or %g7, %lo(prom_sparc_prefix), %g7 398 or %g7, %lo(prom_sparc_prefix), %g7
399 mov 7, %g3 399 mov 6, %g3
40090: ldub [%g7], %g2 40090: ldub [%g7], %g2
401 ldub [%g1], %g4 401 ldub [%g1], %g4
402 cmp %g2, %g4 402 cmp %g2, %g4
@@ -408,10 +408,23 @@ sun4v_chip_type:
408 408
409 sethi %hi(prom_cpu_compatible), %g1 409 sethi %hi(prom_cpu_compatible), %g1
410 or %g1, %lo(prom_cpu_compatible), %g1 410 or %g1, %lo(prom_cpu_compatible), %g1
411 ldub [%g1 + 7], %g2 411 ldub [%g1 + 6], %g2
412 cmp %g2, 'T'
413 be,pt %xcc, 70f
414 cmp %g2, 'M'
415 bne,pn %xcc, 4f
416 nop
417
41870: ldub [%g1 + 7], %g2
412 cmp %g2, '3' 419 cmp %g2, '3'
413 be,pt %xcc, 5f 420 be,pt %xcc, 5f
414 mov SUN4V_CHIP_NIAGARA3, %g4 421 mov SUN4V_CHIP_NIAGARA3, %g4
422 cmp %g2, '4'
423 be,pt %xcc, 5f
424 mov SUN4V_CHIP_NIAGARA4, %g4
425 cmp %g2, '5'
426 be,pt %xcc, 5f
427 mov SUN4V_CHIP_NIAGARA5, %g4
415 ba,pt %xcc, 4f 428 ba,pt %xcc, 4f
416 nop 429 nop
417 430
@@ -545,6 +558,12 @@ niagara_tlb_fixup:
545 cmp %g1, SUN4V_CHIP_NIAGARA3 558 cmp %g1, SUN4V_CHIP_NIAGARA3
546 be,pt %xcc, niagara2_patch 559 be,pt %xcc, niagara2_patch
547 nop 560 nop
561 cmp %g1, SUN4V_CHIP_NIAGARA4
562 be,pt %xcc, niagara2_patch
563 nop
564 cmp %g1, SUN4V_CHIP_NIAGARA5
565 be,pt %xcc, niagara2_patch
566 nop
548 567
549 call generic_patch_copyops 568 call generic_patch_copyops
550 nop 569 nop
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index c8cc461ff75f..f793742eec2b 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -380,8 +380,7 @@ void flush_thread(void)
380#endif 380#endif
381 } 381 }
382 382
383 /* Now, this task is no longer a kernel thread. */ 383 /* This task is no longer a kernel thread. */
384 current->thread.current_ds = USER_DS;
385 if (current->thread.flags & SPARC_FLAG_KTHREAD) { 384 if (current->thread.flags & SPARC_FLAG_KTHREAD) {
386 current->thread.flags &= ~SPARC_FLAG_KTHREAD; 385 current->thread.flags &= ~SPARC_FLAG_KTHREAD;
387 386
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index c158a95ec664..d959cd0a4aa4 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -368,9 +368,6 @@ void flush_thread(void)
368 368
369 /* Clear FPU register state. */ 369 /* Clear FPU register state. */
370 t->fpsaved[0] = 0; 370 t->fpsaved[0] = 0;
371
372 if (get_thread_current_ds() != ASI_AIUS)
373 set_fs(USER_DS);
374} 371}
375 372
376/* It's a bit more tricky when 64-bit tasks are involved... */ 373/* It's a bit more tricky when 64-bit tasks are involved... */
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index d26e1f6c717a..3e3e2914c70b 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -137,7 +137,7 @@ static void __init process_switch(char c)
137 prom_halt(); 137 prom_halt();
138 break; 138 break;
139 case 'p': 139 case 'p':
140 /* Just ignore, this behavior is now the default. */ 140 prom_early_console.flags &= ~CON_BOOT;
141 break; 141 break;
142 default: 142 default:
143 printk("Unknown boot switch (-%c)\n", c); 143 printk("Unknown boot switch (-%c)\n", c);
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 3c5bb784214f..c965595aa7e9 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -106,7 +106,7 @@ static void __init process_switch(char c)
106 prom_halt(); 106 prom_halt();
107 break; 107 break;
108 case 'p': 108 case 'p':
109 /* Just ignore, this behavior is now the default. */ 109 prom_early_console.flags &= ~CON_BOOT;
110 break; 110 break;
111 case 'P': 111 case 'P':
112 /* Force UltraSPARC-III P-Cache on. */ 112 /* Force UltraSPARC-III P-Cache on. */
@@ -425,10 +425,14 @@ static void __init init_sparc64_elf_hwcap(void)
425 else if (tlb_type == hypervisor) { 425 else if (tlb_type == hypervisor) {
426 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || 426 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
427 sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 427 sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
428 sun4v_chip_type == SUN4V_CHIP_NIAGARA3) 428 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
429 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
430 sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
429 cap |= HWCAP_SPARC_BLKINIT; 431 cap |= HWCAP_SPARC_BLKINIT;
430 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 432 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
431 sun4v_chip_type == SUN4V_CHIP_NIAGARA3) 433 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
434 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
435 sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
432 cap |= HWCAP_SPARC_N2; 436 cap |= HWCAP_SPARC_N2;
433 } 437 }
434 438
@@ -452,11 +456,15 @@ static void __init init_sparc64_elf_hwcap(void)
452 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1) 456 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
453 cap |= AV_SPARC_ASI_BLK_INIT; 457 cap |= AV_SPARC_ASI_BLK_INIT;
454 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 458 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
455 sun4v_chip_type == SUN4V_CHIP_NIAGARA3) 459 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
460 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
461 sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
456 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | 462 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
457 AV_SPARC_ASI_BLK_INIT | 463 AV_SPARC_ASI_BLK_INIT |
458 AV_SPARC_POPC); 464 AV_SPARC_POPC);
459 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3) 465 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
466 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
467 sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
460 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | 468 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
461 AV_SPARC_FMAF); 469 AV_SPARC_FMAF);
462 } 470 }
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 581531dbc8b5..8e073d802139 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -511,6 +511,11 @@ static void __init read_obp_translations(void)
511 for (i = 0; i < prom_trans_ents; i++) 511 for (i = 0; i < prom_trans_ents; i++)
512 prom_trans[i].data &= ~0x0003fe0000000000UL; 512 prom_trans[i].data &= ~0x0003fe0000000000UL;
513 } 513 }
514
515 /* Force execute bit on. */
516 for (i = 0; i < prom_trans_ents; i++)
517 prom_trans[i].data |= (tlb_type == hypervisor ?
518 _PAGE_EXEC_4V : _PAGE_EXEC_4U);
514} 519}
515 520
516static void __init hypervisor_tlb_lock(unsigned long vaddr, 521static void __init hypervisor_tlb_lock(unsigned long vaddr,
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 3f2ad2640d85..ccdbc16b8941 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -42,8 +42,11 @@ int mach_set_rtc_mmss(unsigned long nowtime)
42{ 42{
43 int real_seconds, real_minutes, cmos_minutes; 43 int real_seconds, real_minutes, cmos_minutes;
44 unsigned char save_control, save_freq_select; 44 unsigned char save_control, save_freq_select;
45 unsigned long flags;
45 int retval = 0; 46 int retval = 0;
46 47
48 spin_lock_irqsave(&rtc_lock, flags);
49
47 /* tell the clock it's being set */ 50 /* tell the clock it's being set */
48 save_control = CMOS_READ(RTC_CONTROL); 51 save_control = CMOS_READ(RTC_CONTROL);
49 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); 52 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
@@ -93,12 +96,17 @@ int mach_set_rtc_mmss(unsigned long nowtime)
93 CMOS_WRITE(save_control, RTC_CONTROL); 96 CMOS_WRITE(save_control, RTC_CONTROL);
94 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 97 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
95 98
99 spin_unlock_irqrestore(&rtc_lock, flags);
100
96 return retval; 101 return retval;
97} 102}
98 103
99unsigned long mach_get_cmos_time(void) 104unsigned long mach_get_cmos_time(void)
100{ 105{
101 unsigned int status, year, mon, day, hour, min, sec, century = 0; 106 unsigned int status, year, mon, day, hour, min, sec, century = 0;
107 unsigned long flags;
108
109 spin_lock_irqsave(&rtc_lock, flags);
102 110
103 /* 111 /*
104 * If UIP is clear, then we have >= 244 microseconds before 112 * If UIP is clear, then we have >= 244 microseconds before
@@ -125,6 +133,8 @@ unsigned long mach_get_cmos_time(void)
125 status = CMOS_READ(RTC_CONTROL); 133 status = CMOS_READ(RTC_CONTROL);
126 WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); 134 WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY));
127 135
136 spin_unlock_irqrestore(&rtc_lock, flags);
137
128 if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) { 138 if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) {
129 sec = bcd2bin(sec); 139 sec = bcd2bin(sec);
130 min = bcd2bin(min); 140 min = bcd2bin(min);
@@ -169,24 +179,15 @@ EXPORT_SYMBOL(rtc_cmos_write);
169 179
170int update_persistent_clock(struct timespec now) 180int update_persistent_clock(struct timespec now)
171{ 181{
172 unsigned long flags; 182 return x86_platform.set_wallclock(now.tv_sec);
173 int retval;
174
175 spin_lock_irqsave(&rtc_lock, flags);
176 retval = x86_platform.set_wallclock(now.tv_sec);
177 spin_unlock_irqrestore(&rtc_lock, flags);
178
179 return retval;
180} 183}
181 184
182/* not static: needed by APM */ 185/* not static: needed by APM */
183void read_persistent_clock(struct timespec *ts) 186void read_persistent_clock(struct timespec *ts)
184{ 187{
185 unsigned long retval, flags; 188 unsigned long retval;
186 189
187 spin_lock_irqsave(&rtc_lock, flags);
188 retval = x86_platform.get_wallclock(); 190 retval = x86_platform.get_wallclock();
189 spin_unlock_irqrestore(&rtc_lock, flags);
190 191
191 ts->tv_sec = retval; 192 ts->tv_sec = retval;
192 ts->tv_nsec = 0; 193 ts->tv_nsec = 0;
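
The rtc.c hunks above push the rtc_lock acquisition out of the generic update_persistent_clock()/read_persistent_clock() wrappers and into the CMOS accessors themselves, so each x86_platform wallclock hook now does its own locking (the mrst vrtc change further down follows the same pattern). A minimal userspace sketch of that split, with a pthread mutex standing in for rtc_lock and invented function names, not the kernel code itself:

/*
 * Illustrative sketch only: the generic wrapper no longer takes the
 * lock; the platform hook guards its own "hardware" access.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fake_rtc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long fake_cmos_seconds = 1318000000UL;

/* Stand-in for x86_platform.get_wallclock(): locks around the access. */
static unsigned long platform_get_wallclock(void)
{
        unsigned long t;

        pthread_mutex_lock(&fake_rtc_lock);
        t = fake_cmos_seconds;          /* pretend CMOS_READ() sequence */
        pthread_mutex_unlock(&fake_rtc_lock);
        return t;
}

/* Stand-in for read_persistent_clock(): no locking here any more. */
static void generic_read_persistent_clock(unsigned long *sec)
{
        *sec = platform_get_wallclock();
}

int main(void)
{
        unsigned long sec;

        generic_read_persistent_clock(&sec);
        printf("wallclock: %lu\n", sec);
        return 0;
}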
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6f08bc940fa8..8b4cc5f067de 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3603,7 +3603,7 @@ done_prefixes:
3603 break; 3603 break;
3604 case Src2CL: 3604 case Src2CL:
3605 ctxt->src2.bytes = 1; 3605 ctxt->src2.bytes = 1;
3606 ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0x8; 3606 ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
3607 break; 3607 break;
3608 case Src2ImmByte: 3608 case Src2ImmByte:
3609 rc = decode_imm(ctxt, &ctxt->src2, 1, true); 3609 rc = decode_imm(ctxt, &ctxt->src2, 1, true);
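
The one-line emulate.c fix above changes the Src2CL mask from 0x8 to 0xff: CL is the whole low byte of RCX, so masking with 0x8 kept only a single bit. A tiny standalone illustration with an arbitrary RCX value:

#include <stdio.h>

int main(void)
{
        unsigned long rcx = 0x1234;

        /* 0x8 isolates one bit; 0xff yields the real CL byte (0x34). */
        printf("old mask: 0x%lx, fixed mask: 0x%lx\n", rcx & 0x8, rcx & 0xff);
        return 0;
}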
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1c5b69373a00..8e8da7960dbe 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -400,7 +400,8 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
400 400
401 /* xchg acts as a barrier before the setting of the high bits */ 401 /* xchg acts as a barrier before the setting of the high bits */
402 orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low); 402 orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
403 orig.spte_high = ssptep->spte_high = sspte.spte_high; 403 orig.spte_high = ssptep->spte_high;
404 ssptep->spte_high = sspte.spte_high;
404 count_spte_clear(sptep, spte); 405 count_spte_clear(sptep, spte);
405 406
406 return orig.spte; 407 return orig.spte;
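
The mmu.c change splits the chained assignment so that orig.spte_high captures what was in the slot before the store, rather than the value being written. A minimal sketch of the difference, using plain ints in place of the spte halves:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        int slot = 0x1111, new_val = 0x2222;
        int orig_wrong, orig_right;

        /* Old form: orig sees the value being written, not what was there. */
        orig_wrong = slot = new_val;

        /* Fixed form: read the old value first, then overwrite. */
        slot = 0x1111;
        orig_right = slot;
        slot = new_val;

        printf("wrong: 0x%x, right: 0x%x\n", orig_wrong, orig_right);
        assert(orig_right == 0x1111 && orig_wrong == 0x2222);
        return 0;
}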
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 039d91315bc5..404f21a3ff9e 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -43,6 +43,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
43 DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"), 43 DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
44 }, 44 },
45 }, 45 },
46 /* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
47 /* 2006 AMD HT/VIA system with two host bridges */
48 {
49 .callback = set_use_crs,
50 .ident = "ASUS M2V-MX SE",
51 .matches = {
52 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
53 DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
54 DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
55 },
56 },
46 {} 57 {}
47}; 58};
48 59
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
index 73d70d65e76e..6d5dbcdd444a 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -58,8 +58,11 @@ EXPORT_SYMBOL_GPL(vrtc_cmos_write);
58unsigned long vrtc_get_time(void) 58unsigned long vrtc_get_time(void)
59{ 59{
60 u8 sec, min, hour, mday, mon; 60 u8 sec, min, hour, mday, mon;
61 unsigned long flags;
61 u32 year; 62 u32 year;
62 63
64 spin_lock_irqsave(&rtc_lock, flags);
65
63 while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP)) 66 while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
64 cpu_relax(); 67 cpu_relax();
65 68
@@ -70,6 +73,8 @@ unsigned long vrtc_get_time(void)
70 mon = vrtc_cmos_read(RTC_MONTH); 73 mon = vrtc_cmos_read(RTC_MONTH);
71 year = vrtc_cmos_read(RTC_YEAR); 74 year = vrtc_cmos_read(RTC_YEAR);
72 75
76 spin_unlock_irqrestore(&rtc_lock, flags);
77
73 /* vRTC YEAR reg contains the offset to 1960 */ 78 /* vRTC YEAR reg contains the offset to 1960 */
74 year += 1960; 79 year += 1960;
75 80
@@ -83,8 +88,10 @@ unsigned long vrtc_get_time(void)
83int vrtc_set_mmss(unsigned long nowtime) 88int vrtc_set_mmss(unsigned long nowtime)
84{ 89{
85 int real_sec, real_min; 90 int real_sec, real_min;
91 unsigned long flags;
86 int vrtc_min; 92 int vrtc_min;
87 93
94 spin_lock_irqsave(&rtc_lock, flags);
88 vrtc_min = vrtc_cmos_read(RTC_MINUTES); 95 vrtc_min = vrtc_cmos_read(RTC_MINUTES);
89 96
90 real_sec = nowtime % 60; 97 real_sec = nowtime % 60;
@@ -95,6 +102,8 @@ int vrtc_set_mmss(unsigned long nowtime)
95 102
96 vrtc_cmos_write(real_sec, RTC_SECONDS); 103 vrtc_cmos_write(real_sec, RTC_SECONDS);
97 vrtc_cmos_write(real_min, RTC_MINUTES); 104 vrtc_cmos_write(real_min, RTC_MINUTES);
105 spin_unlock_irqrestore(&rtc_lock, flags);
106
98 return 0; 107 return 0;
99} 108}
100 109
diff --git a/block/blk-core.c b/block/blk-core.c
index b2ed78afd9f0..d34433ae7917 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -348,9 +348,10 @@ void blk_put_queue(struct request_queue *q)
348EXPORT_SYMBOL(blk_put_queue); 348EXPORT_SYMBOL(blk_put_queue);
349 349
350/* 350/*
351 * Note: If a driver supplied the queue lock, it should not zap that lock 351 * Note: If a driver supplied the queue lock, it is disconnected
352 * unexpectedly as some queue cleanup components like elevator_exit() and 352 * by this function. The actual state of the lock doesn't matter
353 * blk_throtl_exit() need queue lock. 353 * here as the request_queue isn't accessible after this point
354 * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
354 */ 355 */
355void blk_cleanup_queue(struct request_queue *q) 356void blk_cleanup_queue(struct request_queue *q)
356{ 357{
@@ -367,10 +368,8 @@ void blk_cleanup_queue(struct request_queue *q)
367 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); 368 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
368 mutex_unlock(&q->sysfs_lock); 369 mutex_unlock(&q->sysfs_lock);
369 370
370 if (q->elevator) 371 if (q->queue_lock != &q->__queue_lock)
371 elevator_exit(q->elevator); 372 q->queue_lock = &q->__queue_lock;
372
373 blk_throtl_exit(q);
374 373
375 blk_put_queue(q); 374 blk_put_queue(q);
376} 375}
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e681805cdb47..60fda88c57f0 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -479,6 +479,11 @@ static void blk_release_queue(struct kobject *kobj)
479 479
480 blk_sync_queue(q); 480 blk_sync_queue(q);
481 481
482 if (q->elevator)
483 elevator_exit(q->elevator);
484
485 blk_throtl_exit(q);
486
482 if (rl->rq_pool) 487 if (rl->rq_pool)
483 mempool_destroy(rl->rq_pool); 488 mempool_destroy(rl->rq_pool);
484 489
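
Taken together, the blk-core.c and blk-sysfs.c hunks move elevator_exit() and blk_throtl_exit() from the cleanup path into the release callback, so the heavy teardown only runs once the last reference to the queue is dropped. A rough userspace sketch of that refcount-and-release pattern, with invented structure and function names:

#include <stdio.h>
#include <stdlib.h>

struct queue {
        int refcount;
        int dead;
        void (*release)(struct queue *q);
};

static void queue_release(struct queue *q)
{
        /* Where elevator_exit()/blk_throtl_exit() now live in the sketch. */
        printf("releasing per-queue resources\n");
        free(q);
}

static void queue_put(struct queue *q)
{
        if (--q->refcount == 0)
                q->release(q);
}

static void queue_cleanup(struct queue *q)
{
        q->dead = 1;            /* like setting QUEUE_FLAG_DEAD */
        queue_put(q);           /* release runs only when users are gone */
}

int main(void)
{
        struct queue *q = calloc(1, sizeof(*q));

        q->refcount = 2;        /* creator plus one outstanding user */
        q->release = queue_release;

        queue_cleanup(q);       /* nothing freed yet: a user still holds a ref */
        queue_put(q);           /* last reference: release() fires here */
        return 0;
}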
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 2c18d584066d..b97294e2d95b 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -42,6 +42,22 @@ static struct pm_clk_data *__to_pcd(struct device *dev)
42} 42}
43 43
44/** 44/**
45 * pm_clk_acquire - Acquire a device clock.
46 * @dev: Device whose clock is to be acquired.
47 * @ce: PM clock entry corresponding to the clock.
48 */
49static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
50{
51 ce->clk = clk_get(dev, ce->con_id);
52 if (IS_ERR(ce->clk)) {
53 ce->status = PCE_STATUS_ERROR;
54 } else {
55 ce->status = PCE_STATUS_ACQUIRED;
56 dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
57 }
58}
59
60/**
45 * pm_clk_add - Start using a device clock for power management. 61 * pm_clk_add - Start using a device clock for power management.
46 * @dev: Device whose clock is going to be used for power management. 62 * @dev: Device whose clock is going to be used for power management.
47 * @con_id: Connection ID of the clock. 63 * @con_id: Connection ID of the clock.
@@ -73,6 +89,8 @@ int pm_clk_add(struct device *dev, const char *con_id)
73 } 89 }
74 } 90 }
75 91
92 pm_clk_acquire(dev, ce);
93
76 spin_lock_irq(&pcd->lock); 94 spin_lock_irq(&pcd->lock);
77 list_add_tail(&ce->node, &pcd->clock_list); 95 list_add_tail(&ce->node, &pcd->clock_list);
78 spin_unlock_irq(&pcd->lock); 96 spin_unlock_irq(&pcd->lock);
@@ -82,17 +100,12 @@ int pm_clk_add(struct device *dev, const char *con_id)
82/** 100/**
83 * __pm_clk_remove - Destroy PM clock entry. 101 * __pm_clk_remove - Destroy PM clock entry.
84 * @ce: PM clock entry to destroy. 102 * @ce: PM clock entry to destroy.
85 *
86 * This routine must be called under the spinlock protecting the PM list of
87 * clocks corresponding the the @ce's device.
88 */ 103 */
89static void __pm_clk_remove(struct pm_clock_entry *ce) 104static void __pm_clk_remove(struct pm_clock_entry *ce)
90{ 105{
91 if (!ce) 106 if (!ce)
92 return; 107 return;
93 108
94 list_del(&ce->node);
95
96 if (ce->status < PCE_STATUS_ERROR) { 109 if (ce->status < PCE_STATUS_ERROR) {
97 if (ce->status == PCE_STATUS_ENABLED) 110 if (ce->status == PCE_STATUS_ENABLED)
98 clk_disable(ce->clk); 111 clk_disable(ce->clk);
@@ -126,18 +139,22 @@ void pm_clk_remove(struct device *dev, const char *con_id)
126 spin_lock_irq(&pcd->lock); 139 spin_lock_irq(&pcd->lock);
127 140
128 list_for_each_entry(ce, &pcd->clock_list, node) { 141 list_for_each_entry(ce, &pcd->clock_list, node) {
129 if (!con_id && !ce->con_id) { 142 if (!con_id && !ce->con_id)
130 __pm_clk_remove(ce); 143 goto remove;
131 break; 144 else if (!con_id || !ce->con_id)
132 } else if (!con_id || !ce->con_id) {
133 continue; 145 continue;
134 } else if (!strcmp(con_id, ce->con_id)) { 146 else if (!strcmp(con_id, ce->con_id))
135 __pm_clk_remove(ce); 147 goto remove;
136 break;
137 }
138 } 148 }
139 149
140 spin_unlock_irq(&pcd->lock); 150 spin_unlock_irq(&pcd->lock);
151 return;
152
153 remove:
154 list_del(&ce->node);
155 spin_unlock_irq(&pcd->lock);
156
157 __pm_clk_remove(ce);
141} 158}
142 159
143/** 160/**
@@ -175,20 +192,27 @@ void pm_clk_destroy(struct device *dev)
175{ 192{
176 struct pm_clk_data *pcd = __to_pcd(dev); 193 struct pm_clk_data *pcd = __to_pcd(dev);
177 struct pm_clock_entry *ce, *c; 194 struct pm_clock_entry *ce, *c;
195 struct list_head list;
178 196
179 if (!pcd) 197 if (!pcd)
180 return; 198 return;
181 199
182 dev->power.subsys_data = NULL; 200 dev->power.subsys_data = NULL;
201 INIT_LIST_HEAD(&list);
183 202
184 spin_lock_irq(&pcd->lock); 203 spin_lock_irq(&pcd->lock);
185 204
186 list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) 205 list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
187 __pm_clk_remove(ce); 206 list_move(&ce->node, &list);
188 207
189 spin_unlock_irq(&pcd->lock); 208 spin_unlock_irq(&pcd->lock);
190 209
191 kfree(pcd); 210 kfree(pcd);
211
212 list_for_each_entry_safe_reverse(ce, c, &list, node) {
213 list_del(&ce->node);
214 __pm_clk_remove(ce);
215 }
192} 216}
193 217
194#endif /* CONFIG_PM */ 218#endif /* CONFIG_PM */
@@ -196,23 +220,6 @@ void pm_clk_destroy(struct device *dev)
196#ifdef CONFIG_PM_RUNTIME 220#ifdef CONFIG_PM_RUNTIME
197 221
198/** 222/**
199 * pm_clk_acquire - Acquire a device clock.
200 * @dev: Device whose clock is to be acquired.
201 * @con_id: Connection ID of the clock.
202 */
203static void pm_clk_acquire(struct device *dev,
204 struct pm_clock_entry *ce)
205{
206 ce->clk = clk_get(dev, ce->con_id);
207 if (IS_ERR(ce->clk)) {
208 ce->status = PCE_STATUS_ERROR;
209 } else {
210 ce->status = PCE_STATUS_ACQUIRED;
211 dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
212 }
213}
214
215/**
216 * pm_clk_suspend - Disable clocks in a device's PM clock list. 223 * pm_clk_suspend - Disable clocks in a device's PM clock list.
217 * @dev: Device to disable the clocks for. 224 * @dev: Device to disable the clocks for.
218 */ 225 */
@@ -230,9 +237,6 @@ int pm_clk_suspend(struct device *dev)
230 spin_lock_irqsave(&pcd->lock, flags); 237 spin_lock_irqsave(&pcd->lock, flags);
231 238
232 list_for_each_entry_reverse(ce, &pcd->clock_list, node) { 239 list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
233 if (ce->status == PCE_STATUS_NONE)
234 pm_clk_acquire(dev, ce);
235
236 if (ce->status < PCE_STATUS_ERROR) { 240 if (ce->status < PCE_STATUS_ERROR) {
237 clk_disable(ce->clk); 241 clk_disable(ce->clk);
238 ce->status = PCE_STATUS_ACQUIRED; 242 ce->status = PCE_STATUS_ACQUIRED;
@@ -262,9 +266,6 @@ int pm_clk_resume(struct device *dev)
262 spin_lock_irqsave(&pcd->lock, flags); 266 spin_lock_irqsave(&pcd->lock, flags);
263 267
264 list_for_each_entry(ce, &pcd->clock_list, node) { 268 list_for_each_entry(ce, &pcd->clock_list, node) {
265 if (ce->status == PCE_STATUS_NONE)
266 pm_clk_acquire(dev, ce);
267
268 if (ce->status < PCE_STATUS_ERROR) { 269 if (ce->status < PCE_STATUS_ERROR) {
269 clk_enable(ce->clk); 270 clk_enable(ce->clk);
270 ce->status = PCE_STATUS_ENABLED; 271 ce->status = PCE_STATUS_ENABLED;
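
The clock_ops.c rework acquires the clock once in pm_clk_add() and, more importantly, stops destroying entries while the spinlock is held: pm_clk_remove() unlinks under the lock and calls __pm_clk_remove() afterwards, and pm_clk_destroy() moves everything to a private list first. A small userspace sketch of that detach-under-lock, destroy-outside-lock pattern, using a pthread mutex and a hand-rolled list as stand-ins:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
        struct entry *next;
        int id;
};

static struct entry *shared_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void destroy_entry(struct entry *e)
{
        /* Stands in for clk_put()/kfree(), which must not run under the lock. */
        printf("destroying entry %d\n", e->id);
        free(e);
}

static void destroy_all(void)
{
        struct entry *local, *e;

        pthread_mutex_lock(&list_lock);
        local = shared_list;            /* move everything to a private list */
        shared_list = NULL;
        pthread_mutex_unlock(&list_lock);

        while ((e = local)) {           /* teardown happens lock-free */
                local = e->next;
                destroy_entry(e);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct entry *e = malloc(sizeof(*e));
                e->id = i;
                e->next = shared_list;
                shared_list = e;
        }
        destroy_all();
        return 0;
}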
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index f6595aba4f0f..fa567f1158c2 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -43,6 +43,7 @@ config TCG_NSC
43 43
44config TCG_ATMEL 44config TCG_ATMEL
45 tristate "Atmel TPM Interface" 45 tristate "Atmel TPM Interface"
46 depends on PPC64 || HAS_IOPORT
46 ---help--- 47 ---help---
47 If you have a TPM security chip from Atmel say Yes and it 48 If you have a TPM security chip from Atmel say Yes and it
48 will be accessible from within Linux. To compile this driver 49 will be accessible from within Linux. To compile this driver
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index caf8012ef47c..9ca5c021d0b6 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -383,6 +383,9 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
383 u32 count, ordinal; 383 u32 count, ordinal;
384 unsigned long stop; 384 unsigned long stop;
385 385
386 if (bufsiz > TPM_BUFSIZE)
387 bufsiz = TPM_BUFSIZE;
388
386 count = be32_to_cpu(*((__be32 *) (buf + 2))); 389 count = be32_to_cpu(*((__be32 *) (buf + 2)));
387 ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); 390 ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
388 if (count == 0) 391 if (count == 0)
@@ -1102,6 +1105,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
1102{ 1105{
1103 struct tpm_chip *chip = file->private_data; 1106 struct tpm_chip *chip = file->private_data;
1104 ssize_t ret_size; 1107 ssize_t ret_size;
1108 int rc;
1105 1109
1106 del_singleshot_timer_sync(&chip->user_read_timer); 1110 del_singleshot_timer_sync(&chip->user_read_timer);
1107 flush_work_sync(&chip->work); 1111 flush_work_sync(&chip->work);
@@ -1112,8 +1116,11 @@ ssize_t tpm_read(struct file *file, char __user *buf,
1112 ret_size = size; 1116 ret_size = size;
1113 1117
1114 mutex_lock(&chip->buffer_mutex); 1118 mutex_lock(&chip->buffer_mutex);
1115 if (copy_to_user(buf, chip->data_buffer, ret_size)) 1119 rc = copy_to_user(buf, chip->data_buffer, ret_size);
1120 memset(chip->data_buffer, 0, ret_size);
1121 if (rc)
1116 ret_size = -EFAULT; 1122 ret_size = -EFAULT;
1123
1117 mutex_unlock(&chip->buffer_mutex); 1124 mutex_unlock(&chip->buffer_mutex);
1118 } 1125 }
1119 1126
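
The tpm.c hunks clamp the caller-supplied buffer size to the driver's own buffer and wipe the staging buffer after copying it out, whether or not the copy succeeds. A simplified userspace sketch of those two steps, with memcpy() standing in for copy_to_user() and made-up buffer names and sizes:

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 64

static char data_buffer[BUF_SIZE] = "sensitive response bytes";

static long read_response(char *dst, size_t want)
{
        size_t n = want > BUF_SIZE ? BUF_SIZE : want;   /* clamp like bufsiz */

        memcpy(dst, data_buffer, n);                    /* "copy_to_user" */
        memset(data_buffer, 0, n);                      /* scrub unconditionally */
        return (long)n;
}

int main(void)
{
        char out[16];
        long n = read_response(out, sizeof(out));

        printf("copied %ld bytes, buffer[0] now %d\n", n, data_buffer[0]);
        return 0;
}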
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 82facc9104c7..4d2464871ada 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -396,8 +396,6 @@ static void __exit cleanup_nsc(void)
396 if (pdev) { 396 if (pdev) {
397 tpm_nsc_remove(&pdev->dev); 397 tpm_nsc_remove(&pdev->dev);
398 platform_device_unregister(pdev); 398 platform_device_unregister(pdev);
399 kfree(pdev);
400 pdev = NULL;
401 } 399 }
402 400
403 platform_driver_unregister(&nsc_drv); 401 platform_driver_unregister(&nsc_drv);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ce045a8cf82c..f07e4252b708 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
67MODULE_PARM_DESC(i915_enable_rc6, 67MODULE_PARM_DESC(i915_enable_rc6,
68 "Enable power-saving render C-state 6 (default: true)"); 68 "Enable power-saving render C-state 6 (default: true)");
69 69
70unsigned int i915_enable_fbc __read_mostly = 1; 70unsigned int i915_enable_fbc __read_mostly = -1;
71module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); 71module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
72MODULE_PARM_DESC(i915_enable_fbc, 72MODULE_PARM_DESC(i915_enable_fbc,
73 "Enable frame buffer compression for power savings " 73 "Enable frame buffer compression for power savings "
74 "(default: false)"); 74 "(default: -1 (use per-chip default))");
75 75
76unsigned int i915_lvds_downclock __read_mostly = 0; 76unsigned int i915_lvds_downclock __read_mostly = 0;
77module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 77module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56a8554d9039..04411ad2e779 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev)
1799 struct drm_framebuffer *fb; 1799 struct drm_framebuffer *fb;
1800 struct intel_framebuffer *intel_fb; 1800 struct intel_framebuffer *intel_fb;
1801 struct drm_i915_gem_object *obj; 1801 struct drm_i915_gem_object *obj;
1802 int enable_fbc;
1802 1803
1803 DRM_DEBUG_KMS("\n"); 1804 DRM_DEBUG_KMS("\n");
1804 1805
@@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev)
1839 intel_fb = to_intel_framebuffer(fb); 1840 intel_fb = to_intel_framebuffer(fb);
1840 obj = intel_fb->obj; 1841 obj = intel_fb->obj;
1841 1842
1842 if (!i915_enable_fbc) { 1843 enable_fbc = i915_enable_fbc;
1843 DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); 1844 if (enable_fbc < 0) {
1845 DRM_DEBUG_KMS("fbc set to per-chip default\n");
1846 enable_fbc = 1;
1847 if (INTEL_INFO(dev)->gen <= 5)
1848 enable_fbc = 0;
1849 }
1850 if (!enable_fbc) {
1851 DRM_DEBUG_KMS("fbc disabled per module param\n");
1844 dev_priv->no_fbc_reason = FBC_MODULE_PARAM; 1852 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1845 goto out_disable; 1853 goto out_disable;
1846 } 1854 }
@@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4687 bpc = 6; /* min is 18bpp */ 4695 bpc = 6; /* min is 18bpp */
4688 break; 4696 break;
4689 case 24: 4697 case 24:
4690 bpc = min((unsigned int)8, display_bpc); 4698 bpc = 8;
4691 break; 4699 break;
4692 case 30: 4700 case 30:
4693 bpc = min((unsigned int)10, display_bpc); 4701 bpc = 10;
4694 break; 4702 break;
4695 case 48: 4703 case 48:
4696 bpc = min((unsigned int)12, display_bpc); 4704 bpc = 12;
4697 break; 4705 break;
4698 default: 4706 default:
4699 DRM_DEBUG("unsupported depth, assuming 24 bits\n"); 4707 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
@@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4701 break; 4709 break;
4702 } 4710 }
4703 4711
4712 display_bpc = min(display_bpc, bpc);
4713
4704 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", 4714 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
4705 bpc, display_bpc); 4715 bpc, display_bpc);
4706 4716
4707 *pipe_bpp = bpc * 3; 4717 *pipe_bpp = display_bpc * 3;
4708 4718
4709 return display_bpc != bpc; 4719 return display_bpc != bpc;
4710} 4720}
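
The i915 change turns i915_enable_fbc into a tri-state parameter: a negative value means "use the per-chip default", which intel_update_fbc() resolves by hardware generation before the usual on/off check. A compact sketch of that sentinel pattern with a simplified generation rule (the real per-chip policy may differ):

#include <stdio.h>

static int enable_fbc_param = -1;       /* -1 = use per-chip default */

static int resolve_enable_fbc(int gpu_gen)
{
        int enable = enable_fbc_param;

        if (enable < 0)                 /* auto: older generations default off */
                enable = gpu_gen > 5;
        return enable;
}

int main(void)
{
        printf("gen 4: %d, gen 6: %d\n",
               resolve_enable_fbc(4), resolve_enable_fbc(6));
        return 0;
}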
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0b2ee9d39980..fe1099d8817e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
337 struct drm_connector *connector, 337 struct drm_connector *connector,
338 struct intel_load_detect_pipe *old); 338 struct intel_load_detect_pipe *old);
339 339
340extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
341extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
342extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
343extern void intelfb_restore(void); 340extern void intelfb_restore(void);
344extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 341extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
345 u16 blue, int regno); 342 u16 blue, int regno);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 30fe554d8936..6348c499616f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -92,6 +92,11 @@ struct intel_sdvo {
92 */ 92 */
93 uint16_t attached_output; 93 uint16_t attached_output;
94 94
95 /*
96 * Hotplug activation bits for this device
97 */
98 uint8_t hotplug_active[2];
99
95 /** 100 /**
96 * This is used to select the color range of RBG outputs in HDMI mode. 101 * This is used to select the color range of RBG outputs in HDMI mode.
97 * It is only valid when using TMDS encoding and 8 bit per color mode. 102 * It is only valid when using TMDS encoding and 8 bit per color mode.
@@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
1208 return true; 1213 return true;
1209} 1214}
1210 1215
1211/* No use! */ 1216static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
1212#if 0
1213struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
1214{
1215 struct drm_connector *connector = NULL;
1216 struct intel_sdvo *iout = NULL;
1217 struct intel_sdvo *sdvo;
1218
1219 /* find the sdvo connector */
1220 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1221 iout = to_intel_sdvo(connector);
1222
1223 if (iout->type != INTEL_OUTPUT_SDVO)
1224 continue;
1225
1226 sdvo = iout->dev_priv;
1227
1228 if (sdvo->sdvo_reg == SDVOB && sdvoB)
1229 return connector;
1230
1231 if (sdvo->sdvo_reg == SDVOC && !sdvoB)
1232 return connector;
1233
1234 }
1235
1236 return NULL;
1237}
1238
1239int intel_sdvo_supports_hotplug(struct drm_connector *connector)
1240{ 1217{
1241 u8 response[2]; 1218 u8 response[2];
1242 u8 status;
1243 struct intel_sdvo *intel_sdvo;
1244 DRM_DEBUG_KMS("\n");
1245
1246 if (!connector)
1247 return 0;
1248
1249 intel_sdvo = to_intel_sdvo(connector);
1250 1219
1251 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, 1220 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
1252 &response, 2) && response[0]; 1221 &response, 2) && response[0];
1253} 1222}
1254 1223
1255void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) 1224static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
1256{ 1225{
1257 u8 response[2]; 1226 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1258 u8 status;
1259 struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
1260
1261 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1262 intel_sdvo_read_response(intel_sdvo, &response, 2);
1263
1264 if (on) {
1265 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
1266 status = intel_sdvo_read_response(intel_sdvo, &response, 2);
1267
1268 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1269 } else {
1270 response[0] = 0;
1271 response[1] = 0;
1272 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1273 }
1274 1227
1275 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1228 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
1276 intel_sdvo_read_response(intel_sdvo, &response, 2);
1277} 1229}
1278#endif
1279 1230
1280static bool 1231static bool
1281intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) 1232intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
@@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2045{ 1996{
2046 struct drm_encoder *encoder = &intel_sdvo->base.base; 1997 struct drm_encoder *encoder = &intel_sdvo->base.base;
2047 struct drm_connector *connector; 1998 struct drm_connector *connector;
1999 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2048 struct intel_connector *intel_connector; 2000 struct intel_connector *intel_connector;
2049 struct intel_sdvo_connector *intel_sdvo_connector; 2001 struct intel_sdvo_connector *intel_sdvo_connector;
2050 2002
@@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2062 2014
2063 intel_connector = &intel_sdvo_connector->base; 2015 intel_connector = &intel_sdvo_connector->base;
2064 connector = &intel_connector->base; 2016 connector = &intel_connector->base;
2065 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; 2017 if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
2018 connector->polled = DRM_CONNECTOR_POLL_HPD;
2019 intel_sdvo->hotplug_active[0] |= 1 << device;
2020 /* Some SDVO devices have one-shot hotplug interrupts.
2021 * Ensure that they get re-enabled when an interrupt happens.
2022 */
2023 intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
2024 intel_sdvo_enable_hotplug(intel_encoder);
2025 }
2026 else
2027 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
2066 encoder->encoder_type = DRM_MODE_ENCODER_TMDS; 2028 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
2067 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2029 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2068 2030
@@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2569 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) 2531 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
2570 goto err; 2532 goto err;
2571 2533
2534 /* Set up hotplug command - note paranoia about contents of reply.
2535 * We assume that the hardware is in a sane state, and only touch
2536 * the bits we think we understand.
2537 */
2538 intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
2539 &intel_sdvo->hotplug_active, 2);
2540 intel_sdvo->hotplug_active[0] &= ~0x3;
2541
2572 if (intel_sdvo_output_setup(intel_sdvo, 2542 if (intel_sdvo_output_setup(intel_sdvo,
2573 intel_sdvo->caps.output_flags) != true) { 2543 intel_sdvo->caps.output_flags) != true) {
2574 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", 2544 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 7ad43c6b1db7..4da23889fea6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
115 u8 msg[20]; 115 u8 msg[20];
116 int msg_bytes = send_bytes + 4; 116 int msg_bytes = send_bytes + 4;
117 u8 ack; 117 u8 ack;
118 unsigned retry;
118 119
119 if (send_bytes > 16) 120 if (send_bytes > 16)
120 return -1; 121 return -1;
@@ -125,20 +126,20 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
125 msg[3] = (msg_bytes << 4) | (send_bytes - 1); 126 msg[3] = (msg_bytes << 4) | (send_bytes - 1);
126 memcpy(&msg[4], send, send_bytes); 127 memcpy(&msg[4], send, send_bytes);
127 128
128 while (1) { 129 for (retry = 0; retry < 4; retry++) {
129 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 130 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
130 msg, msg_bytes, NULL, 0, delay, &ack); 131 msg, msg_bytes, NULL, 0, delay, &ack);
131 if (ret < 0) 132 if (ret < 0)
132 return ret; 133 return ret;
133 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 134 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
134 break; 135 return send_bytes;
135 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 136 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
136 udelay(400); 137 udelay(400);
137 else 138 else
138 return -EIO; 139 return -EIO;
139 } 140 }
140 141
141 return send_bytes; 142 return -EIO;
142} 143}
143 144
144static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, 145static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
@@ -149,26 +150,29 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
149 int msg_bytes = 4; 150 int msg_bytes = 4;
150 u8 ack; 151 u8 ack;
151 int ret; 152 int ret;
153 unsigned retry;
152 154
153 msg[0] = address; 155 msg[0] = address;
154 msg[1] = address >> 8; 156 msg[1] = address >> 8;
155 msg[2] = AUX_NATIVE_READ << 4; 157 msg[2] = AUX_NATIVE_READ << 4;
156 msg[3] = (msg_bytes << 4) | (recv_bytes - 1); 158 msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
157 159
158 while (1) { 160 for (retry = 0; retry < 4; retry++) {
159 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 161 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
160 msg, msg_bytes, recv, recv_bytes, delay, &ack); 162 msg, msg_bytes, recv, recv_bytes, delay, &ack);
161 if (ret == 0)
162 return -EPROTO;
163 if (ret < 0) 163 if (ret < 0)
164 return ret; 164 return ret;
165 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 165 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
166 return ret; 166 return ret;
167 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 167 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
168 udelay(400); 168 udelay(400);
169 else if (ret == 0)
170 return -EPROTO;
169 else 171 else
170 return -EIO; 172 return -EIO;
171 } 173 }
174
175 return -EIO;
172} 176}
173 177
174static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, 178static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
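
Both AUX helpers above drop their unbounded while (1) loops in favour of a fixed number of attempts, retrying only on DEFER replies and giving up with -EIO once the retries are exhausted. A runnable sketch of the same bounded-retry shape, with an invented transfer function and reply codes:

#include <stdio.h>
#include <unistd.h>

enum reply { REPLY_ACK, REPLY_DEFER, REPLY_ERROR };

/* Pretend channel: defers twice, then ACKs. */
static enum reply do_transfer(void)
{
        static int calls;
        return ++calls < 3 ? REPLY_DEFER : REPLY_ACK;
}

static int aux_write(int bytes)
{
        for (unsigned retry = 0; retry < 4; retry++) {
                switch (do_transfer()) {
                case REPLY_ACK:
                        return bytes;           /* success */
                case REPLY_DEFER:
                        usleep(400);            /* sink busy: wait and retry */
                        break;
                default:
                        return -1;              /* hard error, like -EIO */
                }
        }
        return -1;                              /* retries exhausted */
}

int main(void)
{
        printf("aux_write returned %d\n", aux_write(8));
        return 0;
}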
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e8a746712b5b..c4ffa14fb2f4 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1590 return backend_map; 1590 return backend_map;
1591} 1591}
1592 1592
1593static void evergreen_program_channel_remap(struct radeon_device *rdev)
1594{
1595 u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
1596
1597 tmp = RREG32(MC_SHARED_CHMAP);
1598 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1599 case 0:
1600 case 1:
1601 case 2:
1602 case 3:
1603 default:
1604 /* default mapping */
1605 mc_shared_chremap = 0x00fac688;
1606 break;
1607 }
1608
1609 switch (rdev->family) {
1610 case CHIP_HEMLOCK:
1611 case CHIP_CYPRESS:
1612 case CHIP_BARTS:
1613 tcp_chan_steer_lo = 0x54763210;
1614 tcp_chan_steer_hi = 0x0000ba98;
1615 break;
1616 case CHIP_JUNIPER:
1617 case CHIP_REDWOOD:
1618 case CHIP_CEDAR:
1619 case CHIP_PALM:
1620 case CHIP_SUMO:
1621 case CHIP_SUMO2:
1622 case CHIP_TURKS:
1623 case CHIP_CAICOS:
1624 default:
1625 tcp_chan_steer_lo = 0x76543210;
1626 tcp_chan_steer_hi = 0x0000ba98;
1627 break;
1628 }
1629
1630 WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
1631 WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
1632 WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
1633}
1634
1635static void evergreen_gpu_init(struct radeon_device *rdev) 1593static void evergreen_gpu_init(struct radeon_device *rdev)
1636{ 1594{
1637 u32 cc_rb_backend_disable = 0; 1595 u32 cc_rb_backend_disable = 0;
@@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2078 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 2036 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2079 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2037 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2080 2038
2081 evergreen_program_channel_remap(rdev);
2082
2083 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; 2039 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
2084 grbm_gfx_index = INSTANCE_BROADCAST_WRITES; 2040 grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
2085 2041
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 99fbd793c08c..8c79ca97753d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
569 return backend_map; 569 return backend_map;
570} 570}
571 571
572static void cayman_program_channel_remap(struct radeon_device *rdev)
573{
574 u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
575
576 tmp = RREG32(MC_SHARED_CHMAP);
577 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
578 case 0:
579 case 1:
580 case 2:
581 case 3:
582 default:
583 /* default mapping */
584 mc_shared_chremap = 0x00fac688;
585 break;
586 }
587
588 switch (rdev->family) {
589 case CHIP_CAYMAN:
590 default:
591 //tcp_chan_steer_lo = 0x54763210
592 tcp_chan_steer_lo = 0x76543210;
593 tcp_chan_steer_hi = 0x0000ba98;
594 break;
595 }
596
597 WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
598 WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
599 WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
600}
601
602static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, 572static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
603 u32 disable_mask_per_se, 573 u32 disable_mask_per_se,
604 u32 max_disable_mask_per_se, 574 u32 max_disable_mask_per_se,
@@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
842 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 812 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
843 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 813 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
844 814
845 cayman_program_channel_remap(rdev);
846
847 /* primary versions */ 815 /* primary versions */
848 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 816 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
849 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 817 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5b1837b4aacf..7fcdbbbf2979 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,
773 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 773 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
774 radeon_ring_write(rdev, 0); 774 radeon_ring_write(rdev, 0);
775 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 775 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
776 radeon_ring_write(rdev, cur_pages); 776 radeon_ring_write(rdev, num_gpu_pages);
777 radeon_ring_write(rdev, cur_pages); 777 radeon_ring_write(rdev, num_gpu_pages);
778 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); 778 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
779 } 779 }
780 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); 780 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index c4b8741dbf58..bce63fd329d4 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
68 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 68 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
69 int saved_dpms = connector->dpms; 69 int saved_dpms = connector->dpms;
70 70
 71 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && 71 /* Only turn off the display if it's physically disconnected */
72 radeon_dp_needs_link_train(radeon_connector)) 72 if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
73 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
74 else
75 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 73 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
74 else if (radeon_dp_needs_link_train(radeon_connector))
75 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
76 connector->dpms = saved_dpms; 76 connector->dpms = saved_dpms;
77 } 77 }
78} 78}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 3189a7efb2e9..fde25c0d65a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
208 int xorigin = 0, yorigin = 0; 208 int xorigin = 0, yorigin = 0;
209 int w = radeon_crtc->cursor_width; 209 int w = radeon_crtc->cursor_width;
210 210
211 if (x < 0)
212 xorigin = -x + 1;
213 if (y < 0)
214 yorigin = -y + 1;
215 if (xorigin >= CURSOR_WIDTH)
216 xorigin = CURSOR_WIDTH - 1;
217 if (yorigin >= CURSOR_HEIGHT)
218 yorigin = CURSOR_HEIGHT - 1;
219
220 if (ASIC_IS_AVIVO(rdev)) { 211 if (ASIC_IS_AVIVO(rdev)) {
221 int i = 0;
222 struct drm_crtc *crtc_p;
223
224 /* avivo cursor are offset into the total surface */ 212 /* avivo cursor are offset into the total surface */
225 x += crtc->x; 213 x += crtc->x;
226 y += crtc->y; 214 y += crtc->y;
227 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 215 }
216 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
217
218 if (x < 0) {
219 xorigin = min(-x, CURSOR_WIDTH - 1);
220 x = 0;
221 }
222 if (y < 0) {
223 yorigin = min(-y, CURSOR_HEIGHT - 1);
224 y = 0;
225 }
226
227 if (ASIC_IS_AVIVO(rdev)) {
228 int i = 0;
229 struct drm_crtc *crtc_p;
228 230
229 /* avivo cursor image can't end on 128 pixel boundary or 231 /* avivo cursor image can't end on 128 pixel boundary or
230 * go past the end of the frame if both crtcs are enabled 232 * go past the end of the frame if both crtcs are enabled
@@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
253 255
254 radeon_lock_cursor(crtc, true); 256 radeon_lock_cursor(crtc, true);
255 if (ASIC_IS_DCE4(rdev)) { 257 if (ASIC_IS_DCE4(rdev)) {
256 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, 258 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
257 ((xorigin ? 0 : x) << 16) |
258 (yorigin ? 0 : y));
259 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); 259 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
260 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, 260 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
261 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); 261 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
262 } else if (ASIC_IS_AVIVO(rdev)) { 262 } else if (ASIC_IS_AVIVO(rdev)) {
263 WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, 263 WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
264 ((xorigin ? 0 : x) << 16) |
265 (yorigin ? 0 : y));
266 WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); 264 WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
267 WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, 265 WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
268 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); 266 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
@@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
276 | yorigin)); 274 | yorigin));
277 WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, 275 WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
278 (RADEON_CUR_LOCK 276 (RADEON_CUR_LOCK
279 | ((xorigin ? 0 : x) << 16) 277 | (x << 16)
280 | (yorigin ? 0 : y))); 278 | y));
281 /* offset is from DISP(2)_BASE_ADDRESS */ 279 /* offset is from DISP(2)_BASE_ADDRESS */
282 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + 280 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
283 (yorigin * 256))); 281 (yorigin * 256)));
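
radeon_crtc_cursor_move() now handles a cursor hanging off the top or left edge by zeroing the position and shifting the hotspot instead, capped at the cursor dimensions. A small worked sketch of that clamping (the 64x64 cursor size is an assumption for the example):

#include <stdio.h>

#define CURSOR_W 64
#define CURSOR_H 64

static int min_int(int a, int b) { return a < b ? a : b; }

static void clamp_cursor(int *x, int *y, int *xorigin, int *yorigin)
{
        *xorigin = *yorigin = 0;
        if (*x < 0) {
                *xorigin = min_int(-*x, CURSOR_W - 1);
                *x = 0;
        }
        if (*y < 0) {
                *yorigin = min_int(-*y, CURSOR_H - 1);
                *y = 0;
        }
}

int main(void)
{
        int x = -10, y = 5, xo, yo;

        clamp_cursor(&x, &y, &xo, &yo);
        printf("pos=(%d,%d) hotspot=(%d,%d)\n", x, y, xo, yo);  /* (0,5) (10,0) */
        return 0;
}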
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 319d85d7e759..13690f3eb4a4 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1507 switch (mode) { 1507 switch (mode) {
1508 case DRM_MODE_DPMS_ON: 1508 case DRM_MODE_DPMS_ON:
1509 args.ucAction = ATOM_ENABLE; 1509 args.ucAction = ATOM_ENABLE;
1510 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1510 /* workaround for DVOOutputControl on some RS690 systems */
1511 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
1512 u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
1513 WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
1514 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1515 WREG32(RADEON_BIOS_3_SCRATCH, reg);
1516 } else
1517 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1511 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 1518 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
1512 args.ucAction = ATOM_LCD_BLON; 1519 args.ucAction = ATOM_LCD_BLON;
1513 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1520 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
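
The RS690 workaround above is a plain save/mask/restore sequence: snapshot the scratch register, clear the offending bit, run the ATOM table, then write the saved value back. A toy sketch of that shape with placeholder register, mask and callback names:

#include <stdio.h>

static unsigned int scratch_reg = 0xff;
#define ACTIVE_BIT 0x08

static void run_table(void)
{
        printf("table sees scratch 0x%02x\n", scratch_reg);
}

int main(void)
{
        unsigned int saved = scratch_reg;       /* save */

        scratch_reg = saved & ~ACTIVE_BIT;      /* mask the troublesome bit */
        run_table();                            /* do the call */
        scratch_reg = saved;                    /* restore */

        printf("scratch restored to 0x%02x\n", scratch_reg);
        return 0;
}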
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4720d000d440..b13c2eedc321 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
536 return backend_map; 536 return backend_map;
537} 537}
538 538
539static void rv770_program_channel_remap(struct radeon_device *rdev)
540{
541 u32 tcp_chan_steer, mc_shared_chremap, tmp;
542 bool force_no_swizzle;
543
544 switch (rdev->family) {
545 case CHIP_RV770:
546 case CHIP_RV730:
547 force_no_swizzle = false;
548 break;
549 case CHIP_RV710:
550 case CHIP_RV740:
551 default:
552 force_no_swizzle = true;
553 break;
554 }
555
556 tmp = RREG32(MC_SHARED_CHMAP);
557 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
558 case 0:
559 case 1:
560 default:
561 /* default mapping */
562 mc_shared_chremap = 0x00fac688;
563 break;
564 case 2:
565 case 3:
566 if (force_no_swizzle)
567 mc_shared_chremap = 0x00fac688;
568 else
569 mc_shared_chremap = 0x00bbc298;
570 break;
571 }
572
573 if (rdev->family == CHIP_RV740)
574 tcp_chan_steer = 0x00ef2a60;
575 else
576 tcp_chan_steer = 0x00fac688;
577
578 /* RV770 CE has special chremap setup */
579 if (rdev->pdev->device == 0x944e) {
580 tcp_chan_steer = 0x00b08b08;
581 mc_shared_chremap = 0x00b08b08;
582 }
583
584 WREG32(TCP_CHAN_STEER, tcp_chan_steer);
585 WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
586}
587
588static void rv770_gpu_init(struct radeon_device *rdev) 539static void rv770_gpu_init(struct radeon_device *rdev)
589{ 540{
590 int i, j, num_qd_pipes; 541 int i, j, num_qd_pipes;
@@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
785 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 736 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
786 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 737 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
787 738
788 rv770_program_channel_remap(rdev);
789
790 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 739 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
791 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 740 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
792 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 741 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 411257676133..932383786642 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,17 +36,25 @@
36#include <linux/cpu.h> 36#include <linux/cpu.h>
37#include <linux/pci.h> 37#include <linux/pci.h>
38#include <linux/smp.h> 38#include <linux/smp.h>
39#include <linux/moduleparam.h>
39#include <asm/msr.h> 40#include <asm/msr.h>
40#include <asm/processor.h> 41#include <asm/processor.h>
41 42
42#define DRVNAME "coretemp" 43#define DRVNAME "coretemp"
43 44
45/*
46 * force_tjmax only matters when TjMax can't be read from the CPU itself.
47 * When set, it replaces the driver's suboptimal heuristic.
48 */
49static int force_tjmax;
50module_param_named(tjmax, force_tjmax, int, 0444);
51MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
52
44#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ 53#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
45#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ 54#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
46#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ 55#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
47#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ 56#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
48#define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */ 57#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
49#define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
50#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) 58#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
51 59
52#ifdef CONFIG_SMP 60#ifdef CONFIG_SMP
@@ -69,8 +77,6 @@
69 * This value is passed as "id" field to rdmsr/wrmsr functions. 77 * This value is passed as "id" field to rdmsr/wrmsr functions.
70 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS, 78 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
71 * from where the temperature values should be read. 79 * from where the temperature values should be read.
72 * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
73 * from where the thresholds are read.
 74 * @attr_size: Total number of per-core attrs displayed in the sysfs. 80 * @attr_size: Total number of per-core attrs displayed in the sysfs.
75 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data. 81 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
76 * Otherwise, temp_data holds coretemp data. 82 * Otherwise, temp_data holds coretemp data.
@@ -79,13 +85,11 @@
79struct temp_data { 85struct temp_data {
80 int temp; 86 int temp;
81 int ttarget; 87 int ttarget;
82 int tmin;
83 int tjmax; 88 int tjmax;
84 unsigned long last_updated; 89 unsigned long last_updated;
85 unsigned int cpu; 90 unsigned int cpu;
86 u32 cpu_core_id; 91 u32 cpu_core_id;
87 u32 status_reg; 92 u32 status_reg;
88 u32 intrpt_reg;
89 int attr_size; 93 int attr_size;
90 bool is_pkg_data; 94 bool is_pkg_data;
91 bool valid; 95 bool valid;
@@ -143,19 +147,6 @@ static ssize_t show_crit_alarm(struct device *dev,
143 return sprintf(buf, "%d\n", (eax >> 5) & 1); 147 return sprintf(buf, "%d\n", (eax >> 5) & 1);
144} 148}
145 149
146static ssize_t show_max_alarm(struct device *dev,
147 struct device_attribute *devattr, char *buf)
148{
149 u32 eax, edx;
150 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
151 struct platform_data *pdata = dev_get_drvdata(dev);
152 struct temp_data *tdata = pdata->core_data[attr->index];
153
154 rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
155
156 return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1));
157}
158
159static ssize_t show_tjmax(struct device *dev, 150static ssize_t show_tjmax(struct device *dev,
160 struct device_attribute *devattr, char *buf) 151 struct device_attribute *devattr, char *buf)
161{ 152{
@@ -174,83 +165,6 @@ static ssize_t show_ttarget(struct device *dev,
174 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget); 165 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
175} 166}
176 167
177static ssize_t store_ttarget(struct device *dev,
178 struct device_attribute *devattr,
179 const char *buf, size_t count)
180{
181 struct platform_data *pdata = dev_get_drvdata(dev);
182 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
183 struct temp_data *tdata = pdata->core_data[attr->index];
184 u32 eax, edx;
185 unsigned long val;
186 int diff;
187
188 if (strict_strtoul(buf, 10, &val))
189 return -EINVAL;
190
191 /*
192 * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms
193 * of milli degree celsius. Hence don't accept val > (127 * 1000)
194 */
195 if (val > tdata->tjmax || val > 127000)
196 return -EINVAL;
197
198 diff = (tdata->tjmax - val) / 1000;
199
200 mutex_lock(&tdata->update_lock);
201 rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
202 eax = (eax & ~THERM_MASK_THRESHOLD1) |
203 (diff << THERM_SHIFT_THRESHOLD1);
204 wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
205 tdata->ttarget = val;
206 mutex_unlock(&tdata->update_lock);
207
208 return count;
209}
210
211static ssize_t show_tmin(struct device *dev,
212 struct device_attribute *devattr, char *buf)
213{
214 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
215 struct platform_data *pdata = dev_get_drvdata(dev);
216
217 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin);
218}
219
220static ssize_t store_tmin(struct device *dev,
221 struct device_attribute *devattr,
222 const char *buf, size_t count)
223{
224 struct platform_data *pdata = dev_get_drvdata(dev);
225 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
226 struct temp_data *tdata = pdata->core_data[attr->index];
227 u32 eax, edx;
228 unsigned long val;
229 int diff;
230
231 if (strict_strtoul(buf, 10, &val))
232 return -EINVAL;
233
234 /*
235 * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms
236 * of milli degree celsius. Hence don't accept val > (127 * 1000)
237 */
238 if (val > tdata->tjmax || val > 127000)
239 return -EINVAL;
240
241 diff = (tdata->tjmax - val) / 1000;
242
243 mutex_lock(&tdata->update_lock);
244 rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
245 eax = (eax & ~THERM_MASK_THRESHOLD0) |
246 (diff << THERM_SHIFT_THRESHOLD0);
247 wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
248 tdata->tmin = val;
249 mutex_unlock(&tdata->update_lock);
250
251 return count;
252}
253
254static ssize_t show_temp(struct device *dev, 168static ssize_t show_temp(struct device *dev,
255 struct device_attribute *devattr, char *buf) 169 struct device_attribute *devattr, char *buf)
256{ 170{
@@ -374,7 +288,6 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
374 288
375static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) 289static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
376{ 290{
377 /* The 100C is default for both mobile and non mobile CPUs */
378 int err; 291 int err;
379 u32 eax, edx; 292 u32 eax, edx;
380 u32 val; 293 u32 val;
@@ -385,7 +298,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
385 */ 298 */
386 err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); 299 err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
387 if (err) { 300 if (err) {
388 dev_warn(dev, "Unable to read TjMax from CPU.\n"); 301 if (c->x86_model > 0xe && c->x86_model != 0x1c)
302 dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
389 } else { 303 } else {
390 val = (eax >> 16) & 0xff; 304 val = (eax >> 16) & 0xff;
391 /* 305 /*
@@ -393,11 +307,17 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
393 * will be used 307 * will be used
394 */ 308 */
395 if (val) { 309 if (val) {
396 dev_info(dev, "TjMax is %d C.\n", val); 310 dev_dbg(dev, "TjMax is %d degrees C\n", val);
397 return val * 1000; 311 return val * 1000;
398 } 312 }
399 } 313 }
400 314
315 if (force_tjmax) {
316 dev_notice(dev, "TjMax forced to %d degrees C by user\n",
317 force_tjmax);
318 return force_tjmax * 1000;
319 }
320
401 /* 321 /*
402 * An assumption is made for early CPUs and unreadable MSR. 322 * An assumption is made for early CPUs and unreadable MSR.
403 * NOTE: the calculated value may not be correct. 323 * NOTE: the calculated value may not be correct.
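
With the new force_tjmax parameter, the resolution order in get_tjmax() becomes: a TjMax value read from the MSR always wins, the user-supplied parameter is consulted only when the read yields nothing, and the old heuristic remains the last resort. A small sketch of that ordering with invented input values:

#include <stdio.h>

static int force_tjmax;                         /* module parameter, 0 = unset */

static int resolve_tjmax(int msr_tjmax_c, int heuristic_c)
{
        if (msr_tjmax_c)                        /* MSR value is authoritative */
                return msr_tjmax_c * 1000;
        if (force_tjmax)                        /* user override as a fallback */
                return force_tjmax * 1000;
        return heuristic_c * 1000;              /* guess, may be wrong */
}

int main(void)
{
        force_tjmax = 105;
        printf("%d %d\n", resolve_tjmax(100, 85), resolve_tjmax(0, 85));
        return 0;
}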
@@ -414,21 +334,6 @@ static void __devinit get_ucode_rev_on_cpu(void *edx)
414 rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx); 334 rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
415} 335}
416 336
417static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
418{
419 int err;
420 u32 eax, edx, val;
421
422 err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
423 if (!err) {
424 val = (eax >> 16) & 0xff;
425 if (val)
426 return val * 1000;
427 }
428 dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
429 return 100000; /* Default TjMax: 100 degree celsius */
430}
431
432static int create_name_attr(struct platform_data *pdata, struct device *dev) 337static int create_name_attr(struct platform_data *pdata, struct device *dev)
433{ 338{
434 sysfs_attr_init(&pdata->name_attr.attr); 339 sysfs_attr_init(&pdata->name_attr.attr);
@@ -442,19 +347,14 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
442 int attr_no) 347 int attr_no)
443{ 348{
444 int err, i; 349 int err, i;
445 static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev, 350 static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
446 struct device_attribute *devattr, char *buf) = { 351 struct device_attribute *devattr, char *buf) = {
447 show_label, show_crit_alarm, show_temp, show_tjmax, 352 show_label, show_crit_alarm, show_temp, show_tjmax,
448 show_max_alarm, show_ttarget, show_tmin }; 353 show_ttarget };
449 static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev, 354 static const char *const names[TOTAL_ATTRS] = {
450 struct device_attribute *devattr, const char *buf,
451 size_t count) = { NULL, NULL, NULL, NULL, NULL,
452 store_ttarget, store_tmin };
453 static const char *names[TOTAL_ATTRS] = {
454 "temp%d_label", "temp%d_crit_alarm", 355 "temp%d_label", "temp%d_crit_alarm",
455 "temp%d_input", "temp%d_crit", 356 "temp%d_input", "temp%d_crit",
456 "temp%d_max_alarm", "temp%d_max", 357 "temp%d_max" };
457 "temp%d_max_hyst" };
458 358
459 for (i = 0; i < tdata->attr_size; i++) { 359 for (i = 0; i < tdata->attr_size; i++) {
460 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], 360 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
@@ -462,10 +362,6 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
462 sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); 362 sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
463 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; 363 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
464 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; 364 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
465 if (rw_ptr[i]) {
466 tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
467 tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
468 }
469 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; 365 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
470 tdata->sd_attrs[i].index = attr_no; 366 tdata->sd_attrs[i].index = attr_no;
471 err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr); 367 err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
@@ -481,9 +377,9 @@ exit_free:
481} 377}
482 378
483 379
484static int __devinit chk_ucode_version(struct platform_device *pdev) 380static int __cpuinit chk_ucode_version(unsigned int cpu)
485{ 381{
486 struct cpuinfo_x86 *c = &cpu_data(pdev->id); 382 struct cpuinfo_x86 *c = &cpu_data(cpu);
487 int err; 383 int err;
488 u32 edx; 384 u32 edx;
489 385
@@ -494,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev)
494 */ 390 */
495 if (c->x86_model == 0xe && c->x86_mask < 0xc) { 391 if (c->x86_model == 0xe && c->x86_mask < 0xc) {
496 /* check for microcode update */ 392 /* check for microcode update */
497 err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu, 393 err = smp_call_function_single(cpu, get_ucode_rev_on_cpu,
498 &edx, 1); 394 &edx, 1);
499 if (err) { 395 if (err) {
500 dev_err(&pdev->dev, 396 pr_err("Cannot determine microcode revision of "
501 "Cannot determine microcode revision of " 397 "CPU#%u (%d)!\n", cpu, err);
502 "CPU#%u (%d)!\n", pdev->id, err);
503 return -ENODEV; 398 return -ENODEV;
504 } else if (edx < 0x39) { 399 } else if (edx < 0x39) {
505 dev_err(&pdev->dev, 400 pr_err("Errata AE18 not fixed, update BIOS or "
506 "Errata AE18 not fixed, update BIOS or " 401 "microcode of the CPU!\n");
507 "microcode of the CPU!\n");
508 return -ENODEV; 402 return -ENODEV;
509 } 403 }
510 } 404 }
@@ -538,8 +432,6 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
538 432
539 tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS : 433 tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
540 MSR_IA32_THERM_STATUS; 434 MSR_IA32_THERM_STATUS;
541 tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
542 MSR_IA32_THERM_INTERRUPT;
543 tdata->is_pkg_data = pkg_flag; 435 tdata->is_pkg_data = pkg_flag;
544 tdata->cpu = cpu; 436 tdata->cpu = cpu;
545 tdata->cpu_core_id = TO_CORE_ID(cpu); 437 tdata->cpu_core_id = TO_CORE_ID(cpu);
@@ -548,11 +440,11 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
548 return tdata; 440 return tdata;
549} 441}
550 442
551static int create_core_data(struct platform_data *pdata, 443static int create_core_data(struct platform_device *pdev,
552 struct platform_device *pdev,
553 unsigned int cpu, int pkg_flag) 444 unsigned int cpu, int pkg_flag)
554{ 445{
555 struct temp_data *tdata; 446 struct temp_data *tdata;
447 struct platform_data *pdata = platform_get_drvdata(pdev);
556 struct cpuinfo_x86 *c = &cpu_data(cpu); 448 struct cpuinfo_x86 *c = &cpu_data(cpu);
557 u32 eax, edx; 449 u32 eax, edx;
558 int err, attr_no; 450 int err, attr_no;
@@ -588,25 +480,21 @@ static int create_core_data(struct platform_data *pdata,
588 goto exit_free; 480 goto exit_free;
589 481
590 /* We can access status register. Get Critical Temperature */ 482 /* We can access status register. Get Critical Temperature */
591 if (pkg_flag) 483 tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
592 tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev);
593 else
594 tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
595 484
596 /* 485 /*
597 * Test if we can access the intrpt register. If so, increase the 486 * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET.
598 * 'size' enough to have ttarget/tmin/max_alarm interfaces. 487 * The target temperature is available on older CPUs but not in this
599 * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT 488 * register. Atoms don't have the register at all.
600 */ 489 */
601 err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); 490 if (c->x86_model > 0xe && c->x86_model != 0x1c) {
602 if (!err) { 491 err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET,
603 tdata->attr_size += MAX_THRESH_ATTRS; 492 &eax, &edx);
604 tdata->tmin = tdata->tjmax - 493 if (!err) {
605 ((eax & THERM_MASK_THRESHOLD0) >> 494 tdata->ttarget
606 THERM_SHIFT_THRESHOLD0) * 1000; 495 = tdata->tjmax - ((eax >> 8) & 0xff) * 1000;
607 tdata->ttarget = tdata->tjmax - 496 tdata->attr_size++;
608 ((eax & THERM_MASK_THRESHOLD1) >> 497 }
609 THERM_SHIFT_THRESHOLD1) * 1000;
610 } 498 }
611 499
612 pdata->core_data[attr_no] = tdata; 500 pdata->core_data[attr_no] = tdata;
@@ -618,22 +506,20 @@ static int create_core_data(struct platform_data *pdata,
618 506
619 return 0; 507 return 0;
620exit_free: 508exit_free:
509 pdata->core_data[attr_no] = NULL;
621 kfree(tdata); 510 kfree(tdata);
622 return err; 511 return err;
623} 512}
624 513
625static void coretemp_add_core(unsigned int cpu, int pkg_flag) 514static void coretemp_add_core(unsigned int cpu, int pkg_flag)
626{ 515{
627 struct platform_data *pdata;
628 struct platform_device *pdev = coretemp_get_pdev(cpu); 516 struct platform_device *pdev = coretemp_get_pdev(cpu);
629 int err; 517 int err;
630 518
631 if (!pdev) 519 if (!pdev)
632 return; 520 return;
633 521
634 pdata = platform_get_drvdata(pdev); 522 err = create_core_data(pdev, cpu, pkg_flag);
635
636 err = create_core_data(pdata, pdev, cpu, pkg_flag);
637 if (err) 523 if (err)
638 dev_err(&pdev->dev, "Adding Core %u failed\n", cpu); 524 dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
639} 525}
@@ -657,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
657 struct platform_data *pdata; 543 struct platform_data *pdata;
658 int err; 544 int err;
659 545
660 /* Check the microcode version of the CPU */
661 err = chk_ucode_version(pdev);
662 if (err)
663 return err;
664
665 /* Initialize the per-package data structures */ 546 /* Initialize the per-package data structures */
666 pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); 547 pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
667 if (!pdata) 548 if (!pdata)
@@ -671,7 +552,7 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
671 if (err) 552 if (err)
672 goto exit_free; 553 goto exit_free;
673 554
674 pdata->phys_proc_id = TO_PHYS_ID(pdev->id); 555 pdata->phys_proc_id = pdev->id;
675 platform_set_drvdata(pdev, pdata); 556 platform_set_drvdata(pdev, pdata);
676 557
677 pdata->hwmon_dev = hwmon_device_register(&pdev->dev); 558 pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
@@ -723,7 +604,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
723 604
724 mutex_lock(&pdev_list_mutex); 605 mutex_lock(&pdev_list_mutex);
725 606
726 pdev = platform_device_alloc(DRVNAME, cpu); 607 pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu));
727 if (!pdev) { 608 if (!pdev) {
728 err = -ENOMEM; 609 err = -ENOMEM;
729 pr_err("Device allocation failed\n"); 610 pr_err("Device allocation failed\n");
@@ -743,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
743 } 624 }
744 625
745 pdev_entry->pdev = pdev; 626 pdev_entry->pdev = pdev;
746 pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); 627 pdev_entry->phys_proc_id = pdev->id;
747 628
748 list_add_tail(&pdev_entry->list, &pdev_list); 629 list_add_tail(&pdev_entry->list, &pdev_list);
749 mutex_unlock(&pdev_list_mutex); 630 mutex_unlock(&pdev_list_mutex);
@@ -804,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu)
804 return; 685 return;
805 686
806 if (!pdev) { 687 if (!pdev) {
688 /* Check the microcode version of the CPU */
689 if (chk_ucode_version(cpu))
690 return;
691
807 /* 692 /*
808 * Alright, we have DTS support. 693 * Alright, we have DTS support.
809 * We are bringing the _first_ core in this pkg 694 * We are bringing the _first_ core in this pkg
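
Note: the coretemp hunk above derives the target temperature from bits 8:15 of MSR_IA32_TEMPERATURE_TARGET. A small, self-contained illustration of that arithmetic follows; the MSR word and TjMax below are made-up sample values, not numbers taken from this patch.

#include <stdio.h>

int main(void)
{
	unsigned int eax = 0x00000f00;	/* sample MSR low word: offset field = 15 */
	int tjmax = 100000;		/* sample TjMax in millidegrees Celsius */
	int ttarget;

	/* bits 8:15 hold the offset below TjMax, in whole degrees */
	ttarget = tjmax - ((eax >> 8) & 0xff) * 1000;
	printf("ttarget = %d\n", ttarget);	/* prints 85000, i.e. 85 degrees C */
	return 0;
}
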
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 257957c69d92..4f7c3fc40a89 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -72,7 +72,7 @@ struct ds620_data {
72 char valid; /* !=0 if following fields are valid */ 72 char valid; /* !=0 if following fields are valid */
73 unsigned long last_updated; /* In jiffies */ 73 unsigned long last_updated; /* In jiffies */
74 74
75 u16 temp[3]; /* Register values, word */ 75 s16 temp[3]; /* Register values, word */
76}; 76};
77 77
78/* 78/*
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 17cf1ab95521..8c2844e5691c 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_client *client,
329 struct i2c_board_info *info); 329 struct i2c_board_info *info);
330static int w83791d_remove(struct i2c_client *client); 330static int w83791d_remove(struct i2c_client *client);
331 331
332static int w83791d_read(struct i2c_client *client, u8 register); 332static int w83791d_read(struct i2c_client *client, u8 reg);
333static int w83791d_write(struct i2c_client *client, u8 register, u8 value); 333static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
334static struct w83791d_data *w83791d_update_device(struct device *dev); 334static struct w83791d_data *w83791d_update_device(struct device *dev);
335 335
336#ifdef DEBUG 336#ifdef DEBUG
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 274798068a54..16f69be820c7 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
435 if (!(rq->cmd_flags & REQ_FLUSH)) 435 if (!(rq->cmd_flags & REQ_FLUSH))
436 return BLKPREP_OK; 436 return BLKPREP_OK;
437 437
438 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); 438 if (rq->special) {
439 cmd = rq->special;
440 memset(cmd, 0, sizeof(*cmd));
441 } else {
442 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
443 }
439 444
440 /* FIXME: map struct ide_taskfile on rq->cmd[] */ 445 /* FIXME: map struct ide_taskfile on rq->cmd[] */
441 BUG_ON(cmd == NULL); 446 BUG_ON(cmd == NULL);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 17bf9d95463c..6cd642aaa4de 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -287,7 +287,7 @@ void __free_ep(struct kref *kref)
287 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { 287 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
288 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); 288 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
289 dst_release(ep->dst); 289 dst_release(ep->dst);
290 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 290 l2t_release(ep->com.tdev, ep->l2t);
291 } 291 }
292 kfree(ep); 292 kfree(ep);
293} 293}
@@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1178 release_tid(ep->com.tdev, GET_TID(rpl), NULL); 1178 release_tid(ep->com.tdev, GET_TID(rpl), NULL);
1179 cxgb3_free_atid(ep->com.tdev, ep->atid); 1179 cxgb3_free_atid(ep->com.tdev, ep->atid);
1180 dst_release(ep->dst); 1180 dst_release(ep->dst);
1181 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 1181 l2t_release(ep->com.tdev, ep->l2t);
1182 put_ep(&ep->com); 1182 put_ep(&ep->com);
1183 return CPL_RET_BUF_DONE; 1183 return CPL_RET_BUF_DONE;
1184} 1184}
@@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1377 if (!child_ep) { 1377 if (!child_ep) {
1378 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 1378 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
1379 __func__); 1379 __func__);
1380 l2t_release(L2DATA(tdev), l2t); 1380 l2t_release(tdev, l2t);
1381 dst_release(dst); 1381 dst_release(dst);
1382 goto reject; 1382 goto reject;
1383 } 1383 }
@@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1956 if (!err) 1956 if (!err)
1957 goto out; 1957 goto out;
1958 1958
1959 l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t); 1959 l2t_release(h->rdev.t3cdev_p, ep->l2t);
1960fail4: 1960fail4:
1961 dst_release(ep->dst); 1961 dst_release(ep->dst);
1962fail3: 1962fail3:
@@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2127 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, 2127 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2128 l2t); 2128 l2t);
2129 dst_hold(new); 2129 dst_hold(new);
2130 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 2130 l2t_release(ep->com.tdev, ep->l2t);
2131 ep->l2t = l2t; 2131 ep->l2t = l2t;
2132 dst_release(old); 2132 dst_release(old);
2133 ep->dst = new; 2133 ep->dst = new;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 0dc97ec15c28..9dea71849f40 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1124,11 +1124,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1124 for (i = 0; i < 8; i++) 1124 for (i = 0; i < 8; i++)
1125 __set_bit(BTN_0 + i, input_dev->keybit); 1125 __set_bit(BTN_0 + i, input_dev->keybit);
1126 1126
1127 if (wacom_wac->features.type != WACOM_21UX2) { 1127 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
1128 input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); 1128 input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
1129 input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
1130 }
1131
1132 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1129 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1133 1130
1134 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); 1131 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 49da55c1528a..8c2a000cf3f5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1698 } 1698 }
1699 1699
1700 ti->num_flush_requests = 1; 1700 ti->num_flush_requests = 1;
1701 ti->discard_zeroes_data_unsupported = 1;
1702
1701 return 0; 1703 return 0;
1702 1704
1703bad: 1705bad:
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 89f73ca22cfa..f84c08029b21 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
81 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> 81 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
82 */ 82 */
83 if (!strcasecmp(arg_name, "corrupt_bio_byte")) { 83 if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
84 if (!argc) 84 if (!argc) {
85 ti->error = "Feature corrupt_bio_byte requires parameters"; 85 ti->error = "Feature corrupt_bio_byte requires parameters";
86 return -EINVAL;
87 }
86 88
87 r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); 89 r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
88 if (r) 90 if (r)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index a002dd85db1e..86df8b2cf927 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
449 rs->ti->error = "write_mostly option is only valid for RAID1"; 449 rs->ti->error = "write_mostly option is only valid for RAID1";
450 return -EINVAL; 450 return -EINVAL;
451 } 451 }
452 if (value > rs->md.raid_disks) { 452 if (value >= rs->md.raid_disks) {
453 rs->ti->error = "Invalid write_mostly drive index given"; 453 rs->ti->error = "Invalid write_mostly drive index given";
454 return -EINVAL; 454 return -EINVAL;
455 } 455 }
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 986b8754bb08..bc04518e9d8b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t)
1238 return; 1238 return;
1239 1239
1240 template_disk = dm_table_get_integrity_disk(t, true); 1240 template_disk = dm_table_get_integrity_disk(t, true);
1241 if (!template_disk && 1241 if (template_disk)
1242 blk_integrity_is_initialized(dm_disk(t->md))) { 1242 blk_integrity_register(dm_disk(t->md),
1243 blk_get_integrity(template_disk));
1244 else if (blk_integrity_is_initialized(dm_disk(t->md)))
1243 DMWARN("%s: device no longer has a valid integrity profile", 1245 DMWARN("%s: device no longer has a valid integrity profile",
1244 dm_device_name(t->md)); 1246 dm_device_name(t->md));
1245 return; 1247 else
1246 } 1248 DMWARN("%s: unable to establish an integrity profile",
1247 blk_integrity_register(dm_disk(t->md), 1249 dm_device_name(t->md));
1248 blk_get_integrity(template_disk));
1249} 1250}
1250 1251
1251static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, 1252static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
1282 return 0; 1283 return 0;
1283} 1284}
1284 1285
1286static bool dm_table_discard_zeroes_data(struct dm_table *t)
1287{
1288 struct dm_target *ti;
1289 unsigned i = 0;
1290
1291 /* Ensure that all targets support discard_zeroes_data. */
1292 while (i < dm_table_get_num_targets(t)) {
1293 ti = dm_table_get_target(t, i++);
1294
1295 if (ti->discard_zeroes_data_unsupported)
1296 return 0;
1297 }
1298
1299 return 1;
1300}
1301
1285void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, 1302void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1286 struct queue_limits *limits) 1303 struct queue_limits *limits)
1287{ 1304{
@@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1304 } 1321 }
1305 blk_queue_flush(q, flush); 1322 blk_queue_flush(q, flush);
1306 1323
1324 if (!dm_table_discard_zeroes_data(t))
1325 q->limits.discard_zeroes_data = 0;
1326
1307 dm_table_set_integrity(t); 1327 dm_table_set_integrity(t);
1308 1328
1309 /* 1329 /*
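
Note: the new dm_table_discard_zeroes_data() above only aggregates a flag that individual targets set in their constructors (dm-crypt does exactly that earlier in this diff). A hedged sketch of a hypothetical target constructor opting out; my_target_ctr is illustrative, not an existing target:

#include <linux/device-mapper.h>

/* Hypothetical target ctr: argument parsing and state setup elided.
 * Setting discard_zeroes_data_unsupported makes dm_table_set_restrictions()
 * clear discard_zeroes_data for any table that contains this target.
 */
static int my_target_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* ... parse argv, allocate ti->private ... */
	ti->num_flush_requests = 1;
	ti->discard_zeroes_data_unsupported = 1;
	return 0;
}
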
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5404b2295820..5c95ccb59500 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -61,6 +61,11 @@
61static void autostart_arrays(int part); 61static void autostart_arrays(int part);
62#endif 62#endif
63 63
64/* pers_list is a list of registered personalities protected
65 * by pers_lock.
66 * pers_lock does extra service to protect accesses to
67 * mddev->thread when the mutex cannot be held.
68 */
64static LIST_HEAD(pers_list); 69static LIST_HEAD(pers_list);
65static DEFINE_SPINLOCK(pers_lock); 70static DEFINE_SPINLOCK(pers_lock);
66 71
@@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev)
739 } else 744 } else
740 mutex_unlock(&mddev->reconfig_mutex); 745 mutex_unlock(&mddev->reconfig_mutex);
741 746
747 /* now we've dropped the mutex we need a spinlock to
748 * make sure the thread doesn't disappear
749 */
750 spin_lock(&pers_lock);
742 md_wakeup_thread(mddev->thread); 751 md_wakeup_thread(mddev->thread);
752 spin_unlock(&pers_lock);
743} 753}
744 754
745static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) 755static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
@@ -6429,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
6429 return thread; 6439 return thread;
6430} 6440}
6431 6441
6432void md_unregister_thread(mdk_thread_t *thread) 6442void md_unregister_thread(mdk_thread_t **threadp)
6433{ 6443{
6444 mdk_thread_t *thread = *threadp;
6434 if (!thread) 6445 if (!thread)
6435 return; 6446 return;
6436 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 6447 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
6448 /* Locking ensures that mddev_unlock does not wake_up a
6449 * non-existent thread
6450 */
6451 spin_lock(&pers_lock);
6452 *threadp = NULL;
6453 spin_unlock(&pers_lock);
6437 6454
6438 kthread_stop(thread->tsk); 6455 kthread_stop(thread->tsk);
6439 kfree(thread); 6456 kfree(thread);
@@ -7340,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev)
7340 mdk_rdev_t *rdev; 7357 mdk_rdev_t *rdev;
7341 7358
7342 /* resync has finished, collect result */ 7359 /* resync has finished, collect result */
7343 md_unregister_thread(mddev->sync_thread); 7360 md_unregister_thread(&mddev->sync_thread);
7344 mddev->sync_thread = NULL;
7345 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7361 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7346 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 7362 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7347 /* success...*/ 7363 /* success...*/
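
Note: md_unregister_thread() now takes a pointer-to-pointer so the published thread slot can be cleared under pers_lock before kthread_stop() runs. A generic sketch of that teardown idiom, using hypothetical names (my_lock, my_thread, my_unregister) rather than the md types:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* protects readers of my_thread */
static struct task_struct *my_thread;

/* Wake the worker only under the lock, so a concurrent unregister
 * cannot free the task out from under us.
 */
static void my_wakeup(void)
{
	spin_lock(&my_lock);
	if (my_thread)
		wake_up_process(my_thread);
	spin_unlock(&my_lock);
}

/* Called as my_unregister(&my_thread): unpublish the pointer first,
 * then stop the thread. Any my_wakeup() racing with this either saw
 * the task while it was still alive or sees NULL and does nothing.
 */
static void my_unregister(struct task_struct **threadp)
{
	struct task_struct *t = *threadp;

	if (!t)
		return;
	spin_lock(&my_lock);
	*threadp = NULL;
	spin_unlock(&my_lock);
	kthread_stop(t);
}
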
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1e586bb4452e..0a309dc29b45 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p);
560extern int unregister_md_personality(struct mdk_personality *p); 560extern int unregister_md_personality(struct mdk_personality *p);
561extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), 561extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
562 mddev_t *mddev, const char *name); 562 mddev_t *mddev, const char *name);
563extern void md_unregister_thread(mdk_thread_t *thread); 563extern void md_unregister_thread(mdk_thread_t **threadp);
564extern void md_wakeup_thread(mdk_thread_t *thread); 564extern void md_wakeup_thread(mdk_thread_t *thread);
565extern void md_check_recovery(mddev_t *mddev); 565extern void md_check_recovery(mddev_t *mddev);
566extern void md_write_start(mddev_t *mddev, struct bio *bi); 566extern void md_write_start(mddev_t *mddev, struct bio *bi);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3535c23af288..d5b5fb300171 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev)
514{ 514{
515 multipath_conf_t *conf = mddev->private; 515 multipath_conf_t *conf = mddev->private;
516 516
517 md_unregister_thread(mddev->thread); 517 md_unregister_thread(&mddev->thread);
518 mddev->thread = NULL;
519 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 518 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
520 mempool_destroy(conf->pool); 519 mempool_destroy(conf->pool);
521 kfree(conf->multipaths); 520 kfree(conf->multipaths);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f4622dd8fc59..d9587dffe533 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2562,8 +2562,7 @@ static int stop(mddev_t *mddev)
2562 raise_barrier(conf); 2562 raise_barrier(conf);
2563 lower_barrier(conf); 2563 lower_barrier(conf);
2564 2564
2565 md_unregister_thread(mddev->thread); 2565 md_unregister_thread(&mddev->thread);
2566 mddev->thread = NULL;
2567 if (conf->r1bio_pool) 2566 if (conf->r1bio_pool)
2568 mempool_destroy(conf->r1bio_pool); 2567 mempool_destroy(conf->r1bio_pool);
2569 kfree(conf->mirrors); 2568 kfree(conf->mirrors);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d7a8468ddeab..0cd9672cf9cb 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2955,7 +2955,7 @@ static int run(mddev_t *mddev)
2955 return 0; 2955 return 0;
2956 2956
2957out_free_conf: 2957out_free_conf:
2958 md_unregister_thread(mddev->thread); 2958 md_unregister_thread(&mddev->thread);
2959 if (conf->r10bio_pool) 2959 if (conf->r10bio_pool)
2960 mempool_destroy(conf->r10bio_pool); 2960 mempool_destroy(conf->r10bio_pool);
2961 safe_put_page(conf->tmppage); 2961 safe_put_page(conf->tmppage);
@@ -2973,8 +2973,7 @@ static int stop(mddev_t *mddev)
2973 raise_barrier(conf, 0); 2973 raise_barrier(conf, 0);
2974 lower_barrier(conf); 2974 lower_barrier(conf);
2975 2975
2976 md_unregister_thread(mddev->thread); 2976 md_unregister_thread(&mddev->thread);
2977 mddev->thread = NULL;
2978 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 2977 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2979 if (conf->r10bio_pool) 2978 if (conf->r10bio_pool)
2980 mempool_destroy(conf->r10bio_pool); 2979 mempool_destroy(conf->r10bio_pool);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 43709fa6b6df..ac5e8b57e50f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev)
4941 4941
4942 return 0; 4942 return 0;
4943abort: 4943abort:
4944 md_unregister_thread(mddev->thread); 4944 md_unregister_thread(&mddev->thread);
4945 mddev->thread = NULL;
4946 if (conf) { 4945 if (conf) {
4947 print_raid5_conf(conf); 4946 print_raid5_conf(conf);
4948 free_conf(conf); 4947 free_conf(conf);
@@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev)
4956{ 4955{
4957 raid5_conf_t *conf = mddev->private; 4956 raid5_conf_t *conf = mddev->private;
4958 4957
4959 md_unregister_thread(mddev->thread); 4958 md_unregister_thread(&mddev->thread);
4960 mddev->thread = NULL;
4961 if (mddev->queue) 4959 if (mddev->queue)
4962 mddev->queue->backing_dev_info.congested_fn = NULL; 4960 mddev->queue->backing_dev_info.congested_fn = NULL;
4963 free_conf(conf); 4961 free_conf(conf);
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index b5ef36222440..b3a5ecdb33ac 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev)
2194 "'%s' Display already enabled\n", 2194 "'%s' Display already enabled\n",
2195 def_display->name); 2195 def_display->name);
2196 } 2196 }
2197 /* set the update mode */
2198 if (def_display->caps &
2199 OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
2200 if (dssdrv->enable_te)
2201 dssdrv->enable_te(def_display, 0);
2202 if (dssdrv->set_update_mode)
2203 dssdrv->set_update_mode(def_display,
2204 OMAP_DSS_UPDATE_MANUAL);
2205 } else {
2206 if (dssdrv->set_update_mode)
2207 dssdrv->set_update_mode(def_display,
2208 OMAP_DSS_UPDATE_AUTO);
2209 }
2210 } 2197 }
2211 } 2198 }
2212 2199
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index 9d3459de04b2..80796eb0c53e 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -31,6 +31,7 @@
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/slab.h>
34#include <media/v4l2-event.h> 35#include <media/v4l2-event.h>
35 36
36#include "isp.h" 37#include "isp.h"
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index d29f9c2d0854..e4100b1f68df 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
1961 1961
1962 list_for_each_entry(stream, &dev->streams, list) { 1962 list_for_each_entry(stream, &dev->streams, list) {
1963 if (stream->intf == intf) 1963 if (stream->intf == intf)
1964 return uvc_video_resume(stream); 1964 return uvc_video_resume(stream, reset);
1965 } 1965 }
1966 1966
1967 uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " 1967 uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface "
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
index 48fea373c25a..29e239911d0e 100644
--- a/drivers/media/video/uvc/uvc_entity.c
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
49 if (remote == NULL) 49 if (remote == NULL)
50 return -EINVAL; 50 return -EINVAL;
51 51
52 source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING) 52 source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
53 ? (remote->vdev ? &remote->vdev->entity : NULL) 53 ? (remote->vdev ? &remote->vdev->entity : NULL)
54 : &remote->subdev.entity; 54 : &remote->subdev.entity;
55 if (source == NULL) 55 if (source == NULL)
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 8244167c8915..ffd1158628b6 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream)
1104 * buffers, making sure userspace applications are notified of the problem 1104 * buffers, making sure userspace applications are notified of the problem
1105 * instead of waiting forever. 1105 * instead of waiting forever.
1106 */ 1106 */
1107int uvc_video_resume(struct uvc_streaming *stream) 1107int uvc_video_resume(struct uvc_streaming *stream, int reset)
1108{ 1108{
1109 int ret; 1109 int ret;
1110 1110
1111 /* If the bus has been reset on resume, set the alternate setting to 0.
1112 * This should be the default value, but some devices crash or otherwise
1113 * misbehave if they don't receive a SET_INTERFACE request before any
1114 * other video control request.
1115 */
1116 if (reset)
1117 usb_set_interface(stream->dev->udev, stream->intfnum, 0);
1118
1111 stream->frozen = 0; 1119 stream->frozen = 0;
1112 1120
1113 ret = uvc_commit_video(stream, &stream->ctrl); 1121 ret = uvc_commit_video(stream, &stream->ctrl);
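
Note: the reset flag added to uvc_video_resume() above is expected to come from the USB core's resume paths. A hedged sketch of how a driver might wire plain resume versus reset_resume into such a flag; the my_* names are illustrative placeholders, not the uvcvideo functions:

#include <linux/usb.h>

/* Forward the "bus was reset" information so the streaming code can
 * re-issue SET_INTERFACE(alt 0) before any other video request, as the
 * hunk above does for uvcvideo.
 */
static int my_video_resume(struct usb_interface *intf, int reset)
{
	if (reset) {
		struct usb_device *udev = interface_to_usbdev(intf);
		int ret = usb_set_interface(udev,
				intf->cur_altsetting->desc.bInterfaceNumber, 0);
		if (ret < 0)
			return ret;
	}
	/* ... recommit streaming parameters here ... */
	return 0;
}

static int my_resume(struct usb_interface *intf)
{
	return my_video_resume(intf, 0);
}

static int my_reset_resume(struct usb_interface *intf)
{
	return my_video_resume(intf, 1);	/* bus reset: alt setting was lost */
}

static struct usb_driver my_driver = {
	.name		= "my-uvc-like-driver",
	.resume		= my_resume,
	.reset_resume	= my_reset_resume,
	/* .probe/.disconnect/.id_table omitted in this sketch */
};
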
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index df32a43ca86a..cbdd49bf8b67 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
638/* Video */ 638/* Video */
639extern int uvc_video_init(struct uvc_streaming *stream); 639extern int uvc_video_init(struct uvc_streaming *stream);
640extern int uvc_video_suspend(struct uvc_streaming *stream); 640extern int uvc_video_suspend(struct uvc_streaming *stream);
641extern int uvc_video_resume(struct uvc_streaming *stream); 641extern int uvc_video_resume(struct uvc_streaming *stream, int reset);
642extern int uvc_video_enable(struct uvc_streaming *stream, int enable); 642extern int uvc_video_enable(struct uvc_streaming *stream, int enable);
643extern int uvc_probe_video(struct uvc_streaming *stream, 643extern int uvc_probe_video(struct uvc_streaming *stream,
644 struct uvc_streaming_control *probe); 644 struct uvc_streaming_control *probe);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 06f14008b346..d72156517726 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd)
173 media_device_unregister_entity(&vdev->entity); 173 media_device_unregister_entity(&vdev->entity);
174#endif 174#endif
175 175
176 /* Do not call v4l2_device_put if there is no release callback set.
177 * Drivers that have no v4l2_device release callback might free the
178 * v4l2_dev instance in the video_device release callback below, so we
179 * must perform this check here.
180 *
181 * TODO: In the long run all drivers that use v4l2_device should use the
182 * v4l2_device release callback. This check will then be unnecessary.
183 */
184 if (v4l2_dev->release == NULL)
185 v4l2_dev = NULL;
186
176 /* Release video_device and perform other 187 /* Release video_device and perform other
177 cleanups as needed. */ 188 cleanups as needed. */
178 vdev->release(vdev); 189 vdev->release(vdev);
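
Note: the check above relies on drivers opting in by setting a release callback on their v4l2_device, so that the structure is freed only after the last reference drops. A hedged sketch of that opt-in, with a hypothetical driver state structure embedding the v4l2_device:

#include <linux/slab.h>
#include <media/v4l2-device.h>

struct my_drv_state {
	struct v4l2_device v4l2_dev;
	/* ... other driver state ... */
};

/* Runs once the last reference to the v4l2_device is gone, which, with
 * the check added above, is never earlier than the video_device release.
 */
static void my_v4l2_release(struct v4l2_device *v4l2_dev)
{
	struct my_drv_state *state =
		container_of(v4l2_dev, struct my_drv_state, v4l2_dev);

	kfree(state);
}

static int my_probe_fragment(struct device *dev)
{
	struct my_drv_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
	int ret;

	if (!state)
		return -ENOMEM;
	state->v4l2_dev.release = my_v4l2_release;	/* opt in to refcounted cleanup */
	ret = v4l2_device_register(dev, &state->v4l2_dev);
	if (ret)
		kfree(state);
	return ret;
}
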
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index c72856c41434..e6a2c3b302d4 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
38 mutex_init(&v4l2_dev->ioctl_lock); 38 mutex_init(&v4l2_dev->ioctl_lock);
39 v4l2_prio_init(&v4l2_dev->prio); 39 v4l2_prio_init(&v4l2_dev->prio);
40 kref_init(&v4l2_dev->ref); 40 kref_init(&v4l2_dev->ref);
41 get_device(dev);
41 v4l2_dev->dev = dev; 42 v4l2_dev->dev = dev;
42 if (dev == NULL) { 43 if (dev == NULL) {
43 /* If dev == NULL, then name must be filled in by the caller */ 44 /* If dev == NULL, then name must be filled in by the caller */
@@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
93 94
94 if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) 95 if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
95 dev_set_drvdata(v4l2_dev->dev, NULL); 96 dev_set_drvdata(v4l2_dev->dev, NULL);
97 put_device(v4l2_dev->dev);
96 v4l2_dev->dev = NULL; 98 v4l2_dev->dev = NULL;
97} 99}
98EXPORT_SYMBOL_GPL(v4l2_device_disconnect); 100EXPORT_SYMBOL_GPL(v4l2_device_disconnect);
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 21131c7b0f1e..563654c9b19e 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
273 ct->regs.ack = JZ_REG_ADC_STATUS; 273 ct->regs.ack = JZ_REG_ADC_STATUS;
274 ct->chip.irq_mask = irq_gc_mask_set_bit; 274 ct->chip.irq_mask = irq_gc_mask_set_bit;
275 ct->chip.irq_unmask = irq_gc_mask_clr_bit; 275 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
276 ct->chip.irq_ack = irq_gc_ack; 276 ct->chip.irq_ack = irq_gc_ack_set_bit;
277 277
278 irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); 278 irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
279 279
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index b928bc14e97b..8b51cd62d067 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
375 * both have been read. So the value read will always be correct. 375 * both have been read. So the value read will always be correct.
376 * Set BOOT bit to refresh factory tuning values. 376 * Set BOOT bit to refresh factory tuning values.
377 */ 377 */
378 lis3->read(lis3, CTRL_REG2, &reg); 378 if (lis3->pdata) {
379 if (lis3->whoami == WAI_12B) 379 lis3->read(lis3, CTRL_REG2, &reg);
380 reg |= CTRL2_BDU | CTRL2_BOOT; 380 if (lis3->whoami == WAI_12B)
381 else 381 reg |= CTRL2_BDU | CTRL2_BOOT;
382 reg |= CTRL2_BOOT_8B; 382 else
383 lis3->write(lis3, CTRL_REG2, reg); 383 reg |= CTRL2_BOOT_8B;
384 lis3->write(lis3, CTRL_REG2, reg);
385 }
384 386
385 /* LIS3 power on delay is quite long */ 387 /* LIS3 power on delay is quite long */
386 msleep(lis3->pwron_delay / lis3lv02d_get_odr()); 388 msleep(lis3->pwron_delay / lis3lv02d_get_odr());
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a047eb973e3b..47b928ed08f8 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2168 } 2168 }
2169 2169
2170re_arm: 2170re_arm:
2171 queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); 2171 if (!bond->kill_timers)
2172 queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
2172out: 2173out:
2173 read_unlock(&bond->lock); 2174 read_unlock(&bond->lock);
2174} 2175}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7f8b20a34ee3..d4fbd2e62616 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work)
1440 } 1440 }
1441 1441
1442re_arm: 1442re_arm:
1443 queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); 1443 if (!bond->kill_timers)
1444 queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
1444out: 1445out:
1445 read_unlock(&bond->lock); 1446 read_unlock(&bond->lock);
1446} 1447}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1dcb07ce5263..6191e6337284 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -774,6 +774,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
774 774
775 read_lock(&bond->lock); 775 read_lock(&bond->lock);
776 776
777 if (bond->kill_timers)
778 goto out;
779
777 /* rejoin all groups on bond device */ 780 /* rejoin all groups on bond device */
778 __bond_resend_igmp_join_requests(bond->dev); 781 __bond_resend_igmp_join_requests(bond->dev);
779 782
@@ -787,9 +790,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
787 __bond_resend_igmp_join_requests(vlan_dev); 790 __bond_resend_igmp_join_requests(vlan_dev);
788 } 791 }
789 792
790 if (--bond->igmp_retrans > 0) 793 if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
791 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); 794 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
792 795out:
793 read_unlock(&bond->lock); 796 read_unlock(&bond->lock);
794} 797}
795 798
@@ -2535,7 +2538,7 @@ void bond_mii_monitor(struct work_struct *work)
2535 } 2538 }
2536 2539
2537re_arm: 2540re_arm:
2538 if (bond->params.miimon) 2541 if (bond->params.miimon && !bond->kill_timers)
2539 queue_delayed_work(bond->wq, &bond->mii_work, 2542 queue_delayed_work(bond->wq, &bond->mii_work,
2540 msecs_to_jiffies(bond->params.miimon)); 2543 msecs_to_jiffies(bond->params.miimon));
2541out: 2544out:
@@ -2883,7 +2886,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2883 } 2886 }
2884 2887
2885re_arm: 2888re_arm:
2886 if (bond->params.arp_interval) 2889 if (bond->params.arp_interval && !bond->kill_timers)
2887 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 2890 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
2888out: 2891out:
2889 read_unlock(&bond->lock); 2892 read_unlock(&bond->lock);
@@ -3151,7 +3154,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
3151 bond_ab_arp_probe(bond); 3154 bond_ab_arp_probe(bond);
3152 3155
3153re_arm: 3156re_arm:
3154 if (bond->params.arp_interval) 3157 if (bond->params.arp_interval && !bond->kill_timers)
3155 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 3158 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3156out: 3159out:
3157 read_unlock(&bond->lock); 3160 read_unlock(&bond->lock);
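
Note: each bonding hunk above guards the re-arm of a self-rescheduling delayed work with bond->kill_timers. A generic sketch of that pattern with hypothetical names; in the real code the flag is checked while bond->lock is held:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_monitor {
	struct delayed_work work;
	struct workqueue_struct *wq;
	bool stopping;			/* set by teardown before cancelling */
};

static void my_monitor_fn(struct work_struct *work)
{
	struct my_monitor *mon =
		container_of(work, struct my_monitor, work.work);

	/* ... periodic monitoring ... */

	/* Re-arm only while teardown has not started, so no new work item
	 * can be queued after the owner begins tearing things down.
	 */
	if (!mon->stopping)
		queue_delayed_work(mon->wq, &mon->work, HZ);
}
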
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 0b9bd551580b..51bd7485ab18 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -2123,6 +2123,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
2123 break; 2123 break;
2124 case DCB_CAP_ATTR_DCBX: 2124 case DCB_CAP_ATTR_DCBX:
2125 *cap = BNX2X_DCBX_CAPS; 2125 *cap = BNX2X_DCBX_CAPS;
2126 break;
2126 default: 2127 default:
2127 rval = -EINVAL; 2128 rval = -EINVAL;
2128 break; 2129 break;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 28bde1610ffb..6486ab8c8fc8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4937,7 +4937,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
4937 int igu_seg_id; 4937 int igu_seg_id;
4938 int port = BP_PORT(bp); 4938 int port = BP_PORT(bp);
4939 int func = BP_FUNC(bp); 4939 int func = BP_FUNC(bp);
4940 int reg_offset; 4940 int reg_offset, reg_offset_en5;
4941 u64 section; 4941 u64 section;
4942 int index; 4942 int index;
4943 struct hc_sp_status_block_data sp_sb_data; 4943 struct hc_sp_status_block_data sp_sb_data;
@@ -4960,6 +4960,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
4960 4960
4961 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4961 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4962 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 4962 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4963 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
4964 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
4963 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4965 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4964 int sindex; 4966 int sindex;
4965 /* take care of sig[0]..sig[4] */ 4967 /* take care of sig[0]..sig[4] */
@@ -4974,7 +4976,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
4974 * and not 16 between the different groups 4976 * and not 16 between the different groups
4975 */ 4977 */
4976 bp->attn_group[index].sig[4] = REG_RD(bp, 4978 bp->attn_group[index].sig[4] = REG_RD(bp,
4977 reg_offset + 0x10 + 0x4*index); 4979 reg_offset_en5 + 0x4*index);
4978 else 4980 else
4979 bp->attn_group[index].sig[4] = 0; 4981 bp->attn_group[index].sig[4] = 0;
4980 } 4982 }
@@ -7619,8 +7621,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
7619 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 7621 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7620 u8 *mac_addr = bp->dev->dev_addr; 7622 u8 *mac_addr = bp->dev->dev_addr;
7621 u32 val; 7623 u32 val;
7624 u16 pmc;
7625
7622 /* The mac address is written to entries 1-4 to 7626 /* The mac address is written to entries 1-4 to
7623 preserve entry 0 which is used by the PMF */ 7627 * preserve entry 0 which is used by the PMF
7628 */
7624 u8 entry = (BP_VN(bp) + 1)*8; 7629 u8 entry = (BP_VN(bp) + 1)*8;
7625 7630
7626 val = (mac_addr[0] << 8) | mac_addr[1]; 7631 val = (mac_addr[0] << 8) | mac_addr[1];
@@ -7630,6 +7635,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
7630 (mac_addr[4] << 8) | mac_addr[5]; 7635 (mac_addr[4] << 8) | mac_addr[5];
7631 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 7636 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7632 7637
7638 /* Enable the PME and clear the status */
7639 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
7640 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
7641 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
7642
7633 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 7643 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7634 7644
7635 } else 7645 } else
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 750e8445dac4..fc7bd0f23c0b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1384,6 +1384,18 @@
1384 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ 1384 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1385#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 1385#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
1386#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 1386#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
1387/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
1388 * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
1389 * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
1390 * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
1391 * parity; [31-10] Reserved; */
1392#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688
1393/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
1394 * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
1395 * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
1396 * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
1397 * parity; [31-10] Reserved; */
1398#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0
1387/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu 1399/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
1388 128 bit vector */ 1400 128 bit vector */
1389#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 1401#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 805076c54f1b..da5a5d9b8aff 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1146 if (te && te->ctx && te->client && te->client->redirect) { 1146 if (te && te->ctx && te->client && te->client->redirect) {
1147 update_tcb = te->client->redirect(te->ctx, old, new, e); 1147 update_tcb = te->client->redirect(te->ctx, old, new, e);
1148 if (update_tcb) { 1148 if (update_tcb) {
1149 rcu_read_lock();
1149 l2t_hold(L2DATA(tdev), e); 1150 l2t_hold(L2DATA(tdev), e);
1151 rcu_read_unlock();
1150 set_l2t_ix(tdev, tid, e); 1152 set_l2t_ix(tdev, tid, e);
1151 } 1153 }
1152 } 1154 }
1153 } 1155 }
1154 l2t_release(L2DATA(tdev), e); 1156 l2t_release(tdev, e);
1155} 1157}
1156 1158
1157/* 1159/*
@@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
1264 goto out_free; 1266 goto out_free;
1265 1267
1266 err = -ENOMEM; 1268 err = -ENOMEM;
1267 L2DATA(dev) = t3_init_l2t(l2t_capacity); 1269 RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
1268 if (!L2DATA(dev)) 1270 if (!L2DATA(dev))
1269 goto out_free; 1271 goto out_free;
1270 1272
@@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter)
1298 1300
1299out_free_l2t: 1301out_free_l2t:
1300 t3_free_l2t(L2DATA(dev)); 1302 t3_free_l2t(L2DATA(dev));
1301 L2DATA(dev) = NULL; 1303 rcu_assign_pointer(dev->l2opt, NULL);
1302out_free: 1304out_free:
1303 kfree(t); 1305 kfree(t);
1304 return err; 1306 return err;
1305} 1307}
1306 1308
1309static void clean_l2_data(struct rcu_head *head)
1310{
1311 struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
1312 t3_free_l2t(d);
1313}
1314
1315
1307void cxgb3_offload_deactivate(struct adapter *adapter) 1316void cxgb3_offload_deactivate(struct adapter *adapter)
1308{ 1317{
1309 struct t3cdev *tdev = &adapter->tdev; 1318 struct t3cdev *tdev = &adapter->tdev;
1310 struct t3c_data *t = T3C_DATA(tdev); 1319 struct t3c_data *t = T3C_DATA(tdev);
1320 struct l2t_data *d;
1311 1321
1312 remove_adapter(adapter); 1322 remove_adapter(adapter);
1313 if (list_empty(&adapter_list)) 1323 if (list_empty(&adapter_list))
@@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
1315 1325
1316 free_tid_maps(&t->tid_maps); 1326 free_tid_maps(&t->tid_maps);
1317 T3C_DATA(tdev) = NULL; 1327 T3C_DATA(tdev) = NULL;
1318 t3_free_l2t(L2DATA(tdev)); 1328 rcu_read_lock();
1319 L2DATA(tdev) = NULL; 1329 d = L2DATA(tdev);
1330 rcu_read_unlock();
1331 rcu_assign_pointer(tdev->l2opt, NULL);
1332 call_rcu(&d->rcu_head, clean_l2_data);
1320 if (t->nofail_skb) 1333 if (t->nofail_skb)
1321 kfree_skb(t->nofail_skb); 1334 kfree_skb(t->nofail_skb);
1322 kfree(t); 1335 kfree(t);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index f452c4003253..41540978a173 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
300struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, 300struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
301 struct net_device *dev) 301 struct net_device *dev)
302{ 302{
303 struct l2t_entry *e; 303 struct l2t_entry *e = NULL;
304 struct l2t_data *d = L2DATA(cdev); 304 struct l2t_data *d;
305 int hash;
305 u32 addr = *(u32 *) neigh->primary_key; 306 u32 addr = *(u32 *) neigh->primary_key;
306 int ifidx = neigh->dev->ifindex; 307 int ifidx = neigh->dev->ifindex;
307 int hash = arp_hash(addr, ifidx, d);
308 struct port_info *p = netdev_priv(dev); 308 struct port_info *p = netdev_priv(dev);
309 int smt_idx = p->port_id; 309 int smt_idx = p->port_id;
310 310
311 rcu_read_lock();
312 d = L2DATA(cdev);
313 if (!d)
314 goto done_rcu;
315
316 hash = arp_hash(addr, ifidx, d);
317
311 write_lock_bh(&d->lock); 318 write_lock_bh(&d->lock);
312 for (e = d->l2tab[hash].first; e; e = e->next) 319 for (e = d->l2tab[hash].first; e; e = e->next)
313 if (e->addr == addr && e->ifindex == ifidx && 320 if (e->addr == addr && e->ifindex == ifidx &&
@@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
338 } 345 }
339done: 346done:
340 write_unlock_bh(&d->lock); 347 write_unlock_bh(&d->lock);
348done_rcu:
349 rcu_read_unlock();
341 return e; 350 return e;
342} 351}
343 352
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index 7a12d52ed4fc..c5f54796e2cb 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -76,6 +76,7 @@ struct l2t_data {
76 atomic_t nfree; /* number of free entries */ 76 atomic_t nfree; /* number of free entries */
77 rwlock_t lock; 77 rwlock_t lock;
78 struct l2t_entry l2tab[0]; 78 struct l2t_entry l2tab[0];
79 struct rcu_head rcu_head; /* to handle rcu cleanup */
79}; 80};
80 81
81typedef void (*arp_failure_handler_func)(struct t3cdev * dev, 82typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
@@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
99/* 100/*
100 * Getting to the L2 data from an offload device. 101 * Getting to the L2 data from an offload device.
101 */ 102 */
102#define L2DATA(dev) ((dev)->l2opt) 103#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
103 104
104#define W_TCB_L2T_IX 0 105#define W_TCB_L2T_IX 0
105#define S_TCB_L2T_IX 7 106#define S_TCB_L2T_IX 7
@@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
126 return t3_l2t_send_slow(dev, skb, e); 127 return t3_l2t_send_slow(dev, skb, e);
127} 128}
128 129
129static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) 130static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
130{ 131{
131 if (atomic_dec_and_test(&e->refcnt)) 132 struct l2t_data *d;
133
134 rcu_read_lock();
135 d = L2DATA(t);
136
137 if (atomic_dec_and_test(&e->refcnt) && d)
132 t3_l2e_free(d, e); 138 t3_l2e_free(d, e);
139
140 rcu_read_unlock();
133} 141}
134 142
135static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) 143static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
136{ 144{
137 if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ 145 if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
138 atomic_dec(&d->nfree); 146 atomic_dec(&d->nfree);
139} 147}
140 148
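
Note: the l2t changes convert cdev->l2opt into an RCU-protected pointer: readers pin it with rcu_read_lock()/rcu_dereference(), and teardown unpublishes it before freeing through call_rcu(). A generic sketch of that lifecycle with hypothetical types (struct my_table, my_table_ptr):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_table {
	struct rcu_head rcu;	/* for deferred freeing */
	/* ... lookup state ... */
};

static struct my_table __rcu *my_table_ptr;

/* Reader: pin the table for the duration of the lookup. */
static void my_lookup(void)
{
	struct my_table *t;

	rcu_read_lock();
	t = rcu_dereference(my_table_ptr);
	if (t) {
		/* ... use t; it cannot be freed until rcu_read_unlock() ... */
	}
	rcu_read_unlock();
}

static void my_table_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_table, rcu));
}

/* Writer: unpublish first, then free after a grace period, the same
 * shape as the cxgb3_offload_deactivate() change above.
 */
static void my_table_destroy(void)
{
	struct my_table *t = rcu_dereference_protected(my_table_ptr, 1);

	rcu_assign_pointer(my_table_ptr, NULL);
	if (t)
		call_rcu(&t->rcu, my_table_free_rcu);
}
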
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 40b395f932cf..4c8f42afa3c6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3715,6 +3715,9 @@ static int __devinit init_one(struct pci_dev *pdev,
3715 setup_debugfs(adapter); 3715 setup_debugfs(adapter);
3716 } 3716 }
3717 3717
3718 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
3719 pdev->needs_freset = 1;
3720
3718 if (is_offload(adapter)) 3721 if (is_offload(adapter))
3719 attach_ulds(adapter); 3722 attach_ulds(adapter);
3720 3723
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 72b84de48756..4da972eaabb4 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev)
636 netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", 636 netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
637 netdev->irq, rc); 637 netdev->irq, rc);
638 do { 638 do {
639 rc = h_free_logical_lan(adapter->vdev->unit_address); 639 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
640 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); 640 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
641 641
642 goto err_out; 642 goto err_out;
643 } 643 }
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 5dc61b4ef3cd..b89f3a684aec 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1198,6 +1198,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
1198 iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), 1198 iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1199 &hw->reg->INT_EN); 1199 &hw->reg->INT_EN);
1200 pch_gbe_stop_receive(adapter); 1200 pch_gbe_stop_receive(adapter);
1201 int_st |= ioread32(&hw->reg->INT_ST);
1202 int_st = int_st & ioread32(&hw->reg->INT_EN);
1201 } 1203 }
1202 if (int_st & PCH_GBE_INT_RX_DMA_ERR) 1204 if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1203 adapter->stats.intr_rx_dma_err_count++; 1205 adapter->stats.intr_rx_dma_err_count++;
@@ -1217,14 +1219,11 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
1217 /* Set Pause packet */ 1219 /* Set Pause packet */
1218 pch_gbe_mac_set_pause_packet(hw); 1220 pch_gbe_mac_set_pause_packet(hw);
1219 } 1221 }
1220 if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
1221 == 0) {
1222 return IRQ_HANDLED;
1223 }
1224 } 1222 }
1225 1223
1226 /* When request status is Receive interruption */ 1224 /* When request status is Receive interruption */
1227 if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) { 1225 if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
1226 (adapter->rx_stop_flag == true)) {
1228 if (likely(napi_schedule_prep(&adapter->napi))) { 1227 if (likely(napi_schedule_prep(&adapter->napi))) {
1229 /* Enable only Rx Descriptor empty */ 1228 /* Enable only Rx Descriptor empty */
1230 atomic_inc(&adapter->irq_sem); 1229 atomic_inc(&adapter->irq_sem);
@@ -1384,7 +1383,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1384 struct sk_buff *skb; 1383 struct sk_buff *skb;
1385 unsigned int i; 1384 unsigned int i;
1386 unsigned int cleaned_count = 0; 1385 unsigned int cleaned_count = 0;
1387 bool cleaned = false; 1386 bool cleaned = true;
1388 1387
1389 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); 1388 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1390 1389
@@ -1395,7 +1394,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1395 1394
1396 while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { 1395 while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1397 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); 1396 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
1398 cleaned = true;
1399 buffer_info = &tx_ring->buffer_info[i]; 1397 buffer_info = &tx_ring->buffer_info[i];
1400 skb = buffer_info->skb; 1398 skb = buffer_info->skb;
1401 1399
@@ -1438,8 +1436,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1438 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); 1436 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1439 1437
1440 /* weight of a sort for tx, to avoid endless transmit cleanup */ 1438 /* weight of a sort for tx, to avoid endless transmit cleanup */
1441 if (cleaned_count++ == PCH_GBE_TX_WEIGHT) 1439 if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
1440 cleaned = false;
1442 break; 1441 break;
1442 }
1443 } 1443 }
1444 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", 1444 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1445 cleaned_count); 1445 cleaned_count);
@@ -2167,7 +2167,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2167{ 2167{
2168 struct pch_gbe_adapter *adapter = 2168 struct pch_gbe_adapter *adapter =
2169 container_of(napi, struct pch_gbe_adapter, napi); 2169 container_of(napi, struct pch_gbe_adapter, napi);
2170 struct net_device *netdev = adapter->netdev;
2171 int work_done = 0; 2170 int work_done = 0;
2172 bool poll_end_flag = false; 2171 bool poll_end_flag = false;
2173 bool cleaned = false; 2172 bool cleaned = false;
@@ -2175,33 +2174,32 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2175 2174
2176 pr_debug("budget : %d\n", budget); 2175 pr_debug("budget : %d\n", budget);
2177 2176
2178 /* Keep link state information with original netdev */ 2177 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2179 if (!netif_carrier_ok(netdev)) { 2178 cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2179
2180 if (!cleaned)
2181 work_done = budget;
2182 /* If no Tx and not enough Rx work done,
2183 * exit the polling mode
2184 */
2185 if (work_done < budget)
2180 poll_end_flag = true; 2186 poll_end_flag = true;
2181 } else { 2187
2182 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); 2188 if (poll_end_flag) {
2189 napi_complete(napi);
2190 if (adapter->rx_stop_flag) {
2191 adapter->rx_stop_flag = false;
2192 pch_gbe_start_receive(&adapter->hw);
2193 }
2194 pch_gbe_irq_enable(adapter);
2195 } else
2183 if (adapter->rx_stop_flag) { 2196 if (adapter->rx_stop_flag) {
2184 adapter->rx_stop_flag = false; 2197 adapter->rx_stop_flag = false;
2185 pch_gbe_start_receive(&adapter->hw); 2198 pch_gbe_start_receive(&adapter->hw);
2186 int_en = ioread32(&adapter->hw.reg->INT_EN); 2199 int_en = ioread32(&adapter->hw.reg->INT_EN);
2187 iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), 2200 iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
2188 &adapter->hw.reg->INT_EN); 2201 &adapter->hw.reg->INT_EN);
2189 } 2202 }
2190 cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2191
2192 if (cleaned)
2193 work_done = budget;
2194 /* If no Tx and not enough Rx work done,
2195 * exit the polling mode
2196 */
2197 if ((work_done < budget) || !netif_running(netdev))
2198 poll_end_flag = true;
2199 }
2200
2201 if (poll_end_flag) {
2202 napi_complete(napi);
2203 pch_gbe_irq_enable(adapter);
2204 }
2205 2203
2206 pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", 2204 pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
2207 poll_end_flag, work_done, budget); 2205 poll_end_flag, work_done, budget);
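
Note: the reworked poll routine above always cleans both rings, treats an unfinished Tx cleanup as a full budget, and leaves polling mode (napi_complete() plus interrupt re-enable) only when Rx work stayed under budget. A hedged sketch of that shape; struct my_adapter and the my_* helpers are placeholders, not the pch_gbe API:

#include <linux/netdevice.h>

struct my_adapter {
	struct napi_struct napi;
	/* ... rings, register mappings ... */
};

static void my_clean_rx(struct my_adapter *a, int *work_done, int budget) { /* ... */ }
static bool my_clean_tx(struct my_adapter *a) { return true; /* ... */ }
static void my_irq_enable(struct my_adapter *a) { /* ... */ }

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_adapter *adapter =
		container_of(napi, struct my_adapter, napi);
	int work_done = 0;

	my_clean_rx(adapter, &work_done, budget);

	/* A Tx cleanup that hit its weight limit means more work remains:
	 * report the full budget so NAPI keeps polling.
	 */
	if (!my_clean_tx(adapter))
		work_done = budget;

	if (work_done < budget) {	/* quiet enough: leave polling mode */
		napi_complete(napi);
		my_irq_enable(adapter);
	}
	return work_done;
}
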
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b100c90e8507..24cf942e1316 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
239 dest = macvlan_hash_lookup(port, eth->h_dest); 239 dest = macvlan_hash_lookup(port, eth->h_dest);
240 if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { 240 if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
241 /* send to lowerdev first for its network taps */ 241 /* send to lowerdev first for its network taps */
242 vlan->forward(vlan->lowerdev, skb); 242 dev_forward_skb(vlan->lowerdev, skb);
243 243
244 return NET_XMIT_SUCCESS; 244 return NET_XMIT_SUCCESS;
245 } 245 }
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index be381c24c4b4..c588a162050f 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -686,7 +686,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
686 prune_rx_ts(dp83640); 686 prune_rx_ts(dp83640);
687 687
688 if (list_empty(&dp83640->rxpool)) { 688 if (list_empty(&dp83640->rxpool)) {
689 pr_warning("dp83640: rx timestamp pool is empty\n"); 689 pr_debug("dp83640: rx timestamp pool is empty\n");
690 goto out; 690 goto out;
691 } 691 }
692 rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); 692 rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -709,7 +709,7 @@ static void decode_txts(struct dp83640_private *dp83640,
709 skb = skb_dequeue(&dp83640->tx_queue); 709 skb = skb_dequeue(&dp83640->tx_queue);
710 710
711 if (!skb) { 711 if (!skb) {
712 pr_warning("dp83640: have timestamp but tx_queue empty\n"); 712 pr_debug("dp83640: have timestamp but tx_queue empty\n");
713 return; 713 return;
714 } 714 }
715 ns = phy2txts(phy_txts); 715 ns = phy2txts(phy_txts);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 0ca86f9ec4ed..182562952c79 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	xenvif_get(vif);
 
 	rtnl_lock();
-	if (netif_running(vif->dev))
-		xenvif_up(vif);
 	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
 		dev_set_mtu(vif->dev, ETH_DATA_LEN);
 	netdev_update_features(vif->dev);
 	netif_carrier_on(vif->dev);
+	if (netif_running(vif->dev))
+		xenvif_up(vif);
 	rtnl_unlock();
 
 	return 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4e84fd4a4312..e9651f0a8817 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
 unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
 
-enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
 
 /*
  * The default CLS is used if arch didn't set CLS explicitly and not
@@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str)
 			pci_hotplug_io_size = memparse(str + 9, &str);
 		} else if (!strncmp(str, "hpmemsize=", 10)) {
 			pci_hotplug_mem_size = memparse(str + 10, &str);
+		} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
+			pcie_bus_config = PCIE_BUS_TUNE_OFF;
 		} else if (!strncmp(str, "pcie_bus_safe", 13)) {
 			pcie_bus_config = PCIE_BUS_SAFE;
 		} else if (!strncmp(str, "pcie_bus_perf", 13)) {
 			pcie_bus_config = PCIE_BUS_PERFORMANCE;
+		} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
+			pcie_bus_config = PCIE_BUS_PEER2PEER;
 		} else {
 			printk(KERN_ERR "PCI: Unknown option `%s'\n",
 					str);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f3f94a5c068f..6ab6bd3df4b2 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1458,12 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
  */
 void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
 {
-	u8 smpss = mpss;
+	u8 smpss;
 
 	if (!pci_is_pcie(bus->self))
 		return;
 
+	if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
+		return;
+
+	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
+	 * to be aware to the MPS of the destination. To work around this,
+	 * simply force the MPS of the entire system to the smallest possible.
+	 */
+	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+		smpss = 0;
+
 	if (pcie_bus_config == PCIE_BUS_SAFE) {
+		smpss = mpss;
+
 		pcie_find_smpss(bus->self, &smpss);
 		pci_walk_bus(bus, pcie_find_smpss, &smpss);
 	}
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index cbde448f9947..eb3140ee821e 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv;
 static int console_subchannel_in_use;
 
 /*
- * Use tpi to get a pending interrupt, call the interrupt handler and
- * return a pointer to the subchannel structure.
+ * Use cio_tpi to get a pending interrupt and call the interrupt handler.
+ * Return non-zero if an interrupt was processed, zero otherwise.
  */
 static int cio_tpi(void)
 {
@@ -667,6 +667,10 @@ static int cio_tpi(void)
 	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
 	if (tpi(NULL) != 1)
 		return 0;
+	if (tpi_info->adapter_IO) {
+		do_adapter_IO(tpi_info->isc);
+		return 1;
+	}
 	irb = (struct irb *)&S390_lowcore.irb;
 	/* Store interrupt response block to lowcore. */
 	if (tsch(tpi_info->schid, irb) != 0)
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b7bd5b0cc7aa..3868ab2397c6 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
 	switch (retval) {
 	case SCSI_MLQUEUE_HOST_BUSY:
 		twa_free_request_id(tw_dev, request_id);
+		twa_unmap_scsi_data(tw_dev, request_id);
 		break;
 	case 1:
 		tw_dev->state[request_id] = TW_S_COMPLETED;
 		twa_free_request_id(tw_dev, request_id);
+		twa_unmap_scsi_data(tw_dev, request_id);
 		SCpnt->result = (DID_ERROR << 16);
 		done(SCpnt);
 		retval = 0;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8d9dae89f065..3878b7395081 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -837,6 +837,7 @@ config SCSI_ISCI
 	# (temporary): known alpha quality driver
 	depends on EXPERIMENTAL
 	select SCSI_SAS_LIBSAS
+	select SCSI_SAS_HOST_SMP
 	---help---
 	  This driver supports the 6Gb/s SAS capabilities of the storage
 	  control unit found in the Intel(R) C600 series chipset.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 3c08f5352b2d..6153a66a8a31 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
 obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
 obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
-obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
+obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
 obj-$(CONFIG_SCSI_LPFC) += lpfc/
 obj-$(CONFIG_SCSI_BFA_FC) += bfa/
 obj-$(CONFIG_SCSI_PAS16) += pas16.o
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e7d0d47b9185..e5f2d7d9002e 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
 	kfree(aac->queues);
 	aac->queues = NULL;
 	free_irq(aac->pdev->irq, aac);
+	if (aac->msi)
+		pci_disable_msi(aac->pdev);
 	kfree(aac->fsa_dev);
 	aac->fsa_dev = NULL;
 	quirks = aac_get_driver_ident(index)->quirks;
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index bd22041e2789..f58644850333 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -913,7 +913,7 @@ static void l2t_put(struct cxgbi_sock *csk)
 	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
 
 	if (csk->l2t) {
-		l2t_release(L2DATA(t3dev), csk->l2t);
+		l2t_release(t3dev, csk->l2t);
 		csk->l2t = NULL;
 		cxgbi_sock_put(csk);
 	}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index f84084bba2f0..16ad97df5ba6 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
 	list_for_each_entry(ch, &ex->children, siblings) {
 		if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
 			res = sas_find_bcast_dev(ch, src_dev);
-			if (src_dev)
+			if (*src_dev)
 				return res;
 		}
 	}
@@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
 		sas_disable_routing(parent, phy->attached_sas_addr);
 	}
 	memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
-	sas_port_delete_phy(phy->port, phy->phy);
-	if (phy->port->num_phys == 0)
-		sas_port_delete(phy->port);
-	phy->port = NULL;
+	if (phy->port) {
+		sas_port_delete_phy(phy->port, phy->phy);
+		if (phy->port->num_phys == 0)
+			sas_port_delete(phy->port);
+		phy->port = NULL;
+	}
 }
 
 static int sas_discover_bfs_by_root_level(struct domain_device *root,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 646fc5263d50..8a7591f035e6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1507,8 +1507,8 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
 
 	if (k != blocks_done) {
 		qla_printk(KERN_WARNING, sp->fcport->vha->hw,
-		    "unexpected tag values tag:lba=%x:%lx)\n",
-		    e_ref_tag, lba_s);
+		    "unexpected tag values tag:lba=%x:%llx)\n",
+		    e_ref_tag, (unsigned long long)lba_s);
 		return 1;
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 4cace3f20c04..1e69527f1e4e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1328,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 				qla2x00_sp_compl(ha, sp);
 			} else {
 				ctx = sp->ctx;
-				if (ctx->type == SRB_LOGIN_CMD ||
-				    ctx->type == SRB_LOGOUT_CMD) {
-					ctx->u.iocb_cmd->free(sp);
-				} else {
+				if (ctx->type == SRB_ELS_CMD_RPT ||
+				    ctx->type == SRB_ELS_CMD_HST ||
+				    ctx->type == SRB_CT_CMD) {
 					struct fc_bsg_job *bsg_job =
 					    ctx->u.bsg_job;
 					if (bsg_job->request->msgcode
@@ -1343,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 					kfree(sp->ctx);
 					mempool_free(sp,
 					    ha->srb_mempool);
+				} else {
+					ctx->u.iocb_cmd->free(sp);
 				}
 			}
 		}
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index d2407558773f..24cacff57786 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -825,6 +825,9 @@ static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
 {
 	struct device *dev = mspi->dev;
 
+	if (!(mspi->flags & SPI_CPM_MODE))
+		return;
+
 	dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
 	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
 	cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 8ac6542aedcd..fa594d604aca 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -786,9 +786,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
 		int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
 		if (cs_gpio < 0)
 			cs_gpio = mxc_platform_info->chipselect[i];
+
+		spi_imx->chipselect[i] = cs_gpio;
 		if (cs_gpio < 0)
 			continue;
-		spi_imx->chipselect[i] = cs_gpio;
+
 		ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
 		if (ret) {
 			while (i > 0) {
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 1d23f3831866..6a80749391db 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -50,6 +50,8 @@
 #define PCH_RX_THOLD 7
 #define PCH_RX_THOLD_MAX 15
 
+#define PCH_TX_THOLD 2
+
 #define PCH_MAX_BAUDRATE 5000000
 #define PCH_MAX_FIFO_DEPTH 16
 
@@ -58,6 +60,7 @@
 #define PCH_SLEEP_TIME 10
 
 #define SSN_LOW 0x02U
+#define SSN_HIGH 0x03U
 #define SSN_NO_CONTROL 0x00U
 #define PCH_MAX_CS 0xFF
 #define PCI_DEVICE_ID_GE_SPI 0x8816
@@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
 
 	/* if transfer complete interrupt */
 	if (reg_spsr_val & SPSR_FI_BIT) {
-		if (tx_index < bpw_len)
+		if ((tx_index == bpw_len) && (rx_index == tx_index)) {
+			/* disable interrupts */
+			pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+
+			/* transfer is completed;
+			   inform pch_spi_process_messages */
+			data->transfer_complete = true;
+			data->transfer_active = false;
+			wake_up(&data->wait);
+		} else {
 			dev_err(&data->master->dev,
 				"%s : Transfer is not completed", __func__);
-		/* disable interrupts */
-		pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
-
-		/* transfer is completed;inform pch_spi_process_messages */
-		data->transfer_complete = true;
-		data->transfer_active = false;
-		wake_up(&data->wait);
+		}
 	}
 }
 
@@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id)
 			"%s returning due to suspend\n", __func__);
 		return IRQ_NONE;
 	}
-	if (data->use_dma)
-		return IRQ_NONE;
 
 	io_remap_addr = data->io_remap_addr;
 	spsr = io_remap_addr + PCH_SPSR;
 
 	reg_spsr_val = ioread32(spsr);
 
-	if (reg_spsr_val & SPSR_ORF_BIT)
-		dev_err(&board_dat->pdev->dev, "%s Over run error", __func__);
+	if (reg_spsr_val & SPSR_ORF_BIT) {
+		dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
+		if (data->current_msg->complete != 0) {
+			data->transfer_complete = true;
+			data->current_msg->status = -EIO;
+			data->current_msg->complete(data->current_msg->context);
+			data->bcurrent_msg_processing = false;
+			data->current_msg = NULL;
+			data->cur_trans = NULL;
+		}
+	}
+
+	if (data->use_dma)
+		return IRQ_NONE;
 
 	/* Check if the interrupt is for SPI device */
 	if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
@@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
 
 	wait_event_interruptible(data->wait, data->transfer_complete);
 
-	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
-	dev_dbg(&data->master->dev,
-		"%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
-
 	/* clear all interrupts */
 	pch_spi_writereg(data->master, PCH_SPSR,
 			 pch_spi_readreg(data->master, PCH_SPSR));
@@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
 	}
 }
 
-static void pch_spi_start_transfer(struct pch_spi_data *data)
+static int pch_spi_start_transfer(struct pch_spi_data *data)
 {
 	struct pch_spi_dma_ctrl *dma;
 	unsigned long flags;
+	int rtn;
 
 	dma = &data->dma;
 
@@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
 	   initiating the transfer. */
 	dev_dbg(&data->master->dev,
 		"%s:waiting for transfer to get over\n", __func__);
-	wait_event_interruptible(data->wait, data->transfer_complete);
+	rtn = wait_event_interruptible_timeout(data->wait,
+					       data->transfer_complete,
+					       msecs_to_jiffies(2 * HZ));
 
 	dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
 			    DMA_FROM_DEVICE);
+
+	dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
+			    DMA_FROM_DEVICE);
+	memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
+
 	async_tx_ack(dma->desc_rx);
 	async_tx_ack(dma->desc_tx);
 	kfree(dma->sg_tx_p);
 	kfree(dma->sg_rx_p);
 
 	spin_lock_irqsave(&data->lock, flags);
-	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
-	dev_dbg(&data->master->dev,
-		"%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
 
 	/* clear fifo threshold, disable interrupts, disable SPI transfer */
 	pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
@@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
 	pch_spi_clear_fifo(data->master);
 
 	spin_unlock_irqrestore(&data->lock, flags);
+
+	return rtn;
 }
 
 static void pch_dma_rx_complete(void *arg)
@@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
 	/* set receive fifo threshold and transmit fifo threshold */
 	pch_spi_setclr_reg(data->master, PCH_SPCR,
 			   ((size - 1) << SPCR_RFIC_FIELD) |
-			   ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) <<
-			   SPCR_TFIC_FIELD),
+			   (PCH_TX_THOLD << SPCR_TFIC_FIELD),
 			   MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
 
 	spin_unlock_irqrestore(&data->lock, flags);
@@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
 	/* offset, length setting */
 	sg = dma->sg_rx_p;
 	for (i = 0; i < num; i++, sg++) {
-		if (i == 0) {
-			sg->offset = 0;
+		if (i == (num - 2)) {
+			sg->offset = size * i;
+			sg->offset = sg->offset * (*bpw / 8);
 			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
 				    sg->offset);
 			sg_dma_len(sg) = rem;
+		} else if (i == (num - 1)) {
+			sg->offset = size * (i - 1) + rem;
+			sg->offset = sg->offset * (*bpw / 8);
+			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
+				    sg->offset);
+			sg_dma_len(sg) = size;
 		} else {
-			sg->offset = rem + size * (i - 1);
+			sg->offset = size * i;
 			sg->offset = sg->offset * (*bpw / 8);
 			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
 				    sg->offset);
@@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
 	dma->desc_rx = desc_rx;
 
 	/* TX */
+	if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
+		num = data->bpw_len / PCH_DMA_TRANS_SIZE;
+		size = PCH_DMA_TRANS_SIZE;
+		rem = 16;
+	} else {
+		num = 1;
+		size = data->bpw_len;
+		rem = data->bpw_len;
+	}
+
 	dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
 	sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
 	/* offset, length setting */
@@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
 	if (data->use_dma)
 		pch_spi_request_dma(data,
 				    data->current_msg->spi->bits_per_word);
+	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
 	do {
 		/* If we are already processing a message get the next
 		transfer structure from the message otherwise retrieve
@@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
 
 		if (data->use_dma) {
 			pch_spi_handle_dma(data, &bpw);
-			pch_spi_start_transfer(data);
+			if (!pch_spi_start_transfer(data))
+				goto out;
 			pch_spi_copy_rx_data_for_dma(data, bpw);
 		} else {
 			pch_spi_set_tx(data, &bpw);
@@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
 
 	} while (data->cur_trans != NULL);
 
+out:
+	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
 	if (data->use_dma)
 		pch_spi_release_dma(data);
 }
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index e0c2807b0970..181fa8158a8b 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -148,10 +148,10 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
 	}
 	platform_set_drvdata(pdev, bus);
 
-	/* Register all devices */
 	pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
 		 zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
 
+	/* First identify all devices ... */
 	for (i = 0; i < zorro_num_autocon; i++) {
 		z = &zorro_autocon[i];
 		z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
@@ -172,6 +172,11 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
 		dev_set_name(&z->dev, "%02x", i);
 		z->dev.parent = &bus->dev;
 		z->dev.bus = &zorro_bus_type;
+	}
+
+	/* ... then register them */
+	for (i = 0; i < zorro_num_autocon; i++) {
+		z = &zorro_autocon[i];
 		error = device_register(&z->dev);
 		if (error) {
 			dev_err(&bus->dev, "Error registering device %s\n",
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a381cd22f518..e4e57d59edb7 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1036,11 +1036,13 @@ out:
  * on error we return an unlocked page and the error value
  * on success we return a locked page and 0
  */
-static int prepare_uptodate_page(struct page *page, u64 pos)
+static int prepare_uptodate_page(struct page *page, u64 pos,
+				 bool force_uptodate)
 {
 	int ret = 0;
 
-	if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
+	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
+	    !PageUptodate(page)) {
 		ret = btrfs_readpage(NULL, page);
 		if (ret)
 			return ret;
@@ -1061,7 +1063,7 @@ static int prepare_uptodate_page(struct page *page, u64 pos)
 static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
 			 struct page **pages, size_t num_pages,
 			 loff_t pos, unsigned long first_index,
-			 size_t write_bytes)
+			 size_t write_bytes, bool force_uptodate)
 {
 	struct extent_state *cached_state = NULL;
 	int i;
@@ -1086,10 +1088,11 @@ again:
 		}
 
 		if (i == 0)
-			err = prepare_uptodate_page(pages[i], pos);
+			err = prepare_uptodate_page(pages[i], pos,
+						    force_uptodate);
 		if (i == num_pages - 1)
 			err = prepare_uptodate_page(pages[i],
-						    pos + write_bytes);
+						    pos + write_bytes, false);
 		if (err) {
 			page_cache_release(pages[i]);
 			faili = i - 1;
@@ -1158,6 +1161,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	size_t num_written = 0;
 	int nrptrs;
 	int ret = 0;
+	bool force_page_uptodate = false;
 
 	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
 		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
@@ -1200,7 +1204,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		 * contents of pages from loop to loop
 		 */
 		ret = prepare_pages(root, file, pages, num_pages,
-				    pos, first_index, write_bytes);
+				    pos, first_index, write_bytes,
+				    force_page_uptodate);
 		if (ret) {
 			btrfs_delalloc_release_space(inode,
 					num_pages << PAGE_CACHE_SHIFT);
@@ -1217,12 +1222,15 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		if (copied < write_bytes)
 			nrptrs = 1;
 
-		if (copied == 0)
+		if (copied == 0) {
+			force_page_uptodate = true;
 			dirty_pages = 0;
-		else
+		} else {
+			force_page_uptodate = false;
 			dirty_pages = (copied + offset +
 				       PAGE_CACHE_SIZE - 1) >>
 				       PAGE_CACHE_SHIFT;
+		}
 
 		/*
 		 * If we had a short copy we need to release the excess delaloc
diff --git a/fs/namei.c b/fs/namei.c
index f4788365ea22..0b3138de2a3b 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -721,12 +721,6 @@ static int follow_automount(struct path *path, unsigned flags,
 	if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
 		return -EREMOTE;
 
-	/* We don't want to mount if someone supplied AT_NO_AUTOMOUNT
-	 * and this is the terminal part of the path.
-	 */
-	if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT))
-		return -EISDIR; /* we actually want to stop here */
-
 	/* We don't want to mount if someone's just doing a stat -
 	 * unless they're stat'ing a directory and appended a '/' to
 	 * the name.
@@ -739,7 +733,7 @@ static int follow_automount(struct path *path, unsigned flags,
 	 * of the daemon to instantiate them before they can be used.
 	 */
 	if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
-		     LOOKUP_OPEN | LOOKUP_CREATE)) &&
+		     LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
 	    path->dentry->d_inode)
 		return -EISDIR;
 
diff --git a/fs/namespace.c b/fs/namespace.c
index 22bfe8273c68..b4febb29d3bb 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1757,7 +1757,7 @@ static int do_loopback(struct path *path, char *old_name,
 		return err;
 	if (!old_name || !*old_name)
 		return -EINVAL;
-	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
+	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
 	if (err)
 		return err;
 
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 9b7dd7013b15..5b19b6aabe18 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2798,7 +2798,7 @@ static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
 		goto out_put_mnt_ns;
 
 	ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
-			export_path, LOOKUP_FOLLOW, &path);
+			export_path, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
 
 	nfs_referral_loop_unprotect();
 	put_mnt_ns(ns_private);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index b34bdb25490c..10b6be3ca280 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -355,7 +355,7 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
 	 * resolution (think about autofs) and thus deadlocks could arise.
 	 */
 	if (cmds == Q_QUOTAON) {
-		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW, &path);
+		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
 		if (ret)
 			pathp = ERR_PTR(ret);
 		else
diff --git a/fs/stat.c b/fs/stat.c
index ba5316ffac61..78a3aa83c7ea 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -81,8 +81,6 @@ int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
 
 	if (!(flag & AT_SYMLINK_NOFOLLOW))
 		lookup_flags |= LOOKUP_FOLLOW;
-	if (flag & AT_NO_AUTOMOUNT)
-		lookup_flags |= LOOKUP_NO_AUTOMOUNT;
 	if (flag & AT_EMPTY_PATH)
 		lookup_flags |= LOOKUP_EMPTY;
 
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 3fa1f3d90ce0..99e3e50b5c57 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -197,6 +197,11 @@ struct dm_target {
 	 * whether or not its underlying devices have support.
 	 */
 	unsigned discards_supported:1;
+
+	/*
+	 * Set if this target does not return zeroes on discarded blocks.
+	 */
+	unsigned discard_zeroes_data_unsupported:1;
 };
 
 /* Each target can link one of these into the table */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index e807ad687a07..3ad553e8eae2 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -80,6 +80,7 @@ extern void irq_domain_del(struct irq_domain *domain);
 #endif /* CONFIG_IRQ_DOMAIN */
 
 #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
+extern struct irq_domain_ops irq_domain_simple_ops;
 extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
 extern void irq_domain_generate_simple(const struct of_device_id *match,
 					u64 phys_base, unsigned int irq_start);
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 2c366b52f505..aace6b8691a2 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -553,6 +553,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_SPAPR_TCE 63
 #define KVM_CAP_PPC_SMT 64
 #define KVM_CAP_PPC_RMA 65
+#define KVM_CAP_S390_GMAP 71
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 76fe2c62ae71..409328d1cbbb 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -48,11 +48,12 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
  */
 #define LOOKUP_FOLLOW 0x0001
 #define LOOKUP_DIRECTORY 0x0002
+#define LOOKUP_AUTOMOUNT 0x0004
 
 #define LOOKUP_PARENT 0x0010
 #define LOOKUP_REVAL 0x0020
 #define LOOKUP_RCU 0x0040
-#define LOOKUP_NO_AUTOMOUNT 0x0080
+
 /*
  * Intent data
  */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 581d2e2e5b00..f1b1ca1a09e1 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -623,8 +623,9 @@ struct pci_driver {
 extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
 
 enum pcie_bus_config_types {
-	PCIE_BUS_PERFORMANCE,
+	PCIE_BUS_TUNE_OFF,
 	PCIE_BUS_SAFE,
+	PCIE_BUS_PERFORMANCE,
 	PCIE_BUS_PEER2PEER,
 };
 
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index e07e2742a865..1dc420ba213a 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -51,6 +51,7 @@
 #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
 
 #define PTP_EV_PORT 319
+#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
 
 #define OFF_ETYPE 12
 #define OFF_IHL 14
@@ -116,14 +117,20 @@ static inline int ptp_filter_init(struct sock_filter *f, int len)
 	{OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \
 	{OP_RETA, 0, 0, 0 }, /* */ \
 /*L3x*/	{OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \
-/*L40*/	{OP_JEQ, 0, 6, ETH_P_8021Q }, /* f goto L50 */ \
+/*L40*/	{OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \
 	{OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \
-	{OP_JEQ, 0, 9, ETH_P_1588 }, /* f goto L60 */ \
+	{OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \
+	{OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
+	{OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
+	{OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \
 	{OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
 	{OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
 	{OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \
 	{OP_RETA, 0, 0, 0 }, /* */ \
-/*L50*/	{OP_JEQ, 0, 4, ETH_P_1588 }, /* f goto L61 */ \
+/*L50*/	{OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \
+	{OP_LDB, 0, 0, ETH_HLEN }, /* */ \
+	{OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
+	{OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \
 	{OP_LDH, 0, 0, ETH_HLEN }, /* */ \
 	{OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
 	{OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ac2c0578e0f..41d0237fd449 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1956,7 +1956,6 @@ static inline void disable_sched_clock_irqtime(void) {}
 
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
-extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 6bca4cc0063c..5f172703eb4f 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -298,7 +298,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
 		__array(char, name, 32)
 		__field(unsigned long, ino)
 		__field(unsigned long, state)
-		__field(unsigned long, age)
+		__field(unsigned long, dirtied_when)
 		__field(unsigned long, writeback_index)
 		__field(long, nr_to_write)
 		__field(unsigned long, wrote)
@@ -309,19 +309,19 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
 			dev_name(inode->i_mapping->backing_dev_info->dev), 32);
 		__entry->ino = inode->i_ino;
 		__entry->state = inode->i_state;
-		__entry->age = (jiffies - inode->dirtied_when) *
-				1000 / HZ;
+		__entry->dirtied_when = inode->dirtied_when;
 		__entry->writeback_index = inode->i_mapping->writeback_index;
 		__entry->nr_to_write = nr_to_write;
 		__entry->wrote = nr_to_write - wbc->nr_to_write;
 	),
 
-	TP_printk("bdi %s: ino=%lu state=%s age=%lu "
+	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
 		  "index=%lu to_write=%ld wrote=%lu",
 		  __entry->name,
 		  __entry->ino,
 		  show_inode_state(__entry->state),
-		  __entry->age,
+		  __entry->dirtied_when,
+		  (jiffies - __entry->dirtied_when) / HZ,
 		  __entry->writeback_index,
 		  __entry->nr_to_write,
 		  __entry->wrote
diff --git a/init/main.c b/init/main.c
index 2a9b88aa5e76..03b408dff825 100644
--- a/init/main.c
+++ b/init/main.c
@@ -381,9 +381,6 @@ static noinline void __init_refok rest_init(void)
 	preempt_enable_no_resched();
 	schedule();
 
-	/* At this point, we can enable user mode helper functionality */
-	usermodehelper_enable();
-
 	/* Call into cpu_idle with preempt disabled */
 	preempt_disable();
 	cpu_idle();
@@ -733,6 +730,7 @@ static void __init do_basic_setup(void)
 	driver_init();
 	init_irq_proc();
 	do_ctors();
+	usermodehelper_enable();
 	do_initcalls();
 }
 
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index d5828da3fd38..b57a3776de44 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -29,7 +29,11 @@ void irq_domain_add(struct irq_domain *domain)
 	 */
 	for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
 		d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
-		if (d || d->domain) {
+		if (!d) {
+			WARN(1, "error: assigning domain to non existant irq_desc");
+			return;
+		}
+		if (d->domain) {
 			/* things are broken; just report, don't clean up */
 			WARN(1, "error: irq_desc already assigned to a domain");
 			return;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 58f405b581e7..c8008dd58ef2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -250,7 +250,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 	do {
 		times->utime = cputime_add(times->utime, t->utime);
 		times->stime = cputime_add(times->stime, t->stime);
-		times->sum_exec_runtime += t->se.sum_exec_runtime;
+		times->sum_exec_runtime += task_sched_runtime(t);
 	} while_each_thread(tsk, t);
 out:
 	rcu_read_unlock();
@@ -312,7 +312,8 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 		cpu->cpu = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = thread_group_sched_runtime(p);
+		thread_group_cputime(p, &cputime);
+		cpu->sched = cputime.sum_exec_runtime;
 		break;
 	}
 	return 0;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 9de3ecfd20f9..a70d2a5d8c7b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -744,20 +744,17 @@ int ptrace_request(struct task_struct *child, long request,
 			break;
 
 		si = child->last_siginfo;
-		if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP))
-			break;
-
-		child->jobctl |= JOBCTL_LISTENING;
-
-		/*
-		 * If NOTIFY is set, it means event happened between start
-		 * of this trap and now.  Trigger re-trap immediately.
-		 */
-		if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-			signal_wake_up(child, true);
-
+		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
+			child->jobctl |= JOBCTL_LISTENING;
+			/*
+			 * If NOTIFY is set, it means event happened between
+			 * start of this trap and now.  Trigger re-trap.
+			 */
+			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+				signal_wake_up(child, true);
+			ret = 0;
+		}
 		unlock_task_sighand(child, &flags);
-		ret = 0;
 		break;
 
 	case PTRACE_DETACH:	 /* detach a process that was attached. */
diff --git a/kernel/resource.c b/kernel/resource.c
index 3b3cedc52592..c8dc249da5ce 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -419,6 +419,9 @@ static int __find_resource(struct resource *root, struct resource *old,
 		else
 			tmp.end = root->end;
 
+		if (tmp.end < tmp.start)
+			goto next;
+
 		resource_clip(&tmp, constraint->min, constraint->max);
 		arch_remove_reservations(&tmp);
 
@@ -436,8 +439,10 @@ static int __find_resource(struct resource *root, struct resource *old,
 				return 0;
 			}
 		}
-		if (!this)
+
+next:		if (!this || this->end == root->end)
 			break;
+
 		if (this != old)
 			tmp.start = this->end + 1;
 		this = this->sibling;
diff --git a/kernel/sched.c b/kernel/sched.c
index ec5f472bc5b9..b50b0f0c9aa9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3725,30 +3725,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 }
 
 /*
- * Return sum_exec_runtime for the thread group.
- * In case the task is currently running, return the sum plus current's
- * pending runtime that have not been accounted yet.
- *
- * Note that the thread group might have other running tasks as well,
- * so the return value not includes other pending runtime that other
- * running tasks might have.
- */
-unsigned long long thread_group_sched_runtime(struct task_struct *p)
-{
-	struct task_cputime totals;
-	unsigned long flags;
-	struct rq *rq;
-	u64 ns;
-
-	rq = task_rq_lock(p, &flags);
-	thread_group_cputime(p, &totals);
-	ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, p, &flags);
-
-	return ns;
-}
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in user space since the last update
@@ -4372,7 +4348,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 		blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void schedule(void)
+asmlinkage void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 97540f0c9e47..af1177858be3 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1050,7 +1050,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
 	    (curr->rt.nr_cpus_allowed < 2 ||
-	     curr->prio < p->prio) &&
+	     curr->prio <= p->prio) &&
 	    (p->rt.nr_cpus_allowed > 1)) {
 		int target = find_lowest_rq(p);
 
@@ -1581,7 +1581,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 	    p->rt.nr_cpus_allowed > 1 &&
 	    rt_task(rq->curr) &&
 	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio))
+	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
 
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index aceeabc2ca86..f9cc95728989 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -566,7 +566,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 	struct orig_node *orig_node = NULL;
 	int data_len = skb->len, ret;
 	short vid = -1;
-	bool do_bcast = false;
+	bool do_bcast;
 
 	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 		goto dropped;
@@ -600,15 +600,15 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 
 	orig_node = transtable_search(bat_priv, ethhdr->h_source,
 				      ethhdr->h_dest);
-	if (is_multicast_ether_addr(ethhdr->h_dest) ||
-	    (orig_node && orig_node->gw_flags)) {
+	do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
+	if (do_bcast || (orig_node && orig_node->gw_flags)) {
 		ret = gw_is_target(bat_priv, skb, orig_node);
 
 		if (ret < 0)
 			goto dropped;
 
-		if (ret == 0)
-			do_bcast = true;
+		if (ret)
+			do_bcast = false;
 	}
 
 	/* ethernet packet should be broadcasted */
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 28325d15773b..feb77ea7b58e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
 
-	netif_carrier_off(dev);
 	netdev_update_features(dev);
 	netif_start_queue(dev);
 	br_stp_enable_bridge(br);
@@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
 
-	netif_carrier_off(dev);
-
 	br_stp_disable_bridge(br);
 	br_multicast_stop(br);
 
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d6c8ae5b2e6a..c84963d2dee6 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -344,6 +344,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
 	}
 }
 
+static void bcm_tx_start_timer(struct bcm_op *op)
+{
+	if (op->kt_ival1.tv64 && op->count)
+		hrtimer_start(&op->timer,
+			      ktime_add(ktime_get(), op->kt_ival1),
+			      HRTIMER_MODE_ABS);
+	else if (op->kt_ival2.tv64)
+		hrtimer_start(&op->timer,
+			      ktime_add(ktime_get(), op->kt_ival2),
+			      HRTIMER_MODE_ABS);
+}
+
 static void bcm_tx_timeout_tsklet(unsigned long data)
 {
 	struct bcm_op *op = (struct bcm_op *)data;
@@ -365,26 +377,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
 
 			bcm_send_to_user(op, &msg_head, NULL, 0);
 		}
-	}
-
-	if (op->kt_ival1.tv64 && (op->count > 0)) {
-
-		/* send (next) frame */
 		bcm_can_tx(op);
-		hrtimer_start(&op->timer,
-			      ktime_add(ktime_get(), op->kt_ival1),
-			      HRTIMER_MODE_ABS);
 
-	} else {
-		if (op->kt_ival2.tv64) {
+	} else if (op->kt_ival2.tv64)
+		bcm_can_tx(op);
 
-			/* send (next) frame */
-			bcm_can_tx(op);
-			hrtimer_start(&op->timer,
-				      ktime_add(ktime_get(), op->kt_ival2),
-				      HRTIMER_MODE_ABS);
-		}
-	}
+	bcm_tx_start_timer(op);
 }
 
 /*
@@ -964,23 +962,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 			hrtimer_cancel(&op->timer);
 	}
 
-	if ((op->flags & STARTTIMER) &&
-	    ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
-
+	if (op->flags & STARTTIMER) {
+		hrtimer_cancel(&op->timer);
 		/* spec: send can_frame when starting timer */
 		op->flags |= TX_ANNOUNCE;
-
-		if (op->kt_ival1.tv64 && (op->count > 0)) {
-			/* op->count-- is done in bcm_tx_timeout_handler */
-			hrtimer_start(&op->timer, op->kt_ival1,
-				      HRTIMER_MODE_REL);
-		} else
-			hrtimer_start(&op->timer, op->kt_ival2,
-				      HRTIMER_MODE_REL);
 	}
 
-	if (op->flags & TX_ANNOUNCE)
+	if (op->flags & TX_ANNOUNCE) {
 		bcm_can_tx(op);
+		if (op->count)
+			op->count--;
+	}
+
+	if (op->flags & STARTTIMER)
+		bcm_tx_start_timer(op);
 
 	return msg_head->nframes * CFSIZ + MHSIZ;
 }
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 132963abc266..2883ea01e680 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -232,6 +232,7 @@ void ceph_destroy_options(struct ceph_options *opt)
 		ceph_crypto_key_destroy(opt->key);
 		kfree(opt->key);
 	}
+	kfree(opt->mon_addr);
 	kfree(opt);
 }
 EXPORT_SYMBOL(ceph_destroy_options);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index c340e2e0765b..9918e9eb276e 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2307,6 +2307,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
 	m->front_max = front_len;
 	m->front_is_vmalloc = false;
 	m->more_to_follow = false;
+	m->ack_stamp = 0;
 	m->pool = NULL;
 
 	/* middle */
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 16836a7df7a6..88ad8a2501b5 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -217,6 +217,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 	INIT_LIST_HEAD(&req->r_unsafe_item);
 	INIT_LIST_HEAD(&req->r_linger_item);
 	INIT_LIST_HEAD(&req->r_linger_osd);
+	INIT_LIST_HEAD(&req->r_req_lru_item);
 	req->r_flags = flags;
 
 	WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
@@ -816,13 +817,10 @@ static void __register_request(struct ceph_osd_client *osdc,
 {
 	req->r_tid = ++osdc->last_tid;
 	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
-	INIT_LIST_HEAD(&req->r_req_lru_item);
-
 	dout("__register_request %p tid %lld\n", req, req->r_tid);
 	__insert_request(osdc, req);
 	ceph_osdc_get_request(req);
 	osdc->num_requests++;
-
 	if (osdc->num_requests == 1) {
 		dout(" first request, scheduling timeout\n");
 		__schedule_osd_timeout(osdc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index e97c3588c3ec..fd863fe76934 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -339,6 +339,7 @@ static int __insert_pg_mapping(struct ceph_pg_mapping *new,
339 struct ceph_pg_mapping *pg = NULL; 339 struct ceph_pg_mapping *pg = NULL;
340 int c; 340 int c;
341 341
342 dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
342 while (*p) { 343 while (*p) {
343 parent = *p; 344 parent = *p;
344 pg = rb_entry(parent, struct ceph_pg_mapping, node); 345 pg = rb_entry(parent, struct ceph_pg_mapping, node);
@@ -366,16 +367,33 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
366 while (n) { 367 while (n) {
367 pg = rb_entry(n, struct ceph_pg_mapping, node); 368 pg = rb_entry(n, struct ceph_pg_mapping, node);
368 c = pgid_cmp(pgid, pg->pgid); 369 c = pgid_cmp(pgid, pg->pgid);
369 if (c < 0) 370 if (c < 0) {
370 n = n->rb_left; 371 n = n->rb_left;
371 else if (c > 0) 372 } else if (c > 0) {
372 n = n->rb_right; 373 n = n->rb_right;
373 else 374 } else {
375 dout("__lookup_pg_mapping %llx got %p\n",
376 *(u64 *)&pgid, pg);
374 return pg; 377 return pg;
378 }
375 } 379 }
376 return NULL; 380 return NULL;
377} 381}
378 382
383static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
384{
385 struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);
386
387 if (pg) {
388 dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
389 rb_erase(&pg->node, root);
390 kfree(pg);
391 return 0;
392 }
393 dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
394 return -ENOENT;
395}
396
379/* 397/*
380 * rbtree of pg pool info 398 * rbtree of pg pool info
381 */ 399 */
@@ -711,7 +729,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
711 void *start = *p; 729 void *start = *p;
712 int err = -EINVAL; 730 int err = -EINVAL;
713 u16 version; 731 u16 version;
714 struct rb_node *rbp;
715 732
716 ceph_decode_16_safe(p, end, version, bad); 733 ceph_decode_16_safe(p, end, version, bad);
717 if (version > CEPH_OSDMAP_INC_VERSION) { 734 if (version > CEPH_OSDMAP_INC_VERSION) {
@@ -861,7 +878,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
861 } 878 }
862 879
863 /* new_pg_temp */ 880 /* new_pg_temp */
864 rbp = rb_first(&map->pg_temp);
865 ceph_decode_32_safe(p, end, len, bad); 881 ceph_decode_32_safe(p, end, len, bad);
866 while (len--) { 882 while (len--) {
867 struct ceph_pg_mapping *pg; 883 struct ceph_pg_mapping *pg;
@@ -872,18 +888,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
872 ceph_decode_copy(p, &pgid, sizeof(pgid)); 888 ceph_decode_copy(p, &pgid, sizeof(pgid));
873 pglen = ceph_decode_32(p); 889 pglen = ceph_decode_32(p);
874 890
875 /* remove any? */
876 while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
877 node)->pgid, pgid) <= 0) {
878 struct ceph_pg_mapping *cur =
879 rb_entry(rbp, struct ceph_pg_mapping, node);
880
881 rbp = rb_next(rbp);
882 dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
883 rb_erase(&cur->node, &map->pg_temp);
884 kfree(cur);
885 }
886
887 if (pglen) { 891 if (pglen) {
888 /* insert */ 892 /* insert */
889 ceph_decode_need(p, end, pglen*sizeof(u32), bad); 893 ceph_decode_need(p, end, pglen*sizeof(u32), bad);
@@ -903,17 +907,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
903 } 907 }
904 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, 908 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
905 pglen); 909 pglen);
910 } else {
911 /* remove */
912 __remove_pg_mapping(&map->pg_temp, pgid);
906 } 913 }
907 } 914 }
908 while (rbp) {
909 struct ceph_pg_mapping *cur =
910 rb_entry(rbp, struct ceph_pg_mapping, node);
911
912 rbp = rb_next(rbp);
913 dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
914 rb_erase(&cur->node, &map->pg_temp);
915 kfree(cur);
916 }
917 915
918 /* ignore the rest */ 916 /* ignore the rest */
919 *p = end; 917 *p = end;
@@ -1046,10 +1044,25 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
1046 struct ceph_pg_mapping *pg; 1044 struct ceph_pg_mapping *pg;
1047 struct ceph_pg_pool_info *pool; 1045 struct ceph_pg_pool_info *pool;
1048 int ruleno; 1046 int ruleno;
1049 unsigned poolid, ps, pps; 1047 unsigned poolid, ps, pps, t;
1050 int preferred; 1048 int preferred;
1051 1049
1050 poolid = le32_to_cpu(pgid.pool);
1051 ps = le16_to_cpu(pgid.ps);
1052 preferred = (s16)le16_to_cpu(pgid.preferred);
1053
1054 pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
1055 if (!pool)
1056 return NULL;
1057
1052 /* pg_temp? */ 1058 /* pg_temp? */
1059 if (preferred >= 0)
1060 t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num),
1061 pool->lpgp_num_mask);
1062 else
1063 t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
1064 pool->pgp_num_mask);
1065 pgid.ps = cpu_to_le16(t);
1053 pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); 1066 pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
1054 if (pg) { 1067 if (pg) {
1055 *num = pg->len; 1068 *num = pg->len;
@@ -1057,18 +1070,6 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
1057 } 1070 }
1058 1071
1059 /* crush */ 1072 /* crush */
1060 poolid = le32_to_cpu(pgid.pool);
1061 ps = le16_to_cpu(pgid.ps);
1062 preferred = (s16)le16_to_cpu(pgid.preferred);
1063
1064 /* don't forcefeed bad device ids to crush */
1065 if (preferred >= osdmap->max_osd ||
1066 preferred >= osdmap->crush->max_devices)
1067 preferred = -1;
1068
1069 pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
1070 if (!pool)
1071 return NULL;
1072 ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, 1073 ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
1073 pool->v.type, pool->v.size); 1074 pool->v.type, pool->v.size);
1074 if (ruleno < 0) { 1075 if (ruleno < 0) {
@@ -1078,6 +1079,11 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
1078 return NULL; 1079 return NULL;
1079 } 1080 }
1080 1081
1082 /* don't forcefeed bad device ids to crush */
1083 if (preferred >= osdmap->max_osd ||
1084 preferred >= osdmap->crush->max_devices)
1085 preferred = -1;
1086
1081 if (preferred >= 0) 1087 if (preferred >= 0)
1082 pps = ceph_stable_mod(ps, 1088 pps = ceph_stable_mod(ps,
1083 le32_to_cpu(pool->v.lpgp_num), 1089 le32_to_cpu(pool->v.lpgp_num),
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 143221ebeb7a..81cae641c9a9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1398,9 +1398,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1398 1398
1399 BUG_ON(!pcount); 1399 BUG_ON(!pcount);
1400 1400
1401 /* Tweak before seqno plays */ 1401 if (skb == tp->lost_skb_hint)
1402 if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
1403 !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
1404 tp->lost_cnt_hint += pcount; 1402 tp->lost_cnt_hint += pcount;
1405 1403
1406 TCP_SKB_CB(prev)->end_seq += shifted; 1404 TCP_SKB_CB(prev)->end_seq += shifted;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index dd3fad9fb633..48da7cc41e23 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -927,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
927 } 927 }
928 sk_nocaps_add(sk, NETIF_F_GSO_MASK); 928 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
929 } 929 }
930 if (tcp_alloc_md5sig_pool(sk) == NULL) { 930
931 md5sig = tp->md5sig_info;
932 if (md5sig->entries4 == 0 &&
933 tcp_alloc_md5sig_pool(sk) == NULL) {
931 kfree(newkey); 934 kfree(newkey);
932 return -ENOMEM; 935 return -ENOMEM;
933 } 936 }
934 md5sig = tp->md5sig_info;
935 937
936 if (md5sig->alloced4 == md5sig->entries4) { 938 if (md5sig->alloced4 == md5sig->entries4) {
937 keys = kmalloc((sizeof(*keys) * 939 keys = kmalloc((sizeof(*keys) *
938 (md5sig->entries4 + 1)), GFP_ATOMIC); 940 (md5sig->entries4 + 1)), GFP_ATOMIC);
939 if (!keys) { 941 if (!keys) {
940 kfree(newkey); 942 kfree(newkey);
941 tcp_free_md5sig_pool(); 943 if (md5sig->entries4 == 0)
944 tcp_free_md5sig_pool();
942 return -ENOMEM; 945 return -ENOMEM;
943 } 946 }
944 947
@@ -982,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
982 kfree(tp->md5sig_info->keys4); 985 kfree(tp->md5sig_info->keys4);
983 tp->md5sig_info->keys4 = NULL; 986 tp->md5sig_info->keys4 = NULL;
984 tp->md5sig_info->alloced4 = 0; 987 tp->md5sig_info->alloced4 = 0;
988 tcp_free_md5sig_pool();
985 } else if (tp->md5sig_info->entries4 != i) { 989 } else if (tp->md5sig_info->entries4 != i) {
986 /* Need to do some manipulation */ 990 /* Need to do some manipulation */
987 memmove(&tp->md5sig_info->keys4[i], 991 memmove(&tp->md5sig_info->keys4[i],
@@ -989,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
989 (tp->md5sig_info->entries4 - i) * 993 (tp->md5sig_info->entries4 - i) *
990 sizeof(struct tcp4_md5sig_key)); 994 sizeof(struct tcp4_md5sig_key));
991 } 995 }
992 tcp_free_md5sig_pool();
993 return 0; 996 return 0;
994 } 997 }
995 } 998 }
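The tcp_ipv4.c hunks above (mirrored for IPv6 further down) make the shared MD5 signature pool follow the key table: it is allocated only when the first key is added (entries4 == 0) and released only when the last key is removed, rather than on every add and delete. A minimal sketch of that allocate-on-first/free-on-last pattern, with hypothetical alloc_pool()/free_pool() helpers standing in for the real tcp_*_md5sig_pool calls.

/* Sketch: tie a shared resource's lifetime to the first and last entry. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void *pool;
static unsigned int nentries;

static bool alloc_pool(void) { pool = malloc(64); return pool != NULL; }
static void free_pool(void)  { free(pool); pool = NULL; }

static int add_key(void)
{
	/* Only the 0 -> 1 transition needs the shared pool allocated. */
	if (nentries == 0 && !alloc_pool())
		return -1;
	nentries++;
	return 0;
}

static void del_key(void)
{
	if (nentries == 0)
		return;
	/* Only the 1 -> 0 transition releases the shared pool. */
	if (--nentries == 0)
		free_pool();
}

int main(void)
{
	add_key();
	add_key();
	del_key();
	printf("pool %s\n", pool ? "still allocated" : "freed");
	del_key();
	printf("pool %s\n", pool ? "still allocated" : "freed");
	return 0;
}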
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 705c82886281..def0538e2413 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -696,8 +696,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
696 int err; 696 int err;
697 697
698 err = ip6mr_fib_lookup(net, &fl6, &mrt); 698 err = ip6mr_fib_lookup(net, &fl6, &mrt);
699 if (err < 0) 699 if (err < 0) {
700 kfree_skb(skb);
700 return err; 701 return err;
702 }
701 703
702 read_lock(&mrt_lock); 704 read_lock(&mrt_lock);
703 dev->stats.tx_bytes += skb->len; 705 dev->stats.tx_bytes += skb->len;
@@ -2052,8 +2054,10 @@ int ip6_mr_input(struct sk_buff *skb)
2052 int err; 2054 int err;
2053 2055
2054 err = ip6mr_fib_lookup(net, &fl6, &mrt); 2056 err = ip6mr_fib_lookup(net, &fl6, &mrt);
2055 if (err < 0) 2057 if (err < 0) {
2058 kfree_skb(skb);
2056 return err; 2059 return err;
2060 }
2057 2061
2058 read_lock(&mrt_lock); 2062 read_lock(&mrt_lock);
2059 cache = ip6mr_cache_find(mrt, 2063 cache = ip6mr_cache_find(mrt,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1250f9020670..fb545edef6ea 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -244,7 +244,9 @@ static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
244{ 244{
245 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags); 245 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
246 246
247 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry)); 247 if (rt != NULL)
248 memset(&rt->rt6i_table, 0,
249 sizeof(*rt) - sizeof(struct dst_entry));
248 250
249 return rt; 251 return rt;
250} 252}
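Both this ip6_dst_alloc() fix and the xfrm_alloc_dst() hunk further down now check the allocation before memset()ing everything that follows the embedded struct dst_entry. A sketch of that zero-the-tail idiom; struct header and struct route_entry here are made-up illustrations, not the kernel types.

/* Sketch: zero only the fields that follow an embedded header struct. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct header {            /* stands in for struct dst_entry       */
	long refcnt;
	void *ops;
};

struct route_entry {       /* stands in for struct rt6_info        */
	struct header dst;     /* must stay untouched after allocation */
	int table;             /* first field of the private tail      */
	int flags;
	unsigned long expires;
};

static struct route_entry *route_alloc(void)
{
	struct route_entry *rt = malloc(sizeof(*rt));

	/* Check the allocation before touching the tail, as the fix does. */
	if (rt != NULL)
		memset((char *)rt + offsetof(struct route_entry, table), 0,
		       sizeof(*rt) - sizeof(struct header));
	return rt;
}

int main(void)
{
	struct route_entry *rt = route_alloc();

	if (!rt)
		return 1;
	printf("flags=%d expires=%lu\n", rt->flags, rt->expires);
	free(rt);
	return 0;
}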
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 00797d857667..5357902c7978 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -591,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
591 } 591 }
592 sk_nocaps_add(sk, NETIF_F_GSO_MASK); 592 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
593 } 593 }
594 if (tcp_alloc_md5sig_pool(sk) == NULL) { 594 if (tp->md5sig_info->entries6 == 0 &&
595 tcp_alloc_md5sig_pool(sk) == NULL) {
595 kfree(newkey); 596 kfree(newkey);
596 return -ENOMEM; 597 return -ENOMEM;
597 } 598 }
@@ -600,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
600 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); 601 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
601 602
602 if (!keys) { 603 if (!keys) {
603 tcp_free_md5sig_pool();
604 kfree(newkey); 604 kfree(newkey);
605 if (tp->md5sig_info->entries6 == 0)
606 tcp_free_md5sig_pool();
605 return -ENOMEM; 607 return -ENOMEM;
606 } 608 }
607 609
@@ -647,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
647 kfree(tp->md5sig_info->keys6); 649 kfree(tp->md5sig_info->keys6);
648 tp->md5sig_info->keys6 = NULL; 650 tp->md5sig_info->keys6 = NULL;
649 tp->md5sig_info->alloced6 = 0; 651 tp->md5sig_info->alloced6 = 0;
652 tcp_free_md5sig_pool();
650 } else { 653 } else {
651 /* shrink the database */ 654 /* shrink the database */
652 if (tp->md5sig_info->entries6 != i) 655 if (tp->md5sig_info->entries6 != i)
@@ -655,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
655 (tp->md5sig_info->entries6 - i) 658 (tp->md5sig_info->entries6 - i)
656 * sizeof (tp->md5sig_info->keys6[0])); 659 * sizeof (tp->md5sig_info->keys6[0]));
657 } 660 }
658 tcp_free_md5sig_pool();
659 return 0; 661 return 0;
660 } 662 }
661 } 663 }
@@ -1383,6 +1385,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1383 newtp->af_specific = &tcp_sock_ipv6_mapped_specific; 1385 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1384#endif 1386#endif
1385 1387
1388 newnp->ipv6_ac_list = NULL;
1389 newnp->ipv6_fl_list = NULL;
1386 newnp->pktoptions = NULL; 1390 newnp->pktoptions = NULL;
1387 newnp->opt = NULL; 1391 newnp->opt = NULL;
1388 newnp->mcast_oif = inet6_iif(skb); 1392 newnp->mcast_oif = inet6_iif(skb);
@@ -1447,6 +1451,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1447 First: no IPv4 options. 1451 First: no IPv4 options.
1448 */ 1452 */
1449 newinet->inet_opt = NULL; 1453 newinet->inet_opt = NULL;
1454 newnp->ipv6_ac_list = NULL;
1450 newnp->ipv6_fl_list = NULL; 1455 newnp->ipv6_fl_list = NULL;
1451 1456
1452 /* Clone RX bits */ 1457 /* Clone RX bits */
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 2b771dc708a3..5290ac353a5e 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3679,7 +3679,7 @@ int __net_init ip_vs_control_net_init(struct net *net)
3679 int idx; 3679 int idx;
3680 struct netns_ipvs *ipvs = net_ipvs(net); 3680 struct netns_ipvs *ipvs = net_ipvs(net);
3681 3681
3682 ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock); 3682 rwlock_init(&ipvs->rs_lock);
3683 3683
3684 /* Initialize rs_table */ 3684 /* Initialize rs_table */
3685 for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++) 3685 for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 25e68f56b4ba..dac91abf4c0f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1720,7 +1720,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1720 return 0; 1720 return 0;
1721 1721
1722drop_n_acct: 1722drop_n_acct:
1723 po->stats.tp_drops = atomic_inc_return(&sk->sk_drops); 1723 spin_lock(&sk->sk_receive_queue.lock);
1724 po->stats.tp_drops++;
1725 atomic_inc(&sk->sk_drops);
1726 spin_unlock(&sk->sk_receive_queue.lock);
1724 1727
1725drop_n_restore: 1728drop_n_restore:
1726 if (skb_head != skb->data && skb_shared(skb)) { 1729 if (skb_head != skb->data && skb_shared(skb)) {
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 8b77edbab272..4e1de171866c 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -84,7 +84,8 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
84static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); 84static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
85static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, 85static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
86 struct list_head *unmap_list, 86 struct list_head *unmap_list,
87 struct list_head *kill_list); 87 struct list_head *kill_list,
88 int *unpinned);
88static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); 89static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
89 90
90static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) 91static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
@@ -499,7 +500,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
499 LIST_HEAD(unmap_list); 500 LIST_HEAD(unmap_list);
500 LIST_HEAD(kill_list); 501 LIST_HEAD(kill_list);
501 unsigned long flags; 502 unsigned long flags;
502 unsigned int nfreed = 0, ncleaned = 0, free_goal; 503 unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal;
503 int ret = 0; 504 int ret = 0;
504 505
505 rds_iw_stats_inc(s_iw_rdma_mr_pool_flush); 506 rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
@@ -524,7 +525,8 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
524 * will be destroyed by the unmap function. 525 * will be destroyed by the unmap function.
525 */ 526 */
526 if (!list_empty(&unmap_list)) { 527 if (!list_empty(&unmap_list)) {
527 ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list); 528 ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
529 &kill_list, &unpinned);
528 /* If we've been asked to destroy all MRs, move those 530 /* If we've been asked to destroy all MRs, move those
529 * that were simply cleaned to the kill list */ 531 * that were simply cleaned to the kill list */
530 if (free_all) 532 if (free_all)
@@ -548,6 +550,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
548 spin_unlock_irqrestore(&pool->list_lock, flags); 550 spin_unlock_irqrestore(&pool->list_lock, flags);
549 } 551 }
550 552
553 atomic_sub(unpinned, &pool->free_pinned);
551 atomic_sub(ncleaned, &pool->dirty_count); 554 atomic_sub(ncleaned, &pool->dirty_count);
552 atomic_sub(nfreed, &pool->item_count); 555 atomic_sub(nfreed, &pool->item_count);
553 556
@@ -828,7 +831,8 @@ static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
828 831
829static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, 832static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
830 struct list_head *unmap_list, 833 struct list_head *unmap_list,
831 struct list_head *kill_list) 834 struct list_head *kill_list,
835 int *unpinned)
832{ 836{
833 struct rds_iw_mapping *mapping, *next; 837 struct rds_iw_mapping *mapping, *next;
834 unsigned int ncleaned = 0; 838 unsigned int ncleaned = 0;
@@ -855,6 +859,7 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
855 859
856 spin_lock_irqsave(&pool->list_lock, flags); 860 spin_lock_irqsave(&pool->list_lock, flags);
857 list_for_each_entry_safe(mapping, next, unmap_list, m_list) { 861 list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
862 *unpinned += mapping->m_sg.len;
858 list_move(&mapping->m_list, &laundered); 863 list_move(&mapping->m_list, &laundered);
859 ncleaned++; 864 ncleaned++;
860 } 865 }
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 94fdcc7f1030..552df27dcf53 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1349,14 +1349,16 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1349 BUG(); 1349 BUG();
1350 } 1350 }
1351 xdst = dst_alloc(dst_ops, NULL, 0, 0, 0); 1351 xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);
1352 memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry));
1353 xfrm_policy_put_afinfo(afinfo);
1354 1352
1355 if (likely(xdst)) 1353 if (likely(xdst)) {
1354 memset(&xdst->u.rt6.rt6i_table, 0,
1355 sizeof(*xdst) - sizeof(struct dst_entry));
1356 xdst->flo.ops = &xfrm_bundle_fc_ops; 1356 xdst->flo.ops = &xfrm_bundle_fc_ops;
1357 else 1357 } else
1358 xdst = ERR_PTR(-ENOBUFS); 1358 xdst = ERR_PTR(-ENOBUFS);
1359 1359
1360 xfrm_policy_put_afinfo(afinfo);
1361
1360 return xdst; 1362 return xdst;
1361} 1363}
1362 1364
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index f9123f09e83e..32b02d906703 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -68,6 +68,7 @@ MODULE_PARM_DESC(enable, "Enable FM801 soundcard.");
68module_param_array(tea575x_tuner, int, NULL, 0444); 68module_param_array(tea575x_tuner, int, NULL, 0444);
69MODULE_PARM_DESC(tea575x_tuner, "TEA575x tuner access method (0 = auto, 1 = SF256-PCS, 2=SF256-PCP, 3=SF64-PCR, 8=disable, +16=tuner-only)."); 69MODULE_PARM_DESC(tea575x_tuner, "TEA575x tuner access method (0 = auto, 1 = SF256-PCS, 2=SF256-PCP, 3=SF64-PCR, 8=disable, +16=tuner-only).");
70 70
71#define TUNER_DISABLED (1<<3)
71#define TUNER_ONLY (1<<4) 72#define TUNER_ONLY (1<<4)
72#define TUNER_TYPE_MASK (~TUNER_ONLY & 0xFFFF) 73#define TUNER_TYPE_MASK (~TUNER_ONLY & 0xFFFF)
73 74
@@ -1150,7 +1151,8 @@ static int snd_fm801_free(struct fm801 *chip)
1150 1151
1151 __end_hw: 1152 __end_hw:
1152#ifdef CONFIG_SND_FM801_TEA575X_BOOL 1153#ifdef CONFIG_SND_FM801_TEA575X_BOOL
1153 snd_tea575x_exit(&chip->tea); 1154 if (!(chip->tea575x_tuner & TUNER_DISABLED))
1155 snd_tea575x_exit(&chip->tea);
1154#endif 1156#endif
1155 if (chip->irq >= 0) 1157 if (chip->irq >= 0)
1156 free_irq(chip->irq, chip); 1158 free_irq(chip->irq, chip);
@@ -1236,7 +1238,6 @@ static int __devinit snd_fm801_create(struct snd_card *card,
1236 (tea575x_tuner & TUNER_TYPE_MASK) < 4) { 1238 (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
1237 if (snd_tea575x_init(&chip->tea)) { 1239 if (snd_tea575x_init(&chip->tea)) {
1238 snd_printk(KERN_ERR "TEA575x radio not found\n"); 1240 snd_printk(KERN_ERR "TEA575x radio not found\n");
1239 snd_fm801_free(chip);
1240 return -ENODEV; 1241 return -ENODEV;
1241 } 1242 }
1242 } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) { 1243 } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) {
@@ -1251,11 +1252,15 @@ static int __devinit snd_fm801_create(struct snd_card *card,
1251 } 1252 }
1252 if (tea575x_tuner == 4) { 1253 if (tea575x_tuner == 4) {
1253 snd_printk(KERN_ERR "TEA575x radio not found\n"); 1254 snd_printk(KERN_ERR "TEA575x radio not found\n");
1254 snd_fm801_free(chip); 1255 chip->tea575x_tuner = TUNER_DISABLED;
1255 return -ENODEV;
1256 } 1256 }
1257 } 1257 }
1258 strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card)); 1258 if (!(chip->tea575x_tuner & TUNER_DISABLED)) {
1259 strlcpy(chip->tea.card,
1260 snd_fm801_tea575x_gpios[(tea575x_tuner &
1261 TUNER_TYPE_MASK) - 1].name,
1262 sizeof(chip->tea.card));
1263 }
1259#endif 1264#endif
1260 1265
1261 *rchip = chip; 1266 *rchip = chip;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index be6982289c0d..e9a2a8795d1b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1924,7 +1924,8 @@ static unsigned int azx_via_get_position(struct azx *chip,
1924} 1924}
1925 1925
1926static unsigned int azx_get_position(struct azx *chip, 1926static unsigned int azx_get_position(struct azx *chip,
1927 struct azx_dev *azx_dev) 1927 struct azx_dev *azx_dev,
1928 bool with_check)
1928{ 1929{
1929 unsigned int pos; 1930 unsigned int pos;
1930 int stream = azx_dev->substream->stream; 1931 int stream = azx_dev->substream->stream;
@@ -1940,7 +1941,7 @@ static unsigned int azx_get_position(struct azx *chip,
1940 default: 1941 default:
1941 /* use the position buffer */ 1942 /* use the position buffer */
1942 pos = le32_to_cpu(*azx_dev->posbuf); 1943 pos = le32_to_cpu(*azx_dev->posbuf);
1943 if (chip->position_fix[stream] == POS_FIX_AUTO) { 1944 if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
1944 if (!pos || pos == (u32)-1) { 1945 if (!pos || pos == (u32)-1) {
1945 printk(KERN_WARNING 1946 printk(KERN_WARNING
1946 "hda-intel: Invalid position buffer, " 1947 "hda-intel: Invalid position buffer, "
@@ -1964,7 +1965,7 @@ static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
1964 struct azx *chip = apcm->chip; 1965 struct azx *chip = apcm->chip;
1965 struct azx_dev *azx_dev = get_azx_dev(substream); 1966 struct azx_dev *azx_dev = get_azx_dev(substream);
1966 return bytes_to_frames(substream->runtime, 1967 return bytes_to_frames(substream->runtime,
1967 azx_get_position(chip, azx_dev)); 1968 azx_get_position(chip, azx_dev, false));
1968} 1969}
1969 1970
1970/* 1971/*
@@ -1987,7 +1988,7 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
1987 return -1; /* bogus (too early) interrupt */ 1988 return -1; /* bogus (too early) interrupt */
1988 1989
1989 stream = azx_dev->substream->stream; 1990 stream = azx_dev->substream->stream;
1990 pos = azx_get_position(chip, azx_dev); 1991 pos = azx_get_position(chip, azx_dev, true);
1991 1992
1992 if (WARN_ONCE(!azx_dev->period_bytes, 1993 if (WARN_ONCE(!azx_dev->period_bytes,
1993 "hda-intel: zero azx_dev->period_bytes")) 1994 "hda-intel: zero azx_dev->period_bytes"))
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0503c999e7d3..7a73621a8909 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -578,6 +578,10 @@ static void alc_line_automute(struct hda_codec *codec)
578{ 578{
579 struct alc_spec *spec = codec->spec; 579 struct alc_spec *spec = codec->spec;
580 580
581 /* check LO jack only when it's different from HP */
582 if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0])
583 return;
584
581 spec->line_jack_present = 585 spec->line_jack_present =
582 detect_jacks(codec, ARRAY_SIZE(spec->autocfg.line_out_pins), 586 detect_jacks(codec, ARRAY_SIZE(spec->autocfg.line_out_pins),
583 spec->autocfg.line_out_pins); 587 spec->autocfg.line_out_pins);
@@ -1321,7 +1325,9 @@ do_sku:
1321 * 15 : 1 --> enable the function "Mute internal speaker 1325 * 15 : 1 --> enable the function "Mute internal speaker
1322 * when the external headphone out jack is plugged" 1326 * when the external headphone out jack is plugged"
1323 */ 1327 */
1324 if (!spec->autocfg.hp_pins[0]) { 1328 if (!spec->autocfg.hp_pins[0] &&
1329 !(spec->autocfg.line_out_pins[0] &&
1330 spec->autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
1325 hda_nid_t nid; 1331 hda_nid_t nid;
1326 tmp = (ass >> 11) & 0x3; /* HP to chassis */ 1332 tmp = (ass >> 11) & 0x3; /* HP to chassis */
1327 if (tmp == 0) 1333 if (tmp == 0)
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 1b7c11432aa7..987e3cf71a0b 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -5630,6 +5630,7 @@ again:
5630 switch (codec->vendor_id) { 5630 switch (codec->vendor_id) {
5631 case 0x111d76d1: 5631 case 0x111d76d1:
5632 case 0x111d76d9: 5632 case 0x111d76d9:
5633 case 0x111d76df:
5633 case 0x111d76e5: 5634 case 0x111d76e5:
5634 case 0x111d7666: 5635 case 0x111d7666:
5635 case 0x111d7667: 5636 case 0x111d7667:
diff --git a/sound/soc/blackfin/bf5xx-ad73311.c b/sound/soc/blackfin/bf5xx-ad73311.c
index 732a247f2527..b94eb7ef7d16 100644
--- a/sound/soc/blackfin/bf5xx-ad73311.c
+++ b/sound/soc/blackfin/bf5xx-ad73311.c
@@ -128,7 +128,7 @@ static int snd_ad73311_configure(void)
128 return 0; 128 return 0;
129} 129}
130 130
131static int bf5xx_probe(struct platform_device *pdev) 131static int bf5xx_probe(struct snd_soc_card *card)
132{ 132{
133 int err; 133 int err;
134 if (gpio_request(GPIO_SE, "AD73311_SE")) { 134 if (gpio_request(GPIO_SE, "AD73311_SE")) {
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 84f4ad568556..9801cd7cfcb5 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -431,7 +431,8 @@ static int ssm2602_set_dai_fmt(struct snd_soc_dai *codec_dai,
431static int ssm2602_set_bias_level(struct snd_soc_codec *codec, 431static int ssm2602_set_bias_level(struct snd_soc_codec *codec,
432 enum snd_soc_bias_level level) 432 enum snd_soc_bias_level level)
433{ 433{
434 u16 reg = snd_soc_read(codec, SSM2602_PWR) & 0xff7f; 434 u16 reg = snd_soc_read(codec, SSM2602_PWR);
435 reg &= ~(PWR_POWER_OFF | PWR_OSC_PDN);
435 436
436 switch (level) { 437 switch (level) {
437 case SND_SOC_BIAS_ON: 438 case SND_SOC_BIAS_ON:
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index ffa2ffe5ec11..aa091a0d8187 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1454,8 +1454,8 @@ static int wm8753_probe(struct snd_soc_codec *codec)
1454 /* set the update bits */ 1454 /* set the update bits */
1455 snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); 1455 snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100);
1456 snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); 1456 snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100);
1457 snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); 1457 snd_soc_update_bits(codec, WM8753_LADC, 0x0100, 0x0100);
1458 snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); 1458 snd_soc_update_bits(codec, WM8753_RADC, 0x0100, 0x0100);
1459 snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100); 1459 snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100);
1460 snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100); 1460 snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100);
1461 snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100); 1461 snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100);
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 1725550c293e..d2c315fa1b9b 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3479,31 +3479,6 @@ int wm8962_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
3479} 3479}
3480EXPORT_SYMBOL_GPL(wm8962_mic_detect); 3480EXPORT_SYMBOL_GPL(wm8962_mic_detect);
3481 3481
3482#ifdef CONFIG_PM
3483static int wm8962_resume(struct snd_soc_codec *codec)
3484{
3485 u16 *reg_cache = codec->reg_cache;
3486 int i;
3487
3488 /* Restore the registers */
3489 for (i = 1; i < codec->driver->reg_cache_size; i++) {
3490 switch (i) {
3491 case WM8962_SOFTWARE_RESET:
3492 continue;
3493 default:
3494 break;
3495 }
3496
3497 if (reg_cache[i] != wm8962_reg[i])
3498 snd_soc_write(codec, i, reg_cache[i]);
3499 }
3500
3501 return 0;
3502}
3503#else
3504#define wm8962_resume NULL
3505#endif
3506
3507#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) 3482#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
3508static int beep_rates[] = { 3483static int beep_rates[] = {
3509 500, 1000, 2000, 4000, 3484 500, 1000, 2000, 4000,
@@ -4015,7 +3990,6 @@ static int wm8962_remove(struct snd_soc_codec *codec)
4015static struct snd_soc_codec_driver soc_codec_dev_wm8962 = { 3990static struct snd_soc_codec_driver soc_codec_dev_wm8962 = {
4016 .probe = wm8962_probe, 3991 .probe = wm8962_probe,
4017 .remove = wm8962_remove, 3992 .remove = wm8962_remove,
4018 .resume = wm8962_resume,
4019 .set_bias_level = wm8962_set_bias_level, 3993 .set_bias_level = wm8962_set_bias_level,
4020 .reg_cache_size = WM8962_MAX_REGISTER + 1, 3994 .reg_cache_size = WM8962_MAX_REGISTER + 1,
4021 .reg_word_size = sizeof(u16), 3995 .reg_word_size = sizeof(u16),
diff --git a/sound/soc/omap/mcpdm.c b/sound/soc/omap/mcpdm.c
index 928f03707451..50e59194ad81 100644
--- a/sound/soc/omap/mcpdm.c
+++ b/sound/soc/omap/mcpdm.c
@@ -449,7 +449,7 @@ exit:
449 return ret; 449 return ret;
450} 450}
451 451
452int __devexit omap_mcpdm_remove(struct platform_device *pdev) 452int omap_mcpdm_remove(struct platform_device *pdev)
453{ 453{
454 struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev); 454 struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev);
455 455
diff --git a/sound/soc/omap/mcpdm.h b/sound/soc/omap/mcpdm.h
index df3e16fb51f3..20c20a8649fe 100644
--- a/sound/soc/omap/mcpdm.h
+++ b/sound/soc/omap/mcpdm.h
@@ -150,4 +150,4 @@ extern int omap_mcpdm_request(void);
150extern void omap_mcpdm_free(void); 150extern void omap_mcpdm_free(void);
151extern int omap_mcpdm_set_offset(int offset1, int offset2); 151extern int omap_mcpdm_set_offset(int offset1, int offset2);
152int __devinit omap_mcpdm_probe(struct platform_device *pdev); 152int __devinit omap_mcpdm_probe(struct platform_device *pdev);
153int __devexit omap_mcpdm_remove(struct platform_device *pdev); 153int omap_mcpdm_remove(struct platform_device *pdev);
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index ebcc2d4d2b18..478d60778453 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -516,6 +516,12 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
516 struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs; 516 struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
517 int err = 0; 517 int err = 0;
518 518
519 if (mcbsp_data->active)
520 if (freq == mcbsp_data->in_freq)
521 return 0;
522 else
523 return -EBUSY;
524
519 /* The McBSP signal muxing functions are only available on McBSP1 */ 525 /* The McBSP signal muxing functions are only available on McBSP1 */
520 if (clk_id == OMAP_MCBSP_CLKR_SRC_CLKR || 526 if (clk_id == OMAP_MCBSP_CLKR_SRC_CLKR ||
521 clk_id == OMAP_MCBSP_CLKR_SRC_CLKX || 527 clk_id == OMAP_MCBSP_CLKR_SRC_CLKX ||
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index b6445757fc54..2b8350b52232 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -196,20 +196,20 @@ static int zylonite_probe(struct snd_soc_card *card)
196 if (clk_pout) { 196 if (clk_pout) {
197 pout = clk_get(NULL, "CLK_POUT"); 197 pout = clk_get(NULL, "CLK_POUT");
198 if (IS_ERR(pout)) { 198 if (IS_ERR(pout)) {
199 dev_err(&pdev->dev, "Unable to obtain CLK_POUT: %ld\n", 199 dev_err(card->dev, "Unable to obtain CLK_POUT: %ld\n",
200 PTR_ERR(pout)); 200 PTR_ERR(pout));
201 return PTR_ERR(pout); 201 return PTR_ERR(pout);
202 } 202 }
203 203
204 ret = clk_enable(pout); 204 ret = clk_enable(pout);
205 if (ret != 0) { 205 if (ret != 0) {
206 dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n", 206 dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
207 ret); 207 ret);
208 clk_put(pout); 208 clk_put(pout);
209 return ret; 209 return ret;
210 } 210 }
211 211
212 dev_dbg(&pdev->dev, "MCLK enabled at %luHz\n", 212 dev_dbg(card->dev, "MCLK enabled at %luHz\n",
213 clk_get_rate(pout)); 213 clk_get_rate(pout));
214 } 214 }
215 215
@@ -241,7 +241,7 @@ static int zylonite_resume_pre(struct snd_soc_card *card)
241 if (clk_pout) { 241 if (clk_pout) {
242 ret = clk_enable(pout); 242 ret = clk_enable(pout);
243 if (ret != 0) 243 if (ret != 0)
244 dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n", 244 dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
245 ret); 245 ret);
246 } 246 }
247 247
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index d2ef014af215..ef69f5a02709 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -30,6 +30,7 @@
30#include <linux/bitops.h> 30#include <linux/bitops.h>
31#include <linux/debugfs.h> 31#include <linux/debugfs.h>
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/ctype.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
34#include <sound/ac97_codec.h> 35#include <sound/ac97_codec.h>
35#include <sound/core.h> 36#include <sound/core.h>
@@ -1434,9 +1435,20 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card)
1434 "%s", card->name); 1435 "%s", card->name);
1435 snprintf(card->snd_card->longname, sizeof(card->snd_card->longname), 1436 snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
1436 "%s", card->long_name ? card->long_name : card->name); 1437 "%s", card->long_name ? card->long_name : card->name);
1437 if (card->driver_name) 1438 snprintf(card->snd_card->driver, sizeof(card->snd_card->driver),
1438 strlcpy(card->snd_card->driver, card->driver_name, 1439 "%s", card->driver_name ? card->driver_name : card->name);
1439 sizeof(card->snd_card->driver)); 1440 for (i = 0; i < ARRAY_SIZE(card->snd_card->driver); i++) {
1441 switch (card->snd_card->driver[i]) {
1442 case '_':
1443 case '-':
1444 case '\0':
1445 break;
1446 default:
1447 if (!isalnum(card->snd_card->driver[i]))
1448 card->snd_card->driver[i] = '_';
1449 break;
1450 }
1451 }
1440 1452
1441 if (card->late_probe) { 1453 if (card->late_probe) {
1442 ret = card->late_probe(card); 1454 ret = card->late_probe(card);
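The soc-core.c hunk above always fills the ALSA driver string (from driver_name, falling back to the card name) and then rewrites every character that is not alphanumeric, '_', '-' or NUL to '_'. The same sanitization loop as a small self-contained sketch; the expected output is noted in the comment.

/* Sketch: sanitize a card name into a driver string the way the hunk does. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void sanitize_driver_name(char *drv, size_t len, const char *name)
{
	size_t i;

	snprintf(drv, len, "%s", name);
	for (i = 0; i < len; i++) {
		switch (drv[i]) {
		case '_':
		case '-':
		case '\0':
			break;
		default:
			if (!isalnum((unsigned char)drv[i]))
				drv[i] = '_';
			break;
		}
	}
}

int main(void)
{
	char drv[16];

	sanitize_driver_name(drv, sizeof(drv), "My SoC card 1.0");
	printf("%s\n", drv);   /* prints "My_SoC_card_1_0" */
	return 0;
}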
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 781d9e61adfb..d8f2bf401458 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -530,8 +530,11 @@ snd_usb_audio_probe(struct usb_device *dev,
530 return chip; 530 return chip;
531 531
532 __error: 532 __error:
533 if (chip && !chip->num_interfaces) 533 if (chip) {
534 snd_card_free(chip->card); 534 if (!chip->num_interfaces)
535 snd_card_free(chip->card);
536 chip->probing = 0;
537 }
535 mutex_unlock(&register_mutex); 538 mutex_unlock(&register_mutex);
536 __err_val: 539 __err_val:
537 return NULL; 540 return NULL;
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 3b8f7b80376b..e9d5c271db69 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -30,6 +30,8 @@ endif
30# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds. 30# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
31# 31#
32# Define NO_DWARF if you do not want debug-info analysis feature at all. 32# Define NO_DWARF if you do not want debug-info analysis feature at all.
33#
34# Define WERROR=0 to disable treating any warnings as errors.
33 35
34$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE 36$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
35 @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) 37 @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
@@ -63,6 +65,11 @@ ifeq ($(ARCH),x86_64)
63 endif 65 endif
64endif 66endif
65 67
68# Treat warnings as errors unless directed not to
69ifneq ($(WERROR),0)
70 CFLAGS_WERROR := -Werror
71endif
72
66# 73#
67# Include saner warnings here, which can catch bugs: 74# Include saner warnings here, which can catch bugs:
68# 75#
@@ -95,7 +102,7 @@ ifndef PERF_DEBUG
95 CFLAGS_OPTIMIZE = -O6 102 CFLAGS_OPTIMIZE = -O6
96endif 103endif
97 104
98CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) 105CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
99EXTLIBS = -lpthread -lrt -lelf -lm 106EXTLIBS = -lpthread -lrt -lelf -lm
100ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 107ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
101ALL_LDFLAGS = $(LDFLAGS) 108ALL_LDFLAGS = $(LDFLAGS)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 6b0519f885e4..f4c3fbee4bad 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -161,6 +161,7 @@ static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
161 struct perf_event_attr *attr = &evsel->attr; 161 struct perf_event_attr *attr = &evsel->attr;
162 int track = !evsel->idx; /* only the first counter needs these */ 162 int track = !evsel->idx; /* only the first counter needs these */
163 163
164 attr->disabled = 1;
164 attr->inherit = !no_inherit; 165 attr->inherit = !no_inherit;
165 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 166 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
166 PERF_FORMAT_TOTAL_TIME_RUNNING | 167 PERF_FORMAT_TOTAL_TIME_RUNNING |
@@ -671,6 +672,8 @@ static int __cmd_record(int argc, const char **argv)
671 } 672 }
672 } 673 }
673 674
675 perf_evlist__enable(evsel_list);
676
674 /* 677 /*
675 * Let the child rip 678 * Let the child rip
676 */ 679 */
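builtin-record.c now opens every counter with attr->disabled = 1 and only turns the events on through the new perf_evlist__enable() (an ioctl loop added in util/evlist.c below) once setup is finished, so counting starts at a well-defined point. A minimal perf_event_open() sketch of the same create-disabled-then-enable sequence, counting instructions for a short busy loop; error handling is kept to the minimum.

/* Sketch: open a counter disabled, enable it only when ready to measure. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	volatile unsigned long i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;             /* created off, like the record tool */
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);   /* what perf_evlist__enable does per fd */
	for (i = 0; i < 1000000; i++)
		;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}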
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 55f4c76f2821..efe696f936e2 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -561,7 +561,7 @@ static int test__basic_mmap(void)
561 } 561 }
562 562
563 err = perf_event__parse_sample(event, attr.sample_type, sample_size, 563 err = perf_event__parse_sample(event, attr.sample_type, sample_size,
564 false, &sample); 564 false, &sample, false);
565 if (err) { 565 if (err) {
566 pr_err("Can't parse sample, err = %d\n", err); 566 pr_err("Can't parse sample, err = %d\n", err);
567 goto out_munmap; 567 goto out_munmap;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index a43433f08300..d28013b7d61c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -191,7 +191,8 @@ static void __zero_source_counters(struct sym_entry *syme)
191 symbol__annotate_zero_histograms(sym); 191 symbol__annotate_zero_histograms(sym);
192} 192}
193 193
194static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip) 194static void record_precise_ip(struct sym_entry *syme, struct map *map,
195 int counter, u64 ip)
195{ 196{
196 struct annotation *notes; 197 struct annotation *notes;
197 struct symbol *sym; 198 struct symbol *sym;
@@ -205,8 +206,8 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
205 if (pthread_mutex_trylock(&notes->lock)) 206 if (pthread_mutex_trylock(&notes->lock))
206 return; 207 return;
207 208
208 ip = syme->map->map_ip(syme->map, ip); 209 ip = map->map_ip(map, ip);
209 symbol__inc_addr_samples(sym, syme->map, counter, ip); 210 symbol__inc_addr_samples(sym, map, counter, ip);
210 211
211 pthread_mutex_unlock(&notes->lock); 212 pthread_mutex_unlock(&notes->lock);
212} 213}
@@ -810,7 +811,7 @@ static void perf_event__process_sample(const union perf_event *event,
810 evsel = perf_evlist__id2evsel(top.evlist, sample->id); 811 evsel = perf_evlist__id2evsel(top.evlist, sample->id);
811 assert(evsel != NULL); 812 assert(evsel != NULL);
812 syme->count[evsel->idx]++; 813 syme->count[evsel->idx]++;
813 record_precise_ip(syme, evsel->idx, ip); 814 record_precise_ip(syme, al.map, evsel->idx, ip);
814 pthread_mutex_lock(&top.active_symbols_lock); 815 pthread_mutex_lock(&top.active_symbols_lock);
815 if (list_empty(&syme->node) || !syme->node.next) { 816 if (list_empty(&syme->node) || !syme->node.next) {
816 static bool first = true; 817 static bool first = true;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 3c1b8a632101..437f8ca679a0 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -169,12 +169,17 @@ static int perf_event__synthesize_mmap_events(union perf_event *event,
169 continue; 169 continue;
170 pbf += n + 3; 170 pbf += n + 3;
171 if (*pbf == 'x') { /* vm_exec */ 171 if (*pbf == 'x') { /* vm_exec */
172 char anonstr[] = "//anon\n";
172 char *execname = strchr(bf, '/'); 173 char *execname = strchr(bf, '/');
173 174
174 /* Catch VDSO */ 175 /* Catch VDSO */
175 if (execname == NULL) 176 if (execname == NULL)
176 execname = strstr(bf, "[vdso]"); 177 execname = strstr(bf, "[vdso]");
177 178
179 /* Catch anonymous mmaps */
180 if ((execname == NULL) && !strstr(bf, "["))
181 execname = anonstr;
182
178 if (execname == NULL) 183 if (execname == NULL)
179 continue; 184 continue;
180 185
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 1d7f66488a88..357a85b85248 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -186,6 +186,6 @@ const char *perf_event__name(unsigned int id);
186 186
187int perf_event__parse_sample(const union perf_event *event, u64 type, 187int perf_event__parse_sample(const union perf_event *event, u64 type,
188 int sample_size, bool sample_id_all, 188 int sample_size, bool sample_id_all,
189 struct perf_sample *sample); 189 struct perf_sample *sample, bool swapped);
190 190
191#endif /* __PERF_RECORD_H */ 191#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c12bd476c6f7..72e9f4886b6d 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -113,6 +113,19 @@ void perf_evlist__disable(struct perf_evlist *evlist)
113 } 113 }
114} 114}
115 115
116void perf_evlist__enable(struct perf_evlist *evlist)
117{
118 int cpu, thread;
119 struct perf_evsel *pos;
120
121 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
122 list_for_each_entry(pos, &evlist->entries, node) {
123 for (thread = 0; thread < evlist->threads->nr; thread++)
124 ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
125 }
126 }
127}
128
116int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) 129int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
117{ 130{
118 int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries; 131 int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index ce85ae9ae57a..f34915002745 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -54,6 +54,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
54void perf_evlist__munmap(struct perf_evlist *evlist); 54void perf_evlist__munmap(struct perf_evlist *evlist);
55 55
56void perf_evlist__disable(struct perf_evlist *evlist); 56void perf_evlist__disable(struct perf_evlist *evlist);
57void perf_evlist__enable(struct perf_evlist *evlist);
57 58
58static inline void perf_evlist__set_maps(struct perf_evlist *evlist, 59static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
59 struct cpu_map *cpus, 60 struct cpu_map *cpus,
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index a03a36b7908a..e389815078d3 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -7,6 +7,8 @@
7 * Released under the GPL v2. (and only v2, not any later version) 7 * Released under the GPL v2. (and only v2, not any later version)
8 */ 8 */
9 9
10#include <byteswap.h>
11#include "asm/bug.h"
10#include "evsel.h" 12#include "evsel.h"
11#include "evlist.h" 13#include "evlist.h"
12#include "util.h" 14#include "util.h"
@@ -342,10 +344,20 @@ static bool sample_overlap(const union perf_event *event,
342 344
343int perf_event__parse_sample(const union perf_event *event, u64 type, 345int perf_event__parse_sample(const union perf_event *event, u64 type,
344 int sample_size, bool sample_id_all, 346 int sample_size, bool sample_id_all,
345 struct perf_sample *data) 347 struct perf_sample *data, bool swapped)
346{ 348{
347 const u64 *array; 349 const u64 *array;
348 350
351 /*
352 * used for cross-endian analysis. See git commit 65014ab3
353 * for why this goofiness is needed.
354 */
355 union {
356 u64 val64;
357 u32 val32[2];
358 } u;
359
360
349 data->cpu = data->pid = data->tid = -1; 361 data->cpu = data->pid = data->tid = -1;
350 data->stream_id = data->id = data->time = -1ULL; 362 data->stream_id = data->id = data->time = -1ULL;
351 363
@@ -366,9 +378,16 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
366 } 378 }
367 379
368 if (type & PERF_SAMPLE_TID) { 380 if (type & PERF_SAMPLE_TID) {
369 u32 *p = (u32 *)array; 381 u.val64 = *array;
370 data->pid = p[0]; 382 if (swapped) {
371 data->tid = p[1]; 383 /* undo swap of u64, then swap on individual u32s */
384 u.val64 = bswap_64(u.val64);
385 u.val32[0] = bswap_32(u.val32[0]);
386 u.val32[1] = bswap_32(u.val32[1]);
387 }
388
389 data->pid = u.val32[0];
390 data->tid = u.val32[1];
372 array++; 391 array++;
373 } 392 }
374 393
@@ -395,8 +414,15 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
395 } 414 }
396 415
397 if (type & PERF_SAMPLE_CPU) { 416 if (type & PERF_SAMPLE_CPU) {
398 u32 *p = (u32 *)array; 417
399 data->cpu = *p; 418 u.val64 = *array;
419 if (swapped) {
420 /* undo swap of u64, then swap on individual u32s */
421 u.val64 = bswap_64(u.val64);
422 u.val32[0] = bswap_32(u.val32[0]);
423 }
424
425 data->cpu = u.val32[0];
400 array++; 426 array++;
401 } 427 }
402 428
@@ -423,18 +449,27 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
423 } 449 }
424 450
425 if (type & PERF_SAMPLE_RAW) { 451 if (type & PERF_SAMPLE_RAW) {
426 u32 *p = (u32 *)array; 452 const u64 *pdata;
453
454 u.val64 = *array;
455 if (WARN_ONCE(swapped,
456 "Endianness of raw data not corrected!\n")) {
457 /* undo swap of u64, then swap on individual u32s */
458 u.val64 = bswap_64(u.val64);
459 u.val32[0] = bswap_32(u.val32[0]);
460 u.val32[1] = bswap_32(u.val32[1]);
461 }
427 462
428 if (sample_overlap(event, array, sizeof(u32))) 463 if (sample_overlap(event, array, sizeof(u32)))
429 return -EFAULT; 464 return -EFAULT;
430 465
431 data->raw_size = *p; 466 data->raw_size = u.val32[0];
432 p++; 467 pdata = (void *) array + sizeof(u32);
433 468
434 if (sample_overlap(event, p, data->raw_size)) 469 if (sample_overlap(event, pdata, data->raw_size))
435 return -EFAULT; 470 return -EFAULT;
436 471
437 data->raw_data = p; 472 data->raw_data = (void *) pdata;
438 } 473 }
439 474
440 return 0; 475 return 0;
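The evsel.c change handles perf.data files written on a machine of the opposite endianness: the event's u64 slots have already been byte-swapped as 64-bit values, so fields that are really two packed u32s (pid/tid, cpu, the raw size) must first have that 64-bit swap undone and then be swapped as individual u32s, which is what the union-based code above does. A small sketch of that undo-and-reswap step using <byteswap.h>.

/* Sketch: why parse_sample undoes the u64 swap before swapping two u32s. */
#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	union {
		uint64_t val64;
		uint32_t val32[2];
	} u;
	uint32_t pid = 1234, tid = 5678;

	/* As stored in a file written on an opposite-endian machine: each
	 * u32 appears byte-reversed from this host's point of view. */
	u.val32[0] = bswap_32(pid);
	u.val32[1] = bswap_32(tid);

	/* A generic "byte-swap every u64" pass over the event runs first. */
	u.val64 = bswap_64(u.val64);

	/* Field parsing: undo the 64-bit swap, then fix each u32 on its own. */
	u.val64 = bswap_64(u.val64);
	u.val32[0] = bswap_32(u.val32[0]);
	u.val32[1] = bswap_32(u.val32[1]);

	printf("pid=%u tid=%u\n", u.val32[0], u.val32[1]);   /* 1234 5678 */
	return 0;
}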
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 555fc3864b90..5d732621a462 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -659,7 +659,7 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
659 if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die)) 659 if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die))
660 ret = -ENOENT; 660 ret = -ENOENT;
661 } 661 }
662 if (ret == 0) 662 if (ret >= 0)
663 ret = convert_variable(&vr_die, pf); 663 ret = convert_variable(&vr_die, pf);
664 664
665 if (ret < 0) 665 if (ret < 0)
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index cbc8f215d4b7..7624324efad4 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -803,7 +803,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
803 first = list_entry(evlist->entries.next, struct perf_evsel, node); 803 first = list_entry(evlist->entries.next, struct perf_evsel, node);
804 err = perf_event__parse_sample(event, first->attr.sample_type, 804 err = perf_event__parse_sample(event, first->attr.sample_type,
805 perf_evsel__sample_size(first), 805 perf_evsel__sample_size(first),
806 sample_id_all, &pevent->sample); 806 sample_id_all, &pevent->sample, false);
807 if (err) 807 if (err)
808 return PyErr_Format(PyExc_OSError, 808 return PyErr_Format(PyExc_OSError,
809 "perf: can't parse sample, err=%d", err); 809 "perf: can't parse sample, err=%d", err);
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 170601e67d6b..974d0cbee5e9 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -162,7 +162,8 @@ static inline int perf_session__parse_sample(struct perf_session *session,
162{ 162{
163 return perf_event__parse_sample(event, session->sample_type, 163 return perf_event__parse_sample(event, session->sample_type,
164 session->sample_size, 164 session->sample_size,
165 session->sample_id_all, sample); 165 session->sample_id_all, sample,
166 session->header.needs_swap);
166} 167}
167 168
168struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, 169struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 401e220566fd..1ee8f1e40f18 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -151,11 +151,17 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
151{ 151{
152 u64 ip_l, ip_r; 152 u64 ip_l, ip_r;
153 153
154 if (!left->ms.sym && !right->ms.sym)
155 return right->level - left->level;
156
157 if (!left->ms.sym || !right->ms.sym)
158 return cmp_null(left->ms.sym, right->ms.sym);
159
154 if (left->ms.sym == right->ms.sym) 160 if (left->ms.sym == right->ms.sym)
155 return 0; 161 return 0;
156 162
157 ip_l = left->ms.sym ? left->ms.sym->start : left->ip; 163 ip_l = left->ms.sym->start;
158 ip_r = right->ms.sym ? right->ms.sym->start : right->ip; 164 ip_r = right->ms.sym->start;
159 165
160 return (int64_t)(ip_r - ip_l); 166 return (int64_t)(ip_r - ip_l);
161} 167}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 469c0264ed29..40eeaf07725b 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -74,16 +74,104 @@ static void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
74 74
75bool symbol_type__is_a(char symbol_type, enum map_type map_type) 75bool symbol_type__is_a(char symbol_type, enum map_type map_type)
76{ 76{
77 symbol_type = toupper(symbol_type);
78
77 switch (map_type) { 79 switch (map_type) {
78 case MAP__FUNCTION: 80 case MAP__FUNCTION:
79 return symbol_type == 'T' || symbol_type == 'W'; 81 return symbol_type == 'T' || symbol_type == 'W';
80 case MAP__VARIABLE: 82 case MAP__VARIABLE:
81 return symbol_type == 'D' || symbol_type == 'd'; 83 return symbol_type == 'D';
82 default: 84 default:
83 return false; 85 return false;
84 } 86 }
85} 87}
86 88
89static int prefix_underscores_count(const char *str)
90{
91 const char *tail = str;
92
93 while (*tail == '_')
94 tail++;
95
96 return tail - str;
97}
98
99#define SYMBOL_A 0
100#define SYMBOL_B 1
101
102static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
103{
104 s64 a;
105 s64 b;
106
107 /* Prefer a symbol with non zero length */
108 a = syma->end - syma->start;
109 b = symb->end - symb->start;
110 if ((b == 0) && (a > 0))
111 return SYMBOL_A;
112 else if ((a == 0) && (b > 0))
113 return SYMBOL_B;
114
115 /* Prefer a non weak symbol over a weak one */
116 a = syma->binding == STB_WEAK;
117 b = symb->binding == STB_WEAK;
118 if (b && !a)
119 return SYMBOL_A;
120 if (a && !b)
121 return SYMBOL_B;
122
123 /* Prefer a global symbol over a non global one */
124 a = syma->binding == STB_GLOBAL;
125 b = symb->binding == STB_GLOBAL;
126 if (a && !b)
127 return SYMBOL_A;
128 if (b && !a)
129 return SYMBOL_B;
130
131 /* Prefer a symbol with less underscores */
132 a = prefix_underscores_count(syma->name);
133 b = prefix_underscores_count(symb->name);
134 if (b > a)
135 return SYMBOL_A;
136 else if (a > b)
137 return SYMBOL_B;
138
139 /* If all else fails, choose the symbol with the longest name */
140 if (strlen(syma->name) >= strlen(symb->name))
141 return SYMBOL_A;
142 else
143 return SYMBOL_B;
144}
145
146static void symbols__fixup_duplicate(struct rb_root *symbols)
147{
148 struct rb_node *nd;
149 struct symbol *curr, *next;
150
151 nd = rb_first(symbols);
152
153 while (nd) {
154 curr = rb_entry(nd, struct symbol, rb_node);
155again:
156 nd = rb_next(&curr->rb_node);
157 next = rb_entry(nd, struct symbol, rb_node);
158
159 if (!nd)
160 break;
161
162 if (curr->start != next->start)
163 continue;
164
165 if (choose_best_symbol(curr, next) == SYMBOL_A) {
166 rb_erase(&next->rb_node, symbols);
167 goto again;
168 } else {
169 nd = rb_next(&curr->rb_node);
170 rb_erase(&curr->rb_node, symbols);
171 }
172 }
173}
174
87static void symbols__fixup_end(struct rb_root *symbols) 175static void symbols__fixup_end(struct rb_root *symbols)
88{ 176{
89 struct rb_node *nd, *prevnd = rb_first(symbols); 177 struct rb_node *nd, *prevnd = rb_first(symbols);
@@ -438,18 +526,11 @@ int kallsyms__parse(const char *filename, void *arg,
438 char *line = NULL; 526 char *line = NULL;
439 size_t n; 527 size_t n;
440 int err = -1; 528 int err = -1;
441 u64 prev_start = 0;
442 char prev_symbol_type = 0;
443 char *prev_symbol_name;
444 FILE *file = fopen(filename, "r"); 529 FILE *file = fopen(filename, "r");
445 530
446 if (file == NULL) 531 if (file == NULL)
447 goto out_failure; 532 goto out_failure;
448 533
449 prev_symbol_name = malloc(KSYM_NAME_LEN);
450 if (prev_symbol_name == NULL)
451 goto out_close;
452
453 err = 0; 534 err = 0;
454 535
455 while (!feof(file)) { 536 while (!feof(file)) {
@@ -470,7 +551,7 @@ int kallsyms__parse(const char *filename, void *arg,
470 if (len + 2 >= line_len) 551 if (len + 2 >= line_len)
471 continue; 552 continue;
472 553
473 symbol_type = toupper(line[len]); 554 symbol_type = line[len];
474 len += 2; 555 len += 2;
475 symbol_name = line + len; 556 symbol_name = line + len;
476 len = line_len - len; 557 len = line_len - len;
@@ -480,24 +561,18 @@ int kallsyms__parse(const char *filename, void *arg,
480 break; 561 break;
481 } 562 }
482 563
483 if (prev_symbol_type) { 564 /*
484 u64 end = start; 565 * module symbols are not sorted so we add all
485 if (end != prev_start) 566 * symbols with zero length and rely on
486 --end; 567 * symbols__fixup_end() to fix it up.
487 err = process_symbol(arg, prev_symbol_name, 568 */
488 prev_symbol_type, prev_start, end); 569 err = process_symbol(arg, symbol_name,
489 if (err) 570 symbol_type, start, start);
490 break; 571 if (err)
491 } 572 break;
492
493 memcpy(prev_symbol_name, symbol_name, len + 1);
494 prev_symbol_type = symbol_type;
495 prev_start = start;
496 } 573 }
497 574
498 free(prev_symbol_name);
499 free(line); 575 free(line);
500out_close:
501 fclose(file); 576 fclose(file);
502 return err; 577 return err;
503 578
@@ -703,6 +778,9 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
703 if (dso__load_all_kallsyms(dso, filename, map) < 0) 778 if (dso__load_all_kallsyms(dso, filename, map) < 0)
704 return -1; 779 return -1;
705 780
781 symbols__fixup_duplicate(&dso->symbols[map->type]);
782 symbols__fixup_end(&dso->symbols[map->type]);
783
706 if (dso->kernel == DSO_TYPE_GUEST_KERNEL) 784 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
707 dso->symtab_type = SYMTAB__GUEST_KALLSYMS; 785 dso->symtab_type = SYMTAB__GUEST_KALLSYMS;
708 else 786 else
@@ -1092,8 +1170,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
1092 if (dso->has_build_id) { 1170 if (dso->has_build_id) {
1093 u8 build_id[BUILD_ID_SIZE]; 1171 u8 build_id[BUILD_ID_SIZE];
1094 1172
1095 if (elf_read_build_id(elf, build_id, 1173 if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
1096 BUILD_ID_SIZE) != BUILD_ID_SIZE)
1097 goto out_elf_end; 1174 goto out_elf_end;
1098 1175
1099 if (!dso__build_id_equal(dso, build_id)) 1176 if (!dso__build_id_equal(dso, build_id))
@@ -1111,6 +1188,8 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
1111 } 1188 }
1112 1189
1113 opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx); 1190 opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
1191 if (opdshdr.sh_type != SHT_PROGBITS)
1192 opdsec = NULL;
1114 if (opdsec) 1193 if (opdsec)
1115 opddata = elf_rawdata(opdsec, NULL); 1194 opddata = elf_rawdata(opdsec, NULL);
1116 1195
@@ -1276,6 +1355,7 @@ new_symbol:
1276 * For misannotated, zeroed, ASM function sizes. 1355 * For misannotated, zeroed, ASM function sizes.
1277 */ 1356 */
1278 if (nr > 0) { 1357 if (nr > 0) {
1358 symbols__fixup_duplicate(&dso->symbols[map->type]);
1279 symbols__fixup_end(&dso->symbols[map->type]); 1359 symbols__fixup_end(&dso->symbols[map->type]);
1280 if (kmap) { 1360 if (kmap) {
1281 /* 1361 /*
@@ -1362,8 +1442,8 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
1362 ptr = data->d_buf; 1442 ptr = data->d_buf;
1363 while (ptr < (data->d_buf + data->d_size)) { 1443 while (ptr < (data->d_buf + data->d_size)) {
1364 GElf_Nhdr *nhdr = ptr; 1444 GElf_Nhdr *nhdr = ptr;
1365 int namesz = NOTE_ALIGN(nhdr->n_namesz), 1445 size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
1366 descsz = NOTE_ALIGN(nhdr->n_descsz); 1446 descsz = NOTE_ALIGN(nhdr->n_descsz);
1367 const char *name; 1447 const char *name;
1368 1448
1369 ptr += sizeof(*nhdr); 1449 ptr += sizeof(*nhdr);
@@ -1372,8 +1452,10 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
1372 if (nhdr->n_type == NT_GNU_BUILD_ID && 1452 if (nhdr->n_type == NT_GNU_BUILD_ID &&
1373 nhdr->n_namesz == sizeof("GNU")) { 1453 nhdr->n_namesz == sizeof("GNU")) {
1374 if (memcmp(name, "GNU", sizeof("GNU")) == 0) { 1454 if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
1375 memcpy(bf, ptr, BUILD_ID_SIZE); 1455 size_t sz = min(size, descsz);
1376 err = BUILD_ID_SIZE; 1456 memcpy(bf, ptr, sz);
1457 memset(bf + sz, 0, size - sz);
1458 err = descsz;
1377 break; 1459 break;
1378 } 1460 }
1379 } 1461 }
@@ -1425,7 +1507,7 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
1425 while (1) { 1507 while (1) {
1426 char bf[BUFSIZ]; 1508 char bf[BUFSIZ];
1427 GElf_Nhdr nhdr; 1509 GElf_Nhdr nhdr;
1428 int namesz, descsz; 1510 size_t namesz, descsz;
1429 1511
1430 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) 1512 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
1431 break; 1513 break;
@@ -1434,15 +1516,16 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
1434 descsz = NOTE_ALIGN(nhdr.n_descsz); 1516 descsz = NOTE_ALIGN(nhdr.n_descsz);
1435 if (nhdr.n_type == NT_GNU_BUILD_ID && 1517 if (nhdr.n_type == NT_GNU_BUILD_ID &&
1436 nhdr.n_namesz == sizeof("GNU")) { 1518 nhdr.n_namesz == sizeof("GNU")) {
1437 if (read(fd, bf, namesz) != namesz) 1519 if (read(fd, bf, namesz) != (ssize_t)namesz)
1438 break; 1520 break;
1439 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { 1521 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
1440 if (read(fd, build_id, 1522 size_t sz = min(descsz, size);
1441 BUILD_ID_SIZE) == BUILD_ID_SIZE) { 1523 if (read(fd, build_id, sz) == (ssize_t)sz) {
1524 memset(build_id + sz, 0, size - sz);
1442 err = 0; 1525 err = 0;
1443 break; 1526 break;
1444 } 1527 }
1445 } else if (read(fd, bf, descsz) != descsz) 1528 } else if (read(fd, bf, descsz) != (ssize_t)descsz)
1446 break; 1529 break;
1447 } else { 1530 } else {
1448 int n = namesz + descsz; 1531 int n = namesz + descsz;