-rw-r--r--  Documentation/kernel-per-CPU-kthreads.txt | 202
-rw-r--r--  Documentation/power/devices.txt | 15
-rw-r--r--  Documentation/power/interface.txt | 4
-rw-r--r--  Documentation/power/notifiers.txt | 6
-rw-r--r--  Documentation/power/states.txt | 30
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/Kconfig | 3
-rw-r--r--  arch/arm/Kconfig | 3
-rw-r--r--  arch/arm/Makefile | 2
-rw-r--r--  arch/arm/common/mcpm_platsmp.c | 3
-rw-r--r--  arch/arm/include/asm/cmpxchg.h | 8
-rw-r--r--  arch/arm/xen/enlighten.c | 33
-rw-r--r--  arch/parisc/Kconfig | 2
-rw-r--r--  arch/parisc/Makefile | 21
-rw-r--r--  arch/parisc/include/asm/hardirq.h | 9
-rw-r--r--  arch/parisc/include/asm/processor.h | 3
-rw-r--r--  arch/parisc/kernel/entry.S | 155
-rw-r--r--  arch/parisc/kernel/irq.c | 101
-rw-r--r--  arch/parisc/mm/init.c | 4
-rw-r--r--  arch/powerpc/Kconfig.debug | 23
-rw-r--r--  arch/powerpc/include/asm/context_tracking.h | 10
-rw-r--r--  arch/powerpc/include/asm/firmware.h | 4
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 5
-rw-r--r--  arch/powerpc/include/asm/opal.h | 5
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 2
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h | 2
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 2
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 7
-rw-r--r--  arch/powerpc/include/asm/udbg.h | 1
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 2
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 5
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 8
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c | 4
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 11
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 11
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 5
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 3
-rw-r--r--  arch/powerpc/kernel/process.c | 8
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 5
-rw-r--r--  arch/powerpc/kernel/rtas.c | 113
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c | 10
-rw-r--r--  arch/powerpc/kernel/signal.c | 7
-rw-r--r--  arch/powerpc/kernel/traps.c | 80
-rw-r--r--  arch/powerpc/kernel/udbg.c | 3
-rw-r--r--  arch/powerpc/mm/fault.c | 41
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 36
-rw-r--r--  arch/powerpc/mm/init_64.c | 3
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 280
-rw-r--r--  arch/powerpc/platforms/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 30
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 9
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 12
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 2
-rw-r--r--  arch/powerpc/platforms/powernv/powernv.h | 2
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 16
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 62
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/pseries/suspend.c | 22
-rw-r--r--  arch/powerpc/platforms/wsp/ics.c | 2
-rw-r--r--  arch/powerpc/sysdev/Makefile | 2
-rw-r--r--  arch/powerpc/sysdev/ehv_pic.c | 2
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 2
-rw-r--r--  arch/powerpc/sysdev/udbg_memcons.c | 105
-rw-r--r--  arch/powerpc/sysdev/xics/ics-opal.c | 2
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/kernel/microcode_intel_early.c | 5
-rw-r--r--  arch/x86/kernel/process.c | 5
-rw-r--r--  arch/x86/mm/init.c | 19
-rw-r--r--  drivers/acpi/ac.c | 33
-rw-r--r--  drivers/acpi/ec.c | 4
-rw-r--r--  drivers/acpi/processor_driver.c | 8
-rw-r--r--  drivers/acpi/processor_idle.c | 29
-rw-r--r--  drivers/acpi/scan.c | 3
-rw-r--r--  drivers/acpi/video.c | 8
-rw-r--r--  drivers/base/power/common.c | 12
-rw-r--r--  drivers/block/rbd.c | 935
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 14
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 3
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 16
-rw-r--r--  drivers/cpufreq/Kconfig | 2
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 15
-rw-r--r--  drivers/cpufreq/arm_big_little.c | 7
-rw-r--r--  drivers/cpufreq/arm_big_little.h | 5
-rw-r--r--  drivers/cpufreq/arm_big_little_dt.c | 9
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c | 27
-rw-r--r--  drivers/cpufreq/cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 11
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 1
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 1
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 7
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 122
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c | 4
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 27
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 20
-rw-r--r--  drivers/gpu/drm/drm_encoder_slave.c | 6
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 34
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 77
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 44
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 90
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 29
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 17
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 12
-rw-r--r--  drivers/lguest/page_tables.c | 1
-rw-r--r--  drivers/mmc/host/mmci.c | 9
-rw-r--r--  drivers/net/caif/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 25
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 5
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/calxeda/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 20
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_resources.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 29
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 95
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 54
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 46
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 2
-rw-r--r--  drivers/net/macvlan.c | 7
-rw-r--r--  drivers/net/ntb_netdev.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h | 138
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 10
-rw-r--r--  drivers/net/wireless/b43/dma.c | 19
-rw-r--r--  drivers/net/wireless/b43/dma.h | 4
-rw-r--r--  drivers/net/wireless/b43/main.c | 43
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 3
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 3
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 1
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 1
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 21
-rw-r--r--  drivers/ntb/ntb_hw.c | 10
-rw-r--r--  drivers/ntb/ntb_transport.c | 175
-rw-r--r--  drivers/rtc/Kconfig | 2
-rw-r--r--  drivers/spi/spi-atmel.c | 51
-rw-r--r--  drivers/spi/spi-davinci.c | 2
-rw-r--r--  drivers/spi/spi.c | 9
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 63
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 8
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.h | 4
-rw-r--r--  drivers/target/target_core_configfs.c | 11
-rw-r--r--  drivers/target/target_core_device.c | 14
-rw-r--r--  drivers/target/target_core_file.c | 9
-rw-r--r--  drivers/target/target_core_iblock.c | 2
-rw-r--r--  drivers/target/target_core_internal.h | 1
-rw-r--r--  drivers/target/target_core_rd.c | 21
-rw-r--r--  drivers/target/target_core_rd.h | 1
-rw-r--r--  drivers/target/target_core_transport.c | 13
-rw-r--r--  drivers/vhost/vringh.c | 3
-rw-r--r--  fs/ext4/ext4.h | 8
-rw-r--r--  fs/ext4/extents.c | 9
-rw-r--r--  fs/ext4/extents_status.c | 17
-rw-r--r--  fs/ext4/extents_status.h | 3
-rw-r--r--  fs/ext4/file.c | 4
-rw-r--r--  fs/ext4/inode.c | 85
-rw-r--r--  fs/ext4/mballoc.c | 6
-rw-r--r--  fs/ext4/page-io.c | 121
-rw-r--r--  include/acpi/acpiosxf.h | 2
-rw-r--r--  include/acpi/processor.h | 10
-rw-r--r--  include/drm/drmP.h | 3
-rw-r--r--  include/drm/drm_fb_helper.h | 15
-rw-r--r--  include/drm/drm_os_linux.h | 9
-rw-r--r--  include/linux/journal-head.h | 8
-rw-r--r--  include/linux/kref.h | 33
-rw-r--r--  include/linux/mlx4/qp.h | 29
-rw-r--r--  include/linux/spi/spi.h | 4
-rw-r--r--  include/linux/time.h | 4
-rw-r--r--  include/net/sock.h | 12
-rw-r--r--  include/target/target_core_base.h | 5
-rw-r--r--  include/trace/events/ext4.h | 4
-rw-r--r--  kernel/cpu/idle.c | 2
-rw-r--r--  kernel/events/core.c | 240
-rw-r--r--  kernel/kmod.c | 5
-rw-r--r--  kernel/rcutree_plugin.h | 4
-rw-r--r--  kernel/time/Kconfig | 5
-rw-r--r--  kernel/time/tick-broadcast.c | 10
-rw-r--r--  kernel/time/tick-sched.c | 3
-rw-r--r--  kernel/timer.c | 2
-rw-r--r--  kernel/trace/trace_events_filter.c | 4
-rw-r--r--  kernel/trace/trace_kprobe.c | 53
-rw-r--r--  kernel/workqueue.c | 19
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 13
-rw-r--r--  net/batman-adv/main.c | 18
-rw-r--r--  net/batman-adv/network-coding.c | 8
-rw-r--r--  net/ceph/osd_client.c | 5
-rw-r--r--  net/core/sock.c | 12
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv6/ip6_gre.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 12
-rw-r--r--  net/ipv6/udp.c | 13
-rw-r--r--  net/ipv6/udp_impl.h | 2
-rw-r--r--  net/ipv6/udplite.c | 2
-rw-r--r--  net/ipv6/xfrm6_policy.c | 4
-rw-r--r--  sound/aoa/fabrics/layout.c | 8
-rw-r--r--  sound/aoa/soundbus/i2sbus/core.c | 3
-rw-r--r--  sound/oss/Kconfig | 2
-rw-r--r--  sound/pci/hda/hda_generic.c | 9
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/soc/codecs/ab8500-codec.h | 36
-rw-r--r--  sound/soc/codecs/da7213.c | 8
-rw-r--r--  sound/soc/codecs/wm0010.c | 1
-rw-r--r--  sound/usb/proc.c | 22
224 files changed, 3347 insertions(+), 1938 deletions(-)
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
new file mode 100644
index 000000000000..cbf7ae412da4
--- /dev/null
+++ b/Documentation/kernel-per-CPU-kthreads.txt
@@ -0,0 +1,202 @@
+REDUCING OS JITTER DUE TO PER-CPU KTHREADS
+
+This document lists per-CPU kthreads in the Linux kernel and presents
+options to control their OS jitter. Note that non-per-CPU kthreads are
+not listed here. To reduce OS jitter from non-per-CPU kthreads, bind
+them to a "housekeeping" CPU dedicated to such work.
+
+
+REFERENCES
+
+o	Documentation/IRQ-affinity.txt: Binding interrupts to sets of CPUs.
+
+o	Documentation/cgroups: Using cgroups to bind tasks to sets of CPUs.
+
+o	man taskset: Using the taskset command to bind tasks to sets
+	of CPUs.
+
+o	man sched_setaffinity: Using the sched_setaffinity() system
+	call to bind tasks to sets of CPUs.
+
+o	/sys/devices/system/cpu/cpuN/online: Control CPU N's hotplug state,
+	writing "0" to offline and "1" to online.
+
+o	In order to locate kernel-generated OS jitter on CPU N:
+
+		cd /sys/kernel/debug/tracing
+		echo 1 > max_graph_depth # Increase the "1" for more detail
+		echo function_graph > current_tracer
+		# run workload
+		cat per_cpu/cpuN/trace
+
+
+KTHREADS
+
+Name: ehca_comp/%u
+Purpose: Periodically process Infiniband-related work.
+To reduce its OS jitter, do any of the following:
+1.	Don't use eHCA Infiniband hardware, instead choosing hardware
+	that does not require per-CPU kthreads. This will prevent these
+	kthreads from being created in the first place. (This will
+	work for most people, as this hardware, though important, is
+	relatively old and is produced in relatively low unit volumes.)
+2.	Do all eHCA-Infiniband-related work on other CPUs, including
+	interrupts.
+3.	Rework the eHCA driver so that its per-CPU kthreads are
+	provisioned only on selected CPUs.
+
+
+Name: irq/%d-%s
+Purpose: Handle threaded interrupts.
+To reduce its OS jitter, do the following:
+1.	Use irq affinity to force the irq threads to execute on
+	some other CPU.
+
+Name: kcmtpd_ctr_%d
+Purpose: Handle Bluetooth work.
+To reduce its OS jitter, do one of the following:
+1.	Don't use Bluetooth, in which case these kthreads won't be
+	created in the first place.
+2.	Use irq affinity to force Bluetooth-related interrupts to
+	occur on some other CPU and furthermore initiate all
+	Bluetooth activity on some other CPU.
+
+Name: ksoftirqd/%u
+Purpose: Execute softirq handlers when threaded or when under heavy load.
+To reduce its OS jitter, each softirq vector must be handled
+separately as follows:
+TIMER_SOFTIRQ: Do all of the following:
+1.	To the extent possible, keep the CPU out of the kernel when it
+	is non-idle, for example, by avoiding system calls and by forcing
+	both kernel threads and interrupts to execute elsewhere.
+2.	Build with CONFIG_HOTPLUG_CPU=y. After boot completes, force
+	the CPU offline, then bring it back online. This forces
+	recurring timers to migrate elsewhere. If you are concerned
+	with multiple CPUs, force them all offline before bringing the
+	first one back online. Once you have onlined the CPUs in question,
+	do not offline any other CPUs, because doing so could force the
+	timer back onto one of the CPUs in question.
+NET_TX_SOFTIRQ and NET_RX_SOFTIRQ: Do all of the following:
+1.	Force networking interrupts onto other CPUs.
+2.	Initiate any network I/O on other CPUs.
+3.	Once your application has started, prevent CPU-hotplug operations
+	from being initiated from tasks that might run on the CPU to
+	be de-jittered. (It is OK to force this CPU offline and then
+	bring it back online before you start your application.)
+BLOCK_SOFTIRQ: Do all of the following:
+1.	Force block-device interrupts onto some other CPU.
+2.	Initiate any block I/O on other CPUs.
+3.	Once your application has started, prevent CPU-hotplug operations
+	from being initiated from tasks that might run on the CPU to
+	be de-jittered. (It is OK to force this CPU offline and then
+	bring it back online before you start your application.)
+BLOCK_IOPOLL_SOFTIRQ: Do all of the following:
+1.	Force block-device interrupts onto some other CPU.
+2.	Initiate any block I/O and block-I/O polling on other CPUs.
+3.	Once your application has started, prevent CPU-hotplug operations
+	from being initiated from tasks that might run on the CPU to
+	be de-jittered. (It is OK to force this CPU offline and then
+	bring it back online before you start your application.)
+TASKLET_SOFTIRQ: Do one or more of the following:
+1.	Avoid use of drivers that use tasklets. (Such drivers will contain
+	calls to things like tasklet_schedule().)
+2.	Convert all drivers that you must use from tasklets to workqueues.
+3.	Force interrupts for drivers using tasklets onto other CPUs,
+	and also do I/O involving these drivers on other CPUs.
+SCHED_SOFTIRQ: Do all of the following:
+1.	Avoid sending scheduler IPIs to the CPU to be de-jittered,
+	for example, ensure that at most one runnable kthread is present
+	on that CPU. If a thread that expects to run on the de-jittered
+	CPU awakens, the scheduler will send an IPI that can result in
+	a subsequent SCHED_SOFTIRQ.
+2.	Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
+	CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
+	to be de-jittered is marked as an adaptive-ticks CPU using the
+	"nohz_full=" boot parameter. This reduces the number of
+	scheduler-clock interrupts that the de-jittered CPU receives,
+	minimizing its chances of being selected to do the load balancing
+	work that runs in SCHED_SOFTIRQ context.
+3.	To the extent possible, keep the CPU out of the kernel when it
+	is non-idle, for example, by avoiding system calls and by
+	forcing both kernel threads and interrupts to execute elsewhere.
+	This further reduces the number of scheduler-clock interrupts
+	received by the de-jittered CPU.
+HRTIMER_SOFTIRQ: Do all of the following:
+1.	To the extent possible, keep the CPU out of the kernel when it
+	is non-idle. For example, avoid system calls and force both
+	kernel threads and interrupts to execute elsewhere.
+2.	Build with CONFIG_HOTPLUG_CPU=y. Once boot completes, force the
+	CPU offline, then bring it back online. This forces recurring
+	timers to migrate elsewhere. If you are concerned with multiple
+	CPUs, force them all offline before bringing the first one
+	back online. Once you have onlined the CPUs in question, do not
+	offline any other CPUs, because doing so could force the timer
+	back onto one of the CPUs in question.
+RCU_SOFTIRQ: Do at least one of the following:
+1.	Offload callbacks and keep the CPU in either dyntick-idle or
+	adaptive-ticks state by doing all of the following:
+	a.	Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
+		CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
+		to be de-jittered is marked as an adaptive-ticks CPU using
+		the "nohz_full=" boot parameter. Bind the rcuo kthreads
+		to housekeeping CPUs, which can tolerate OS jitter.
+	b.	To the extent possible, keep the CPU out of the kernel
+		when it is non-idle, for example, by avoiding system
+		calls and by forcing both kernel threads and interrupts
+		to execute elsewhere.
+2.	Enable RCU to do its processing remotely via dyntick-idle by
+	doing all of the following:
+	a.	Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y.
+	b.	Ensure that the CPU goes idle frequently, allowing other
+		CPUs to detect that it has passed through an RCU quiescent
+		state. If the kernel is built with CONFIG_NO_HZ_FULL=y,
+		userspace execution also allows other CPUs to detect that
+		the CPU in question has passed through a quiescent state.
+	c.	To the extent possible, keep the CPU out of the kernel
+		when it is non-idle, for example, by avoiding system
+		calls and by forcing both kernel threads and interrupts
+		to execute elsewhere.
+
+Name: rcuc/%u
+Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels.
+To reduce its OS jitter, do at least one of the following:
+1.	Build the kernel with CONFIG_PREEMPT=n. This prevents these
+	kthreads from being created in the first place, and also obviates
+	the need for RCU priority boosting. This approach is feasible
+	for workloads that do not require high degrees of responsiveness.
+2.	Build the kernel with CONFIG_RCU_BOOST=n. This prevents these
+	kthreads from being created in the first place. This approach
+	is feasible only if your workload never requires RCU priority
+	boosting, for example, if you ensure frequent idle time on all
+	CPUs that might execute within the kernel.
+3.	Build with CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_ALL=y,
+	which offloads all RCU callbacks to kthreads that can be moved
+	off of CPUs susceptible to OS jitter. This approach prevents the
+	rcuc/%u kthreads from having any work to do, so that they are
+	never awakened.
+4.	Ensure that the CPU never enters the kernel, and, in particular,
+	avoid initiating any CPU hotplug operations on this CPU. This is
+	another way of preventing any callbacks from being queued on the
+	CPU, again preventing the rcuc/%u kthreads from having any work
+	to do.
+
+Name: rcuob/%d, rcuop/%d, and rcuos/%d
+Purpose: Offload RCU callbacks from the corresponding CPU.
+To reduce its OS jitter, do at least one of the following:
+1.	Use affinity, cgroups, or other mechanism to force these kthreads
+	to execute on some other CPU.
+2.	Build with CONFIG_RCU_NOCB_CPU=n, which will prevent these
+	kthreads from being created in the first place. However, please
+	note that this will not eliminate OS jitter, but will instead
+	shift it to RCU_SOFTIRQ.
+
+Name: watchdog/%u
+Purpose: Detect software lockups on each CPU.
+To reduce its OS jitter, do at least one of the following:
+1.	Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these
+	kthreads from being created in the first place.
+2.	Echo a zero to /proc/sys/kernel/watchdog to disable the
+	watchdog timer.
+3.	Echo a large number to /proc/sys/kernel/watchdog_thresh in
+	order to reduce the frequency of OS jitter due to the watchdog
+	timer down to a level that is acceptable for your workload.
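
As a worked companion to the tracing recipe in the document above, the whole
sequence can be run end to end. This is a minimal sketch only: it assumes
debugfs is mounted at /sys/kernel/debug, that CPU 3 is the CPU being
de-jittered, and that ./my_workload is a hypothetical stand-in for the real
application:

	cd /sys/kernel/debug/tracing
	echo 0 > tracing_on                  # pause tracing while configuring
	echo 3 > max_graph_depth             # more detail than the default of 1
	echo function_graph > current_tracer
	echo 1 > tracing_on
	taskset -c 3 ./my_workload           # run the workload pinned to CPU 3
	echo 0 > tracing_on
	cat per_cpu/cpu3/trace               # any kernel activity shown here is jitter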
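
The affinity-based advice (the irq/%d-%s entry, the networking and block
softirq items, and the watchdog section) likewise reduces to a handful of
procfs/sysfs writes. A sketch, assuming CPUs 0-2 are the housekeeping CPUs,
CPU 3 is to be de-jittered, and IRQ 42 is a hypothetical device interrupt:

	echo 7 > /proc/irq/42/smp_affinity        # mask 0x7 = CPUs 0-2 only
	echo 7 > /proc/irq/default_smp_affinity   # newly requested IRQs avoid CPU 3
	taskset -p -c 0-2 $$                      # keep this shell off CPU 3
	echo 0 > /sys/devices/system/cpu/cpu3/online   # offline/online once to
	echo 1 > /sys/devices/system/cpu/cpu3/online   # migrate recurring timers
	echo 0 > /proc/sys/kernel/watchdog        # or raise watchdog_thresh instead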
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 504dfe4d52eb..a66c9821b5ce 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -268,7 +268,7 @@ situations.
 System Power Management Phases
 ------------------------------
 Suspending or resuming the system is done in several phases. Different phases
-are used for standby or memory sleep states ("suspend-to-RAM") and the
+are used for freeze, standby, and memory sleep states ("suspend-to-RAM") and the
 hibernation state ("suspend-to-disk"). Each phase involves executing callbacks
 for every device before the next phase begins. Not all busses or classes
 support all these callbacks and not all drivers use all the callbacks. The
@@ -309,7 +309,8 @@ execute the corresponding method from dev->driver->pm instead if there is one.
 
 Entering System Suspend
 -----------------------
-When the system goes into the standby or memory sleep state, the phases are:
+When the system goes into the freeze, standby or memory sleep state,
+the phases are:
 
 	prepare, suspend, suspend_late, suspend_noirq.
 
@@ -368,7 +369,7 @@ the devices that were suspended.
 
 Leaving System Suspend
 ----------------------
-When resuming from standby or memory sleep, the phases are:
+When resuming from freeze, standby or memory sleep, the phases are:
 
 	resume_noirq, resume_early, resume, complete.
 
@@ -433,8 +434,8 @@ the system log.
 
 Entering Hibernation
 --------------------
-Hibernating the system is more complicated than putting it into the standby or
-memory sleep state, because it involves creating and saving a system image.
+Hibernating the system is more complicated than putting it into the other
+sleep states, because it involves creating and saving a system image.
 Therefore there are more phases for hibernation, with a different set of
 callbacks. These phases always run after tasks have been frozen and memory has
 been freed.
@@ -485,8 +486,8 @@ image forms an atomic snapshot of the system state.
 
 At this point the system image is saved, and the devices then need to be
 prepared for the upcoming system shutdown. This is much like suspending them
-before putting the system into the standby or memory sleep state, and the phases
-are similar.
+before putting the system into the freeze, standby or memory sleep state,
+and the phases are similar.
 
 	9. The prepare phase is discussed above.
 
diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt
index c537834af005..f1f0f59a7c47 100644
--- a/Documentation/power/interface.txt
+++ b/Documentation/power/interface.txt
@@ -7,8 +7,8 @@ running. The interface exists in /sys/power/ directory (assuming sysfs
 is mounted at /sys).
 
 /sys/power/state controls system power state. Reading from this file
-returns what states are supported, which is hard-coded to 'standby'
-(Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
+returns what states are supported, which is hard-coded to 'freeze',
+'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
 (Suspend-to-Disk).
 
 Writing to this file one of those strings causes the system to
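
A quick sanity check of the updated interface — a minimal sketch, assuming a
kernel built with all four states available (the output varies with platform
support):

	$ cat /sys/power/state
	freeze standby mem disk
	# echo freeze > /sys/power/state    # as root; returns after the next wake event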
diff --git a/Documentation/power/notifiers.txt b/Documentation/power/notifiers.txt
index c2a4a346c0d9..a81fa254303d 100644
--- a/Documentation/power/notifiers.txt
+++ b/Documentation/power/notifiers.txt
@@ -15,8 +15,10 @@ A suspend/hibernation notifier may be used for this purpose.
 The subsystems or drivers having such needs can register suspend notifiers that
 will be called upon the following events by the PM core:
 
-PM_HIBERNATION_PREPARE	The system is going to hibernate or suspend, tasks will
-	be frozen immediately.
+PM_HIBERNATION_PREPARE	The system is going to hibernate, tasks will be frozen
+	immediately. This is different from PM_SUSPEND_PREPARE
+	below because here we do additional work between notifiers
+	and drivers freezing.
 
 PM_POST_HIBERNATION	The system memory state has been restored from a
 	hibernation image or an error occurred during
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index 4416b28630df..442d43df9b25 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -2,12 +2,26 @@
 System Power Management States
 
 
-The kernel supports three power management states generically, though
-each is dependent on platform support code to implement the low-level
-details for each state. This file describes each state, what they are
+The kernel supports four power management states generically, though
+one is generic and the other three are dependent on platform support
+code to implement the low-level details for each state.
+This file describes each state, what they are
 commonly called, what ACPI state they map to, and what string to write
 to /sys/power/state to enter that state
 
+state:		Freeze / Low-Power Idle
+ACPI state:	S0
+String:		"freeze"
+
+This state is a generic, pure software, light-weight, low-power state.
+It allows more energy to be saved relative to idle by freezing user
+space and putting all I/O devices into low-power states (possibly
+lower-power than available at run time), such that the processors can
+spend more time in their idle states.
+This state can be used for platforms without Standby/Suspend-to-RAM
+support, or it can be used in addition to Suspend-to-RAM (memory sleep)
+to provide reduced resume latency.
+
 
 State:		Standby / Power-On Suspend
 ACPI State:	S1
@@ -22,9 +36,6 @@ We try to put devices in a low-power state equivalent to D1, which
 also offers low power savings, but low resume latency. Not all devices
 support D1, and those that don't are left on.
 
-A transition from Standby to the On state should take about 1-2
-seconds.
-
 
 State:		Suspend-to-RAM
 ACPI State:	S3
@@ -42,9 +53,6 @@ transition back to the On state.
 For at least ACPI, STR requires some minimal boot-strapping code to
 resume the system from STR. This may be true on other platforms.
 
-A transition from Suspend-to-RAM to the On state should take about
-3-5 seconds.
-
 
 State:		Suspend-to-disk
 ACPI State:	S4
@@ -74,7 +82,3 @@ low-power state (like ACPI S4), or it may simply power down. Powering
 down offers greater savings, and allows this mechanism to work on any
 system. However, entering a real low-power state allows the user to
 trigger wake up events (e.g. pressing a key or opening a laptop lid).
-
-A transition from Suspend-to-Disk to the On state should take about 30
-seconds, though it's typically a bit more with the current
-implementation.
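
For unattended testing of the new freeze state, rtcwake can arm an RTC alarm
before suspending. A hedged sketch, assuming a util-linux recent enough to
know the freeze mode and a functional RTC:

	# rtcwake -m freeze -s 30    # enter freeze, wake via RTC alarm after 30 s
	# rtcwake -m mem -s 30       # the same test against Suspend-to-RAM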
diff --git a/MAINTAINERS b/MAINTAINERS
index 3d7782b9f90d..3a7b07f513da 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7854,7 +7854,7 @@ L:	linux-scsi@vger.kernel.org
 L:	target-devel@vger.kernel.org
 L:	http://groups.google.com/group/linux-iscsi-target-dev
 W:	http://www.linux-iscsi.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core.git master
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
 S:	Supported
 F:	drivers/target/
 F:	include/target/
diff --git a/arch/Kconfig b/arch/Kconfig
index dd0e8eb8042f..a4429bcd609e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -213,6 +213,9 @@ config USE_GENERIC_SMP_HELPERS
 config GENERIC_SMP_IDLE_THREAD
 	bool
 
+config GENERIC_IDLE_POLL_SETUP
+	bool
+
 # Select if arch init_task initializer is different to init/init_task.c
 config ARCH_INIT_TASK
 	bool
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d423d58f938d..49d993cee512 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -38,6 +38,7 @@ config ARM
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_IDE if PCI || ISA || PCMCIA
+	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO
@@ -488,7 +489,7 @@ config ARCH_IXP4XX
 config ARCH_DOVE
 	bool "Marvell Dove"
 	select ARCH_REQUIRE_GPIOLIB
-	select CPU_V7
+	select CPU_PJ4
 	select GENERIC_CLOCKEVENTS
 	select MIGHT_HAVE_PCI
 	select PINCTRL
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 47374085befd..1ba358ba16b8 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -309,7 +309,7 @@ define archhelp
 	echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
 	echo  '* xipImage      - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)'
 	echo  '  uImage        - U-Boot wrapped zImage'
 	echo  '  bootpImage    - Combined zImage and initial RAM disk'
 	echo  '                  (supply initrd image via make variable INITRD=<path>)'
 	echo  '* dtbs          - Build device tree blobs for enabled boards'
 	echo  '  install       - Install uncompressed kernel'
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 52b88d81b7bb..3caed0db6986 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -15,8 +15,6 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>
 
-#include <linux/irqchip/arm-gic.h>
-
 #include <asm/mcpm.h>
 #include <asm/smp.h>
 #include <asm/smp_plat.h>
@@ -49,7 +47,6 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i
 static void __cpuinit mcpm_secondary_init(unsigned int cpu)
 {
 	mcpm_cpu_powered_up();
-	gic_secondary_init(0);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 7eb18c1d8d6c..4f009c10540d 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
 						atomic64_t,		\
 						counter),		\
-					      (unsigned long)(o),	\
-					      (unsigned long)(n)))
+					      (unsigned long long)(o),	\
+					      (unsigned long long)(n)))
 
 #define cmpxchg64_local(ptr, o, n)					\
 	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
 					     local64_t,			\
 					     a),			\
-					     (unsigned long)(o),	\
-					     (unsigned long)(n)))
+					     (unsigned long long)(o),	\
+					     (unsigned long long)(n)))
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index d30042e39974..13609e01f4b7 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -152,11 +152,12 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
 
-static int __init xen_secondary_init(unsigned int cpu)
+static void __init xen_percpu_init(void *unused)
 {
 	struct vcpu_register_vcpu_info info;
 	struct vcpu_info *vcpup;
 	int err;
+	int cpu = get_cpu();
 
 	pr_info("Xen: initializing cpu%d\n", cpu);
 	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
@@ -165,14 +166,10 @@ static int __init xen_secondary_init(unsigned int cpu)
 	info.offset = offset_in_page(vcpup);
 
 	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
-	if (err) {
-		pr_debug("register_vcpu_info failed: err=%d\n", err);
-	} else {
-		/* This cpu is using the registered vcpu info, even if
-		   later ones fail to. */
-		per_cpu(xen_vcpu, cpu) = vcpup;
-	}
-	return 0;
+	BUG_ON(err);
+	per_cpu(xen_vcpu, cpu) = vcpup;
+
+	enable_percpu_irq(xen_events_irq, 0);
 }
 
 static void xen_restart(char str, const char *cmd)
@@ -208,7 +205,6 @@ static int __init xen_guest_init(void)
 	const char *version = NULL;
 	const char *xen_prefix = "xen,xen-";
 	struct resource res;
-	int i;
 
 	node = of_find_compatible_node(NULL, NULL, "xen,xen");
 	if (!node) {
@@ -265,19 +261,23 @@ static int __init xen_guest_init(void)
 			       sizeof(struct vcpu_info));
 	if (xen_vcpu_info == NULL)
 		return -ENOMEM;
-	for_each_online_cpu(i)
-		xen_secondary_init(i);
 
 	gnttab_init();
 	if (!xen_initial_domain())
 		xenbus_probe(NULL);
 
+	return 0;
+}
+core_initcall(xen_guest_init);
+
+static int __init xen_pm_init(void)
+{
 	pm_power_off = xen_power_off;
 	arm_pm_restart = xen_restart;
 
 	return 0;
 }
-core_initcall(xen_guest_init);
+subsys_initcall(xen_pm_init);
 
 static irqreturn_t xen_arm_callback(int irq, void *arg)
 {
@@ -285,11 +285,6 @@ static irqreturn_t xen_arm_callback(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-static __init void xen_percpu_enable_events(void *unused)
-{
-	enable_percpu_irq(xen_events_irq, 0);
-}
-
 static int __init xen_init_events(void)
 {
 	if (!xen_domain() || xen_events_irq < 0)
@@ -303,7 +298,7 @@ static int __init xen_init_events(void)
 		return -EINVAL;
 	}
 
-	on_each_cpu(xen_percpu_enable_events, NULL, 0);
+	on_each_cpu(xen_percpu_init, NULL, 0);
 
 	return 0;
 }
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index cad060f288cf..6507dabdd5dd 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -245,7 +245,7 @@ config SMP
 
 config IRQSTACKS
 	bool "Use separate kernel stacks when processing interrupts"
-	default n
+	default y
 	help
 	  If you say Y here the kernel will use separate kernel stacks
 	  for handling hard and soft interrupts. This can help avoid
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 2f967cc6649e..197690068f88 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -23,24 +23,21 @@ NM		= sh $(srctree)/arch/parisc/nm
 CHECKFLAGS	+= -D__hppa__=1
 LIBGCC		= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
-MACHINE		:= $(shell uname -m)
-NATIVE		:= $(if $(filter parisc%,$(MACHINE)),1,0)
-
 ifdef CONFIG_64BIT
 UTS_MACHINE	:= parisc64
 CHECKFLAGS	+= -D__LP64__=1 -m64
-WIDTH		:= 64
+CC_ARCHES	= hppa64
 else # 32-bit
-WIDTH		:=
+CC_ARCHES	= hppa hppa2.0 hppa1.1
 endif
 
-# attempt to help out folks who are cross-compiling
-ifeq ($(NATIVE),1)
-CROSS_COMPILE	:= hppa$(WIDTH)-linux-
-else
- ifeq ($(CROSS_COMPILE),)
- CROSS_COMPILE	:= hppa$(WIDTH)-linux-gnu-
- endif
+ifneq ($(SUBARCH),$(UTS_MACHINE))
+	ifeq ($(CROSS_COMPILE),)
+		CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
+		CROSS_COMPILE := $(call cc-cross-prefix, \
+			$(foreach a,$(CC_ARCHES), \
+			$(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
+	endif
 endif
 
 OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
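
With this change a parisc cross-build probes for an installed toolchain via
cc-cross-prefix instead of hard-coding hppa$(WIDTH)-linux-. A sketch of both
usages (the toolchain prefixes are examples of what the probe can find, not
requirements):

	make ARCH=parisc defconfig      # CROSS_COMPILE detected automatically,
	make ARCH=parisc vmlinux        # e.g. hppa-linux-gnu- or hppa64-unknown-linux-gnu-
	make ARCH=parisc CROSS_COMPILE=hppa-linux-gnu- vmlinux   # explicit override still wins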
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index 12373c4dabab..c19f7138ba48 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -11,10 +11,18 @@
 #include <linux/threads.h>
 #include <linux/irq.h>
 
+#ifdef CONFIG_IRQSTACKS
+#define __ARCH_HAS_DO_SOFTIRQ
+#endif
+
 typedef struct {
 	unsigned int __softirq_pending;
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	unsigned int kernel_stack_usage;
+#ifdef CONFIG_IRQSTACKS
+	unsigned int irq_stack_usage;
+	unsigned int irq_stack_counter;
+#endif
 #endif
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
@@ -28,6 +36,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 #define __ARCH_IRQ_STAT
 #define __IRQ_STAT(cpu, member)	(irq_stat[cpu].member)
 #define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
+#define __inc_irq_stat(member)	__this_cpu_inc(irq_stat.member)
 #define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)
 
 #define __ARCH_SET_SOFTIRQ_PENDING
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 064015547d1e..cfbc43929cf6 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -63,10 +63,13 @@
  */
 #ifdef __KERNEL__
 
+#include <linux/spinlock_types.h>
+
 #define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
 
 union irq_stack_union {
 	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+	raw_spinlock_t lock;
 };
 
 DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 4bb96ad9b0b1..ae27cb6ce19a 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -452,9 +452,41 @@
 	L2_ptep		\pgd,\pte,\index,\va,\fault
 	.endm
 
+	/* Acquire pa_dbit_lock lock. */
+	.macro		dbit_lock	spc,tmp,tmp1
+#ifdef CONFIG_SMP
+	cmpib,COND(=),n	0,\spc,2f
+	load32		PA(pa_dbit_lock),\tmp
+1:	LDCW		0(\tmp),\tmp1
+	cmpib,COND(=)	0,\tmp1,1b
+	nop
+2:
+#endif
+	.endm
+
+	/* Release pa_dbit_lock lock without reloading lock address. */
+	.macro		dbit_unlock0	spc,tmp
+#ifdef CONFIG_SMP
+	or,COND(=)	%r0,\spc,%r0
+	stw		\spc,0(\tmp)
+#endif
+	.endm
+
+	/* Release pa_dbit_lock lock. */
+	.macro		dbit_unlock1	spc,tmp
+#ifdef CONFIG_SMP
+	load32		PA(pa_dbit_lock),\tmp
+	dbit_unlock0	\spc,\tmp
+#endif
+	.endm
+
 	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
 	 * don't needlessly dirty the cache line if it was already set */
-	.macro		update_ptep	ptep,pte,tmp,tmp1
+	.macro		update_ptep	spc,ptep,pte,tmp,tmp1
+#ifdef CONFIG_SMP
+	or,COND(=)	%r0,\spc,%r0
+	LDREG		0(\ptep),\pte
+#endif
 	ldi		_PAGE_ACCESSED,\tmp1
 	or		\tmp1,\pte,\tmp
 	and,COND(<>)	\tmp1,\pte,%r0
@@ -463,7 +495,11 @@
 
 	/* Set the dirty bit (and accessed bit).  No need to be
 	 * clever, this is only used from the dirty fault */
-	.macro		update_dirty	ptep,pte,tmp
+	.macro		update_dirty	spc,ptep,pte,tmp
+#ifdef CONFIG_SMP
+	or,COND(=)	%r0,\spc,%r0
+	LDREG		0(\ptep),\pte
+#endif
 	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
 	or		\tmp,\pte,\pte
 	STREG		\pte,0(\ptep)
@@ -1111,11 +1147,13 @@ dtlb_miss_20w:
 
 	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w
 
-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot
 
 	idtlbt		pte,prot
+	dbit_unlock1	spc,t0
 
 	rfir
 	nop
@@ -1135,11 +1173,13 @@ nadtlb_miss_20w:
 
 	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w
 
-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot
 
 	idtlbt		pte,prot
+	dbit_unlock1	spc,t0
 
 	rfir
 	nop
@@ -1161,7 +1201,8 @@ dtlb_miss_11:
 
 	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11
 
-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
 
@@ -1172,6 +1213,7 @@ dtlb_miss_11:
 	idtlbp		prot,(%sr1,va)
 
 	mtsp		t0, %sr1	/* Restore sr1 */
+	dbit_unlock1	spc,t0
 
 	rfir
 	nop
@@ -1192,7 +1234,8 @@ nadtlb_miss_11:
 
 	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11
 
-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
 
@@ -1204,6 +1247,7 @@ nadtlb_miss_11:
 	idtlbp		prot,(%sr1,va)
 
 	mtsp		t0, %sr1	/* Restore sr1 */
+	dbit_unlock1	spc,t0
 
 	rfir
 	nop
@@ -1224,13 +1268,15 @@ dtlb_miss_20:
 
 	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20
 
-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot
 
	f_extend	pte,t0
 
 	idtlbt		pte,prot
+	dbit_unlock1	spc,t0
 
 	rfir
 	nop
@@ -1250,13 +1296,15 @@ nadtlb_miss_20:
 
 	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20
 
-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot
 
 	f_extend	pte,t0
 
 	idtlbt		pte,prot
+	dbit_unlock1	spc,t0
 
 	rfir
 	nop
@@ -1357,11 +1405,13 @@ itlb_miss_20w:
 
 	L3_ptep		ptp,pte,t0,va,itlb_fault
 
-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot
 
 	iitlbt		pte,prot
+	dbit_unlock1	spc,t0
 
 	rfir
 	nop
@@ -1379,11 +1429,13 @@ naitlb_miss_20w:
 
 	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w
 
-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot
 
 	iitlbt		pte,prot
+	dbit_unlock1	spc,t0
 
 	rfir
 	nop
@@ -1405,7 +1457,8 @@ itlb_miss_11:
 
 	L2_ptep		ptp,pte,t0,va,itlb_fault
 
-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
 
@@ -1416,6 +1469,7 @@ itlb_miss_11:
 	iitlbp		prot,(%sr1,va)
 
 	mtsp		t0, %sr1	/* Restore sr1 */
+	dbit_unlock1	spc,t0
 
 	rfir
 	nop
@@ -1427,7 +1481,8 @@ naitlb_miss_11:
 
 	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11
 
-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1
 
 	make_insert_tlb_11	spc,pte,prot
 
@@ -1438,6 +1493,7 @@ naitlb_miss_11:
 	iitlbp		prot,(%sr1,va)
 
 	mtsp		t0, %sr1	/* Restore sr1 */
+	dbit_unlock1	spc,t0
 
 	rfir
 	nop
@@ -1459,13 +1515,15 @@ itlb_miss_20:
 
 	L2_ptep		ptp,pte,t0,va,itlb_fault
 
-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot
 
 	f_extend	pte,t0
 
 	iitlbt		pte,prot
+	dbit_unlock1	spc,t0
 
 	rfir
 	nop
@@ -1477,13 +1535,15 @@ naitlb_miss_20:
 
 	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20
 
-	update_ptep	ptp,pte,t0,t1
+	dbit_lock	spc,t0,t1
+	update_ptep	spc,ptp,pte,t0,t1
 
 	make_insert_tlb	spc,pte,prot
 
 	f_extend	pte,t0
 
 	iitlbt		pte,prot
+	dbit_unlock1	spc,t0
 
 	rfir
 	nop
@@ -1507,29 +1567,13 @@ dbit_trap_20w:
 
 	L3_ptep		ptp,pte,t0,va,dbit_fault
 
-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,spc,dbit_nolock_20w
-	load32		PA(pa_dbit_lock),t0
-
-dbit_spin_20w:
-	LDCW		0(t0),t1
-	cmpib,COND(=)	0,t1,dbit_spin_20w
-	nop
-
-dbit_nolock_20w:
-#endif
-	update_dirty	ptp,pte,t1
+	dbit_lock	spc,t0,t1
+	update_dirty	spc,ptp,pte,t1
 
 	make_insert_tlb	spc,pte,prot
 
 	idtlbt		pte,prot
-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,spc,dbit_nounlock_20w
-	ldi		1,t1
-	stw		t1,0(t0)
-
-dbit_nounlock_20w:
-#endif
+	dbit_unlock0	spc,t0
 
 	rfir
 	nop
@@ -1543,18 +1587,8 @@ dbit_trap_11:
 
 	L2_ptep		ptp,pte,t0,va,dbit_fault
 
-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,spc,dbit_nolock_11
-	load32		PA(pa_dbit_lock),t0
-
-dbit_spin_11:
-	LDCW		0(t0),t1
-	cmpib,=		0,t1,dbit_spin_11
-	nop
-
-dbit_nolock_11:
-#endif
-	update_dirty	ptp,pte,t1
+	dbit_lock	spc,t0,t1
+	update_dirty	spc,ptp,pte,t1
 
 	make_insert_tlb_11	spc,pte,prot
 
@@ -1565,13 +1599,7 @@ dbit_nolock_11:
 	idtlbp		prot,(%sr1,va)
 
 	mtsp		t1, %sr1	/* Restore sr1 */
-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,spc,dbit_nounlock_11
-	ldi		1,t1
-	stw		t1,0(t0)
-
-dbit_nounlock_11:
-#endif
+	dbit_unlock0	spc,t0
 
 	rfir
 	nop
@@ -1583,32 +1611,15 @@ dbit_trap_20:
 
 	L2_ptep		ptp,pte,t0,va,dbit_fault
 
-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,spc,dbit_nolock_20
-	load32		PA(pa_dbit_lock),t0
-
-dbit_spin_20:
-	LDCW		0(t0),t1
-	cmpib,=		0,t1,dbit_spin_20
-	nop
-
-dbit_nolock_20:
-#endif
-	update_dirty	ptp,pte,t1
+	dbit_lock	spc,t0,t1
+	update_dirty	spc,ptp,pte,t1
 
 	make_insert_tlb	spc,pte,prot
 
 	f_extend	pte,t1
 
 	idtlbt		pte,prot
-
-#ifdef CONFIG_SMP
-	cmpib,COND(=),n	0,spc,dbit_nounlock_20
-	ldi		1,t1
-	stw		t1,0(t0)
-
-dbit_nounlock_20:
-#endif
+	dbit_unlock0	spc,t0
 
 	rfir
 	nop
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index e255db0bb761..55237a70e197 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -166,22 +166,32 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "%*s: ", prec, "STK");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
-	seq_printf(p, " Kernel stack usage\n");
+	seq_puts(p, " Kernel stack usage\n");
+# ifdef CONFIG_IRQSTACKS
+	seq_printf(p, "%*s: ", prec, "IST");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
+	seq_puts(p, " Interrupt stack usage\n");
+	seq_printf(p, "%*s: ", prec, "ISC");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter);
+	seq_puts(p, " Interrupt stack usage counter\n");
+# endif
 #endif
 #ifdef CONFIG_SMP
 	seq_printf(p, "%*s: ", prec, "RES");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
-	seq_printf(p, " Rescheduling interrupts\n");
+	seq_puts(p, " Rescheduling interrupts\n");
 	seq_printf(p, "%*s: ", prec, "CAL");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
-	seq_printf(p, " Function call interrupts\n");
+	seq_puts(p, " Function call interrupts\n");
 #endif
 	seq_printf(p, "%*s: ", prec, "TLB");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
-	seq_printf(p, " TLB shootdowns\n");
+	seq_puts(p, " TLB shootdowns\n");
 	return 0;
 }
 
@@ -378,6 +388,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 	unsigned long sp = regs->gr[30];
 	unsigned long stack_usage;
 	unsigned int *last_usage;
+	int cpu = smp_processor_id();
 
 	/* if sr7 != 0, we interrupted a userspace process which we do not want
 	 * to check for stack overflow. We will only check the kernel stack. */
@@ -386,7 +397,31 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 
 	/* calculate kernel stack usage */
 	stack_usage = sp - stack_start;
-	last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
+#ifdef CONFIG_IRQSTACKS
+	if (likely(stack_usage <= THREAD_SIZE))
+		goto check_kernel_stack; /* found kernel stack */
+
+	/* check irq stack usage */
+	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
+	stack_usage = sp - stack_start;
+
+	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
+	if (unlikely(stack_usage > *last_usage))
+		*last_usage = stack_usage;
+
+	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
+		return;
+
+	pr_emerg("stackcheck: %s will most likely overflow irq stack "
+		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+		current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
+	goto panic_check;
+
+check_kernel_stack:
+#endif
+
+	/* check kernel stack usage */
+	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
 
 	if (unlikely(stack_usage > *last_usage))
 		*last_usage = stack_usage;
@@ -398,31 +433,69 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 		"(sp:%lx, stk bottom-top:%lx-%lx)\n",
 		current->comm, sp, stack_start, stack_start + THREAD_SIZE);
 
+#ifdef CONFIG_IRQSTACKS
+panic_check:
+#endif
 	if (sysctl_panic_on_stackoverflow)
 		panic("low stack detected by irq handler - check messages\n");
 #endif
 }
 
 #ifdef CONFIG_IRQSTACKS
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union);
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+		.lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock)
+	};
 
 static void execute_on_irq_stack(void *func, unsigned long param1)
 {
-	unsigned long *irq_stack_start;
+	union irq_stack_union *union_ptr;
 	unsigned long irq_stack;
-	int cpu = smp_processor_id();
+	raw_spinlock_t *irq_stack_in_use;
 
-	irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0];
-	irq_stack = (unsigned long) irq_stack_start;
-	irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */
+	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
+	irq_stack = (unsigned long) &union_ptr->stack;
+	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock),
+			64); /* align for stack frame usage */
 
-	BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */
-	*irq_stack_start = 1;
+	/* We may be called recursive. If we are already using the irq stack,
+	 * just continue to use it. Use spinlocks to serialize
+	 * the irq stack usage.
+	 */
+	irq_stack_in_use = &union_ptr->lock;
+	if (!raw_spin_trylock(irq_stack_in_use)) {
+		void (*direct_call)(unsigned long p1) = func;
+
+		/* We are using the IRQ stack already.
+		 * Do direct call on current stack. */
+		direct_call(param1);
+		return;
+	}
 
 	/* This is where we switch to the IRQ stack. */
 	call_on_stack(param1, func, irq_stack);
 
-	*irq_stack_start = 0;
+	__inc_irq_stat(irq_stack_counter);
+
+	/* free up irq stack usage. */
+	do_raw_spin_unlock(irq_stack_in_use);
+}
+
+asmlinkage void do_softirq(void)
+{
+	__u32 pending;
+	unsigned long flags;
+
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+
+	pending = local_softirq_pending();
+
+	if (pending)
+		execute_on_irq_stack(__do_softirq, 0);
+
+	local_irq_restore(flags);
 }
 #endif /* CONFIG_IRQSTACKS */
 
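
The new execute_on_irq_stack() above replaces the old BUG_ON() recursion check with a try-lock: if the per-CPU IRQ stack is already in use, the handler simply runs on the current stack instead of panicking. A minimal userspace sketch of that guard pattern, with hypothetical names, a C11 atomic flag standing in for the per-CPU raw spinlock, and the actual stack switch elided:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the per-CPU IRQ stack guard. */
    static atomic_flag irq_stack_in_use = ATOMIC_FLAG_INIT;

    static void handler(unsigned long arg)
    {
        printf("handling %lu\n", arg);
    }

    static void execute_on_irq_stack(void (*func)(unsigned long),
                                     unsigned long arg)
    {
        if (atomic_flag_test_and_set(&irq_stack_in_use)) {
            /* Already on the alternate stack (recursive entry):
             * just run the handler on the current stack. */
            func(arg);
            return;
        }
        /* First entry: the kernel would switch stacks here via
         * call_on_stack(); the sketch just runs the handler. */
        func(arg);
        atomic_flag_clear(&irq_stack_in_use);
    }

    int main(void)
    {
        execute_on_irq_stack(handler, 1);
        return 0;
    }

Tolerating recursion this way makes a nested entry (an interrupt arriving while a softirq already runs on the IRQ stack) survivable rather than fatal.
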
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ce939ac8622b..1c965642068b 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -1069,7 +1069,7 @@ void flush_tlb_all(void)
 {
 	int do_recycle;
 
-	inc_irq_stat(irq_tlb_count);
+	__inc_irq_stat(irq_tlb_count);
 	do_recycle = 0;
 	spin_lock(&sid_lock);
 	if (dirty_space_ids > RECYCLE_THRESHOLD) {
@@ -1090,7 +1090,7 @@ void flush_tlb_all(void)
 #else
 void flush_tlb_all(void)
 {
-	inc_irq_stat(irq_tlb_count);
+	__inc_irq_stat(irq_tlb_count);
 	spin_lock(&sid_lock);
 	flush_tlb_all_local(NULL);
 	recycle_sids();
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 5416e28a7538..863d877e0b5f 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -262,8 +262,31 @@ config PPC_EARLY_DEBUG_OPAL_HVSI
 	  Select this to enable early debugging for the PowerNV platform
 	  using an "hvsi" console
 
+config PPC_EARLY_DEBUG_MEMCONS
+	bool "In memory console"
+	help
+	  Select this to enable early debugging using an in memory console.
+	  This console provides input and output buffers stored within the
+	  kernel BSS and should be safe to select on any system. A debugger
+	  can then be used to read kernel output or send input to the console.
 endchoice
 
+config PPC_MEMCONS_OUTPUT_SIZE
+	int "In memory console output buffer size"
+	depends on PPC_EARLY_DEBUG_MEMCONS
+	default 4096
+	help
+	  Selects the size of the output buffer (in bytes) of the in memory
+	  console.
+
+config PPC_MEMCONS_INPUT_SIZE
+	int "In memory console input buffer size"
+	depends on PPC_EARLY_DEBUG_MEMCONS
+	default 128
+	help
+	  Selects the size of the input buffer (in bytes) of the in memory
+	  console.
+
 config PPC_EARLY_DEBUG_OPAL
 	def_bool y
 	depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI
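
The option added above keeps console I/O in plain kernel BSS buffers that a debugger can read out of target memory. Conceptually, an in-memory console is just a fixed array plus a write cursor; a hedged sketch of that idea (hypothetical layout, not the actual udbg_memcons implementation added later in this series):

    #include <stdio.h>

    /* Hypothetical memcons layout: a debugger reading target memory
     * would locate `memcons` and dump buf[0..pos]. The size mirrors
     * the Kconfig default above but is otherwise arbitrary. */
    #define MEMCONS_OUTPUT_SIZE 4096

    static struct {
        char buf[MEMCONS_OUTPUT_SIZE];
        unsigned int pos;   /* next write offset, wraps around */
    } memcons;

    static void memcons_putc(char c)
    {
        memcons.buf[memcons.pos] = c;
        memcons.pos = (memcons.pos + 1) % MEMCONS_OUTPUT_SIZE;
    }

    static void memcons_write(const char *s)
    {
        while (*s)
            memcons_putc(*s++);
    }

    int main(void)
    {
        memcons_write("early boot message\n");
        printf("%.*s", (int)memcons.pos, memcons.buf); /* debugger's view */
        return 0;
    }
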
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h
new file mode 100644
index 000000000000..b6f5a33b8ee2
--- /dev/null
+++ b/arch/powerpc/include/asm/context_tracking.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
+#define _ASM_POWERPC_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+#define SCHEDULE_USER bl	.schedule_user
+#else
+#define SCHEDULE_USER bl	.schedule
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 0df54646f968..681bc0314b6b 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -52,6 +52,7 @@
 #define FW_FEATURE_BEST_ENERGY	ASM_CONST(0x0000000080000000)
 #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
 #define FW_FEATURE_PRRN		ASM_CONST(0x0000000200000000)
+#define FW_FEATURE_OPALv3	ASM_CONST(0x0000000400000000)
 
 #ifndef __ASSEMBLY__
 
@@ -69,7 +70,8 @@ enum {
 		FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
 		FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
 	FW_FEATURE_PSERIES_ALWAYS = 0,
-	FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
+	FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
+		FW_FEATURE_OPALv3,
 	FW_FEATURE_POWERNV_ALWAYS = 0,
 	FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
 	FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index d615b28dda82..ba713f166fa5 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -96,11 +96,12 @@ static inline bool arch_irqs_disabled(void)
 #endif
 
 #define hard_irq_disable()	do {			\
+	u8 _was_enabled = get_paca()->soft_enabled;	\
 	__hard_irq_disable();				\
-	if (local_paca->soft_enabled)			\
-		trace_hardirqs_off();			\
 	get_paca()->soft_enabled = 0;			\
 	get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;	\
+	if (_was_enabled)				\
+		trace_hardirqs_off();			\
 } while(0)
 
 static inline bool lazy_irq_pending(void)
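
The reordering above snapshots the soft-enable state first and only calls the tracer after all of the IRQ bookkeeping is updated, so a tracer that itself inspects the IRQ state sees a consistent picture. Sketched generically (hypothetical names, not the paca API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for paca state and the tracing hook. */
    static bool soft_enabled = true;

    static void trace_hardirqs_off(void)
    {
        /* By the time the tracer runs, soft_enabled is already 0,
         * matching the real machine state. */
        printf("tracing: irqs now off (soft_enabled=%d)\n", soft_enabled);
    }

    static void hard_irq_disable(void)
    {
        bool was_enabled = soft_enabled;  /* snapshot first */

        /* __hard_irq_disable() would go here. */
        soft_enabled = false;             /* update state ... */
        if (was_enabled)                  /* ... then trace once */
            trace_hardirqs_off();
    }

    int main(void)
    {
        hard_irq_disable();
        hard_irq_disable();  /* second call: no duplicate trace */
        return 0;
    }
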
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index b6c8b58b1d76..cbb9305ab15a 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -243,7 +243,8 @@ enum OpalMCE_TlbErrorType {
 
 enum OpalThreadStatus {
 	OPAL_THREAD_INACTIVE = 0x0,
-	OPAL_THREAD_STARTED = 0x1
+	OPAL_THREAD_STARTED = 0x1,
+	OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
 };
 
 enum OpalPciBusCompare {
@@ -563,6 +564,8 @@ extern void opal_nvram_init(void);
 
 extern int opal_machine_check(struct pt_regs *regs);
 
+extern void opal_shutdown(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __OPAL_H */
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 91acb12bac92..b66ae722a8e9 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -186,7 +186,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 
 static inline pgtable_t pmd_pgtable(pmd_t pmd)
 {
-	return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE);
+	return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 3e13e23e4fdf..d836d945068d 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,7 @@
  * generic accessors and iterators here
  */
 #define __real_pte(e,p) 	((real_pte_t) { \
-	(e), ((e) & _PAGE_COMBO) ? \
+	(e), (pte_val(e) & _PAGE_COMBO) ? \
 	(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
 #define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
 	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index a8bc2bb4adc9..34fd70488d83 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -264,6 +264,8 @@ extern void rtas_progress(char *s, unsigned short hex);
 extern void rtas_initialize(void);
 extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
 extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
+extern int rtas_online_cpus_mask(cpumask_var_t cpus);
+extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
 extern int rtas_ibm_suspend_me(struct rtas_args *);
 
 struct rtc_time;
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 8ceea14d6fe4..ba7b1973866e 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_PERFMON_CTXSW	6	/* perfmon needs ctxsw calls */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SINGLESTEP		8	/* singlestepping active */
-#define TIF_MEMDIE		9	/* is terminating due to OOM killer */
+#define TIF_NOHZ		9	/* in adaptive nohz mode */
 #define TIF_SECCOMP		10	/* secure computing */
 #define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR		12	/* Force successful syscall return */
@@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
 #define TIF_EMULATE_STACK_STORE	16	/* Is an instruction emulation
 						for stack store? */
+#define TIF_MEMDIE		17	/* is terminating due to OOM killer */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -124,8 +125,10 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_UPROBE		(1<<TIF_UPROBE)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_EMULATE_STACK_STORE	(1<<TIF_EMULATE_STACK_STORE)
+#define _TIF_NOHZ		(1<<TIF_NOHZ)
 #define _TIF_SYSCALL_T_OR_A	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
+				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+				 _TIF_NOHZ)
 
 #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 5a7510e9d09d..dc590919f8eb 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -52,6 +52,7 @@ extern void __init udbg_init_40x_realmode(void);
 extern void __init udbg_init_cpm(void);
 extern void __init udbg_init_usbgecko(void);
 extern void __init udbg_init_wsp(void);
+extern void __init udbg_init_memcons(void);
 extern void __init udbg_init_ehv_bc(void);
 extern void __init udbg_init_ps3gelic(void);
 extern void __init udbg_init_debug_opal_raw(void);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index e514de57a125..d22e73e4618b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -439,8 +439,6 @@ ret_from_fork:
 ret_from_kernel_thread:
 	REST_NVGPRS(r1)
 	bl	schedule_tail
-	li	r3,0
-	stw	r3,0(r1)
 	mtlr	r14
 	mr	r3,r15
 	PPC440EP_ERR42
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 915fbb4fc2fe..51cfb8fc301f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -33,6 +33,7 @@
 #include <asm/irqflags.h>
 #include <asm/ftrace.h>
 #include <asm/hw_irq.h>
+#include <asm/context_tracking.h>
 
 /*
  * System calls.
@@ -376,8 +377,6 @@ _GLOBAL(ret_from_fork)
 _GLOBAL(ret_from_kernel_thread)
 	bl	.schedule_tail
 	REST_NVGPRS(r1)
-	li	r3,0
-	std	r3,0(r1)
 	ld	r14, 0(r14)
 	mtlr	r14
 	mr	r3,r15
@@ -634,7 +633,7 @@ _GLOBAL(ret_from_except_lite)
 	andi.	r0,r4,_TIF_NEED_RESCHED
 	beq	1f
 	bl	.restore_interrupts
-	bl	.schedule
+	SCHEDULE_USER
 	b	.ret_from_except_lite
 
1:	bl	.save_nvgprs
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 42a756eec9ff..645170a07ada 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -489,7 +489,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	 */
 
 	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
-	andis.	r15,r14,DBSR_IC@h
+	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 	beq+	1f
 
 	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
@@ -500,7 +500,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	bge+	cr1,1f
 
 	/* here it looks like we got an inappropriate debug exception. */
-	lis	r14,DBSR_IC@h		/* clear the IC event */
+	lis	r14,(DBSR_IC|DBSR_BT)@h	/* clear the event */
 	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the CSRR1 value */
 	mtspr	SPRN_DBSR,r14
 	mtspr	SPRN_CSRR1,r11
@@ -555,7 +555,7 @@ kernel_dbg_exc:
 	 */
 
 	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
-	andis.	r15,r14,DBSR_IC@h
+	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 	beq+	1f
 
 	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
@@ -566,7 +566,7 @@ kernel_dbg_exc:
 	bge+	cr1,1f
 
 	/* here it looks like we got an inappropriate debug exception. */
-	lis	r14,DBSR_IC@h		/* clear the IC event */
+	lis	r14,(DBSR_IC|DBSR_BT)@h	/* clear the event */
 	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the DSRR1 value */
 	mtspr	SPRN_DBSR,r14
 	mtspr	SPRN_DSRR1,r11
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 466a2908bb63..611acdf30096 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/cpu.h>
+#include <linux/hardirq.h>
 
 #include <asm/page.h>
 #include <asm/current.h>
@@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage *image)
 	pr_debug("kexec: Starting switchover sequence.\n");
 
 	/* switch to a staticly allocated stack.  Based on irq stack code.
+	 * We setup preempt_count to avoid using VMX in memcpy.
 	 * XXX: the task struct will likely be invalid once we do the copy!
 	 */
 	kexec_stack.thread_info.task = current_thread_info()->task;
 	kexec_stack.thread_info.flags = 0;
+	kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
+	kexec_stack.thread_info.cpu = current_thread_info()->cpu;
 
 	/* We need a static PACA, too; copy this CPU's PACA over and switch to
 	 * it. Also poison per_cpu_offset to catch anyone using non-static
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 19e096bd0e73..e469f30e6eeb 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -657,6 +657,17 @@ _GLOBAL(__ucmpdi2)
 	li	r3,2
 	blr
 
+_GLOBAL(__bswapdi2)
+	rotlwi	r9,r4,8
+	rotlwi	r10,r3,8
+	rlwimi	r9,r4,24,0,7
+	rlwimi	r10,r3,24,0,7
+	rlwimi	r9,r4,24,16,23
+	rlwimi	r10,r3,24,16,23
+	mr	r3,r9
+	mr	r4,r10
+	blr
+
 _GLOBAL(abs)
 	srawi	r4,r3,31
 	xor	r3,r3,r4
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 5cfa8008693b..6820e45f557b 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -234,6 +234,17 @@ _GLOBAL(__flush_dcache_icache)
 	isync
 	blr
 
+_GLOBAL(__bswapdi2)
+	srdi	r8,r3,32
+	rlwinm	r7,r3,8,0xffffffff
+	rlwimi	r7,r3,24,0,7
+	rlwinm	r9,r8,8,0xffffffff
+	rlwimi	r7,r3,24,16,23
+	rlwimi	r9,r8,24,0,7
+	rlwimi	r9,r8,24,16,23
+	sldi	r7,r7,32
+	or	r3,r7,r9
+	blr
 
 #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
 /*
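
For reference, __bswapdi2 is the libgcc helper the compiler may emit for 64-bit byte swaps; the assembly added above computes the same result as this portable C sketch (illustrative only, not the kernel implementation):

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* Portable equivalent of __bswapdi2: reverse the byte order
     * of a 64-bit value. */
    static uint64_t bswapdi2(uint64_t v)
    {
        return  ((v & 0x00000000000000ffULL) << 56) |
                ((v & 0x000000000000ff00ULL) << 40) |
                ((v & 0x0000000000ff0000ULL) << 24) |
                ((v & 0x00000000ff000000ULL) <<  8) |
                ((v & 0x000000ff00000000ULL) >>  8) |
                ((v & 0x0000ff0000000000ULL) >> 24) |
                ((v & 0x00ff000000000000ULL) >> 40) |
                ((v & 0xff00000000000000ULL) >> 56);
    }

    int main(void)
    {
        /* prints efcdab8967452301 */
        printf("%" PRIx64 "\n", bswapdi2(0x0123456789abcdefULL));
        return 0;
    }

On 32-bit (misc_32.S above) the same operation is done as two 32-bit swaps with the register halves exchanged, since the 64-bit value arrives in a register pair.
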
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index f5c5c90799a7..6053f037ef0a 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -359,7 +359,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
 					  enum pci_mmap_state mmap_state,
 					  int write_combine)
 {
-	unsigned long prot = pgprot_val(protection);
 
 	/* Write combine is always 0 on non-memory space mappings. On
 	 * memory space, if the user didn't pass 1, we check for a
@@ -376,9 +375,9 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
 
 	/* XXX would be nice to have a way to ask for write-through */
 	if (write_combine)
-		return pgprot_noncached_wc(prot);
+		return pgprot_noncached_wc(protection);
 	else
-		return pgprot_noncached(prot);
+		return pgprot_noncached(protection);
 }
 
 /*
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 78b8766fd79e..c29666586998 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -143,7 +143,8 @@ EXPORT_SYMBOL(__lshrdi3);
 int __ucmpdi2(unsigned long long, unsigned long long);
 EXPORT_SYMBOL(__ucmpdi2);
 #endif
-
+long long __bswapdi2(long long);
+EXPORT_SYMBOL(__bswapdi2);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memmove);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ceb4e7b62cf4..a902723fdc69 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -339,6 +339,13 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 
 static void prime_debug_regs(struct thread_struct *thread)
 {
+	/*
+	 * We could have inherited MSR_DE from userspace, since
+	 * it doesn't get cleared on exception entry.  Make sure
+	 * MSR_DE is clear before we enable any debug events.
+	 */
+	mtmsr(mfmsr() & ~MSR_DE);
+
 	mtspr(SPRN_IAC1, thread->iac1);
 	mtspr(SPRN_IAC2, thread->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
@@ -971,6 +978,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	 * do some house keeping and then return from the fork or clone
 	 * system call, using the stack frame created above.
 	 */
+	((unsigned long *)sp)[0] = 0;
 	sp -= sizeof(struct pt_regs);
 	kregs = (struct pt_regs *) sp;
 	sp -= STACK_FRAME_OVERHEAD;
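
The `((unsigned long *)sp)[0] = 0;` store added above terminates the new thread's stack frame back-chain: on powerpc the first word of a frame points at the caller's frame, and a zero there tells unwinders to stop. A hedged userspace illustration of such a walk, using a hypothetical frame layout:

    #include <stdio.h>

    /* Hypothetical frame record: the first word points back to the
     * caller's frame, loosely mirroring the powerpc back-chain. */
    struct frame {
        struct frame *back;   /* back-chain; NULL terminates the walk */
        unsigned long lr;     /* saved return address (illustrative) */
    };

    static void backtrace(struct frame *fp)
    {
        while (fp) {          /* a zeroed back-chain word stops us here */
            printf("frame at %p, lr=%#lx\n", (void *)fp, fp->lr);
            fp = fp->back;
        }
    }

    int main(void)
    {
        struct frame f0 = { NULL, 0x1000 };  /* root frame: back-chain 0 */
        struct frame f1 = { &f0, 0x2000 };

        backtrace(&f1);
        return 0;
    }
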
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 3b14d320e69f..98c2fc198712 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -32,6 +32,7 @@
 #include <trace/syscall.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/perf_event.h>
+#include <linux/context_tracking.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -1788,6 +1789,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 {
 	long ret = 0;
 
+	user_exit();
+
 	secure_computing_strict(regs->gpr[0]);
 
 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
@@ -1832,4 +1835,6 @@ void do_syscall_trace_leave(struct pt_regs *regs)
 	step = test_thread_flag(TIF_SINGLESTEP);
 	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(regs, step);
+
+	user_enter();
 }
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 1fd6e7b2f390..52add6f3e201 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/capability.h>
 #include <linux/delay.h>
+#include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
@@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info)
 	__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
 }
 
+enum rtas_cpu_state {
+	DOWN,
+	UP,
+};
+
+#ifndef CONFIG_SMP
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+				cpumask_var_t cpus)
+{
+	if (!cpumask_empty(cpus)) {
+		cpumask_clear(cpus);
+		return -EINVAL;
+	} else
+		return 0;
+}
+#else
+/* On return cpumask will be altered to indicate CPUs changed.
+ * CPUs with states changed will be set in the mask,
+ * CPUs with status unchanged will be unset in the mask. */
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+				cpumask_var_t cpus)
+{
+	int cpu;
+	int cpuret = 0;
+	int ret = 0;
+
+	if (cpumask_empty(cpus))
+		return 0;
+
+	for_each_cpu(cpu, cpus) {
+		switch (state) {
+		case DOWN:
+			cpuret = cpu_down(cpu);
+			break;
+		case UP:
+			cpuret = cpu_up(cpu);
+			break;
+		}
+		if (cpuret) {
+			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+					__func__,
+					((state == UP) ? "up" : "down"),
+					cpu, cpuret);
+			if (!ret)
+				ret = cpuret;
+			if (state == UP) {
+				/* clear bits for unchanged cpus, return */
+				cpumask_shift_right(cpus, cpus, cpu);
+				cpumask_shift_left(cpus, cpus, cpu);
+				break;
+			} else {
+				/* clear bit for unchanged cpu, continue */
+				cpumask_clear_cpu(cpu, cpus);
+			}
+		}
+	}
+
+	return ret;
+}
+#endif
+
+int rtas_online_cpus_mask(cpumask_var_t cpus)
+{
+	int ret;
+
+	ret = rtas_cpu_state_change_mask(UP, cpus);
+
+	if (ret) {
+		cpumask_var_t tmp_mask;
+
+		if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
+			return ret;
+
+		/* Use tmp_mask to preserve cpus mask from first failure */
+		cpumask_copy(tmp_mask, cpus);
+		rtas_offline_cpus_mask(tmp_mask);
+		free_cpumask_var(tmp_mask);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(rtas_online_cpus_mask);
+
+int rtas_offline_cpus_mask(cpumask_var_t cpus)
+{
+	return rtas_cpu_state_change_mask(DOWN, cpus);
+}
+EXPORT_SYMBOL(rtas_offline_cpus_mask);
+
 int rtas_ibm_suspend_me(struct rtas_args *args)
 {
 	long state;
@@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 	struct rtas_suspend_me_data data;
 	DECLARE_COMPLETION_ONSTACK(done);
+	cpumask_var_t offline_mask;
+	int cpuret;
 
 	if (!rtas_service_present("ibm,suspend-me"))
 		return -ENOSYS;
@@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
 		return 0;
 	}
 
+	if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+		return -ENOMEM;
+
 	atomic_set(&data.working, 0);
 	atomic_set(&data.done, 0);
 	atomic_set(&data.error, 0);
 	data.token = rtas_token("ibm,suspend-me");
 	data.complete = &done;
+
+	/* All present CPUs must be online */
+	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+	cpuret = rtas_online_cpus_mask(offline_mask);
+	if (cpuret) {
+		pr_err("%s: Could not bring present CPUs online.\n", __func__);
+		atomic_set(&data.error, cpuret);
+		goto out;
+	}
+
 	stop_topology_update();
 
 	/* Call function on all CPUs.  One of us will make the
@@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
 
 	start_topology_update();
 
+	/* Take down CPUs not online prior to suspend */
+	cpuret = rtas_offline_cpus_mask(offline_mask);
+	if (cpuret)
+		pr_warn("%s: Could not restore CPUs to offline state.\n",
+				__func__);
+
+out:
+	free_cpumask_var(offline_mask);
 	return atomic_read(&data.error);
 }
 #else /* CONFIG_PPC_PSERIES */
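
rtas_online_cpus_mask() above follows a common all-or-nothing shape: try to bring every CPU in the mask up, and on partial failure roll back the ones that were already changed. A generic, hedged sketch of that pattern, with a hypothetical resource API rather than the kernel's cpumask/hotplug one:

    #include <stdio.h>

    #define NRES 4

    static int enabled[NRES];

    /* Hypothetical enable/disable primitives; index 2 fails to enable. */
    static int res_enable(int i)
    {
        if (i == 2)
            return -1;
        enabled[i] = 1;
        return 0;
    }

    static void res_disable(int i)
    {
        enabled[i] = 0;
    }

    /* Enable all resources; on failure, disable the already-enabled ones. */
    static int enable_all_or_nothing(void)
    {
        int i, ret;

        for (i = 0; i < NRES; i++) {
            ret = res_enable(i);
            if (ret) {
                while (i-- > 0)   /* roll back the changed ones */
                    res_disable(i);
                return ret;
            }
        }
        return 0;
    }

    int main(void)
    {
        printf("result: %d\n", enable_all_or_nothing());
        for (int i = 0; i < NRES; i++)
            printf("res %d enabled=%d\n", i, enabled[i]);
        return 0;
    }

In the patch the rollback happens one level up, in rtas_online_cpus_mask(), using the altered mask that rtas_cpu_state_change_mask() hands back.
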
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 5b3022470126..2f3cdb01506d 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -89,6 +89,7 @@
 
 /* Array sizes */
 #define VALIDATE_BUF_SIZE 4096
+#define VALIDATE_MSG_LEN  256
 #define RTAS_MSG_MAXLEN   64
 
 /* Quirk - RTAS requires 4k list length and block size */
@@ -466,7 +467,7 @@ static void validate_flash(struct rtas_validate_flash_t *args_buf)
 }
 
 static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
-		char *msg)
+		char *msg, int msglen)
 {
 	int n;
 
@@ -474,7 +475,8 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
 		n = sprintf(msg, "%d\n", args_buf->update_results);
 		if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) ||
 		    (args_buf->update_results == VALIDATE_TMP_UPDATE))
-			n += sprintf(msg + n, "%s\n", args_buf->buf);
+			n += snprintf(msg + n, msglen - n, "%s\n",
+					args_buf->buf);
 	} else {
 		n = sprintf(msg, "%d\n", args_buf->status);
 	}
@@ -486,11 +488,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
 {
 	struct rtas_validate_flash_t *const args_buf =
 		&rtas_validate_flash_data;
-	char msg[RTAS_MSG_MAXLEN];
+	char msg[VALIDATE_MSG_LEN];
 	int msglen;
 
 	mutex_lock(&rtas_validate_flash_mutex);
-	msglen = get_validate_flash_msg(args_buf, msg);
+	msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN);
 	mutex_unlock(&rtas_validate_flash_mutex);
 
 	return simple_read_from_buffer(buf, count, ppos, msg, msglen);
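
The switch from sprintf() to snprintf() above is the standard fix for building a message into a fixed buffer: pass the remaining space along so the write can never run past the end, and enlarge the buffer to match the data being formatted. A minimal userspace sketch of the bounded-append idiom (simplified names):

    #include <stdio.h>

    #define MSG_LEN 32

    /* Build "<status>\n<text>\n" into a fixed buffer without overflow,
     * loosely mirroring the bounded get_validate_flash_msg() above. */
    static int build_msg(char *msg, int msglen, int status, const char *text)
    {
        int n;

        n = snprintf(msg, msglen, "%d\n", status);
        /* Remaining space is msglen - n; snprintf truncates if needed. */
        n += snprintf(msg + n, msglen - n, "%s\n", text);
        return n;
    }

    int main(void)
    {
        char msg[MSG_LEN];

        build_msg(msg, sizeof(msg), 0, "a deliberately long status text");
        printf("%s\n", msg);   /* output may be truncated, but in bounds */
        return 0;
    }
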
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index cf12eae02de5..577a8aa69c6e 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -13,6 +13,7 @@
 #include <linux/signal.h>
 #include <linux/uprobes.h>
 #include <linux/key.h>
+#include <linux/context_tracking.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -24,7 +25,7 @@
  * through debug.exception-trace sysctl.
  */
 
-int show_unhandled_signals = 0;
+int show_unhandled_signals = 1;
 
 /*
  * Allocate space for the signal frame
@@ -159,6 +160,8 @@ static int do_signal(struct pt_regs *regs)
 
 void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 {
+	user_exit();
+
 	if (thread_info_flags & _TIF_UPROBE)
 		uprobe_notify_resume(regs);
 
@@ -169,4 +172,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
 	}
+
+	user_enter();
 }
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 83efa2f7d926..a7a648f6b750 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -35,6 +35,7 @@
 #include <linux/kdebug.h>
 #include <linux/debugfs.h>
 #include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
 
 #include <asm/emulated_ops.h>
 #include <asm/pgtable.h>
@@ -667,6 +668,7 @@ int machine_check_generic(struct pt_regs *regs)
 
 void machine_check_exception(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	int recover = 0;
 
 	__get_cpu_var(irq_stat).mce_exceptions++;
@@ -683,7 +685,7 @@ void machine_check_exception(struct pt_regs *regs)
 		recover = cur_cpu_spec->machine_check(regs);
 
 	if (recover > 0)
-		return;
+		goto bail;
 
 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
 	/* the qspan pci read routines can cause machine checks -- Cort
@@ -693,20 +695,23 @@ void machine_check_exception(struct pt_regs *regs)
 	 * -- BenH
 	 */
 	bad_page_fault(regs, regs->dar, SIGBUS);
-	return;
+	goto bail;
 #endif
 
 	if (debugger_fault_handler(regs))
-		return;
+		goto bail;
 
 	if (check_io_access(regs))
-		return;
+		goto bail;
 
 	die("Machine check", regs, SIGBUS);
 
 	/* Must die if the interrupt is not recoverable */
 	if (!(regs->msr & MSR_RI))
 		panic("Unrecoverable Machine check");
+
+bail:
+	exception_exit(prev_state);
 }
 
 void SMIException(struct pt_regs *regs)
@@ -716,20 +721,29 @@ void SMIException(struct pt_regs *regs)
 
 void unknown_exception(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
+
 	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
 	       regs->nip, regs->msr, regs->trap);
 
 	_exception(SIGTRAP, regs, 0, 0);
+
+	exception_exit(prev_state);
 }
 
 void instruction_breakpoint_exception(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
+
 	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
 					5, SIGTRAP) == NOTIFY_STOP)
-		return;
+		goto bail;
 	if (debugger_iabr_match(regs))
-		return;
+		goto bail;
 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+
+bail:
+	exception_exit(prev_state);
 }
 
 void RunModeException(struct pt_regs *regs)
@@ -739,15 +753,20 @@ void RunModeException(struct pt_regs *regs)
 
 void __kprobes single_step_exception(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
+
 	clear_single_step(regs);
 
 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
 					5, SIGTRAP) == NOTIFY_STOP)
-		return;
+		goto bail;
 	if (debugger_sstep(regs))
-		return;
+		goto bail;
 
 	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
+
+bail:
+	exception_exit(prev_state);
 }
 
 /*
@@ -1005,6 +1024,7 @@ int is_valid_bugaddr(unsigned long addr)
 
 void __kprobes program_check_exception(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	unsigned int reason = get_reason(regs);
 	extern int do_mathemu(struct pt_regs *regs);
 
@@ -1014,26 +1034,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 	if (reason & REASON_FP) {
 		/* IEEE FP exception */
 		parse_fpe(regs);
-		return;
+		goto bail;
 	}
 	if (reason & REASON_TRAP) {
 		/* Debugger is first in line to stop recursive faults in
 		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
 		if (debugger_bpt(regs))
-			return;
+			goto bail;
 
 		/* trap exception */
 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
 				== NOTIFY_STOP)
-			return;
+			goto bail;
 
 		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
 		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
 			regs->nip += 4;
-			return;
+			goto bail;
 		}
 		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
-		return;
+		goto bail;
 	}
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (reason & REASON_TM) {
@@ -1049,7 +1069,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		if (!user_mode(regs) &&
 		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
 			regs->nip += 4;
-			return;
+			goto bail;
 		}
 		/* If usermode caused this, it's done something illegal and
 		 * gets a SIGILL slap on the wrist.  We call it an illegal
@@ -1059,7 +1079,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		 */
 		if (user_mode(regs)) {
 			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
-			return;
+			goto bail;
 		} else {
 			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
 			       "at %lx (msr 0x%x)\n", regs->nip, reason);
@@ -1083,16 +1103,16 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		switch (do_mathemu(regs)) {
 		case 0:
 			emulate_single_step(regs);
-			return;
+			goto bail;
 		case 1: {
 			int code = 0;
 			code = __parse_fpscr(current->thread.fpscr.val);
 			_exception(SIGFPE, regs, code, regs->nip);
-			return;
+			goto bail;
 		}
 		case -EFAULT:
 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-			return;
+			goto bail;
 		}
 		/* fall through on any other errors */
 #endif /* CONFIG_MATH_EMULATION */
@@ -1103,10 +1123,10 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		case 0:
 			regs->nip += 4;
 			emulate_single_step(regs);
-			return;
+			goto bail;
 		case -EFAULT:
 			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-			return;
+			goto bail;
 		}
 	}
 
@@ -1114,10 +1134,14 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
 	else
 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+
+bail:
+	exception_exit(prev_state);
 }
 
 void alignment_exception(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
 	int sig, code, fixed = 0;
 
 	/* We restore the interrupt state now */
@@ -1131,7 +1155,7 @@ void alignment_exception(struct pt_regs *regs)
 	if (fixed == 1) {
 		regs->nip += 4;	/* skip over emulated instruction */
 		emulate_single_step(regs);
-		return;
+		goto bail;
 	}
 
 	/* Operand address was bad */
@@ -1146,6 +1170,9 @@ void alignment_exception(struct pt_regs *regs)
 		_exception(sig, regs, code, regs->dar);
 	else
 		bad_page_fault(regs, regs->dar, sig);
+
+bail:
+	exception_exit(prev_state);
 }
 
 void StackOverflow(struct pt_regs *regs)
@@ -1174,23 +1201,32 @@ void trace_syscall(struct pt_regs *regs)
 
 void kernel_fp_unavailable_exception(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
+
 	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
 			  "%lx at %lx\n", regs->trap, regs->nip);
 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
+
+	exception_exit(prev_state);
 }
 
 void altivec_unavailable_exception(struct pt_regs *regs)
 {
+	enum ctx_state prev_state = exception_enter();
+
 	if (user_mode(regs)) {
 		/* A user program has executed an altivec instruction,
 		   but this kernel doesn't support altivec. */
 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-		return;
+		goto bail;
	}
 
 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
 			"%lx at %lx\n", regs->trap, regs->nip);
 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
+
+bail:
+	exception_exit(prev_state);
 }
 
 void vsx_unavailable_exception(struct pt_regs *regs)
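
Every handler in this file is bracketed the same way: exception_enter() records whether the CPU was previously in user or kernel context and switches the context-tracking state to kernel, and each early return becomes a goto bail so exception_exit() always restores the previous state on the single exit path. A hedged, heavily simplified sketch of that state machine (hypothetical single-CPU stand-in for the context-tracking API):

    #include <stdio.h>

    enum ctx_state { CONTEXT_KERNEL, CONTEXT_USER };

    static enum ctx_state cur_state = CONTEXT_USER;

    /* Simplified stand-ins for the kernel's context-tracking hooks. */
    static enum ctx_state exception_enter(void)
    {
        enum ctx_state prev = cur_state;

        cur_state = CONTEXT_KERNEL;   /* now accounted as kernel time */
        return prev;
    }

    static void exception_exit(enum ctx_state prev)
    {
        cur_state = prev;             /* restore user/kernel view */
    }

    static void some_exception(int fault)
    {
        enum ctx_state prev_state = exception_enter();

        if (fault)
            goto bail;   /* every early return funnels here */
        printf("handled normally\n");
    bail:
        exception_exit(prev_state);
    }

    int main(void)
    {
        some_exception(0);
        some_exception(1);
        printf("state restored: %d\n", cur_state == CONTEXT_USER);
        return 0;
    }

The goto-bail shape is what makes the pairing reliable: no matter which branch bails out, the exit hook runs exactly once.
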
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 13b867093499..9d3fdcd66290 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -64,6 +64,9 @@ void __init udbg_early_init(void)
 	udbg_init_usbgecko();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
 	udbg_init_wsp();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
+	/* In memory console */
+	udbg_init_memcons();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC)
 	udbg_init_ehv_bc();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 229951ffc351..8726779e1409 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -32,6 +32,7 @@
 #include <linux/perf_event.h>
 #include <linux/magic.h>
 #include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -196,6 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
 int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 			    unsigned long error_code)
 {
+	enum ctx_state prev_state = exception_enter();
 	struct vm_area_struct * vma;
 	struct mm_struct *mm = current->mm;
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -204,6 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	int trap = TRAP(regs);
 	int is_exec = trap == 0x400;
 	int fault;
+	int rc = 0;
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 	/*
@@ -230,28 +233,30 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * look at it
 	 */
 	if (error_code & ICSWX_DSI_UCT) {
-		int rc = acop_handle_fault(regs, address, error_code);
+		rc = acop_handle_fault(regs, address, error_code);
 		if (rc)
-			return rc;
+			goto bail;
 	}
 #endif /* CONFIG_PPC_ICSWX */
 
 	if (notify_page_fault(regs))
-		return 0;
+		goto bail;
 
 	if (unlikely(debugger_fault_handler(regs)))
-		return 0;
+		goto bail;
 
 	/* On a kernel SLB miss we can only check for a valid exception entry */
-	if (!user_mode(regs) && (address >= TASK_SIZE))
-		return SIGSEGV;
+	if (!user_mode(regs) && (address >= TASK_SIZE)) {
+		rc = SIGSEGV;
+		goto bail;
+	}
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
 			     defined(CONFIG_PPC_BOOK3S_64))
 	if (error_code & DSISR_DABRMATCH) {
 		/* breakpoint match */
 		do_break(regs, address, error_code);
-		return 0;
+		goto bail;
 	}
 #endif
 
@@ -260,8 +265,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 		local_irq_enable();
 
 	if (in_atomic() || mm == NULL) {
-		if (!user_mode(regs))
-			return SIGSEGV;
+		if (!user_mode(regs)) {
+			rc = SIGSEGV;
+			goto bail;
+		}
 		/* in_atomic() in user mode is really bad,
 		   as is current->mm == NULL. */
 		printk(KERN_EMERG "Page fault in user mode with "
@@ -417,9 +424,11 @@ good_area:
 	 */
 	fault = handle_mm_fault(mm, vma, address, flags);
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
-		int rc = mm_fault_error(regs, address, fault);
+		rc = mm_fault_error(regs, address, fault);
 		if (rc >= MM_FAULT_RETURN)
-			return rc;
+			goto bail;
+		else
+			rc = 0;
 	}
 
 	/*
@@ -454,7 +463,7 @@ good_area:
 	}
 
 	up_read(&mm->mmap_sem);
-	return 0;
+	goto bail;
 
 bad_area:
 	up_read(&mm->mmap_sem);
@@ -463,7 +472,7 @@ bad_area_nosemaphore:
 	/* User mode accesses cause a SIGSEGV */
 	if (user_mode(regs)) {
 		_exception(SIGSEGV, regs, code, address);
-		return 0;
+		goto bail;
 	}
 
 	if (is_exec && (error_code & DSISR_PROTFAULT))
@@ -471,7 +480,11 @@ bad_area_nosemaphore:
 			" page (%lx) - exploit attempt? (uid: %d)\n",
 			address, from_kuid(&init_user_ns, current_uid()));
 
-	return SIGSEGV;
+	rc = SIGSEGV;
+
+bail:
+	exception_exit(prev_state);
+	return rc;
 
 }
 
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 88ac0eeaadde..e303a6d74e3a 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -33,6 +33,7 @@
 #include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/memblock.h>
+#include <linux/context_tracking.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -954,6 +955,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
  */
 int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 {
+	enum ctx_state prev_state = exception_enter();
 	pgd_t *pgdir;
 	unsigned long vsid;
 	struct mm_struct *mm;
@@ -973,7 +975,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		mm = current->mm;
 		if (! mm) {
 			DBG_LOW(" user region with no mm !\n");
-			return 1;
+			rc = 1;
+			goto bail;
 		}
 		psize = get_slice_psize(mm, ea);
 		ssize = user_segment_size(ea);
@@ -992,19 +995,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		/* Not a valid range
 		 * Send the problem up to do_page_fault
 		 */
-		return 1;
+		rc = 1;
+		goto bail;
 	}
 	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
 	/* Bad address. */
 	if (!vsid) {
 		DBG_LOW("Bad address!\n");
-		return 1;
+		rc = 1;
+		goto bail;
 	}
 	/* Get pgdir */
 	pgdir = mm->pgd;
-	if (pgdir == NULL)
-		return 1;
+	if (pgdir == NULL) {
+		rc = 1;
+		goto bail;
+	}
 
 	/* Check CPU locality */
 	tmp = cpumask_of(smp_processor_id());
@@ -1027,7 +1034,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
 	if (ptep == NULL || !pte_present(*ptep)) {
 		DBG_LOW(" no PTE !\n");
-		return 1;
+		rc = 1;
+		goto bail;
 	}
 
 	/* Add _PAGE_PRESENT to the required access perm */
@@ -1038,13 +1046,16 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	 */
 	if (access & ~pte_val(*ptep)) {
 		DBG_LOW(" no access !\n");
-		return 1;
+		rc = 1;
+		goto bail;
 	}
 
 #ifdef CONFIG_HUGETLB_PAGE
-	if (hugeshift)
-		return __hash_page_huge(ea, access, vsid, ptep, trap, local,
-					ssize, hugeshift, psize);
+	if (hugeshift) {
+		rc = __hash_page_huge(ea, access, vsid, ptep, trap, local,
+					ssize, hugeshift, psize);
+		goto bail;
+	}
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #ifndef CONFIG_PPC_64K_PAGES
@@ -1124,6 +1135,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			pte_val(*(ptep + PTRS_PER_PTE)));
 #endif
 	DBG_LOW(" -> rc=%d\n", rc);
+
+bail:
+	exception_exit(prev_state);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(hash_page);
@@ -1259,6 +1273,8 @@ void flush_hash_range(unsigned long number, int local)
  */
 void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
 {
+	enum ctx_state prev_state = exception_enter();
+
 	if (user_mode(regs)) {
 #ifdef CONFIG_PPC_SUBPAGE_PROT
 		if (rc == -2)
@@ -1268,6 +1284,8 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
 			_exception(SIGBUS, regs, BUS_ADRERR, address);
 	} else
 		bad_page_fault(regs, address, SIGBUS);
+
+	exception_exit(prev_state);
 }
 
 long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c2787bf779ca..a90b9c458990 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -215,7 +215,8 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
215 unsigned long phys) 215 unsigned long phys)
216{ 216{
217 int mapped = htab_bolt_mapping(start, start + page_size, phys, 217 int mapped = htab_bolt_mapping(start, start + page_size, phys,
218 PAGE_KERNEL, mmu_vmemmap_psize, 218 pgprot_val(PAGE_KERNEL),
219 mmu_vmemmap_psize,
219 mmu_kernel_ssize); 220 mmu_kernel_ssize);
220 BUG_ON(mapped < 0); 221 BUG_ON(mapped < 0);
221} 222}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index c627843c5b2e..426180b84978 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -13,11 +13,13 @@
13#include <linux/perf_event.h> 13#include <linux/perf_event.h>
14#include <linux/percpu.h> 14#include <linux/percpu.h>
15#include <linux/hardirq.h> 15#include <linux/hardirq.h>
16#include <linux/uaccess.h>
16#include <asm/reg.h> 17#include <asm/reg.h>
17#include <asm/pmc.h> 18#include <asm/pmc.h>
18#include <asm/machdep.h> 19#include <asm/machdep.h>
19#include <asm/firmware.h> 20#include <asm/firmware.h>
20#include <asm/ptrace.h> 21#include <asm/ptrace.h>
22#include <asm/code-patching.h>
21 23
22#define BHRB_MAX_ENTRIES 32 24#define BHRB_MAX_ENTRIES 32
23#define BHRB_TARGET 0x0000000000000002 25#define BHRB_TARGET 0x0000000000000002
@@ -100,6 +102,10 @@ static inline int siar_valid(struct pt_regs *regs)
100 return 1; 102 return 1;
101} 103}
102 104
105static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
106static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
107void power_pmu_flush_branch_stack(void) {}
108static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
103#endif /* CONFIG_PPC32 */ 109#endif /* CONFIG_PPC32 */
104 110
105static bool regs_use_siar(struct pt_regs *regs) 111static bool regs_use_siar(struct pt_regs *regs)
@@ -308,6 +314,159 @@ static inline int siar_valid(struct pt_regs *regs)
308 return 1; 314 return 1;
309} 315}
310 316
317
318/* Reset all possible BHRB entries */
319static void power_pmu_bhrb_reset(void)
320{
321 asm volatile(PPC_CLRBHRB);
322}
323
324static void power_pmu_bhrb_enable(struct perf_event *event)
325{
326 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
327
328 if (!ppmu->bhrb_nr)
329 return;
330
331 /* Clear BHRB if we changed task context to avoid data leaks */
332 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
333 power_pmu_bhrb_reset();
334 cpuhw->bhrb_context = event->ctx;
335 }
336 cpuhw->bhrb_users++;
337}
338
339static void power_pmu_bhrb_disable(struct perf_event *event)
340{
341 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
342
343 if (!ppmu->bhrb_nr)
344 return;
345
346 cpuhw->bhrb_users--;
347 WARN_ON_ONCE(cpuhw->bhrb_users < 0);
348
349 if (!cpuhw->disabled && !cpuhw->bhrb_users) {
350 /* BHRB cannot be turned off when other
351 * events are active on the PMU.
352 */
353
354 /* avoid stale pointer */
355 cpuhw->bhrb_context = NULL;
356 }
357}
358
359/* Called from ctxsw to prevent one process's branch entries from
360 * mingling with another process's entries during a context switch.
361 */
362void power_pmu_flush_branch_stack(void)
363{
364 if (ppmu->bhrb_nr)
365 power_pmu_bhrb_reset();
366}
367/* Calculate the "to" (target) address for a branch */
368static __u64 power_pmu_bhrb_to(u64 addr)
369{
370 unsigned int instr;
371 int ret;
372 __u64 target;
373
374 if (is_kernel_addr(addr))
375 return branch_target((unsigned int *)addr);
376
377 /* Userspace: need to copy the instruction, then translate it */
378 pagefault_disable();
379 ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
380 if (ret) {
381 pagefault_enable();
382 return 0;
383 }
384 pagefault_enable();
385
386 target = branch_target(&instr);
387 if ((!target) || (instr & BRANCH_ABSOLUTE))
388 return target;
389
390 /* Translate relative branch target from kernel to user address */
391 return target - (unsigned long)&instr + addr;
392}
393
394/* Processing BHRB entries */
395void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
396{
397 u64 val;
398 u64 addr;
399 int r_index, u_index, pred;
400
401 r_index = 0;
402 u_index = 0;
403 while (r_index < ppmu->bhrb_nr) {
404 /* Assembly read function */
405 val = read_bhrb(r_index++);
406 if (!val)
407 /* Terminal marker: End of valid BHRB entries */
408 break;
409 else {
410 addr = val & BHRB_EA;
411 pred = val & BHRB_PREDICTION;
412
413 if (!addr)
414 /* invalid entry */
415 continue;
416
417 /* Branches are read most recent first (ie. mfbhrb 0 is
418 * the most recent branch).
419 * There are two types of valid entries:
420 * 1) a target entry which is the to address of a
421 * computed goto like a blr,bctr,btar. The next
422 * entry read from the bhrb will be branch
423 * corresponding to this target (ie. the actual
424 * blr/bctr/btar instruction).
425 * 2) a from address which is an actual branch. If a
426 * target entry precedes this, then this is the
427 * matching branch for that target. If this is not
428 * following a target entry, then this is a branch
429 * where the target is given as an immediate field
430 * in the instruction (ie. an i or b form branch).
431 * In this case we need to read the instruction from
432 * memory to determine the target/to address.
433 */
434
435 if (val & BHRB_TARGET) {
436 /* Target branches use two entries
437 * (ie. computed gotos/XL form)
438 */
439 cpuhw->bhrb_entries[u_index].to = addr;
440 cpuhw->bhrb_entries[u_index].mispred = pred;
441 cpuhw->bhrb_entries[u_index].predicted = ~pred;
442
443 /* Get from address in next entry */
444 val = read_bhrb(r_index++);
445 addr = val & BHRB_EA;
446 if (val & BHRB_TARGET) {
447 /* Shouldn't have two targets in a
448 row. Reset index and try again */
449 r_index--;
450 addr = 0;
451 }
452 cpuhw->bhrb_entries[u_index].from = addr;
453 } else {
454 /* Branches to immediate field
455 (ie I or B form) */
456 cpuhw->bhrb_entries[u_index].from = addr;
457 cpuhw->bhrb_entries[u_index].to =
458 power_pmu_bhrb_to(addr);
459 cpuhw->bhrb_entries[u_index].mispred = pred;
460 cpuhw->bhrb_entries[u_index].predicted = ~pred;
461 }
462 u_index++;
463
464 }
465 }
466 cpuhw->bhrb_stack.nr = u_index;
467 return;
468}
469
311#endif /* CONFIG_PPC64 */ 470#endif /* CONFIG_PPC64 */
312 471
313static void perf_event_interrupt(struct pt_regs *regs); 472static void perf_event_interrupt(struct pt_regs *regs);
@@ -904,47 +1063,6 @@ static int collect_events(struct perf_event *group, int max_count,
904 return n; 1063 return n;
905} 1064}
906 1065
907/* Reset all possible BHRB entries */
908static void power_pmu_bhrb_reset(void)
909{
910 asm volatile(PPC_CLRBHRB);
911}
912
913void power_pmu_bhrb_enable(struct perf_event *event)
914{
915 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
916
917 if (!ppmu->bhrb_nr)
918 return;
919
920 /* Clear BHRB if we changed task context to avoid data leaks */
921 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
922 power_pmu_bhrb_reset();
923 cpuhw->bhrb_context = event->ctx;
924 }
925 cpuhw->bhrb_users++;
926}
927
928void power_pmu_bhrb_disable(struct perf_event *event)
929{
930 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
931
932 if (!ppmu->bhrb_nr)
933 return;
934
935 cpuhw->bhrb_users--;
936 WARN_ON_ONCE(cpuhw->bhrb_users < 0);
937
938 if (!cpuhw->disabled && !cpuhw->bhrb_users) {
939 /* BHRB cannot be turned off when other
940 * events are active on the PMU.
941 */
942
943 /* avoid stale pointer */
944 cpuhw->bhrb_context = NULL;
945 }
946}
947
948/* 1066/*
949 * Add an event to the PMU. 1067
950 * If all events are not already frozen, then we disable and 1068 * If all events are not already frozen, then we disable and
@@ -1180,15 +1298,6 @@ int power_pmu_commit_txn(struct pmu *pmu)
1180 return 0; 1298 return 0;
1181} 1299}
1182 1300
1183/* Called from ctxsw to prevent one process's branch entries to
1184 * mingle with the other process's entries during context switch.
1185 */
1186void power_pmu_flush_branch_stack(void)
1187{
1188 if (ppmu->bhrb_nr)
1189 power_pmu_bhrb_reset();
1190}
1191
1192/* 1301/*
1193 * Return 1 if we might be able to put event on a limited PMC, 1302 * Return 1 if we might be able to put event on a limited PMC,
1194 * or 0 if not. 1303 * or 0 if not.
@@ -1458,77 +1567,6 @@ struct pmu power_pmu = {
1458 .flush_branch_stack = power_pmu_flush_branch_stack, 1567 .flush_branch_stack = power_pmu_flush_branch_stack,
1459}; 1568};
1460 1569
1461/* Processing BHRB entries */
1462void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
1463{
1464 u64 val;
1465 u64 addr;
1466 int r_index, u_index, target, pred;
1467
1468 r_index = 0;
1469 u_index = 0;
1470 while (r_index < ppmu->bhrb_nr) {
1471 /* Assembly read function */
1472 val = read_bhrb(r_index);
1473
1474 /* Terminal marker: End of valid BHRB entries */
1475 if (val == 0) {
1476 break;
1477 } else {
1478 /* BHRB field break up */
1479 addr = val & BHRB_EA;
1480 pred = val & BHRB_PREDICTION;
1481 target = val & BHRB_TARGET;
1482
1483 /* Probable Missed entry: Not applicable for POWER8 */
1484 if ((addr == 0) && (target == 0) && (pred == 1)) {
1485 r_index++;
1486 continue;
1487 }
1488
1489 /* Real Missed entry: Power8 based missed entry */
1490 if ((addr == 0) && (target == 1) && (pred == 1)) {
1491 r_index++;
1492 continue;
1493 }
1494
1495 /* Reserved condition: Not a valid entry */
1496 if ((addr == 0) && (target == 1) && (pred == 0)) {
1497 r_index++;
1498 continue;
1499 }
1500
1501 /* Is a target address */
1502 if (val & BHRB_TARGET) {
1503 /* First address cannot be a target address */
1504 if (r_index == 0) {
1505 r_index++;
1506 continue;
1507 }
1508
1509 /* Update target address for the previous entry */
1510 cpuhw->bhrb_entries[u_index - 1].to = addr;
1511 cpuhw->bhrb_entries[u_index - 1].mispred = pred;
1512 cpuhw->bhrb_entries[u_index - 1].predicted = ~pred;
1513
1514 /* Dont increment u_index */
1515 r_index++;
1516 } else {
1517 /* Update address, flags for current entry */
1518 cpuhw->bhrb_entries[u_index].from = addr;
1519 cpuhw->bhrb_entries[u_index].mispred = pred;
1520 cpuhw->bhrb_entries[u_index].predicted = ~pred;
1521
1522 /* Successfully popullated one entry */
1523 u_index++;
1524 r_index++;
1525 }
1526 }
1527 }
1528 cpuhw->bhrb_stack.nr = u_index;
1529 return;
1530}
1531
1532/* 1570/*
1533 * A counter has overflowed; update its count and record 1571 * A counter has overflowed; update its count and record
1534 * things if requested. Note that interrupts are hard-disabled 1572 * things if requested. Note that interrupts are hard-disabled
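For readers following the BHRB rework above: each 64-bit entry packs an effective address with target/prediction flag bits, and XL-form branches consume two consecutive entries (the target word first, then the branch itself). A hedged user-space sketch of the decoding loop, with made-up mask constants and sample data standing in for the real mfbhrb reads:

    #include <stdint.h>
    #include <stdio.h>

    #define BHRB_TARGET     0x2ULL
    #define BHRB_PREDICTION 0x1ULL
    #define BHRB_EA         (~0x3ULL)  /* illustrative: all but the flag bits */

    struct branch { uint64_t from, to; };

    /* Decode raw BHRB words into from/to pairs; returns the number decoded. */
    static int decode_bhrb(const uint64_t *raw, int nr, struct branch *out)
    {
            int r = 0, u = 0;

            while (r < nr) {
                    uint64_t val = raw[r++];
                    uint64_t addr;

                    if (!val)
                            break;          /* terminal marker */
                    addr = val & BHRB_EA;
                    if (!addr)
                            continue;       /* invalid entry */

                    if (val & BHRB_TARGET) {
                            /* XL-form: this word is 'to', the next is 'from' */
                            out[u].to = addr;
                            val = (r < nr) ? raw[r++] : 0;
                            addr = val & BHRB_EA;
                            if (val & BHRB_TARGET) {
                                    r--;    /* two targets in a row */
                                    addr = 0;
                            }
                            out[u].from = addr;
                    } else {
                            /* I/B-form: 'to' would come from the instruction */
                            out[u].from = addr;
                            out[u].to = 0;
                    }
                    u++;
            }
            return u;
    }

    int main(void)
    {
            uint64_t raw[] = { 0x1000, 0x2002, 0x3000, 0 };
            struct branch br[4];
            int i, n = decode_bhrb(raw, 4, br);

            for (i = 0; i < n; i++)
                    printf("from=%#llx to=%#llx\n",
                           (unsigned long long)br[i].from,
                           (unsigned long long)br[i].to);
            return 0;
    }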
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index a881232a3cce..b62aab3e22ec 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -128,7 +128,7 @@ config PPC_RTAS_DAEMON
128 128
129config RTAS_PROC 129config RTAS_PROC
130 bool "Proc interface to RTAS" 130 bool "Proc interface to RTAS"
131 depends on PPC_RTAS 131 depends on PPC_RTAS && PROC_FS
132 default y 132 default y
133 133
134config RTAS_FLASH 134config RTAS_FLASH
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index ade4463226c6..628c564ceadb 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -15,6 +15,7 @@
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/slab.h>
18#include <asm/opal.h> 19#include <asm/opal.h>
19#include <asm/firmware.h> 20#include <asm/firmware.h>
20 21
@@ -28,6 +29,8 @@ struct opal {
28static struct device_node *opal_node; 29static struct device_node *opal_node;
29static DEFINE_SPINLOCK(opal_write_lock); 30static DEFINE_SPINLOCK(opal_write_lock);
30extern u64 opal_mc_secondary_handler[]; 31extern u64 opal_mc_secondary_handler[];
32static unsigned int *opal_irqs;
33static unsigned int opal_irq_count;
31 34
32int __init early_init_dt_scan_opal(unsigned long node, 35int __init early_init_dt_scan_opal(unsigned long node,
33 const char *uname, int depth, void *data) 36 const char *uname, int depth, void *data)
@@ -53,7 +56,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
53 opal.entry, entryp, entrysz); 56 opal.entry, entryp, entrysz);
54 57
55 powerpc_firmware_features |= FW_FEATURE_OPAL; 58 powerpc_firmware_features |= FW_FEATURE_OPAL;
56 if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { 59 if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
60 powerpc_firmware_features |= FW_FEATURE_OPALv2;
61 powerpc_firmware_features |= FW_FEATURE_OPALv3;
62 printk("OPAL V3 detected !\n");
63 } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
57 powerpc_firmware_features |= FW_FEATURE_OPALv2; 64 powerpc_firmware_features |= FW_FEATURE_OPALv2;
58 printk("OPAL V2 detected !\n"); 65 printk("OPAL V2 detected !\n");
59 } else { 66 } else {
@@ -144,6 +151,13 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
144 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) { 151 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
145 len = total_len; 152 len = total_len;
146 rc = opal_console_write(vtermno, &len, data); 153 rc = opal_console_write(vtermno, &len, data);
154
155 /* Closed or other error drop */
156 if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
157 rc != OPAL_BUSY_EVENT) {
158 written = total_len;
159 break;
160 }
147 if (rc == OPAL_SUCCESS) { 161 if (rc == OPAL_SUCCESS) {
148 total_len -= len; 162 total_len -= len;
149 data += len; 163 data += len;
@@ -316,6 +330,8 @@ static int __init opal_init(void)
316 irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); 330 irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
317 pr_debug("opal: Found %d interrupts reserved for OPAL\n", 331 pr_debug("opal: Found %d interrupts reserved for OPAL\n",
318 irqs ? (irqlen / 4) : 0); 332 irqs ? (irqlen / 4) : 0);
333 opal_irq_count = irqlen / 4;
334 opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
319 for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) { 335 for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
320 unsigned int hwirq = be32_to_cpup(irqs); 336 unsigned int hwirq = be32_to_cpup(irqs);
321 unsigned int irq = irq_create_mapping(NULL, hwirq); 337 unsigned int irq = irq_create_mapping(NULL, hwirq);
@@ -327,7 +343,19 @@ static int __init opal_init(void)
327 if (rc) 343 if (rc)
328 pr_warning("opal: Error %d requesting irq %d" 344 pr_warning("opal: Error %d requesting irq %d"
329 " (0x%x)\n", rc, irq, hwirq); 345 " (0x%x)\n", rc, irq, hwirq);
346 opal_irqs[i] = irq;
330 } 347 }
331 return 0; 348 return 0;
332} 349}
333subsys_initcall(opal_init); 350subsys_initcall(opal_init);
351
352void opal_shutdown(void)
353{
354 unsigned int i;
355
356 for (i = 0; i < opal_irq_count; i++) {
357 if (opal_irqs[i])
358 free_irq(opal_irqs[i], 0);
359 opal_irqs[i] = 0;
360 }
361}
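opal_shutdown() can only free what opal_init() remembered, hence the new opal_irqs[] table populated as each interrupt is requested. The bookkeeping pattern in isolation, with hypothetical names and printf standing in for free_irq():

    #include <stdio.h>
    #include <stdlib.h>

    struct irq_table {
            unsigned int *irqs;
            unsigned int count;
    };

    static int irq_table_init(struct irq_table *t, unsigned int count)
    {
            t->irqs = calloc(count, sizeof(*t->irqs));
            if (!t->irqs)
                    return -1;
            t->count = count;
            return 0;
    }

    /* Record each irq as it is requested so shutdown can find it again. */
    static void irq_table_record(struct irq_table *t, unsigned int i,
                                 unsigned int irq)
    {
            if (i < t->count)
                    t->irqs[i] = irq;
    }

    static void irq_table_shutdown(struct irq_table *t,
                                   void (*release)(unsigned int))
    {
            unsigned int i;

            for (i = 0; i < t->count; i++) {
                    if (t->irqs[i])
                            release(t->irqs[i]);    /* like free_irq() */
                    t->irqs[i] = 0;
            }
            free(t->irqs);
            t->irqs = NULL;
    }

    static void release_demo(unsigned int irq)
    {
            printf("releasing irq %u\n", irq);
    }

    int main(void)
    {
            struct irq_table t;

            if (irq_table_init(&t, 4))
                    return 1;
            irq_table_record(&t, 0, 17);
            irq_table_record(&t, 1, 18);
            irq_table_shutdown(&t, release_demo);
            return 0;
    }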
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 1da578b7c1bf..3937aaae5bc4 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1048,6 +1048,12 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
1048 return phb->ioda.pe_rmap[(bus->number << 8) | devfn]; 1048 return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
1049} 1049}
1050 1050
1051static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
1052{
1053 opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
1054 OPAL_ASSERT_RESET);
1055}
1056
1051void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) 1057void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
1052{ 1058{
1053 struct pci_controller *hose; 1059 struct pci_controller *hose;
@@ -1178,6 +1184,9 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
1178 /* Setup TCEs */ 1184 /* Setup TCEs */
1179 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; 1185 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
1180 1186
1187 /* Setup shutdown function for kexec */
1188 phb->shutdown = pnv_pci_ioda_shutdown;
1189
1181 /* Setup MSI support */ 1190 /* Setup MSI support */
1182 pnv_pci_init_ioda_msis(phb); 1191 pnv_pci_init_ioda_msis(phb);
1183 1192
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 55dfca844ddf..163bd7422f1c 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -450,6 +450,18 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
450 pnv_pci_dma_fallback_setup(hose, pdev); 450 pnv_pci_dma_fallback_setup(hose, pdev);
451} 451}
452 452
453void pnv_pci_shutdown(void)
454{
455 struct pci_controller *hose;
456
457 list_for_each_entry(hose, &hose_list, list_node) {
458 struct pnv_phb *phb = hose->private_data;
459
460 if (phb && phb->shutdown)
461 phb->shutdown(phb);
462 }
463}
464
453/* Fixup wrong class code in p7ioc and p8 root complex */ 465/* Fixup wrong class code in p7ioc and p8 root complex */
454static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) 466static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
455{ 467{
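pnv_pci_shutdown() above walks every host bridge and invokes an optional per-controller hook, which pci-ioda.c fills in to reset the IODA tables before kexec. A self-contained sketch of the optional-callback walk; the struct shape and names are illustrative, not the kernel's:

    #include <stdio.h>

    struct phb {
            const char *name;
            void (*shutdown)(struct phb *phb);  /* optional per-bridge hook */
            struct phb *next;
    };

    static void ioda_shutdown(struct phb *phb)
    {
            printf("resetting IODA tables on %s\n", phb->name);
    }

    /* Walk every controller, skipping those without a shutdown method. */
    static void pci_shutdown_all(struct phb *head)
    {
            struct phb *p;

            for (p = head; p; p = p->next)
                    if (p->shutdown)
                            p->shutdown(p);
    }

    int main(void)
    {
            struct phb b = { "phb1", NULL, NULL };         /* no hook: skipped */
            struct phb a = { "phb0", ioda_shutdown, &b };

            pci_shutdown_all(&a);
            return 0;
    }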
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 48dc4bb856a1..25d76c4df50b 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -86,6 +86,7 @@ struct pnv_phb {
86 void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); 86 void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
87 void (*fixup_phb)(struct pci_controller *hose); 87 void (*fixup_phb)(struct pci_controller *hose);
88 u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); 88 u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
89 void (*shutdown)(struct pnv_phb *phb);
89 90
90 union { 91 union {
91 struct { 92 struct {
@@ -158,4 +159,5 @@ extern void pnv_pci_init_ioda_hub(struct device_node *np);
158extern void pnv_pci_init_ioda2_phb(struct device_node *np); 159extern void pnv_pci_init_ioda2_phb(struct device_node *np);
159extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, 160extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
160 u64 *startp, u64 *endp); 161 u64 *startp, u64 *endp);
162
161#endif /* __POWERNV_PCI_H */ 163#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 8a9df7f9667e..a1c6f83fc391 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -9,8 +9,10 @@ static inline void pnv_smp_init(void) { }
9 9
10#ifdef CONFIG_PCI 10#ifdef CONFIG_PCI
11extern void pnv_pci_init(void); 11extern void pnv_pci_init(void);
12extern void pnv_pci_shutdown(void);
12#else 13#else
13static inline void pnv_pci_init(void) { } 14static inline void pnv_pci_init(void) { }
15static inline void pnv_pci_shutdown(void) { }
14#endif 16#endif
15 17
16#endif /* _POWERNV_H */ 18#endif /* _POWERNV_H */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index db1ad1c8f68f..d4459bfc92f7 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -78,7 +78,9 @@ static void pnv_show_cpuinfo(struct seq_file *m)
78 if (root) 78 if (root)
79 model = of_get_property(root, "model", NULL); 79 model = of_get_property(root, "model", NULL);
80 seq_printf(m, "machine\t\t: PowerNV %s\n", model); 80 seq_printf(m, "machine\t\t: PowerNV %s\n", model);
81 if (firmware_has_feature(FW_FEATURE_OPALv2)) 81 if (firmware_has_feature(FW_FEATURE_OPALv3))
82 seq_printf(m, "firmware\t: OPAL v3\n");
83 else if (firmware_has_feature(FW_FEATURE_OPALv2))
82 seq_printf(m, "firmware\t: OPAL v2\n"); 84 seq_printf(m, "firmware\t: OPAL v2\n");
83 else if (firmware_has_feature(FW_FEATURE_OPAL)) 85 else if (firmware_has_feature(FW_FEATURE_OPAL))
84 seq_printf(m, "firmware\t: OPAL v1\n"); 86 seq_printf(m, "firmware\t: OPAL v1\n");
@@ -126,6 +128,17 @@ static void pnv_progress(char *s, unsigned short hex)
126{ 128{
127} 129}
128 130
131static void pnv_shutdown(void)
132{
133 /* Let the PCI code clear up IODA tables */
134 pnv_pci_shutdown();
135
136 /* And unregister all OPAL interrupts so they don't fire
137 * up while we kexec
138 */
139 opal_shutdown();
140}
141
129#ifdef CONFIG_KEXEC 142#ifdef CONFIG_KEXEC
130static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) 143static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
131{ 144{
@@ -187,6 +200,7 @@ define_machine(powernv) {
187 .init_IRQ = pnv_init_IRQ, 200 .init_IRQ = pnv_init_IRQ,
188 .show_cpuinfo = pnv_show_cpuinfo, 201 .show_cpuinfo = pnv_show_cpuinfo,
189 .progress = pnv_progress, 202 .progress = pnv_progress,
203 .machine_shutdown = pnv_shutdown,
190 .power_save = power7_idle, 204 .power_save = power7_idle,
191 .calibrate_decr = generic_calibrate_decr, 205 .calibrate_decr = generic_calibrate_decr,
192#ifdef CONFIG_KEXEC 206#ifdef CONFIG_KEXEC
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 6a3ecca5b725..88c9459c3e07 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -71,18 +71,68 @@ int pnv_smp_kick_cpu(int nr)
71 71
72 BUG_ON(nr < 0 || nr >= NR_CPUS); 72 BUG_ON(nr < 0 || nr >= NR_CPUS);
73 73
74 /* On OPAL v2 the CPU are still spinning inside OPAL itself, 74 /*
75 * get them back now 75 * If we already started or OPALv2 is not supported, we just
76 * kick the CPU via the PACA
76 */ 77 */
77 if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) { 78 if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
78 pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu); 79 goto kick;
79 rc = opal_start_cpu(pcpu, start_here); 80
81 /*
82 * At this point, the CPU can either be spinning on the way in
83 * from kexec or be inside OPAL waiting to be started for the
84 * first time. OPAL v3 allows us to query OPAL to know if it
85 * has the CPUs, so we do that
86 */
87 if (firmware_has_feature(FW_FEATURE_OPALv3)) {
88 uint8_t status;
89
90 rc = opal_query_cpu_status(pcpu, &status);
80 if (rc != OPAL_SUCCESS) { 91 if (rc != OPAL_SUCCESS) {
81 pr_warn("OPAL Error %ld starting CPU %d\n", 92 pr_warn("OPAL Error %ld querying CPU %d state\n",
82 rc, nr); 93 rc, nr);
83 return -ENODEV; 94 return -ENODEV;
84 } 95 }
96
97 /*
98 * Already started, just kick it, probably coming from
99 * kexec and spinning
100 */
101 if (status == OPAL_THREAD_STARTED)
102 goto kick;
103
104 /*
105 * Available/inactive, let's kick it
106 */
107 if (status == OPAL_THREAD_INACTIVE) {
108 pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
109 nr, pcpu);
110 rc = opal_start_cpu(pcpu, start_here);
111 if (rc != OPAL_SUCCESS) {
112 pr_warn("OPAL Error %ld starting CPU %d\n",
113 rc, nr);
114 return -ENODEV;
115 }
116 } else {
117 /*
118 * An unavailable CPU (or any other unknown status)
119 * shouldn't be started. It should also
120 * not be in the possible map, but currently it can
121 * happen
122 */
123 pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
124 " (status %d)...\n", nr, pcpu, status);
125 return -ENODEV;
126 }
127 } else {
128 /*
129 * On OPAL v2, we just kick it and hope for the best,
130 * we must not test the error from opal_start_cpu() or
131 * we would fail to get CPUs from kexec.
132 */
133 opal_start_cpu(pcpu, start_here);
85 } 134 }
135 kick:
86 return smp_generic_kick_cpu(nr); 136 return smp_generic_kick_cpu(nr);
87} 137}
88 138
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 9a0941bc4d31..023b288f895b 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -18,6 +18,7 @@ config PPC_PSERIES
18 select PPC_PCI_CHOICE if EXPERT 18 select PPC_PCI_CHOICE if EXPERT
19 select ZLIB_DEFLATE 19 select ZLIB_DEFLATE
20 select PPC_DOORBELL 20 select PPC_DOORBELL
21 select HAVE_CONTEXT_TRACKING
21 default y 22 default y
22 23
23config PPC_SPLPAR 24config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 47226e04126d..5f997e79d570 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -16,6 +16,7 @@
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 17 */
18 18
19#include <linux/cpu.h>
19#include <linux/delay.h> 20#include <linux/delay.h>
20#include <linux/suspend.h> 21#include <linux/suspend.h>
21#include <linux/stat.h> 22#include <linux/stat.h>
@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev,
126 struct device_attribute *attr, 127 struct device_attribute *attr,
127 const char *buf, size_t count) 128 const char *buf, size_t count)
128{ 129{
130 cpumask_var_t offline_mask;
129 int rc; 131 int rc;
130 132
131 if (!capable(CAP_SYS_ADMIN)) 133 if (!capable(CAP_SYS_ADMIN))
132 return -EPERM; 134 return -EPERM;
133 135
136 if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
137 return -ENOMEM;
138
134 stream_id = simple_strtoul(buf, NULL, 16); 139 stream_id = simple_strtoul(buf, NULL, 16);
135 140
136 do { 141 do {
@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev,
140 } while (rc == -EAGAIN); 145 } while (rc == -EAGAIN);
141 146
142 if (!rc) { 147 if (!rc) {
148 /* All present CPUs must be online */
149 cpumask_andnot(offline_mask, cpu_present_mask,
150 cpu_online_mask);
151 rc = rtas_online_cpus_mask(offline_mask);
152 if (rc) {
153 pr_err("%s: Could not bring present CPUs online.\n",
154 __func__);
155 goto out;
156 }
157
143 stop_topology_update(); 158 stop_topology_update();
144 rc = pm_suspend(PM_SUSPEND_MEM); 159 rc = pm_suspend(PM_SUSPEND_MEM);
145 start_topology_update(); 160 start_topology_update();
161
162 /* Take down CPUs not online prior to suspend */
163 if (!rtas_offline_cpus_mask(offline_mask))
164 pr_warn("%s: Could not restore CPUs to offline "
165 "state.\n", __func__);
146 } 166 }
147 167
148 stream_id = 0; 168 stream_id = 0;
149 169
150 if (!rc) 170 if (!rc)
151 rc = count; 171 rc = count;
172out:
173 free_cpumask_var(offline_mask);
152 return rc; 174 return rc;
153} 175}
154 176
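The suspend path now computes the set of present-but-offline CPUs, onlines them for the duration of pm_suspend(), and offlines them again afterwards. The mask arithmetic reduces to an and-not; a sketch with plain 64-bit bitsets in place of cpumask_var_t (the rtas_*_cpus_mask() helpers are pseries-specific and omitted):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t present_mask = 0x0F;   /* CPUs 0-3 present */
            uint64_t online_mask  = 0x05;   /* CPUs 0 and 2 online */

            /* offline = present & ~online, like cpumask_andnot() */
            uint64_t offline_mask = present_mask & ~online_mask;

            online_mask |= offline_mask;    /* bring them all up first */
            printf("suspending with online=%#llx\n",
                   (unsigned long long)online_mask);

            /* ... pm_suspend(PM_SUSPEND_MEM) would run here ... */

            online_mask &= ~offline_mask;   /* put them back offline after */
            printf("restored online=%#llx\n",
                   (unsigned long long)online_mask);
            return 0;
    }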
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index 97fe82ee8633..2d3b1dd9571d 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -361,7 +361,7 @@ static int wsp_chip_set_affinity(struct irq_data *d,
361 xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); 361 xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
362 wsp_ics_set_xive(ics, hw_irq, xive); 362 wsp_ics_set_xive(ics, hw_irq, xive);
363 363
364 return 0; 364 return IRQ_SET_MASK_OK;
365} 365}
366 366
367static struct irq_chip wsp_irq_chip = { 367static struct irq_chip wsp_irq_chip = {
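This hunk and the ehv_pic, mpic, and ics-opal hunks below make the same fix: irq_set_affinity callbacks should return the IRQ_SET_MASK_OK codes so the genirq core knows whether it still needs to copy the new mask into irq_data. A sketch of that contract with simplified stand-in values:

    #include <stdio.h>

    /* Simplified stand-ins for the genirq return codes in linux/irq.h. */
    enum { IRQ_SET_MASK_OK = 0, IRQ_SET_MASK_OK_NOCOPY = 1 };

    /* The core copies the new mask into its bookkeeping only on ..._OK;
     * on ..._NOCOPY it trusts the chip to have updated it already. */
    static int core_apply_affinity(int (*set)(unsigned long), unsigned long mask,
                                   unsigned long *effective)
    {
            int ret = set(mask);

            if (ret == IRQ_SET_MASK_OK)
                    *effective = mask;
            return ret;
    }

    static int chip_set_affinity(unsigned long mask)
    {
            /* ... a real chip would program its routing registers here ... */
            return IRQ_SET_MASK_OK;
    }

    int main(void)
    {
            unsigned long effective = 0;

            core_apply_affinity(chip_set_affinity, 0x3, &effective);
            printf("effective=%#lx\n", effective);
            return 0;
    }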
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index b0a518e97599..99464a7bdb3b 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -64,6 +64,8 @@ endif
64 64
65obj-$(CONFIG_PPC_SCOM) += scom.o 65obj-$(CONFIG_PPC_SCOM) += scom.o
66 66
67obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o
68
67subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror 69subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
68 70
69obj-$(CONFIG_PPC_XICS) += xics/ 71obj-$(CONFIG_PPC_XICS) += xics/
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index 6e0e1005227f..9cd0e60716fe 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -81,7 +81,7 @@ int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
81 ev_int_set_config(src, config, prio, cpuid); 81 ev_int_set_config(src, config, prio, cpuid);
82 spin_unlock_irqrestore(&ehv_pic_lock, flags); 82 spin_unlock_irqrestore(&ehv_pic_lock, flags);
83 83
84 return 0; 84 return IRQ_SET_MASK_OK;
85} 85}
86 86
87static unsigned int ehv_pic_type_to_vecpri(unsigned int type) 87static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index ee21b5e71aec..0a13ecb270c7 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -836,7 +836,7 @@ int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
836 mpic_physmask(mask)); 836 mpic_physmask(mask));
837 } 837 }
838 838
839 return 0; 839 return IRQ_SET_MASK_OK;
840} 840}
841 841
842static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) 842static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c
new file mode 100644
index 000000000000..ce5a7b489e4b
--- /dev/null
+++ b/arch/powerpc/sysdev/udbg_memcons.c
@@ -0,0 +1,105 @@
1/*
2 * A udbg backend which logs messages and reads input from in-memory
3 * buffers.
4 *
5 * The console output can be read from memcons_output which is a
6 * circular buffer whose next write position is stored in memcons.output_pos.
7 *
8 * Input may be passed by writing into the memcons_input buffer when it is
9 * empty. The input buffer is empty when both input_pos == input_start and
10 * *input_start == '\0'.
11 *
12 * Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp
13 * Copyright (C) 2013 Alistair Popple, IBM Corp
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <asm/barrier.h>
24#include <asm/page.h>
25#include <asm/processor.h>
26#include <asm/udbg.h>
27
28struct memcons {
29 char *output_start;
30 char *output_pos;
31 char *output_end;
32 char *input_start;
33 char *input_pos;
34 char *input_end;
35};
36
37static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE];
38static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE];
39
40struct memcons memcons = {
41 .output_start = memcons_output,
42 .output_pos = memcons_output,
43 .output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE],
44 .input_start = memcons_input,
45 .input_pos = memcons_input,
46 .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE],
47};
48
49void memcons_putc(char c)
50{
51 char *new_output_pos;
52
53 *memcons.output_pos = c;
54 wmb();
55 new_output_pos = memcons.output_pos + 1;
56 if (new_output_pos >= memcons.output_end)
57 new_output_pos = memcons.output_start;
58
59 memcons.output_pos = new_output_pos;
60}
61
62int memcons_getc_poll(void)
63{
64 char c;
65 char *new_input_pos;
66
67 if (*memcons.input_pos) {
68 c = *memcons.input_pos;
69
70 new_input_pos = memcons.input_pos + 1;
71 if (new_input_pos >= memcons.input_end)
72 new_input_pos = memcons.input_start;
73 else if (*new_input_pos == '\0')
74 new_input_pos = memcons.input_start;
75
76 *memcons.input_pos = '\0';
77 wmb();
78 memcons.input_pos = new_input_pos;
79 return c;
80 }
81
82 return -1;
83}
84
85int memcons_getc(void)
86{
87 int c;
88
89 while (1) {
90 c = memcons_getc_poll();
91 if (c == -1)
92 cpu_relax();
93 else
94 break;
95 }
96
97 return c;
98}
99
100void udbg_init_memcons(void)
101{
102 udbg_putc = memcons_putc;
103 udbg_getc = memcons_getc;
104 udbg_getc_poll = memcons_getc_poll;
105}
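The memory console above is just two circular character buffers with read/write cursors, so any tool that can read the target's memory can tail the output by chasing output_pos. The wrap-around rule in isolation, as a small sketch:

    #include <stdio.h>

    #define OUT_SIZE 8

    static char out_buf[OUT_SIZE];
    static char *out_pos = out_buf;
    static const char *out_end = out_buf + OUT_SIZE;

    /* Same wrap rule as memcons_putc(): store, then advance with wrap. */
    static void ring_putc(char c)
    {
            *out_pos = c;
            if (++out_pos >= out_end)
                    out_pos = out_buf;
    }

    int main(void)
    {
            const char *msg = "hello, memcons";
            const char *p;

            for (p = msg; *p; p++)
                    ring_putc(*p);

            /* A reader chasing out_pos sees the most recent OUT_SIZE chars. */
            fwrite(out_buf, 1, OUT_SIZE, stdout);
            putchar('\n');
            return 0;
    }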
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index f7e8609df0d5..39d72212655e 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -148,7 +148,7 @@ static int ics_opal_set_affinity(struct irq_data *d,
148 __func__, d->irq, hw_irq, server, rc); 148 __func__, d->irq, hw_irq, server, rc);
149 return -1; 149 return -1;
150 } 150 }
151 return 0; 151 return IRQ_SET_MASK_OK;
152} 152}
153 153
154static struct irq_chip ics_opal_irq_chip = { 154static struct irq_chip ics_opal_irq_chip = {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a154a91c7e7..685692c94f05 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -108,7 +108,6 @@ config X86
108 select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) 108 select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
109 select GENERIC_TIME_VSYSCALL if X86_64 109 select GENERIC_TIME_VSYSCALL if X86_64
110 select KTIME_SCALAR if X86_32 110 select KTIME_SCALAR if X86_32
111 select ALWAYS_USE_PERSISTENT_CLOCK
112 select GENERIC_STRNCPY_FROM_USER 111 select GENERIC_STRNCPY_FROM_USER
113 select GENERIC_STRNLEN_USER 112 select GENERIC_STRNLEN_USER
114 select HAVE_CONTEXT_TRACKING if X86_64 113 select HAVE_CONTEXT_TRACKING if X86_64
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index d893e8ed8ac9..2e9e12871c2b 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -487,6 +487,7 @@ static inline void show_saved_mc(void)
487#endif 487#endif
488 488
489#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU) 489#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
490static DEFINE_MUTEX(x86_cpu_microcode_mutex);
490/* 491/*
491 * Save this mc into mc_saved_data. So it will be loaded early when a CPU is 492 * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
492 * hot added or resumes. 493 * hot added or resumes.
@@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc)
507 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in 508 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
508 * hotplug. 509 * hotplug.
509 */ 510 */
510 cpu_hotplug_driver_lock(); 511 mutex_lock(&x86_cpu_microcode_mutex);
511 512
512 mc_saved_count_init = mc_saved_data.mc_saved_count; 513 mc_saved_count_init = mc_saved_data.mc_saved_count;
513 mc_saved_count = mc_saved_data.mc_saved_count; 514 mc_saved_count = mc_saved_data.mc_saved_count;
@@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc)
544 } 545 }
545 546
546out: 547out:
547 cpu_hotplug_driver_unlock(); 548 mutex_unlock(&x86_cpu_microcode_mutex);
548 549
549 return ret; 550 return ret;
550} 551}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 607af0d4d5ef..4e7a37ff03ab 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -312,6 +312,8 @@ void arch_cpu_idle(void)
312{ 312{
313 if (cpuidle_idle_call()) 313 if (cpuidle_idle_call())
314 x86_idle(); 314 x86_idle();
315 else
316 local_irq_enable();
315} 317}
316 318
317/* 319/*
@@ -368,9 +370,6 @@ void amd_e400_remove_cpu(int cpu)
368 */ 370 */
369static void amd_e400_idle(void) 371static void amd_e400_idle(void)
370{ 372{
371 if (need_resched())
372 return;
373
374 if (!amd_e400_c1e_detected) { 373 if (!amd_e400_c1e_detected) {
375 u32 lo, hi; 374 u32 lo, hi;
376 375
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fdc5dca14fb3..eaac1743def7 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -359,7 +359,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
359} 359}
360 360
361/* 361/*
362 * would have hole in the middle or ends, and only ram parts will be mapped. 362 * We need to iterate through the E820 memory map and create direct mappings
363 * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
364 * create direct mappings for all pfns from [0 to max_low_pfn) and
365 * [4GB to max_pfn) because of possible memory holes in high addresses
366 * that cannot be marked as UC by fixed/variable range MTRRs.
367 * Depending on the alignment of E820 ranges, this may possibly result
368 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
369 *
370 * init_mem_mapping() calls init_range_memory_mapping() with big range.
371 * That range would have hole in the middle or ends, and only ram parts
372 * will be mapped in init_range_memory_mapping().
363 */ 373 */
364static unsigned long __init init_range_memory_mapping( 374static unsigned long __init init_range_memory_mapping(
365 unsigned long r_start, 375 unsigned long r_start,
@@ -419,6 +429,13 @@ void __init init_mem_mapping(void)
419 max_pfn_mapped = 0; /* will get exact value next */ 429 max_pfn_mapped = 0; /* will get exact value next */
420 min_pfn_mapped = real_end >> PAGE_SHIFT; 430 min_pfn_mapped = real_end >> PAGE_SHIFT;
421 last_start = start = real_end; 431 last_start = start = real_end;
432
433 /*
434 * We start from the top (end of memory) and go to the bottom.
435 * The memblock_find_in_range() gets us a block of RAM from the
436 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
437 * for page table.
438 */
422 while (last_start > ISA_END_ADDRESS) { 439 while (last_start > ISA_END_ADDRESS) {
423 if (last_start > step_size) { 440 if (last_start > step_size) {
424 start = round_down(last_start - 1, step_size); 441 start = round_down(last_start - 1, step_size);
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 00d2efd674df..4f4e741d34b2 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -28,6 +28,8 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/dmi.h>
32#include <linux/delay.h>
31#ifdef CONFIG_ACPI_PROCFS_POWER 33#ifdef CONFIG_ACPI_PROCFS_POWER
32#include <linux/proc_fs.h> 34#include <linux/proc_fs.h>
33#include <linux/seq_file.h> 35#include <linux/seq_file.h>
@@ -74,6 +76,8 @@ static int acpi_ac_resume(struct device *dev);
74#endif 76#endif
75static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); 77static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
76 78
79static int ac_sleep_before_get_state_ms;
80
77static struct acpi_driver acpi_ac_driver = { 81static struct acpi_driver acpi_ac_driver = {
78 .name = "ac", 82 .name = "ac",
79 .class = ACPI_AC_CLASS, 83 .class = ACPI_AC_CLASS,
@@ -252,6 +256,16 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
252 case ACPI_AC_NOTIFY_STATUS: 256 case ACPI_AC_NOTIFY_STATUS:
253 case ACPI_NOTIFY_BUS_CHECK: 257 case ACPI_NOTIFY_BUS_CHECK:
254 case ACPI_NOTIFY_DEVICE_CHECK: 258 case ACPI_NOTIFY_DEVICE_CHECK:
259 /*
260 * A buggy BIOS may notify AC first and then sleep for
261 * a specific time before doing actual operations in the
262 * EC event handler (_Qxx). This will cause the AC state
263 * reported by the ACPI event to be incorrect, so wait long
264 * enough for the EC event handler to make progress.
265 */
266 if (ac_sleep_before_get_state_ms > 0)
267 msleep(ac_sleep_before_get_state_ms);
268
255 acpi_ac_get_state(ac); 269 acpi_ac_get_state(ac);
256 acpi_bus_generate_proc_event(device, event, (u32) ac->state); 270 acpi_bus_generate_proc_event(device, event, (u32) ac->state);
257 acpi_bus_generate_netlink_event(device->pnp.device_class, 271 acpi_bus_generate_netlink_event(device->pnp.device_class,
@@ -264,6 +278,24 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
264 return; 278 return;
265} 279}
266 280
281static int thinkpad_e530_quirk(const struct dmi_system_id *d)
282{
283 ac_sleep_before_get_state_ms = 1000;
284 return 0;
285}
286
287static struct dmi_system_id ac_dmi_table[] = {
288 {
289 .callback = thinkpad_e530_quirk,
290 .ident = "thinkpad e530",
291 .matches = {
292 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
293 DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
294 },
295 },
296 {},
297};
298
267static int acpi_ac_add(struct acpi_device *device) 299static int acpi_ac_add(struct acpi_device *device)
268{ 300{
269 int result = 0; 301 int result = 0;
@@ -312,6 +344,7 @@ static int acpi_ac_add(struct acpi_device *device)
312 kfree(ac); 344 kfree(ac);
313 } 345 }
314 346
347 dmi_check_system(ac_dmi_table);
315 return result; 348 return result;
316} 349}
317 350
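The AC change is a standard DMI quirk: a table of vendor/product matches whose callback sets a module-level knob, evaluated once at probe via dmi_check_system(). A self-contained imitation of the mechanism; the matching strings and the knob are illustrative:

    #include <stdio.h>
    #include <string.h>

    static int sleep_before_get_state_ms;   /* the knob a quirk can set */

    struct dmi_quirk {
            const char *vendor, *product;
            int (*callback)(const struct dmi_quirk *q);
    };

    static int thinkpad_quirk(const struct dmi_quirk *q)
    {
            sleep_before_get_state_ms = 1000;
            return 0;
    }

    static const struct dmi_quirk quirks[] = {
            { "LENOVO", "32597CG", thinkpad_quirk },
            { NULL, NULL, NULL },
    };

    /* Poor man's dmi_check_system(): run callbacks of matching entries. */
    static void check_system(const char *vendor, const char *product)
    {
            const struct dmi_quirk *q;

            for (q = quirks; q->vendor; q++)
                    if (!strcmp(vendor, q->vendor) &&
                        !strcmp(product, q->product))
                            q->callback(q);
    }

    int main(void)
    {
            check_system("LENOVO", "32597CG");
            printf("sleep quirk: %d ms\n", sleep_before_get_state_ms);
            return 0;
    }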
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d45b2871d33b..edc00818c803 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -223,7 +223,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
223static int ec_poll(struct acpi_ec *ec) 223static int ec_poll(struct acpi_ec *ec)
224{ 224{
225 unsigned long flags; 225 unsigned long flags;
226 int repeat = 2; /* number of command restarts */ 226 int repeat = 5; /* number of command restarts */
227 while (repeat--) { 227 while (repeat--) {
228 unsigned long delay = jiffies + 228 unsigned long delay = jiffies +
229 msecs_to_jiffies(ec_delay); 229 msecs_to_jiffies(ec_delay);
@@ -241,8 +241,6 @@ static int ec_poll(struct acpi_ec *ec)
241 } 241 }
242 advance_transaction(ec, acpi_ec_read_status(ec)); 242 advance_transaction(ec, acpi_ec_read_status(ec));
243 } while (time_before(jiffies, delay)); 243 } while (time_before(jiffies, delay));
244 if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
245 break;
246 pr_debug(PREFIX "controller reset, restart transaction\n"); 244 pr_debug(PREFIX "controller reset, restart transaction\n");
247 spin_lock_irqsave(&ec->lock, flags); 245 spin_lock_irqsave(&ec->lock, flags);
248 start_transaction(ec); 246 start_transaction(ec);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index bec717ffd25f..c266cdc11784 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -95,9 +95,6 @@ static const struct acpi_device_id processor_device_ids[] = {
95}; 95};
96MODULE_DEVICE_TABLE(acpi, processor_device_ids); 96MODULE_DEVICE_TABLE(acpi, processor_device_ids);
97 97
98static SIMPLE_DEV_PM_OPS(acpi_processor_pm,
99 acpi_processor_suspend, acpi_processor_resume);
100
101static struct acpi_driver acpi_processor_driver = { 98static struct acpi_driver acpi_processor_driver = {
102 .name = "processor", 99 .name = "processor",
103 .class = ACPI_PROCESSOR_CLASS, 100 .class = ACPI_PROCESSOR_CLASS,
@@ -107,7 +104,6 @@ static struct acpi_driver acpi_processor_driver = {
107 .remove = acpi_processor_remove, 104 .remove = acpi_processor_remove,
108 .notify = acpi_processor_notify, 105 .notify = acpi_processor_notify,
109 }, 106 },
110 .drv.pm = &acpi_processor_pm,
111}; 107};
112 108
113#define INSTALL_NOTIFY_HANDLER 1 109#define INSTALL_NOTIFY_HANDLER 1
@@ -934,6 +930,8 @@ static int __init acpi_processor_init(void)
934 if (result < 0) 930 if (result < 0)
935 return result; 931 return result;
936 932
933 acpi_processor_syscore_init();
934
937 acpi_processor_install_hotplug_notify(); 935 acpi_processor_install_hotplug_notify();
938 936
939 acpi_thermal_cpufreq_init(); 937 acpi_thermal_cpufreq_init();
@@ -956,6 +954,8 @@ static void __exit acpi_processor_exit(void)
956 954
957 acpi_processor_uninstall_hotplug_notify(); 955 acpi_processor_uninstall_hotplug_notify();
958 956
957 acpi_processor_syscore_exit();
958
959 acpi_bus_unregister_driver(&acpi_processor_driver); 959 acpi_bus_unregister_driver(&acpi_processor_driver);
960 960
961 return; 961 return;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f0df2c9434d2..eb133c77aadb 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -34,6 +34,7 @@
34#include <linux/sched.h> /* need_resched() */ 34#include <linux/sched.h> /* need_resched() */
35#include <linux/clockchips.h> 35#include <linux/clockchips.h>
36#include <linux/cpuidle.h> 36#include <linux/cpuidle.h>
37#include <linux/syscore_ops.h>
37 38
38/* 39/*
39 * Include the apic definitions for x86 to have the APIC timer related defines 40 * Include the apic definitions for x86 to have the APIC timer related defines
@@ -210,33 +211,41 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
210 211
211#endif 212#endif
212 213
214#ifdef CONFIG_PM_SLEEP
213static u32 saved_bm_rld; 215static u32 saved_bm_rld;
214 216
215static void acpi_idle_bm_rld_save(void) 217int acpi_processor_suspend(void)
216{ 218{
217 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); 219 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
220 return 0;
218} 221}
219static void acpi_idle_bm_rld_restore(void) 222
223void acpi_processor_resume(void)
220{ 224{
221 u32 resumed_bm_rld; 225 u32 resumed_bm_rld;
222 226
223 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld); 227 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
228 if (resumed_bm_rld == saved_bm_rld)
229 return;
224 230
225 if (resumed_bm_rld != saved_bm_rld) 231 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
226 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
227} 232}
228 233
229int acpi_processor_suspend(struct device *dev) 234static struct syscore_ops acpi_processor_syscore_ops = {
235 .suspend = acpi_processor_suspend,
236 .resume = acpi_processor_resume,
237};
238
239void acpi_processor_syscore_init(void)
230{ 240{
231 acpi_idle_bm_rld_save(); 241 register_syscore_ops(&acpi_processor_syscore_ops);
232 return 0;
233} 242}
234 243
235int acpi_processor_resume(struct device *dev) 244void acpi_processor_syscore_exit(void)
236{ 245{
237 acpi_idle_bm_rld_restore(); 246 unregister_syscore_ops(&acpi_processor_syscore_ops);
238 return 0;
239} 247}
248#endif /* CONFIG_PM_SLEEP */
240 249
241#if defined(CONFIG_X86) 250#if defined(CONFIG_X86)
242static void tsc_check_state(int state) 251static void tsc_check_state(int state)
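Moving the BM_RLD save/restore from dev_pm_ops to syscore_ops means it now runs late in suspend and early in resume, with one CPU online and interrupts disabled. A sketch of the registration shape, with a one-slot registry standing in for the kernel's syscore list:

    #include <stdio.h>

    /* Minimal stand-in for struct syscore_ops and its registry. */
    struct syscore_ops {
            int  (*suspend)(void);
            void (*resume)(void);
    };

    static struct syscore_ops *registered;

    static void register_syscore_ops_demo(struct syscore_ops *ops)
    {
            registered = ops;
    }

    static unsigned int saved_bm_rld;

    static int demo_suspend(void)
    {
            saved_bm_rld = 1;       /* would be acpi_read_bit_register() */
            printf("saved BM_RLD\n");
            return 0;
    }

    static void demo_resume(void)
    {
            printf("restored BM_RLD=%u\n", saved_bm_rld);
    }

    static struct syscore_ops demo_ops = {
            demo_suspend,
            demo_resume,
    };

    int main(void)
    {
            register_syscore_ops_demo(&demo_ops);
            registered->suspend();  /* the core calls these late/early */
            registered->resume();
            return 0;
    }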
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fe158fd4f1df..c1bc608339a6 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1785,7 +1785,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
1785 acpi_set_pnp_ids(handle, &pnp, type); 1785 acpi_set_pnp_ids(handle, &pnp, type);
1786 1786
1787 if (!pnp.type.hardware_id) 1787 if (!pnp.type.hardware_id)
1788 return; 1788 goto out;
1789 1789
1790 /* 1790 /*
1791 * This relies on the fact that acpi_install_notify_handler() will not 1791 * This relies on the fact that acpi_install_notify_handler() will not
@@ -1800,6 +1800,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
1800 } 1800 }
1801 } 1801 }
1802 1802
1803out:
1803 acpi_free_pnp_ids(&pnp); 1804 acpi_free_pnp_ids(&pnp);
1804} 1805}
1805 1806
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index c3932d0876e0..5b32e15a65ce 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -456,6 +456,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
456 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"), 456 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
457 }, 457 },
458 }, 458 },
459 {
460 .callback = video_ignore_initial_backlight,
461 .ident = "HP 1000 Notebook PC",
462 .matches = {
463 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
464 DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
465 },
466 },
459 {} 467 {}
460}; 468};
461 469
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 39c32529b833..5da914041305 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -61,24 +61,24 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
61int dev_pm_put_subsys_data(struct device *dev) 61int dev_pm_put_subsys_data(struct device *dev)
62{ 62{
63 struct pm_subsys_data *psd; 63 struct pm_subsys_data *psd;
64 int ret = 0; 64 int ret = 1;
65 65
66 spin_lock_irq(&dev->power.lock); 66 spin_lock_irq(&dev->power.lock);
67 67
68 psd = dev_to_psd(dev); 68 psd = dev_to_psd(dev);
69 if (!psd) { 69 if (!psd)
70 ret = -EINVAL;
71 goto out; 70 goto out;
72 }
73 71
74 if (--psd->refcount == 0) { 72 if (--psd->refcount == 0) {
75 dev->power.subsys_data = NULL; 73 dev->power.subsys_data = NULL;
76 kfree(psd); 74 } else {
77 ret = 1; 75 psd = NULL;
76 ret = 0;
78 } 77 }
79 78
80 out: 79 out:
81 spin_unlock_irq(&dev->power.lock); 80 spin_unlock_irq(&dev->power.lock);
81 kfree(psd);
82 82
83 return ret; 83 return ret;
84} 84}
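The dev_pm_put_subsys_data() rewrite also shows a small locking idiom: decide under the spinlock whether the object dies, but defer the actual kfree() until after the unlock; kfree(NULL) is a no-op, so the keep-it branch simply clears the local pointer. The same shape with pthreads and malloc/free:

    #include <pthread.h>
    #include <stdlib.h>

    struct subsys_data { int refcount; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Returns 1 when the caller no longer holds a reference, 0 otherwise. */
    static int put_subsys_data(struct subsys_data **slot)
    {
            struct subsys_data *psd;
            int ret = 1;

            pthread_mutex_lock(&lock);
            psd = *slot;
            if (!psd)
                    goto out;
            if (--psd->refcount == 0) {
                    *slot = NULL;   /* last reference: object dies */
            } else {
                    psd = NULL;     /* keep it; free(NULL) is a no-op */
                    ret = 0;
            }
    out:
            pthread_mutex_unlock(&lock);
            free(psd);              /* deferred until after the unlock */
            return ret;
    }

    int main(void)
    {
            struct subsys_data *slot = malloc(sizeof(*slot));

            if (!slot)
                    return 1;
            slot->refcount = 1;
            return !put_subsys_data(&slot);
    }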
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index ca63104136e0..d6d314027b5d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -55,6 +55,39 @@
55#define SECTOR_SHIFT 9 55#define SECTOR_SHIFT 9
56#define SECTOR_SIZE (1ULL << SECTOR_SHIFT) 56#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
57 57
58/*
59 * Increment the given counter and return its updated value.
60 * If the counter is already 0 it will not be incremented.
61 * If the counter is already at its maximum value, -EINVAL is
62 * returned without updating it.
63 */
64static int atomic_inc_return_safe(atomic_t *v)
65{
66 unsigned int counter;
67
68 counter = (unsigned int)__atomic_add_unless(v, 1, 0);
69 if (counter <= (unsigned int)INT_MAX)
70 return (int)counter;
71
72 atomic_dec(v);
73
74 return -EINVAL;
75}
76
77/* Decrement the counter. Return the resulting value, or -EINVAL */
78static int atomic_dec_return_safe(atomic_t *v)
79{
80 int counter;
81
82 counter = atomic_dec_return(v);
83 if (counter >= 0)
84 return counter;
85
86 atomic_inc(v);
87
88 return -EINVAL;
89}
90
58#define RBD_DRV_NAME "rbd" 91#define RBD_DRV_NAME "rbd"
59#define RBD_DRV_NAME_LONG "rbd (rados block device)" 92#define RBD_DRV_NAME_LONG "rbd (rados block device)"
60 93
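The two rbd helpers above implement saturating reference counts: a counter pinned at 0 stays at 0, and overflow or underflow reports -EINVAL instead of wrapping. A hedged sketch of the increment side with C11 atomics; note the kernel version returns the pre-increment value via __atomic_add_unless(), while this one returns the new value:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Saturating increment: stays at 0 if already 0, and refuses to move
     * past INT_MAX (returns -22, i.e. -EINVAL, instead of wrapping). */
    static int inc_return_safe(atomic_uint *v)
    {
            unsigned int old = atomic_load(v);

            do {
                    if (old == 0)
                            return 0;
                    if (old >= (unsigned int)INT_MAX)
                            return -22;
            } while (!atomic_compare_exchange_weak(v, &old, old + 1));

            return (int)(old + 1);
    }

    int main(void)
    {
            atomic_uint refs = 1;

            printf("%d\n", inc_return_safe(&refs)); /* prints 2 */
            return 0;
    }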
@@ -100,21 +133,20 @@
100 * block device image metadata (in-memory version) 133 * block device image metadata (in-memory version)
101 */ 134 */
102struct rbd_image_header { 135struct rbd_image_header {
103 /* These four fields never change for a given rbd image */ 136 /* These six fields never change for a given rbd image */
104 char *object_prefix; 137 char *object_prefix;
105 u64 features;
106 __u8 obj_order; 138 __u8 obj_order;
107 __u8 crypt_type; 139 __u8 crypt_type;
108 __u8 comp_type; 140 __u8 comp_type;
141 u64 stripe_unit;
142 u64 stripe_count;
143 u64 features; /* Might be changeable someday? */
109 144
110 /* The remaining fields need to be updated occasionally */ 145 /* The remaining fields need to be updated occasionally */
111 u64 image_size; 146 u64 image_size;
112 struct ceph_snap_context *snapc; 147 struct ceph_snap_context *snapc;
113 char *snap_names; 148 char *snap_names; /* format 1 only */
114 u64 *snap_sizes; 149 u64 *snap_sizes; /* format 1 only */
115
116 u64 stripe_unit;
117 u64 stripe_count;
118}; 150};
119 151
120/* 152/*
@@ -225,6 +257,7 @@ struct rbd_obj_request {
225 }; 257 };
226 }; 258 };
227 struct page **copyup_pages; 259 struct page **copyup_pages;
260 u32 copyup_page_count;
228 261
229 struct ceph_osd_request *osd_req; 262 struct ceph_osd_request *osd_req;
230 263
@@ -257,6 +290,7 @@ struct rbd_img_request {
257 struct rbd_obj_request *obj_request; /* obj req initiator */ 290 struct rbd_obj_request *obj_request; /* obj req initiator */
258 }; 291 };
259 struct page **copyup_pages; 292 struct page **copyup_pages;
293 u32 copyup_page_count;
260 spinlock_t completion_lock;/* protects next_completion */ 294 spinlock_t completion_lock;/* protects next_completion */
261 u32 next_completion; 295 u32 next_completion;
262 rbd_img_callback_t callback; 296 rbd_img_callback_t callback;
@@ -311,6 +345,7 @@ struct rbd_device {
311 345
312 struct rbd_spec *parent_spec; 346 struct rbd_spec *parent_spec;
313 u64 parent_overlap; 347 u64 parent_overlap;
348 atomic_t parent_ref;
314 struct rbd_device *parent; 349 struct rbd_device *parent;
315 350
316 /* protects updating the header */ 351 /* protects updating the header */
@@ -359,7 +394,8 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf,
359 size_t count); 394 size_t count);
360static ssize_t rbd_remove(struct bus_type *bus, const char *buf, 395static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
361 size_t count); 396 size_t count);
362static int rbd_dev_image_probe(struct rbd_device *rbd_dev); 397static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
398static void rbd_spec_put(struct rbd_spec *spec);
363 399
364static struct bus_attribute rbd_bus_attrs[] = { 400static struct bus_attribute rbd_bus_attrs[] = {
365 __ATTR(add, S_IWUSR, NULL, rbd_add), 401 __ATTR(add, S_IWUSR, NULL, rbd_add),
@@ -426,7 +462,8 @@ static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
426static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); 462static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
427 463
428static int rbd_dev_refresh(struct rbd_device *rbd_dev); 464static int rbd_dev_refresh(struct rbd_device *rbd_dev);
429static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev); 465static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
466static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
430static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, 467static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
431 u64 snap_id); 468 u64 snap_id);
432static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, 469static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@@ -726,88 +763,123 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
726} 763}
727 764
728/* 765/*
729 * Create a new header structure, translate header format from the on-disk 766 * Fill an rbd image header with information from the given format 1
730 * header. 767 * on-disk header.
731 */ 768 */
732static int rbd_header_from_disk(struct rbd_image_header *header, 769static int rbd_header_from_disk(struct rbd_device *rbd_dev,
733 struct rbd_image_header_ondisk *ondisk) 770 struct rbd_image_header_ondisk *ondisk)
734{ 771{
772 struct rbd_image_header *header = &rbd_dev->header;
773 bool first_time = header->object_prefix == NULL;
774 struct ceph_snap_context *snapc;
775 char *object_prefix = NULL;
776 char *snap_names = NULL;
777 u64 *snap_sizes = NULL;
735 u32 snap_count; 778 u32 snap_count;
736 size_t len;
737 size_t size; 779 size_t size;
780 int ret = -ENOMEM;
738 u32 i; 781 u32 i;
739 782
740 memset(header, 0, sizeof (*header)); 783 /* Allocate this now to avoid having to handle failure below */
741 784
742 snap_count = le32_to_cpu(ondisk->snap_count); 785 if (first_time) {
786 size_t len;
743 787
744 len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix)); 788 len = strnlen(ondisk->object_prefix,
745 header->object_prefix = kmalloc(len + 1, GFP_KERNEL); 789 sizeof (ondisk->object_prefix));
746 if (!header->object_prefix) 790 object_prefix = kmalloc(len + 1, GFP_KERNEL);
747 return -ENOMEM; 791 if (!object_prefix)
748 memcpy(header->object_prefix, ondisk->object_prefix, len); 792 return -ENOMEM;
749 header->object_prefix[len] = '\0'; 793 memcpy(object_prefix, ondisk->object_prefix, len);
794 object_prefix[len] = '\0';
795 }
750 796
797 /* Allocate the snapshot context and fill it in */
798
799 snap_count = le32_to_cpu(ondisk->snap_count);
800 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
801 if (!snapc)
802 goto out_err;
803 snapc->seq = le64_to_cpu(ondisk->snap_seq);
751 if (snap_count) { 804 if (snap_count) {
805 struct rbd_image_snap_ondisk *snaps;
752 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len); 806 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
753 807
754 /* Save a copy of the snapshot names */ 808 /* We'll keep a copy of the snapshot names... */
755 809
756 if (snap_names_len > (u64) SIZE_MAX) 810 if (snap_names_len > (u64)SIZE_MAX)
757 return -EIO; 811 goto out_2big;
758 header->snap_names = kmalloc(snap_names_len, GFP_KERNEL); 812 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
759 if (!header->snap_names) 813 if (!snap_names)
760 goto out_err; 814 goto out_err;
815
816 /* ...as well as the array of their sizes. */
817
818 size = snap_count * sizeof (*header->snap_sizes);
819 snap_sizes = kmalloc(size, GFP_KERNEL);
820 if (!snap_sizes)
821 goto out_err;
822
761 /* 823 /*
762 * Note that rbd_dev_v1_header_read() guarantees 824 * Copy the names, and fill in each snapshot's id
763 * the ondisk buffer we're working with has 825 * and size.
826 *
827 * Note that rbd_dev_v1_header_info() guarantees the
828 * ondisk buffer we're working with has
764 * snap_names_len bytes beyond the end of the 829 * snap_names_len bytes beyond the end of the
765 * snapshot id array, this memcpy() is safe. 830 * snapshot id array, this memcpy() is safe.
766 */ 831 */
767 memcpy(header->snap_names, &ondisk->snaps[snap_count], 832 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
768 snap_names_len); 833 snaps = ondisk->snaps;
834 for (i = 0; i < snap_count; i++) {
835 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
836 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
837 }
838 }
769 839
770 /* Record each snapshot's size */ 840 /* We won't fail any more; fill in the header */
771 841
772 size = snap_count * sizeof (*header->snap_sizes); 842 down_write(&rbd_dev->header_rwsem);
773 header->snap_sizes = kmalloc(size, GFP_KERNEL); 843 if (first_time) {
774 if (!header->snap_sizes) 844 header->object_prefix = object_prefix;
775 goto out_err; 845 header->obj_order = ondisk->options.order;
776 for (i = 0; i < snap_count; i++) 846 header->crypt_type = ondisk->options.crypt_type;
777 header->snap_sizes[i] = 847 header->comp_type = ondisk->options.comp_type;
778 le64_to_cpu(ondisk->snaps[i].image_size); 848 /* The rest aren't used for format 1 images */
849 header->stripe_unit = 0;
850 header->stripe_count = 0;
851 header->features = 0;
779 } else { 852 } else {
780 header->snap_names = NULL; 853 ceph_put_snap_context(header->snapc);
781 header->snap_sizes = NULL; 854 kfree(header->snap_names);
855 kfree(header->snap_sizes);
782 } 856 }
783 857
784 header->features = 0; /* No features support in v1 images */ 858 /* The remaining fields always get updated (when we refresh) */
785 header->obj_order = ondisk->options.order;
786 header->crypt_type = ondisk->options.crypt_type;
787 header->comp_type = ondisk->options.comp_type;
788
789 /* Allocate and fill in the snapshot context */
790 859
791 header->image_size = le64_to_cpu(ondisk->image_size); 860 header->image_size = le64_to_cpu(ondisk->image_size);
861 header->snapc = snapc;
862 header->snap_names = snap_names;
863 header->snap_sizes = snap_sizes;
792 864
793 header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); 865 /* Make sure mapping size is consistent with header info */
794 if (!header->snapc)
795 goto out_err;
796 header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
797 for (i = 0; i < snap_count; i++)
798 header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);
799 866
800 return 0; 867 if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
868 if (rbd_dev->mapping.size != header->image_size)
869 rbd_dev->mapping.size = header->image_size;
870
871 up_write(&rbd_dev->header_rwsem);
801 872
873 return 0;
874out_2big:
875 ret = -EIO;
802out_err: 876out_err:
803 kfree(header->snap_sizes); 877 kfree(snap_sizes);
804 header->snap_sizes = NULL; 878 kfree(snap_names);
805 kfree(header->snap_names); 879 ceph_put_snap_context(snapc);
806 header->snap_names = NULL; 880 kfree(object_prefix);
807 kfree(header->object_prefix);
808 header->object_prefix = NULL;
809 881
810 return -ENOMEM; 882 return ret;
811} 883}
812 884
813static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which) 885static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
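
The rewrite above follows an allocate-first, commit-later discipline: object_prefix, the snapshot context, snap_names and snap_sizes are all allocated before header_rwsem is taken, so an allocation failure can no longer leave rbd_dev->header half-updated. A minimal userspace sketch of the same pattern, with a pthread rwlock standing in for header_rwsem (all names here are illustrative, not driver symbols):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct header {
        pthread_rwlock_t lock;
        size_t snap_count;
        unsigned long long *snap_sizes;
    };

    /* Returns 0 on success; on failure the header is left untouched. */
    static int header_refresh(struct header *h,
                              const unsigned long long *sizes, size_t count)
    {
        unsigned long long *new_sizes;

        /* Do all fallible work before touching shared state... */
        new_sizes = malloc(count * sizeof(*new_sizes));
        if (!new_sizes)
            return -1;
        memcpy(new_sizes, sizes, count * sizeof(*new_sizes));

        /* ...then commit under the lock, where nothing can fail. */
        pthread_rwlock_wrlock(&h->lock);
        free(h->snap_sizes);
        h->snap_sizes = new_sizes;
        h->snap_count = count;
        pthread_rwlock_unlock(&h->lock);

        return 0;
    }

    int main(void)
    {
        struct header h = { .lock = PTHREAD_RWLOCK_INITIALIZER };
        unsigned long long sizes[2] = { 1024, 2048 };
        int ret = header_refresh(&h, sizes, 2);

        free(h.snap_sizes);
        return ret;
    }

The commit step only swaps pointers and frees the old buffers, which is why the driver can say it "won't fail any more" past that point.
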
@@ -934,20 +1006,11 @@ static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
934 1006
935static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) 1007static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
936{ 1008{
937 const char *snap_name = rbd_dev->spec->snap_name; 1009 u64 snap_id = rbd_dev->spec->snap_id;
938 u64 snap_id;
939 u64 size = 0; 1010 u64 size = 0;
940 u64 features = 0; 1011 u64 features = 0;
941 int ret; 1012 int ret;
942 1013
943 if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) {
944 snap_id = rbd_snap_id_by_name(rbd_dev, snap_name);
945 if (snap_id == CEPH_NOSNAP)
946 return -ENOENT;
947 } else {
948 snap_id = CEPH_NOSNAP;
949 }
950
951 ret = rbd_snap_size(rbd_dev, snap_id, &size); 1014 ret = rbd_snap_size(rbd_dev, snap_id, &size);
952 if (ret) 1015 if (ret)
953 return ret; 1016 return ret;
@@ -958,11 +1021,6 @@ static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
958 rbd_dev->mapping.size = size; 1021 rbd_dev->mapping.size = size;
959 rbd_dev->mapping.features = features; 1022 rbd_dev->mapping.features = features;
960 1023
961 /* If we are mapping a snapshot it must be marked read-only */
962
963 if (snap_id != CEPH_NOSNAP)
964 rbd_dev->mapping.read_only = true;
965
966 return 0; 1024 return 0;
967} 1025}
968 1026
@@ -970,14 +1028,6 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
970{ 1028{
971 rbd_dev->mapping.size = 0; 1029 rbd_dev->mapping.size = 0;
972 rbd_dev->mapping.features = 0; 1030 rbd_dev->mapping.features = 0;
973 rbd_dev->mapping.read_only = true;
974}
975
976static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
977{
978 rbd_dev->mapping.size = 0;
979 rbd_dev->mapping.features = 0;
980 rbd_dev->mapping.read_only = true;
981} 1031}
982 1032
983static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) 1033static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
@@ -1342,20 +1392,18 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1342 kref_put(&obj_request->kref, rbd_obj_request_destroy); 1392 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1343} 1393}
1344 1394
1345static void rbd_img_request_get(struct rbd_img_request *img_request) 1395static bool img_request_child_test(struct rbd_img_request *img_request);
1346{ 1396static void rbd_parent_request_destroy(struct kref *kref);
1347 dout("%s: img %p (was %d)\n", __func__, img_request,
1348 atomic_read(&img_request->kref.refcount));
1349 kref_get(&img_request->kref);
1350}
1351
1352static void rbd_img_request_destroy(struct kref *kref); 1397static void rbd_img_request_destroy(struct kref *kref);
1353static void rbd_img_request_put(struct rbd_img_request *img_request) 1398static void rbd_img_request_put(struct rbd_img_request *img_request)
1354{ 1399{
1355 rbd_assert(img_request != NULL); 1400 rbd_assert(img_request != NULL);
1356 dout("%s: img %p (was %d)\n", __func__, img_request, 1401 dout("%s: img %p (was %d)\n", __func__, img_request,
1357 atomic_read(&img_request->kref.refcount)); 1402 atomic_read(&img_request->kref.refcount));
1358 kref_put(&img_request->kref, rbd_img_request_destroy); 1403 if (img_request_child_test(img_request))
1404 kref_put(&img_request->kref, rbd_parent_request_destroy);
1405 else
1406 kref_put(&img_request->kref, rbd_img_request_destroy);
1359} 1407}
1360 1408
1361static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, 1409static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
@@ -1472,6 +1520,12 @@ static void img_request_child_set(struct rbd_img_request *img_request)
1472 smp_mb(); 1520 smp_mb();
1473} 1521}
1474 1522
1523static void img_request_child_clear(struct rbd_img_request *img_request)
1524{
1525 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1526 smp_mb();
1527}
1528
1475static bool img_request_child_test(struct rbd_img_request *img_request) 1529static bool img_request_child_test(struct rbd_img_request *img_request)
1476{ 1530{
1477 smp_mb(); 1531 smp_mb();
@@ -1484,6 +1538,12 @@ static void img_request_layered_set(struct rbd_img_request *img_request)
1484 smp_mb(); 1538 smp_mb();
1485} 1539}
1486 1540
1541static void img_request_layered_clear(struct rbd_img_request *img_request)
1542{
1543 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1544 smp_mb();
1545}
1546
1487static bool img_request_layered_test(struct rbd_img_request *img_request) 1547static bool img_request_layered_test(struct rbd_img_request *img_request)
1488{ 1548{
1489 smp_mb(); 1549 smp_mb();
@@ -1827,6 +1887,74 @@ static void rbd_obj_request_destroy(struct kref *kref)
1827 kmem_cache_free(rbd_obj_request_cache, obj_request); 1887 kmem_cache_free(rbd_obj_request_cache, obj_request);
1828} 1888}
1829 1889
1890/* It's OK to call this for a device with no parent */
1891
1892static void rbd_spec_put(struct rbd_spec *spec);
1893static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1894{
1895 rbd_dev_remove_parent(rbd_dev);
1896 rbd_spec_put(rbd_dev->parent_spec);
1897 rbd_dev->parent_spec = NULL;
1898 rbd_dev->parent_overlap = 0;
1899}
1900
1901/*
1902 * Parent image reference counting is used to determine when an
1903 * image's parent fields can be safely torn down: after there are no
1904 * more in-flight requests to the parent image. When the last
1905 * reference is dropped, cleaning them up is safe.
1906 */
1907static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1908{
1909 int counter;
1910
1911 if (!rbd_dev->parent_spec)
1912 return;
1913
1914 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1915 if (counter > 0)
1916 return;
1917
1918 /* Last reference; clean up parent data structures */
1919
1920 if (!counter)
1921 rbd_dev_unparent(rbd_dev);
1922 else
1923 rbd_warn(rbd_dev, "parent reference underflow\n");
1924}
1925
1926/*
1927 * If an image has a non-zero parent overlap, get a reference to its
1928 * parent.
1929 *
1930 * We must get the reference before checking for the overlap to
1931 * coordinate properly with zeroing the parent overlap in
1932 * rbd_dev_v2_parent_info() when an image gets flattened. We
1933 * drop it again if there is no overlap.
1934 *
1935 * Returns true if the rbd device has a parent with a non-zero
1936 * overlap and a reference for it was successfully taken, or
1937 * false otherwise.
1938 */
1939static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1940{
1941 int counter;
1942
1943 if (!rbd_dev->parent_spec)
1944 return false;
1945
1946 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1947 if (counter > 0 && rbd_dev->parent_overlap)
1948 return true;
1949
1950 /* Image was flattened, but parent is not yet torn down */
1951
1952 if (counter < 0)
1953 rbd_warn(rbd_dev, "parent reference overflow\n");
1954
1955 return false;
1956}
1957
1830/* 1958/*
1831 * Caller is responsible for filling in the list of object requests 1959 * Caller is responsible for filling in the list of object requests
1832 * that comprises the image request, and the Linux request pointer 1960 * that comprises the image request, and the Linux request pointer
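
rbd_dev_parent_get() and rbd_dev_parent_put() above depend on atomic_inc_return_safe() and atomic_dec_return_safe(), which this hunk does not show. From their use here: parent_ref starts at 0 (set in rbd_dev_create), is armed to 1 when a parent is probed, a get returning a positive count succeeded, a zero count means teardown already ran, and a negative return signals over- or underflow. A rough C11 model of that contract, offered as an assumption rather than the driver's definition:

    #include <stdatomic.h>
    #include <limits.h>
    #include <stdio.h>

    /* Add 1 and return the new count, unless the count is 0 (teardown
     * already ran) or the increment would overflow, reported as -1. */
    static int inc_return_safe(atomic_int *v)
    {
        int cur = atomic_load(v);

        while (cur != 0) {
            if (cur < 0 || cur == INT_MAX)
                return -1;      /* overflow: leave the counter alone */
            if (atomic_compare_exchange_weak(v, &cur, cur + 1))
                return cur + 1;
        }
        return 0;               /* was zero: not incremented */
    }

    /* Subtract 1 and return the new count, or -1 on underflow. */
    static int dec_return_safe(atomic_int *v)
    {
        int cur = atomic_load(v);

        while (cur > 0)
            if (atomic_compare_exchange_weak(v, &cur, cur - 1))
                return cur - 1;
        return -1;              /* underflow: the refcount is broken */
    }

    int main(void)
    {
        atomic_int ref = 1;     /* as armed by rbd_dev_probe_parent() */

        printf("get -> %d\n", inc_return_safe(&ref));   /* 2 */
        printf("put -> %d\n", dec_return_safe(&ref));   /* 1 */
        printf("put -> %d\n", dec_return_safe(&ref));   /* 0: tear down */
        return 0;
    }
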
@@ -1835,8 +1963,7 @@ static void rbd_obj_request_destroy(struct kref *kref)
1835static struct rbd_img_request *rbd_img_request_create( 1963static struct rbd_img_request *rbd_img_request_create(
1836 struct rbd_device *rbd_dev, 1964 struct rbd_device *rbd_dev,
1837 u64 offset, u64 length, 1965 u64 offset, u64 length,
1838 bool write_request, 1966 bool write_request)
1839 bool child_request)
1840{ 1967{
1841 struct rbd_img_request *img_request; 1968 struct rbd_img_request *img_request;
1842 1969
@@ -1861,9 +1988,7 @@ static struct rbd_img_request *rbd_img_request_create(
1861 } else { 1988 } else {
1862 img_request->snap_id = rbd_dev->spec->snap_id; 1989 img_request->snap_id = rbd_dev->spec->snap_id;
1863 } 1990 }
1864 if (child_request) 1991 if (rbd_dev_parent_get(rbd_dev))
1865 img_request_child_set(img_request);
1866 if (rbd_dev->parent_spec)
1867 img_request_layered_set(img_request); 1992 img_request_layered_set(img_request);
1868 spin_lock_init(&img_request->completion_lock); 1993 spin_lock_init(&img_request->completion_lock);
1869 img_request->next_completion = 0; 1994 img_request->next_completion = 0;
@@ -1873,9 +1998,6 @@ static struct rbd_img_request *rbd_img_request_create(
1873 INIT_LIST_HEAD(&img_request->obj_requests); 1998 INIT_LIST_HEAD(&img_request->obj_requests);
1874 kref_init(&img_request->kref); 1999 kref_init(&img_request->kref);
1875 2000
1876 rbd_img_request_get(img_request); /* Avoid a warning */
1877 rbd_img_request_put(img_request); /* TEMPORARY */
1878
1879 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev, 2001 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
1880 write_request ? "write" : "read", offset, length, 2002 write_request ? "write" : "read", offset, length,
1881 img_request); 2003 img_request);
@@ -1897,15 +2019,54 @@ static void rbd_img_request_destroy(struct kref *kref)
1897 rbd_img_obj_request_del(img_request, obj_request); 2019 rbd_img_obj_request_del(img_request, obj_request);
1898 rbd_assert(img_request->obj_request_count == 0); 2020 rbd_assert(img_request->obj_request_count == 0);
1899 2021
2022 if (img_request_layered_test(img_request)) {
2023 img_request_layered_clear(img_request);
2024 rbd_dev_parent_put(img_request->rbd_dev);
2025 }
2026
1900 if (img_request_write_test(img_request)) 2027 if (img_request_write_test(img_request))
1901 ceph_put_snap_context(img_request->snapc); 2028 ceph_put_snap_context(img_request->snapc);
1902 2029
1903 if (img_request_child_test(img_request))
1904 rbd_obj_request_put(img_request->obj_request);
1905
1906 kmem_cache_free(rbd_img_request_cache, img_request); 2030 kmem_cache_free(rbd_img_request_cache, img_request);
1907} 2031}
1908 2032
2033static struct rbd_img_request *rbd_parent_request_create(
2034 struct rbd_obj_request *obj_request,
2035 u64 img_offset, u64 length)
2036{
2037 struct rbd_img_request *parent_request;
2038 struct rbd_device *rbd_dev;
2039
2040 rbd_assert(obj_request->img_request);
2041 rbd_dev = obj_request->img_request->rbd_dev;
2042
2043 parent_request = rbd_img_request_create(rbd_dev->parent,
2044 img_offset, length, false);
2045 if (!parent_request)
2046 return NULL;
2047
2048 img_request_child_set(parent_request);
2049 rbd_obj_request_get(obj_request);
2050 parent_request->obj_request = obj_request;
2051
2052 return parent_request;
2053}
2054
2055static void rbd_parent_request_destroy(struct kref *kref)
2056{
2057 struct rbd_img_request *parent_request;
2058 struct rbd_obj_request *orig_request;
2059
2060 parent_request = container_of(kref, struct rbd_img_request, kref);
2061 orig_request = parent_request->obj_request;
2062
2063 parent_request->obj_request = NULL;
2064 rbd_obj_request_put(orig_request);
2065 img_request_child_clear(parent_request);
2066
2067 rbd_img_request_destroy(kref);
2068}
2069
1909static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) 2070static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
1910{ 2071{
1911 struct rbd_img_request *img_request; 2072 struct rbd_img_request *img_request;
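
An image request is now freed through one of two release functions: rbd_img_request_put() sends child requests to rbd_parent_request_destroy(), which first drops the reference the child holds on its originating object request and clears the child flag, then chains to the common rbd_img_request_destroy(). A compact userspace model of that dual-release pattern (a hand-rolled counter stands in for kref; names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct request {
        atomic_int refs;
        bool is_child;
        struct request *orig;   /* set only on child (parent I/O) requests */
    };

    static void req_put(struct request *req);

    static void req_destroy(struct request *req)
    {
        printf("destroy %p\n", (void *)req);
        free(req);
    }

    /* Child flavor: drop the creation-time ref on the originating
     * request, then chain to the common destructor. */
    static void child_req_destroy(struct request *req)
    {
        struct request *orig = req->orig;

        req->orig = NULL;
        req_put(orig);
        req_destroy(req);
    }

    static void req_put(struct request *req)
    {
        if (atomic_fetch_sub(&req->refs, 1) != 1)
            return;
        /* Choose the release function by flavor, as
         * rbd_img_request_put() does with IMG_REQ_CHILD. */
        if (req->is_child)
            child_req_destroy(req);
        else
            req_destroy(req);
    }

    int main(void)
    {
        struct request *orig = calloc(1, sizeof(*orig));
        struct request *child = calloc(1, sizeof(*child));

        if (!orig || !child)
            return 1;
        atomic_init(&orig->refs, 2);    /* caller + the child's reference */
        atomic_init(&child->refs, 1);
        child->is_child = true;
        child->orig = orig;

        req_put(child); /* frees child and drops its ref on orig */
        req_put(orig);  /* last ref: frees orig */
        return 0;
    }
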
@@ -2114,7 +2275,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2114{ 2275{
2115 struct rbd_img_request *img_request; 2276 struct rbd_img_request *img_request;
2116 struct rbd_device *rbd_dev; 2277 struct rbd_device *rbd_dev;
2117 u64 length; 2278 struct page **pages;
2118 u32 page_count; 2279 u32 page_count;
2119 2280
2120 rbd_assert(obj_request->type == OBJ_REQUEST_BIO); 2281 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
@@ -2124,12 +2285,14 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2124 2285
2125 rbd_dev = img_request->rbd_dev; 2286 rbd_dev = img_request->rbd_dev;
2126 rbd_assert(rbd_dev); 2287 rbd_assert(rbd_dev);
2127 length = (u64)1 << rbd_dev->header.obj_order;
2128 page_count = (u32)calc_pages_for(0, length);
2129 2288
2130 rbd_assert(obj_request->copyup_pages); 2289 pages = obj_request->copyup_pages;
2131 ceph_release_page_vector(obj_request->copyup_pages, page_count); 2290 rbd_assert(pages != NULL);
2132 obj_request->copyup_pages = NULL; 2291 obj_request->copyup_pages = NULL;
2292 page_count = obj_request->copyup_page_count;
2293 rbd_assert(page_count);
2294 obj_request->copyup_page_count = 0;
2295 ceph_release_page_vector(pages, page_count);
2133 2296
2134 /* 2297 /*
2135 * We want the transfer count to reflect the size of the 2298 * We want the transfer count to reflect the size of the
@@ -2153,9 +2316,11 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2153 struct ceph_osd_client *osdc; 2316 struct ceph_osd_client *osdc;
2154 struct rbd_device *rbd_dev; 2317 struct rbd_device *rbd_dev;
2155 struct page **pages; 2318 struct page **pages;
2156 int result; 2319 u32 page_count;
2157 u64 obj_size; 2320 int img_result;
2158 u64 xferred; 2321 u64 parent_length;
2322 u64 offset;
2323 u64 length;
2159 2324
2160 rbd_assert(img_request_child_test(img_request)); 2325 rbd_assert(img_request_child_test(img_request));
2161 2326
@@ -2164,46 +2329,74 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2164 pages = img_request->copyup_pages; 2329 pages = img_request->copyup_pages;
2165 rbd_assert(pages != NULL); 2330 rbd_assert(pages != NULL);
2166 img_request->copyup_pages = NULL; 2331 img_request->copyup_pages = NULL;
2332 page_count = img_request->copyup_page_count;
2333 rbd_assert(page_count);
2334 img_request->copyup_page_count = 0;
2167 2335
2168 orig_request = img_request->obj_request; 2336 orig_request = img_request->obj_request;
2169 rbd_assert(orig_request != NULL); 2337 rbd_assert(orig_request != NULL);
2170 rbd_assert(orig_request->type == OBJ_REQUEST_BIO); 2338 rbd_assert(obj_request_type_valid(orig_request->type));
2171 result = img_request->result; 2339 img_result = img_request->result;
2172 obj_size = img_request->length; 2340 parent_length = img_request->length;
2173 xferred = img_request->xferred; 2341 rbd_assert(parent_length == img_request->xferred);
2342 rbd_img_request_put(img_request);
2174 2343
2175 rbd_dev = img_request->rbd_dev; 2344 rbd_assert(orig_request->img_request);
2345 rbd_dev = orig_request->img_request->rbd_dev;
2176 rbd_assert(rbd_dev); 2346 rbd_assert(rbd_dev);
2177 rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
2178 2347
2179 rbd_img_request_put(img_request); 2348 /*
2349 * If the overlap has become 0 (most likely because the
2350 * image has been flattened) we need to free the pages
2351 * and re-submit the original write request.
2352 */
2353 if (!rbd_dev->parent_overlap) {
2354 struct ceph_osd_client *osdc;
2180 2355
2181 if (result) 2356 ceph_release_page_vector(pages, page_count);
2182 goto out_err; 2357 osdc = &rbd_dev->rbd_client->client->osdc;
2358 img_result = rbd_obj_request_submit(osdc, orig_request);
2359 if (!img_result)
2360 return;
2361 }
2183 2362
2184 /* Allocate the new copyup osd request for the original request */ 2363 if (img_result)
2364 goto out_err;
2185 2365
2186 result = -ENOMEM; 2366 /*
2187 rbd_assert(!orig_request->osd_req); 2367 * The original osd request is of no use to us any more.
2368 * We need a new one that can hold the two ops in a copyup
2369 * request. Allocate the new copyup osd request for the
2370 * original request, and release the old one.
2371 */
2372 img_result = -ENOMEM;
2188 osd_req = rbd_osd_req_create_copyup(orig_request); 2373 osd_req = rbd_osd_req_create_copyup(orig_request);
2189 if (!osd_req) 2374 if (!osd_req)
2190 goto out_err; 2375 goto out_err;
2376 rbd_osd_req_destroy(orig_request->osd_req);
2191 orig_request->osd_req = osd_req; 2377 orig_request->osd_req = osd_req;
2192 orig_request->copyup_pages = pages; 2378 orig_request->copyup_pages = pages;
2379 orig_request->copyup_page_count = page_count;
2193 2380
2194 /* Initialize the copyup op */ 2381 /* Initialize the copyup op */
2195 2382
2196 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup"); 2383 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2197 osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0, 2384 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2198 false, false); 2385 false, false);
2199 2386
2200 /* Then the original write request op */ 2387 /* Then the original write request op */
2201 2388
2389 offset = orig_request->offset;
2390 length = orig_request->length;
2202 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE, 2391 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2203 orig_request->offset, 2392 offset, length, 0, 0);
2204 orig_request->length, 0, 0); 2393 if (orig_request->type == OBJ_REQUEST_BIO)
2205 osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list, 2394 osd_req_op_extent_osd_data_bio(osd_req, 1,
2206 orig_request->length); 2395 orig_request->bio_list, length);
2396 else
2397 osd_req_op_extent_osd_data_pages(osd_req, 1,
2398 orig_request->pages, length,
2399 offset & ~PAGE_MASK, false, false);
2207 2400
2208 rbd_osd_req_format_write(orig_request); 2401 rbd_osd_req_format_write(orig_request);
2209 2402
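
The copyup request built above carries two ops: op 0 hands the object's full parent data to the OSD's "rbd" class copyup method, and op 1 is the guest's original write, so the child object is materialized from the parent and the write applied in one OSD request. A toy model of the net effect, using flat buffers in place of OSD ops (illustrative only; the real merge runs inside the OSD):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    #define OBJ_SIZE 4096

    struct object {
        bool exists;
        unsigned char data[OBJ_SIZE];
    };

    /*
     * If the child object doesn't exist yet, seed it with the parent's
     * data for the whole object (zeros past what the parent covers),
     * then apply the original write on top: the two-op request has the
     * same net effect.
     */
    static void copyup_write(struct object *child,
                             const unsigned char *parent, size_t parent_len,
                             size_t off, const unsigned char *buf, size_t len)
    {
        if (!child->exists) {
            memset(child->data, 0, OBJ_SIZE);
            memcpy(child->data, parent, parent_len);
            child->exists = true;
        }
        memcpy(child->data + off, buf, len);    /* the original write op */
    }

    int main(void)
    {
        static struct object child;
        static unsigned char parent[OBJ_SIZE];

        memset(parent, 0xaa, sizeof(parent));
        copyup_write(&child, parent, sizeof(parent), 100,
                     (const unsigned char *)"new", 3);
        return (child.data[0] == 0xaa && child.data[100] == 'n') ? 0 : 1;
    }
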
@@ -2211,13 +2404,13 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2211 2404
2212 orig_request->callback = rbd_img_obj_copyup_callback; 2405 orig_request->callback = rbd_img_obj_copyup_callback;
2213 osdc = &rbd_dev->rbd_client->client->osdc; 2406 osdc = &rbd_dev->rbd_client->client->osdc;
2214 result = rbd_obj_request_submit(osdc, orig_request); 2407 img_result = rbd_obj_request_submit(osdc, orig_request);
2215 if (!result) 2408 if (!img_result)
2216 return; 2409 return;
2217out_err: 2410out_err:
2218 /* Record the error code and complete the request */ 2411 /* Record the error code and complete the request */
2219 2412
2220 orig_request->result = result; 2413 orig_request->result = img_result;
2221 orig_request->xferred = 0; 2414 orig_request->xferred = 0;
2222 obj_request_done_set(orig_request); 2415 obj_request_done_set(orig_request);
2223 rbd_obj_request_complete(orig_request); 2416 rbd_obj_request_complete(orig_request);
@@ -2249,7 +2442,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2249 int result; 2442 int result;
2250 2443
2251 rbd_assert(obj_request_img_data_test(obj_request)); 2444 rbd_assert(obj_request_img_data_test(obj_request));
2252 rbd_assert(obj_request->type == OBJ_REQUEST_BIO); 2445 rbd_assert(obj_request_type_valid(obj_request->type));
2253 2446
2254 img_request = obj_request->img_request; 2447 img_request = obj_request->img_request;
2255 rbd_assert(img_request != NULL); 2448 rbd_assert(img_request != NULL);
@@ -2257,15 +2450,6 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2257 rbd_assert(rbd_dev->parent != NULL); 2450 rbd_assert(rbd_dev->parent != NULL);
2258 2451
2259 /* 2452 /*
2260 * First things first. The original osd request is of no
2261 * use to us any more, we'll need a new one that can hold
2262 * the two ops in a copyup request. We'll get that later,
2263 * but for now we can release the old one.
2264 */
2265 rbd_osd_req_destroy(obj_request->osd_req);
2266 obj_request->osd_req = NULL;
2267
2268 /*
2269 * Determine the byte range covered by the object in the 2453 * Determine the byte range covered by the object in the
2270 * child image to which the original request was to be sent. 2454 * child image to which the original request was to be sent.
2271 */ 2455 */
@@ -2295,18 +2479,16 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2295 } 2479 }
2296 2480
2297 result = -ENOMEM; 2481 result = -ENOMEM;
2298 parent_request = rbd_img_request_create(rbd_dev->parent, 2482 parent_request = rbd_parent_request_create(obj_request,
2299 img_offset, length, 2483 img_offset, length);
2300 false, true);
2301 if (!parent_request) 2484 if (!parent_request)
2302 goto out_err; 2485 goto out_err;
2303 rbd_obj_request_get(obj_request);
2304 parent_request->obj_request = obj_request;
2305 2486
2306 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages); 2487 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2307 if (result) 2488 if (result)
2308 goto out_err; 2489 goto out_err;
2309 parent_request->copyup_pages = pages; 2490 parent_request->copyup_pages = pages;
2491 parent_request->copyup_page_count = page_count;
2310 2492
2311 parent_request->callback = rbd_img_obj_parent_read_full_callback; 2493 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2312 result = rbd_img_request_submit(parent_request); 2494 result = rbd_img_request_submit(parent_request);
@@ -2314,6 +2496,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2314 return 0; 2496 return 0;
2315 2497
2316 parent_request->copyup_pages = NULL; 2498 parent_request->copyup_pages = NULL;
2499 parent_request->copyup_page_count = 0;
2317 parent_request->obj_request = NULL; 2500 parent_request->obj_request = NULL;
2318 rbd_obj_request_put(obj_request); 2501 rbd_obj_request_put(obj_request);
2319out_err: 2502out_err:
@@ -2331,6 +2514,7 @@ out_err:
2331static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) 2514static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2332{ 2515{
2333 struct rbd_obj_request *orig_request; 2516 struct rbd_obj_request *orig_request;
2517 struct rbd_device *rbd_dev;
2334 int result; 2518 int result;
2335 2519
2336 rbd_assert(!obj_request_img_data_test(obj_request)); 2520 rbd_assert(!obj_request_img_data_test(obj_request));
@@ -2353,8 +2537,21 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2353 obj_request->xferred, obj_request->length); 2537 obj_request->xferred, obj_request->length);
2354 rbd_obj_request_put(obj_request); 2538 rbd_obj_request_put(obj_request);
2355 2539
2356 rbd_assert(orig_request); 2540 /*
2357 rbd_assert(orig_request->img_request); 2541 * If the overlap has become 0 (most likely because the
2542 * image has been flattened) we need to re-submit the
2543 * original write request.
2544 */
2545 rbd_dev = orig_request->img_request->rbd_dev;
2546 if (!rbd_dev->parent_overlap) {
2547 struct ceph_osd_client *osdc;
2548
2549 rbd_obj_request_put(orig_request);
2550 osdc = &rbd_dev->rbd_client->client->osdc;
2551 result = rbd_obj_request_submit(osdc, orig_request);
2552 if (!result)
2553 return;
2554 }
2358 2555
2359 /* 2556 /*
2360 * Our only purpose here is to determine whether the object 2557 * Our only purpose here is to determine whether the object
@@ -2512,14 +2709,36 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2512 struct rbd_obj_request *obj_request; 2709 struct rbd_obj_request *obj_request;
2513 struct rbd_device *rbd_dev; 2710 struct rbd_device *rbd_dev;
2514 u64 obj_end; 2711 u64 obj_end;
2712 u64 img_xferred;
2713 int img_result;
2515 2714
2516 rbd_assert(img_request_child_test(img_request)); 2715 rbd_assert(img_request_child_test(img_request));
2517 2716
2717 /* First get what we need from the image request and release it */
2718
2518 obj_request = img_request->obj_request; 2719 obj_request = img_request->obj_request;
2720 img_xferred = img_request->xferred;
2721 img_result = img_request->result;
2722 rbd_img_request_put(img_request);
2723
2724 /*
2725 * If the overlap has become 0 (most likely because the
2726 * image has been flattened) we need to re-submit the
2727 * original request.
2728 */
2519 rbd_assert(obj_request); 2729 rbd_assert(obj_request);
2520 rbd_assert(obj_request->img_request); 2730 rbd_assert(obj_request->img_request);
2731 rbd_dev = obj_request->img_request->rbd_dev;
2732 if (!rbd_dev->parent_overlap) {
2733 struct ceph_osd_client *osdc;
2734
2735 osdc = &rbd_dev->rbd_client->client->osdc;
2736 img_result = rbd_obj_request_submit(osdc, obj_request);
2737 if (!img_result)
2738 return;
2739 }
2521 2740
2522 obj_request->result = img_request->result; 2741 obj_request->result = img_result;
2523 if (obj_request->result) 2742 if (obj_request->result)
2524 goto out; 2743 goto out;
2525 2744
@@ -2532,7 +2751,6 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2532 */ 2751 */
2533 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length); 2752 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2534 obj_end = obj_request->img_offset + obj_request->length; 2753 obj_end = obj_request->img_offset + obj_request->length;
2535 rbd_dev = obj_request->img_request->rbd_dev;
2536 if (obj_end > rbd_dev->parent_overlap) { 2754 if (obj_end > rbd_dev->parent_overlap) {
2537 u64 xferred = 0; 2755 u64 xferred = 0;
2538 2756
@@ -2540,43 +2758,39 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2540 xferred = rbd_dev->parent_overlap - 2758 xferred = rbd_dev->parent_overlap -
2541 obj_request->img_offset; 2759 obj_request->img_offset;
2542 2760
2543 obj_request->xferred = min(img_request->xferred, xferred); 2761 obj_request->xferred = min(img_xferred, xferred);
2544 } else { 2762 } else {
2545 obj_request->xferred = img_request->xferred; 2763 obj_request->xferred = img_xferred;
2546 } 2764 }
2547out: 2765out:
2548 rbd_img_request_put(img_request);
2549 rbd_img_obj_request_read_callback(obj_request); 2766 rbd_img_obj_request_read_callback(obj_request);
2550 rbd_obj_request_complete(obj_request); 2767 rbd_obj_request_complete(obj_request);
2551} 2768}
2552 2769
2553static void rbd_img_parent_read(struct rbd_obj_request *obj_request) 2770static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2554{ 2771{
2555 struct rbd_device *rbd_dev;
2556 struct rbd_img_request *img_request; 2772 struct rbd_img_request *img_request;
2557 int result; 2773 int result;
2558 2774
2559 rbd_assert(obj_request_img_data_test(obj_request)); 2775 rbd_assert(obj_request_img_data_test(obj_request));
2560 rbd_assert(obj_request->img_request != NULL); 2776 rbd_assert(obj_request->img_request != NULL);
2561 rbd_assert(obj_request->result == (s32) -ENOENT); 2777 rbd_assert(obj_request->result == (s32) -ENOENT);
2562 rbd_assert(obj_request->type == OBJ_REQUEST_BIO); 2778 rbd_assert(obj_request_type_valid(obj_request->type));
2563 2779
2564 rbd_dev = obj_request->img_request->rbd_dev;
2565 rbd_assert(rbd_dev->parent != NULL);
2566 /* rbd_read_finish(obj_request, obj_request->length); */ 2780 /* rbd_read_finish(obj_request, obj_request->length); */
2567 img_request = rbd_img_request_create(rbd_dev->parent, 2781 img_request = rbd_parent_request_create(obj_request,
2568 obj_request->img_offset, 2782 obj_request->img_offset,
2569 obj_request->length, 2783 obj_request->length);
2570 false, true);
2571 result = -ENOMEM; 2784 result = -ENOMEM;
2572 if (!img_request) 2785 if (!img_request)
2573 goto out_err; 2786 goto out_err;
2574 2787
2575 rbd_obj_request_get(obj_request); 2788 if (obj_request->type == OBJ_REQUEST_BIO)
2576 img_request->obj_request = obj_request; 2789 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2577 2790 obj_request->bio_list);
2578 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, 2791 else
2579 obj_request->bio_list); 2792 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2793 obj_request->pages);
2580 if (result) 2794 if (result)
2581 goto out_err; 2795 goto out_err;
2582 2796
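
The clamp at the end of rbd_img_parent_read_callback() credits only the part of a parent read that lies below parent_overlap; bytes at or past the overlap are not parent data. For example, a 2 MiB read at image offset 3 MiB against a 4 MiB overlap yields at most 1 MiB of parent bytes. A standalone sketch of that arithmetic (hypothetical helper, not a driver function):

    #include <stdint.h>
    #include <assert.h>

    /*
     * Given a child read of [img_offset, img_offset + length) satisfied
     * from the parent, and the bytes the parent actually transferred,
     * return how many of those bytes are valid parent data.
     */
    static uint64_t parent_xferred(uint64_t img_offset, uint64_t length,
                                   uint64_t parent_overlap,
                                   uint64_t img_xferred)
    {
        uint64_t obj_end = img_offset + length;

        if (obj_end > parent_overlap) {
            uint64_t valid = 0;

            if (img_offset < parent_overlap)
                valid = parent_overlap - img_offset;
            return img_xferred < valid ? img_xferred : valid;
        }
        return img_xferred;
    }

    int main(void)
    {
        uint64_t mib = 1024 * 1024;

        /* 2 MiB read at offset 3 MiB, 4 MiB overlap: 1 MiB is valid */
        assert(parent_xferred(3 * mib, 2 * mib, 4 * mib, 2 * mib) == mib);
        return 0;
    }
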
@@ -2626,6 +2840,7 @@ out:
2626static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) 2840static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2627{ 2841{
2628 struct rbd_device *rbd_dev = (struct rbd_device *)data; 2842 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2843 int ret;
2629 2844
2630 if (!rbd_dev) 2845 if (!rbd_dev)
2631 return; 2846 return;
@@ -2633,7 +2848,9 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2633 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, 2848 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2634 rbd_dev->header_name, (unsigned long long)notify_id, 2849 rbd_dev->header_name, (unsigned long long)notify_id,
2635 (unsigned int)opcode); 2850 (unsigned int)opcode);
2636 (void)rbd_dev_refresh(rbd_dev); 2851 ret = rbd_dev_refresh(rbd_dev);
2852 if (ret)
2853 rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
2637 2854
2638 rbd_obj_notify_ack(rbd_dev, notify_id); 2855 rbd_obj_notify_ack(rbd_dev, notify_id);
2639} 2856}
@@ -2642,7 +2859,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2642 * Request sync osd watch/unwatch. The value of "start" determines 2859 * Request sync osd watch/unwatch. The value of "start" determines
2643 * whether a watch request is being initiated or torn down. 2860 * whether a watch request is being initiated or torn down.
2644 */ 2861 */
2645static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) 2862static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2646{ 2863{
2647 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2864 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2648 struct rbd_obj_request *obj_request; 2865 struct rbd_obj_request *obj_request;
@@ -2676,7 +2893,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2676 rbd_dev->watch_request->osd_req); 2893 rbd_dev->watch_request->osd_req);
2677 2894
2678 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, 2895 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2679 rbd_dev->watch_event->cookie, 0, start); 2896 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2680 rbd_osd_req_format_write(obj_request); 2897 rbd_osd_req_format_write(obj_request);
2681 2898
2682 ret = rbd_obj_request_submit(osdc, obj_request); 2899 ret = rbd_obj_request_submit(osdc, obj_request);
@@ -2869,9 +3086,16 @@ static void rbd_request_fn(struct request_queue *q)
2869 goto end_request; /* Shouldn't happen */ 3086 goto end_request; /* Shouldn't happen */
2870 } 3087 }
2871 3088
3089 result = -EIO;
3090 if (offset + length > rbd_dev->mapping.size) {
3091 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3092 offset, length, rbd_dev->mapping.size);
3093 goto end_request;
3094 }
3095
2872 result = -ENOMEM; 3096 result = -ENOMEM;
2873 img_request = rbd_img_request_create(rbd_dev, offset, length, 3097 img_request = rbd_img_request_create(rbd_dev, offset, length,
2874 write_request, false); 3098 write_request);
2875 if (!img_request) 3099 if (!img_request)
2876 goto end_request; 3100 goto end_request;
2877 3101
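
The new end-of-device check in rbd_request_fn() fails a request with -EIO before it can reach the OSDs, which matters now that the mapped size can change at refresh time. A small sketch of the bound, phrased so the offset + length sum cannot wrap (the driver's direct comparison is fine given block-layer sanitized values, which this sketch does not assume):

    #include <stdint.h>
    #include <stdbool.h>
    #include <assert.h>

    /* true if [offset, offset + length) fits in a mapping of 'size'
     * bytes; written so offset + length cannot wrap around. */
    static bool request_in_bounds(uint64_t offset, uint64_t length,
                                  uint64_t size)
    {
        return offset <= size && length <= size - offset;
    }

    int main(void)
    {
        assert(request_in_bounds(0, 512, 1024));
        assert(!request_in_bounds(1024, 1, 1024));       /* beyond EOD */
        assert(!request_in_bounds(UINT64_MAX, 2, 1024)); /* would wrap */
        return 0;
    }
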
@@ -3022,17 +3246,11 @@ out:
3022} 3246}
3023 3247
3024/* 3248/*
3025 * Read the complete header for the given rbd device. 3249 * Read the complete header for the given rbd device. On successful
3026 * 3250 * return, the rbd_dev->header field will contain up-to-date
3027 * Returns a pointer to a dynamically-allocated buffer containing 3251 * information about the image.
3028 * the complete and validated header. Caller can pass the address
3029 * of a variable that will be filled in with the version of the
3030 * header object at the time it was read.
3031 *
3032 * Returns a pointer-coded errno if a failure occurs.
3033 */ 3252 */
3034static struct rbd_image_header_ondisk * 3253static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3035rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
3036{ 3254{
3037 struct rbd_image_header_ondisk *ondisk = NULL; 3255 struct rbd_image_header_ondisk *ondisk = NULL;
3038 u32 snap_count = 0; 3256 u32 snap_count = 0;
@@ -3057,22 +3275,22 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
3057 size += names_size; 3275 size += names_size;
3058 ondisk = kmalloc(size, GFP_KERNEL); 3276 ondisk = kmalloc(size, GFP_KERNEL);
3059 if (!ondisk) 3277 if (!ondisk)
3060 return ERR_PTR(-ENOMEM); 3278 return -ENOMEM;
3061 3279
3062 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, 3280 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3063 0, size, ondisk); 3281 0, size, ondisk);
3064 if (ret < 0) 3282 if (ret < 0)
3065 goto out_err; 3283 goto out;
3066 if ((size_t)ret < size) { 3284 if ((size_t)ret < size) {
3067 ret = -ENXIO; 3285 ret = -ENXIO;
3068 rbd_warn(rbd_dev, "short header read (want %zd got %d)", 3286 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3069 size, ret); 3287 size, ret);
3070 goto out_err; 3288 goto out;
3071 } 3289 }
3072 if (!rbd_dev_ondisk_valid(ondisk)) { 3290 if (!rbd_dev_ondisk_valid(ondisk)) {
3073 ret = -ENXIO; 3291 ret = -ENXIO;
3074 rbd_warn(rbd_dev, "invalid header"); 3292 rbd_warn(rbd_dev, "invalid header");
3075 goto out_err; 3293 goto out;
3076 } 3294 }
3077 3295
3078 names_size = le64_to_cpu(ondisk->snap_names_len); 3296 names_size = le64_to_cpu(ondisk->snap_names_len);
@@ -3080,85 +3298,13 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
3080 snap_count = le32_to_cpu(ondisk->snap_count); 3298 snap_count = le32_to_cpu(ondisk->snap_count);
3081 } while (snap_count != want_count); 3299 } while (snap_count != want_count);
3082 3300
3083 return ondisk; 3301 ret = rbd_header_from_disk(rbd_dev, ondisk);
3084 3302out:
3085out_err:
3086 kfree(ondisk);
3087
3088 return ERR_PTR(ret);
3089}
3090
3091/*
3092 * reload the ondisk the header
3093 */
3094static int rbd_read_header(struct rbd_device *rbd_dev,
3095 struct rbd_image_header *header)
3096{
3097 struct rbd_image_header_ondisk *ondisk;
3098 int ret;
3099
3100 ondisk = rbd_dev_v1_header_read(rbd_dev);
3101 if (IS_ERR(ondisk))
3102 return PTR_ERR(ondisk);
3103 ret = rbd_header_from_disk(header, ondisk);
3104 kfree(ondisk); 3303 kfree(ondisk);
3105 3304
3106 return ret; 3305 return ret;
3107} 3306}
3108 3307
3109static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
3110{
3111 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
3112 return;
3113
3114 if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
3115 sector_t size;
3116
3117 rbd_dev->mapping.size = rbd_dev->header.image_size;
3118 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3119 dout("setting size to %llu sectors", (unsigned long long)size);
3120 set_capacity(rbd_dev->disk, size);
3121 }
3122}
3123
3124/*
3125 * only read the first part of the ondisk header, without the snaps info
3126 */
3127static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
3128{
3129 int ret;
3130 struct rbd_image_header h;
3131
3132 ret = rbd_read_header(rbd_dev, &h);
3133 if (ret < 0)
3134 return ret;
3135
3136 down_write(&rbd_dev->header_rwsem);
3137
3138 /* Update image size, and check for resize of mapped image */
3139 rbd_dev->header.image_size = h.image_size;
3140 rbd_update_mapping_size(rbd_dev);
3141
3142 /* rbd_dev->header.object_prefix shouldn't change */
3143 kfree(rbd_dev->header.snap_sizes);
3144 kfree(rbd_dev->header.snap_names);
3145 /* osd requests may still refer to snapc */
3146 ceph_put_snap_context(rbd_dev->header.snapc);
3147
3148 rbd_dev->header.image_size = h.image_size;
3149 rbd_dev->header.snapc = h.snapc;
3150 rbd_dev->header.snap_names = h.snap_names;
3151 rbd_dev->header.snap_sizes = h.snap_sizes;
3152 /* Free the extra copy of the object prefix */
3153 if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
3154 rbd_warn(rbd_dev, "object prefix changed (ignoring)");
3155 kfree(h.object_prefix);
3156
3157 up_write(&rbd_dev->header_rwsem);
3158
3159 return ret;
3160}
3161
3162/* 3308/*
3163 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to 3309 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3164 * has disappeared from the (just updated) snapshot context. 3310 * has disappeared from the (just updated) snapshot context.
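
rbd_dev_v1_header_info() keeps the old read loop: the buffer is sized for the snapshot count seen on the previous read, and the read repeats until a pass reports the count it was sized for, so a snapshot created mid-read can't truncate the result. The same shape in a self-contained sketch (an in-memory stand-in replaces the header object):

    #include <stdlib.h>

    /* Stand-in for the header object: the snapshot count can change
     * between reads, just as it can on the OSD. */
    static unsigned int current_count = 3;

    static unsigned int read_snaps(unsigned int *buf, unsigned int room)
    {
        unsigned int count = current_count;
        unsigned int i;

        for (i = 0; i < count && i < room; i++)
            buf[i] = i;
        return count;
    }

    /*
     * Size the buffer for the count seen last time, re-read, and loop
     * until a read reports the count we sized for: the same shape as
     * the do { ... } while (snap_count != want_count) loop above.
     */
    static unsigned int *read_snaps_stable(unsigned int *out_count)
    {
        unsigned int want = 0, got = 0;
        unsigned int *buf = NULL;

        do {
            unsigned int *bigger;

            want = got;
            bigger = realloc(buf, (want + 1) * sizeof(*buf));
            if (!bigger) {
                free(buf);
                return NULL;
            }
            buf = bigger;
            got = read_snaps(buf, want);
        } while (got != want);

        *out_count = got;
        return buf;
    }

    int main(void)
    {
        unsigned int n;
        unsigned int *snaps = read_snaps_stable(&n);

        free(snaps);
        return snaps && n == 3 ? 0 : 1;
    }
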
@@ -3180,26 +3326,29 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
3180 3326
3181static int rbd_dev_refresh(struct rbd_device *rbd_dev) 3327static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3182{ 3328{
3183 u64 image_size; 3329 u64 mapping_size;
3184 int ret; 3330 int ret;
3185 3331
3186 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 3332 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3187 image_size = rbd_dev->header.image_size; 3333 mapping_size = rbd_dev->mapping.size;
3188 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 3334 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3189 if (rbd_dev->image_format == 1) 3335 if (rbd_dev->image_format == 1)
3190 ret = rbd_dev_v1_refresh(rbd_dev); 3336 ret = rbd_dev_v1_header_info(rbd_dev);
3191 else 3337 else
3192 ret = rbd_dev_v2_refresh(rbd_dev); 3338 ret = rbd_dev_v2_header_info(rbd_dev);
3193 3339
3194 /* If it's a mapped snapshot, validate its EXISTS flag */ 3340 /* If it's a mapped snapshot, validate its EXISTS flag */
3195 3341
3196 rbd_exists_validate(rbd_dev); 3342 rbd_exists_validate(rbd_dev);
3197 mutex_unlock(&ctl_mutex); 3343 mutex_unlock(&ctl_mutex);
3198 if (ret) 3344 if (mapping_size != rbd_dev->mapping.size) {
3199 rbd_warn(rbd_dev, "got notification but failed to " 3345 sector_t size;
3200 " update snaps: %d\n", ret); 3346
3201 if (image_size != rbd_dev->header.image_size) 3347 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3348 dout("setting size to %llu sectors", (unsigned long long)size);
3349 set_capacity(rbd_dev->disk, size);
3202 revalidate_disk(rbd_dev->disk); 3350 revalidate_disk(rbd_dev->disk);
3351 }
3203 3352
3204 return ret; 3353 return ret;
3205} 3354}
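
rbd_dev_refresh() now captures mapping.size before re-reading the header and resizes the block device only when the mapped size actually changed, mirroring what the removed rbd_update_mapping_size() used to do. A stub-based sketch of that compare-and-propagate (rbd sizes are bytes; capacity is 512-byte sectors):

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SIZE 512

    /* Stand-in for set_capacity()/revalidate_disk(). */
    static void set_capacity_stub(uint64_t sectors)
    {
        printf("setting size to %llu sectors\n",
               (unsigned long long)sectors);
    }

    /* Only touch the block device when the mapped size really changed. */
    static void propagate_resize(uint64_t *mapping_size, uint64_t new_size)
    {
        if (*mapping_size == new_size)
            return;
        *mapping_size = new_size;
        set_capacity_stub(new_size / SECTOR_SIZE);
    }

    int main(void)
    {
        uint64_t mapped = 1 << 20;

        propagate_resize(&mapped, 1 << 20);     /* no-op */
        propagate_resize(&mapped, 2 << 20);     /* grows: one message */
        return 0;
    }
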
@@ -3403,6 +3552,8 @@ static ssize_t rbd_image_refresh(struct device *dev,
3403 int ret; 3552 int ret;
3404 3553
3405 ret = rbd_dev_refresh(rbd_dev); 3554 ret = rbd_dev_refresh(rbd_dev);
3555 if (ret)
3556 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3406 3557
3407 return ret < 0 ? ret : size; 3558 return ret < 0 ? ret : size;
3408} 3559}
@@ -3501,6 +3652,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3501 3652
3502 spin_lock_init(&rbd_dev->lock); 3653 spin_lock_init(&rbd_dev->lock);
3503 rbd_dev->flags = 0; 3654 rbd_dev->flags = 0;
3655 atomic_set(&rbd_dev->parent_ref, 0);
3504 INIT_LIST_HEAD(&rbd_dev->node); 3656 INIT_LIST_HEAD(&rbd_dev->node);
3505 init_rwsem(&rbd_dev->header_rwsem); 3657 init_rwsem(&rbd_dev->header_rwsem);
3506 3658
@@ -3650,6 +3802,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3650 __le64 snapid; 3802 __le64 snapid;
3651 void *p; 3803 void *p;
3652 void *end; 3804 void *end;
3805 u64 pool_id;
3653 char *image_id; 3806 char *image_id;
3654 u64 overlap; 3807 u64 overlap;
3655 int ret; 3808 int ret;
@@ -3680,18 +3833,37 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3680 p = reply_buf; 3833 p = reply_buf;
3681 end = reply_buf + ret; 3834 end = reply_buf + ret;
3682 ret = -ERANGE; 3835 ret = -ERANGE;
3683 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err); 3836 ceph_decode_64_safe(&p, end, pool_id, out_err);
3684 if (parent_spec->pool_id == CEPH_NOPOOL) 3837 if (pool_id == CEPH_NOPOOL) {
3838 /*
3839 * Either the parent never existed, or we have a
3840 * record of it but the image got flattened so it no
3841 * longer has a parent. When the parent of a
3842 * layered image disappears we immediately set the
3843 * overlap to 0. The effect of this is that all new
3844 * requests will be treated as if the image had no
3845 * parent.
3846 */
3847 if (rbd_dev->parent_overlap) {
3848 rbd_dev->parent_overlap = 0;
3849 smp_mb();
3850 rbd_dev_parent_put(rbd_dev);
3851 pr_info("%s: clone image has been flattened\n",
3852 rbd_dev->disk->disk_name);
3853 }
3854
3685 goto out; /* No parent? No problem. */ 3855 goto out; /* No parent? No problem. */
3856 }
3686 3857
3687 /* The ceph file layout needs to fit pool id in 32 bits */ 3858 /* The ceph file layout needs to fit pool id in 32 bits */
3688 3859
3689 ret = -EIO; 3860 ret = -EIO;
3690 if (parent_spec->pool_id > (u64)U32_MAX) { 3861 if (pool_id > (u64)U32_MAX) {
3691 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n", 3862 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3692 (unsigned long long)parent_spec->pool_id, U32_MAX); 3863 (unsigned long long)pool_id, U32_MAX);
3693 goto out_err; 3864 goto out_err;
3694 } 3865 }
3866 parent_spec->pool_id = pool_id;
3695 3867
3696 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 3868 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3697 if (IS_ERR(image_id)) { 3869 if (IS_ERR(image_id)) {
@@ -3702,9 +3874,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3702 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err); 3874 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3703 ceph_decode_64_safe(&p, end, overlap, out_err); 3875 ceph_decode_64_safe(&p, end, overlap, out_err);
3704 3876
3705 rbd_dev->parent_overlap = overlap; 3877 if (overlap) {
3706 rbd_dev->parent_spec = parent_spec; 3878 rbd_spec_put(rbd_dev->parent_spec);
3707 parent_spec = NULL; /* rbd_dev now owns this */ 3879 rbd_dev->parent_spec = parent_spec;
3880 parent_spec = NULL; /* rbd_dev now owns this */
3881 rbd_dev->parent_overlap = overlap;
3882 } else {
3883 rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
3884 }
3708out: 3885out:
3709 ret = 0; 3886 ret = 0;
3710out_err: 3887out_err:
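
The reply decoded above is laid out as pool id (u64), image id (length-prefixed string), snap id (u64) and overlap (u64), with every read bounds-checked so a short reply produces -ERANGE instead of an overrun. A hedged sketch of such a decoder; the u32 string length prefix and host-endian reads are assumptions for illustration (the driver uses the ceph_decode_*_safe() helpers and little-endian wire order):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct parent_info {
        uint64_t pool_id;
        char *image_id;
        uint64_t snap_id;
        uint64_t overlap;
    };

    /* Bounds-checked read; -1 on a short buffer, which is what the
     * driver turns into -ERANGE. Little-endian host assumed. */
    static int get_u64(const uint8_t **p, const uint8_t *end, uint64_t *v)
    {
        if ((size_t)(end - *p) < 8)
            return -1;
        memcpy(v, *p, 8);
        *p += 8;
        return 0;
    }

    static int decode_parent(const uint8_t *buf, size_t len,
                             struct parent_info *pi)
    {
        const uint8_t *p = buf, *end = buf + len;
        uint32_t id_len;

        if (get_u64(&p, end, &pi->pool_id))
            return -1;
        if ((size_t)(end - p) < 4)      /* u32 length-prefixed string */
            return -1;
        memcpy(&id_len, p, 4);
        p += 4;
        if ((size_t)(end - p) < id_len)
            return -1;
        pi->image_id = malloc((size_t)id_len + 1);
        if (!pi->image_id)
            return -1;
        memcpy(pi->image_id, p, id_len);
        pi->image_id[id_len] = '\0';
        p += id_len;
        if (get_u64(&p, end, &pi->snap_id) ||
            get_u64(&p, end, &pi->overlap)) {
            free(pi->image_id);
            pi->image_id = NULL;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        /* 8(pool) + 4(len) + 2(id) + 8(snap) + 8(overlap) = 30 bytes */
        uint8_t buf[30] = { 1, 0,0,0,0,0,0,0,  2,0,0,0,  'a','b' };
        struct parent_info pi;

        buf[14] = 5;    /* snap_id = 5 */
        buf[22] = 7;    /* overlap = 7 */
        if (decode_parent(buf, sizeof(buf), &pi))
            return 1;
        free(pi.image_id);
        return pi.pool_id == 1 && pi.overlap == 7 ? 0 : 1;
    }
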
@@ -4002,6 +4179,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4002 for (i = 0; i < snap_count; i++) 4179 for (i = 0; i < snap_count; i++)
4003 snapc->snaps[i] = ceph_decode_64(&p); 4180 snapc->snaps[i] = ceph_decode_64(&p);
4004 4181
4182 ceph_put_snap_context(rbd_dev->header.snapc);
4005 rbd_dev->header.snapc = snapc; 4183 rbd_dev->header.snapc = snapc;
4006 4184
4007 dout(" snap context seq = %llu, snap_count = %u\n", 4185 dout(" snap context seq = %llu, snap_count = %u\n",
@@ -4053,21 +4231,56 @@ out:
4053 return snap_name; 4231 return snap_name;
4054} 4232}
4055 4233
4056static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev) 4234static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4057{ 4235{
4236 bool first_time = rbd_dev->header.object_prefix == NULL;
4058 int ret; 4237 int ret;
4059 4238
4060 down_write(&rbd_dev->header_rwsem); 4239 down_write(&rbd_dev->header_rwsem);
4061 4240
4241 if (first_time) {
4242 ret = rbd_dev_v2_header_onetime(rbd_dev);
4243 if (ret)
4244 goto out;
4245 }
4246
4247 /*
4248 * If the image supports layering, get the parent info. We
4249 * need to probe the first time regardless. Thereafter we
4250 * only need to do so if there's a parent, to see if it has
4251 * disappeared due to the mapped image getting flattened.
4252 */
4253 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4254 (first_time || rbd_dev->parent_spec)) {
4255 bool warn;
4256
4257 ret = rbd_dev_v2_parent_info(rbd_dev);
4258 if (ret)
4259 goto out;
4260
4261 /*
4262 * Print a warning if this is the initial probe and
4263 * the image has a parent. Don't print it if the
4264 * image now being probed is itself a parent. We
4265 * can tell at this point because we won't know its
4266 * pool name yet (just its pool id).
4267 */
4268 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4269 if (first_time && warn)
4270 rbd_warn(rbd_dev, "WARNING: kernel layering "
4271 "is EXPERIMENTAL!");
4272 }
4273
4062 ret = rbd_dev_v2_image_size(rbd_dev); 4274 ret = rbd_dev_v2_image_size(rbd_dev);
4063 if (ret) 4275 if (ret)
4064 goto out; 4276 goto out;
4065 rbd_update_mapping_size(rbd_dev); 4277
4278 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4279 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4280 rbd_dev->mapping.size = rbd_dev->header.image_size;
4066 4281
4067 ret = rbd_dev_v2_snap_context(rbd_dev); 4282 ret = rbd_dev_v2_snap_context(rbd_dev);
4068 dout("rbd_dev_v2_snap_context returned %d\n", ret); 4283 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4069 if (ret)
4070 goto out;
4071out: 4284out:
4072 up_write(&rbd_dev->header_rwsem); 4285 up_write(&rbd_dev->header_rwsem);
4073 4286
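
rbd_dev_v2_header_info() keys its one-time work off header.object_prefix being NULL: the object prefix, features and striping parameters are fetched once, while the size, parent info and snapshot context are refreshed on every call, all under header_rwsem. The lazy one-time shape in a small sketch (a pthread rwlock and a made-up prefix value stand in):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct hdr {
        pthread_rwlock_t lock;
        char *object_prefix;    /* NULL until one-time setup has run */
        unsigned long long image_size;
    };

    static int header_info(struct hdr *h, unsigned long long probed_size)
    {
        int ret = 0;

        pthread_rwlock_wrlock(&h->lock);
        if (!h->object_prefix) {
            /* first_time: fetch what never changes afterwards */
            h->object_prefix = strdup("rbd_data.fake"); /* hypothetical */
            if (!h->object_prefix) {
                ret = -1;
                goto out;
            }
        }
        h->image_size = probed_size;    /* refreshed on every call */
    out:
        pthread_rwlock_unlock(&h->lock);
        return ret;
    }

    int main(void)
    {
        struct hdr h = { .lock = PTHREAD_RWLOCK_INITIALIZER };

        header_info(&h, 1024);  /* probe: one-time setup + refresh */
        header_info(&h, 2048);  /* later notify: refresh only */
        free(h.object_prefix);
        return 0;
    }
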
@@ -4490,10 +4703,10 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4490{ 4703{
4491 struct rbd_image_header *header; 4704 struct rbd_image_header *header;
4492 4705
4493 rbd_dev_remove_parent(rbd_dev); 4706 /* Drop parent reference unless it's already been done (or there was none) */
4494 rbd_spec_put(rbd_dev->parent_spec); 4707
4495 rbd_dev->parent_spec = NULL; 4708 if (rbd_dev->parent_overlap)
4496 rbd_dev->parent_overlap = 0; 4709 rbd_dev_parent_put(rbd_dev);
4497 4710
4498 /* Free dynamic fields from the header, then zero it out */ 4711 /* Free dynamic fields from the header, then zero it out */
4499 4712
@@ -4505,72 +4718,22 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4505 memset(header, 0, sizeof (*header)); 4718 memset(header, 0, sizeof (*header));
4506} 4719}
4507 4720
4508static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) 4721static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
4509{ 4722{
4510 int ret; 4723 int ret;
4511 4724
4512 /* Populate rbd image metadata */
4513
4514 ret = rbd_read_header(rbd_dev, &rbd_dev->header);
4515 if (ret < 0)
4516 goto out_err;
4517
4518 /* Version 1 images have no parent (no layering) */
4519
4520 rbd_dev->parent_spec = NULL;
4521 rbd_dev->parent_overlap = 0;
4522
4523 dout("discovered version 1 image, header name is %s\n",
4524 rbd_dev->header_name);
4525
4526 return 0;
4527
4528out_err:
4529 kfree(rbd_dev->header_name);
4530 rbd_dev->header_name = NULL;
4531 kfree(rbd_dev->spec->image_id);
4532 rbd_dev->spec->image_id = NULL;
4533
4534 return ret;
4535}
4536
4537static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
4538{
4539 int ret;
4540
4541 ret = rbd_dev_v2_image_size(rbd_dev);
4542 if (ret)
4543 goto out_err;
4544
4545 /* Get the object prefix (a.k.a. block_name) for the image */
4546
4547 ret = rbd_dev_v2_object_prefix(rbd_dev); 4725 ret = rbd_dev_v2_object_prefix(rbd_dev);
4548 if (ret) 4726 if (ret)
4549 goto out_err; 4727 goto out_err;
4550 4728
4551 /* Get the and check features for the image */ 4729 /*
4552 4730 * Get and check the features for the image. Currently the
4731 * features are assumed to never change.
4732 */
4553 ret = rbd_dev_v2_features(rbd_dev); 4733 ret = rbd_dev_v2_features(rbd_dev);
4554 if (ret) 4734 if (ret)
4555 goto out_err; 4735 goto out_err;
4556 4736
4557 /* If the image supports layering, get the parent info */
4558
4559 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
4560 ret = rbd_dev_v2_parent_info(rbd_dev);
4561 if (ret)
4562 goto out_err;
4563
4564 /*
4565 * Don't print a warning for parent images. We can
4566 * tell this point because we won't know its pool
4567 * name yet (just its pool id).
4568 */
4569 if (rbd_dev->spec->pool_name)
4570 rbd_warn(rbd_dev, "WARNING: kernel layering "
4571 "is EXPERIMENTAL!");
4572 }
4573
4574 /* If the image supports fancy striping, get its parameters */ 4737 /* If the image supports fancy striping, get its parameters */
4575 4738
4576 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { 4739 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
@@ -4578,28 +4741,11 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
4578 if (ret < 0) 4741 if (ret < 0)
4579 goto out_err; 4742 goto out_err;
4580 } 4743 }
4581 4744 /* No support for crypto or compression types in format 2 images */
4582 /* crypto and compression type aren't (yet) supported for v2 images */
4583
4584 rbd_dev->header.crypt_type = 0;
4585 rbd_dev->header.comp_type = 0;
4586
4587 /* Get the snapshot context, plus the header version */
4588
4589 ret = rbd_dev_v2_snap_context(rbd_dev);
4590 if (ret)
4591 goto out_err;
4592
4593 dout("discovered version 2 image, header name is %s\n",
4594 rbd_dev->header_name);
4595 4745
4596 return 0; 4746 return 0;
4597out_err: 4747out_err:
4598 rbd_dev->parent_overlap = 0; 4748 rbd_dev->header.features = 0;
4599 rbd_spec_put(rbd_dev->parent_spec);
4600 rbd_dev->parent_spec = NULL;
4601 kfree(rbd_dev->header_name);
4602 rbd_dev->header_name = NULL;
4603 kfree(rbd_dev->header.object_prefix); 4749 kfree(rbd_dev->header.object_prefix);
4604 rbd_dev->header.object_prefix = NULL; 4750 rbd_dev->header.object_prefix = NULL;
4605 4751
@@ -4628,15 +4774,16 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
4628 if (!parent) 4774 if (!parent)
4629 goto out_err; 4775 goto out_err;
4630 4776
4631 ret = rbd_dev_image_probe(parent); 4777 ret = rbd_dev_image_probe(parent, false);
4632 if (ret < 0) 4778 if (ret < 0)
4633 goto out_err; 4779 goto out_err;
4634 rbd_dev->parent = parent; 4780 rbd_dev->parent = parent;
4781 atomic_set(&rbd_dev->parent_ref, 1);
4635 4782
4636 return 0; 4783 return 0;
4637out_err: 4784out_err:
4638 if (parent) { 4785 if (parent) {
4639 rbd_spec_put(rbd_dev->parent_spec); 4786 rbd_dev_unparent(rbd_dev);
4640 kfree(rbd_dev->header_name); 4787 kfree(rbd_dev->header_name);
4641 rbd_dev_destroy(parent); 4788 rbd_dev_destroy(parent);
4642 } else { 4789 } else {
@@ -4651,10 +4798,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4651{ 4798{
4652 int ret; 4799 int ret;
4653 4800
4654 ret = rbd_dev_mapping_set(rbd_dev);
4655 if (ret)
4656 return ret;
4657
4658 /* generate unique id: find highest unique id, add one */ 4801 /* generate unique id: find highest unique id, add one */
4659 rbd_dev_id_get(rbd_dev); 4802 rbd_dev_id_get(rbd_dev);
4660 4803
@@ -4676,13 +4819,17 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4676 if (ret) 4819 if (ret)
4677 goto err_out_blkdev; 4820 goto err_out_blkdev;
4678 4821
4679 ret = rbd_bus_add_dev(rbd_dev); 4822 ret = rbd_dev_mapping_set(rbd_dev);
4680 if (ret) 4823 if (ret)
4681 goto err_out_disk; 4824 goto err_out_disk;
4825 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
4826
4827 ret = rbd_bus_add_dev(rbd_dev);
4828 if (ret)
4829 goto err_out_mapping;
4682 4830
4683 /* Everything's ready. Announce the disk to the world. */ 4831 /* Everything's ready. Announce the disk to the world. */
4684 4832
4685 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
4686 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 4833 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4687 add_disk(rbd_dev->disk); 4834 add_disk(rbd_dev->disk);
4688 4835
@@ -4691,6 +4838,8 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4691 4838
4692 return ret; 4839 return ret;
4693 4840
4841err_out_mapping:
4842 rbd_dev_mapping_clear(rbd_dev);
4694err_out_disk: 4843err_out_disk:
4695 rbd_free_disk(rbd_dev); 4844 rbd_free_disk(rbd_dev);
4696err_out_blkdev: 4845err_out_blkdev:
@@ -4731,12 +4880,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev)
4731 4880
4732static void rbd_dev_image_release(struct rbd_device *rbd_dev) 4881static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4733{ 4882{
4734 int ret;
4735
4736 rbd_dev_unprobe(rbd_dev); 4883 rbd_dev_unprobe(rbd_dev);
4737 ret = rbd_dev_header_watch_sync(rbd_dev, 0);
4738 if (ret)
4739 rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
4740 kfree(rbd_dev->header_name); 4884 kfree(rbd_dev->header_name);
4741 rbd_dev->header_name = NULL; 4885 rbd_dev->header_name = NULL;
4742 rbd_dev->image_format = 0; 4886 rbd_dev->image_format = 0;
@@ -4748,10 +4892,11 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4748 4892
4749/* 4893/*
4750 * Probe for the existence of the header object for the given rbd 4894 * Probe for the existence of the header object for the given rbd
4751 * device. For format 2 images this includes determining the image 4895 * device. If this image is the one being mapped (i.e., not a
4752 * id. 4896 * parent), initiate a watch on its header object before using that
4897 * object to get detailed information about the rbd image.
4753 */ 4898 */
4754static int rbd_dev_image_probe(struct rbd_device *rbd_dev) 4899static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
4755{ 4900{
4756 int ret; 4901 int ret;
4757 int tmp; 4902 int tmp;
@@ -4771,14 +4916,16 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
4771 if (ret) 4916 if (ret)
4772 goto err_out_format; 4917 goto err_out_format;
4773 4918
4774 ret = rbd_dev_header_watch_sync(rbd_dev, 1); 4919 if (mapping) {
4775 if (ret) 4920 ret = rbd_dev_header_watch_sync(rbd_dev, true);
4776 goto out_header_name; 4921 if (ret)
4922 goto out_header_name;
4923 }
4777 4924
4778 if (rbd_dev->image_format == 1) 4925 if (rbd_dev->image_format == 1)
4779 ret = rbd_dev_v1_probe(rbd_dev); 4926 ret = rbd_dev_v1_header_info(rbd_dev);
4780 else 4927 else
4781 ret = rbd_dev_v2_probe(rbd_dev); 4928 ret = rbd_dev_v2_header_info(rbd_dev);
4782 if (ret) 4929 if (ret)
4783 goto err_out_watch; 4930 goto err_out_watch;
4784 4931
@@ -4787,15 +4934,22 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
4787 goto err_out_probe; 4934 goto err_out_probe;
4788 4935
4789 ret = rbd_dev_probe_parent(rbd_dev); 4936 ret = rbd_dev_probe_parent(rbd_dev);
4790 if (!ret) 4937 if (ret)
4791 return 0; 4938 goto err_out_probe;
4939
4940 dout("discovered format %u image, header name is %s\n",
4941 rbd_dev->image_format, rbd_dev->header_name);
4792 4942
4943 return 0;
4793err_out_probe: 4944err_out_probe:
4794 rbd_dev_unprobe(rbd_dev); 4945 rbd_dev_unprobe(rbd_dev);
4795err_out_watch: 4946err_out_watch:
4796 tmp = rbd_dev_header_watch_sync(rbd_dev, 0); 4947 if (mapping) {
4797 if (tmp) 4948 tmp = rbd_dev_header_watch_sync(rbd_dev, false);
4798 rbd_warn(rbd_dev, "unable to tear down watch request\n"); 4949 if (tmp)
4950 rbd_warn(rbd_dev, "unable to tear down "
4951 "watch request (%d)\n", tmp);
4952 }
4799out_header_name: 4953out_header_name:
4800 kfree(rbd_dev->header_name); 4954 kfree(rbd_dev->header_name);
4801 rbd_dev->header_name = NULL; 4955 rbd_dev->header_name = NULL;
@@ -4819,6 +4973,7 @@ static ssize_t rbd_add(struct bus_type *bus,
4819 struct rbd_spec *spec = NULL; 4973 struct rbd_spec *spec = NULL;
4820 struct rbd_client *rbdc; 4974 struct rbd_client *rbdc;
4821 struct ceph_osd_client *osdc; 4975 struct ceph_osd_client *osdc;
4976 bool read_only;
4822 int rc = -ENOMEM; 4977 int rc = -ENOMEM;
4823 4978
4824 if (!try_module_get(THIS_MODULE)) 4979 if (!try_module_get(THIS_MODULE))
@@ -4828,6 +4983,9 @@ static ssize_t rbd_add(struct bus_type *bus,
4828 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec); 4983 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
4829 if (rc < 0) 4984 if (rc < 0)
4830 goto err_out_module; 4985 goto err_out_module;
4986 read_only = rbd_opts->read_only;
4987 kfree(rbd_opts);
4988 rbd_opts = NULL; /* done with this */
4831 4989
4832 rbdc = rbd_get_client(ceph_opts); 4990 rbdc = rbd_get_client(ceph_opts);
4833 if (IS_ERR(rbdc)) { 4991 if (IS_ERR(rbdc)) {
@@ -4858,14 +5016,16 @@ static ssize_t rbd_add(struct bus_type *bus,
4858 rbdc = NULL; /* rbd_dev now owns this */ 5016 rbdc = NULL; /* rbd_dev now owns this */
4859 spec = NULL; /* rbd_dev now owns this */ 5017 spec = NULL; /* rbd_dev now owns this */
4860 5018
4861 rbd_dev->mapping.read_only = rbd_opts->read_only; 5019 rc = rbd_dev_image_probe(rbd_dev, true);
4862 kfree(rbd_opts);
4863 rbd_opts = NULL; /* done with this */
4864
4865 rc = rbd_dev_image_probe(rbd_dev);
4866 if (rc < 0) 5020 if (rc < 0)
4867 goto err_out_rbd_dev; 5021 goto err_out_rbd_dev;
4868 5022
5023 /* If we are mapping a snapshot, it must be marked read-only */
5024
5025 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5026 read_only = true;
5027 rbd_dev->mapping.read_only = read_only;
5028
4869 rc = rbd_dev_device_setup(rbd_dev); 5029 rc = rbd_dev_device_setup(rbd_dev);
4870 if (!rc) 5030 if (!rc)
4871 return count; 5031 return count;
@@ -4911,7 +5071,7 @@ static void rbd_dev_device_release(struct device *dev)
4911 5071
4912 rbd_free_disk(rbd_dev); 5072 rbd_free_disk(rbd_dev);
4913 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 5073 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4914 rbd_dev_clear_mapping(rbd_dev); 5074 rbd_dev_mapping_clear(rbd_dev);
4915 unregister_blkdev(rbd_dev->major, rbd_dev->name); 5075 unregister_blkdev(rbd_dev->major, rbd_dev->name);
4916 rbd_dev->major = 0; 5076 rbd_dev->major = 0;
4917 rbd_dev_id_put(rbd_dev); 5077 rbd_dev_id_put(rbd_dev);
@@ -4978,10 +5138,13 @@ static ssize_t rbd_remove(struct bus_type *bus,
4978 spin_unlock_irq(&rbd_dev->lock); 5138 spin_unlock_irq(&rbd_dev->lock);
4979 if (ret < 0) 5139 if (ret < 0)
4980 goto done; 5140 goto done;
4981 ret = count;
4982 rbd_bus_del_dev(rbd_dev); 5141 rbd_bus_del_dev(rbd_dev);
5142 ret = rbd_dev_header_watch_sync(rbd_dev, false);
5143 if (ret)
5144 rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
4983 rbd_dev_image_release(rbd_dev); 5145 rbd_dev_image_release(rbd_dev);
4984 module_put(THIS_MODULE); 5146 module_put(THIS_MODULE);
5147 ret = count;
4985done: 5148done:
4986 mutex_unlock(&ctl_mutex); 5149 mutex_unlock(&ctl_mutex);
4987 5150
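Two things are worth noting in the rbd hunks above: rbd_remove() now cancels the header watch itself (matching the watch setup moving into rbd_dev_image_probe(), gated on the new "mapping" flag), and rbd_add() forces snapshot mappings read-only. A condensed sketch of that read-only rule; the helper name is hypothetical, the fields are as they appear in the diff:

        /* Hypothetical helper condensing the hunk's logic: a mapping of a
         * snapshot is forced read-only regardless of the user-supplied
         * option, since snapshots are immutable. */
        static void rbd_apply_read_only(struct rbd_device *rbd_dev,
                                        bool read_only)
        {
                if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
                        read_only = true;
                rbd_dev->mapping.read_only = read_only;
        }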
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index cdd4c09fda96..a22a7a502740 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -95,9 +95,9 @@ struct si_sm_data {
95 enum bt_states state; 95 enum bt_states state;
96 unsigned char seq; /* BT sequence number */ 96 unsigned char seq; /* BT sequence number */
97 struct si_sm_io *io; 97 struct si_sm_io *io;
98 unsigned char write_data[IPMI_MAX_MSG_LENGTH]; 98 unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
99 int write_count; 99 int write_count;
100 unsigned char read_data[IPMI_MAX_MSG_LENGTH]; 100 unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
101 int read_count; 101 int read_count;
102 int truncated; 102 int truncated;
103 long timeout; /* microseconds countdown */ 103 long timeout; /* microseconds countdown */
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 9eb360ff8cab..d5a5f020810a 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -837,13 +837,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
837 return ipmi_ioctl(filep, cmd, arg); 837 return ipmi_ioctl(filep, cmd, arg);
838 } 838 }
839} 839}
840
841static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
842 unsigned long arg)
843{
844 int ret;
845
846 mutex_lock(&ipmi_mutex);
847 ret = compat_ipmi_ioctl(filep, cmd, arg);
848 mutex_unlock(&ipmi_mutex);
849
850 return ret;
851}
840#endif 852#endif
841 853
842static const struct file_operations ipmi_fops = { 854static const struct file_operations ipmi_fops = {
843 .owner = THIS_MODULE, 855 .owner = THIS_MODULE,
844 .unlocked_ioctl = ipmi_unlocked_ioctl, 856 .unlocked_ioctl = ipmi_unlocked_ioctl,
845#ifdef CONFIG_COMPAT 857#ifdef CONFIG_COMPAT
846 .compat_ioctl = compat_ipmi_ioctl, 858 .compat_ioctl = unlocked_compat_ipmi_ioctl,
847#endif 859#endif
848 .open = ipmi_open, 860 .open = ipmi_open,
849 .release = ipmi_release, 861 .release = ipmi_release,
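The pattern above is general: a compat (32-bit) ioctl path must take the same lock as the native .unlocked_ioctl path, or 32-bit callers race against 64-bit ones. A generic sketch with hypothetical names (mydrv_mutex and mydrv_do_ioctl are stand-ins, not this driver's symbols):

        #include <linux/fs.h>
        #include <linux/mutex.h>

        static DEFINE_MUTEX(mydrv_mutex);

        static long mydrv_do_ioctl(struct file *filp, unsigned int cmd,
                                   unsigned long arg)
        {
                return 0;       /* real per-command work goes here */
        }

        /* Serialize the compat entry point with the same mutex the
         * native unlocked_ioctl handler takes. */
        static long mydrv_compat_ioctl(struct file *filp, unsigned int cmd,
                                       unsigned long arg)
        {
                long ret;

                mutex_lock(&mydrv_mutex);
                ret = mydrv_do_ioctl(filp, cmd, arg);
                mutex_unlock(&mydrv_mutex);

                return ret;
        }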
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 4d439d2fcfd6..4445fa164a2d 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2037,12 +2037,11 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
2037 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 2037 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2038 if (!entry) 2038 if (!entry)
2039 return -ENOMEM; 2039 return -ENOMEM;
2040 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL); 2040 entry->name = kstrdup(name, GFP_KERNEL);
2041 if (!entry->name) { 2041 if (!entry->name) {
2042 kfree(entry); 2042 kfree(entry);
2043 return -ENOMEM; 2043 return -ENOMEM;
2044 } 2044 }
2045 strcpy(entry->name, name);
2046 2045
2047 file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data); 2046 file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
2048 if (!file) { 2047 if (!file) {
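kstrdup() folds the kmalloc(strlen()+1)/strcpy() pair into one call, removing the spot where the length computation and the copy could drift apart. A self-contained sketch of the idiom the hunk applies (the struct is a stand-in for the proc entry):

        #include <linux/slab.h>
        #include <linux/string.h>

        struct named_entry {
                char *name;
        };

        static struct named_entry *entry_create(const char *name)
        {
                struct named_entry *entry = kmalloc(sizeof(*entry), GFP_KERNEL);

                if (!entry)
                        return NULL;
                entry->name = kstrdup(name, GFP_KERNEL); /* alloc + copy */
                if (!entry->name) {
                        kfree(entry);
                        return NULL;
                }
                return entry;
        }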
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 313538abe63c..af4b23ffc5a6 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -663,8 +663,10 @@ static void handle_transaction_done(struct smi_info *smi_info)
663 /* We got the flags from the SMI, now handle them. */ 663 /* We got the flags from the SMI, now handle them. */
664 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 664 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
665 if (msg[2] != 0) { 665 if (msg[2] != 0) {
666 dev_warn(smi_info->dev, "Could not enable interrupts" 666 dev_warn(smi_info->dev,
667 ", failed get, using polled mode.\n"); 667 "Couldn't get irq info: %x.\n", msg[2]);
668 dev_warn(smi_info->dev,
669 "Maybe ok, but ipmi might run very slowly.\n");
668 smi_info->si_state = SI_NORMAL; 670 smi_info->si_state = SI_NORMAL;
669 } else { 671 } else {
670 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 672 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -685,10 +687,12 @@ static void handle_transaction_done(struct smi_info *smi_info)
685 687
686 /* We got the flags from the SMI, now handle them. */ 688 /* We got the flags from the SMI, now handle them. */
687 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 689 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
688 if (msg[2] != 0) 690 if (msg[2] != 0) {
689 dev_warn(smi_info->dev, "Could not enable interrupts" 691 dev_warn(smi_info->dev,
690 ", failed set, using polled mode.\n"); 692 "Couldn't set irq info: %x.\n", msg[2]);
691 else 693 dev_warn(smi_info->dev,
694 "Maybe ok, but ipmi might run very slowly.\n");
695 } else
692 smi_info->interrupt_disabled = 0; 696 smi_info->interrupt_disabled = 0;
693 smi_info->si_state = SI_NORMAL; 697 smi_info->si_state = SI_NORMAL;
694 break; 698 break;
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a1488f58f6ca..534fcb825153 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -47,7 +47,7 @@ config CPU_FREQ_STAT_DETAILS
47 47
48choice 48choice
49 prompt "Default CPUFreq governor" 49 prompt "Default CPUFreq governor"
50 default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110 50 default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
51 default CPU_FREQ_DEFAULT_GOV_PERFORMANCE 51 default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
52 help 52 help
53 This option sets which CPUFreq governor shall be loaded at 53 This option sets which CPUFreq governor shall be loaded at
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index f3af18b9acc5..6e57543fe0b9 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -3,16 +3,17 @@
3# 3#
4 4
5config ARM_BIG_LITTLE_CPUFREQ 5config ARM_BIG_LITTLE_CPUFREQ
6 tristate 6 tristate "Generic ARM big LITTLE CPUfreq driver"
7 depends on ARM_CPU_TOPOLOGY 7 depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
8 help
9 This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
8 10
9config ARM_DT_BL_CPUFREQ 11config ARM_DT_BL_CPUFREQ
10 tristate "Generic ARM big LITTLE CPUfreq driver probed via DT" 12 tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
11 select ARM_BIG_LITTLE_CPUFREQ 13 depends on ARM_BIG_LITTLE_CPUFREQ && OF
12 depends on OF && HAVE_CLK
13 help 14 help
14 This enables the Generic CPUfreq driver for ARM big.LITTLE platform. 15 This enables probing via DT for the Generic CPUfreq driver for ARM
15 This gets frequency tables from DT. 16 big.LITTLE platforms. This gets frequency tables from DT.
16 17
17config ARM_EXYNOS_CPUFREQ 18config ARM_EXYNOS_CPUFREQ
18 bool "SAMSUNG EXYNOS SoCs" 19 bool "SAMSUNG EXYNOS SoCs"
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index dbdf677d2f36..5d7f53fcd6f5 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -40,11 +40,6 @@ static struct clk *clk[MAX_CLUSTERS];
40static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; 40static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
41static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; 41static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
42 42
43static int cpu_to_cluster(int cpu)
44{
45 return topology_physical_package_id(cpu);
46}
47
48static unsigned int bL_cpufreq_get(unsigned int cpu) 43static unsigned int bL_cpufreq_get(unsigned int cpu)
49{ 44{
50 u32 cur_cluster = cpu_to_cluster(cpu); 45 u32 cur_cluster = cpu_to_cluster(cpu);
@@ -192,7 +187,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
192 187
193 cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); 188 cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
194 189
195 dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu); 190 dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
196 return 0; 191 return 0;
197} 192}
198 193
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 70f18fc12d4a..79b2ce17884d 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -34,6 +34,11 @@ struct cpufreq_arm_bL_ops {
34 int (*init_opp_table)(struct device *cpu_dev); 34 int (*init_opp_table)(struct device *cpu_dev);
35}; 35};
36 36
37static inline int cpu_to_cluster(int cpu)
38{
39 return topology_physical_package_id(cpu);
40}
41
37int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); 42int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
38void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); 43void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
39 44
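Moving cpu_to_cluster() out of arm_big_little.c and into the shared header as a static inline lets glue drivers (such as the DT one) reuse the mapping without an EXPORT_SYMBOL. The helper as the hunk defines it, with its one dependency made explicit:

        #include <linux/topology.h>

        static inline int cpu_to_cluster(int cpu)
        {
                return topology_physical_package_id(cpu);
        }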
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 44be3115375c..173ed059d95f 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -66,8 +66,8 @@ static int dt_get_transition_latency(struct device *cpu_dev)
66 66
67 parent = of_find_node_by_path("/cpus"); 67 parent = of_find_node_by_path("/cpus");
68 if (!parent) { 68 if (!parent) {
69 pr_err("failed to find OF /cpus\n"); 69 pr_info("Failed to find OF /cpus. Using CPUFREQ_ETERNAL transition latency\n");
70 return -ENOENT; 70 return CPUFREQ_ETERNAL;
71 } 71 }
72 72
73 for_each_child_of_node(parent, np) { 73 for_each_child_of_node(parent, np) {
@@ -78,10 +78,11 @@ static int dt_get_transition_latency(struct device *cpu_dev)
78 of_node_put(np); 78 of_node_put(np);
79 of_node_put(parent); 79 of_node_put(parent);
80 80
81 return 0; 81 return transition_latency;
82 } 82 }
83 83
84 return -ENODEV; 84 pr_info("clock-latency isn't found, using CPUFREQ_ETERNAL transition latency\n");
85 return CPUFREQ_ETERNAL;
85} 86}
86 87
87static struct cpufreq_arm_bL_ops dt_bL_ops = { 88static struct cpufreq_arm_bL_ops dt_bL_ops = {
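Both failure legs now return the CPUFREQ_ETERNAL sentinel instead of a negative errno, so a device tree without clock-latency degrades to "latency unknown" rather than failing the probe. A hypothetical condensation of that policy (the of_property_read_u32() call is an assumption about how the property is fetched, not a quote of the driver):

        #include <linux/cpufreq.h>
        #include <linux/of.h>

        static int get_transition_latency(struct device_node *np)
        {
                u32 latency;

                /* A missing property is not fatal: report "unknown". */
                if (of_property_read_u32(np, "clock-latency", &latency))
                        return CPUFREQ_ETERNAL;

                return latency;
        }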
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 3ab8294eab04..a64eb8b70444 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -189,12 +189,29 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
189 189
190 if (!np) { 190 if (!np) {
191 pr_err("failed to find cpu0 node\n"); 191 pr_err("failed to find cpu0 node\n");
192 return -ENOENT; 192 ret = -ENOENT;
193 goto out_put_parent;
193 } 194 }
194 195
195 cpu_dev = &pdev->dev; 196 cpu_dev = &pdev->dev;
196 cpu_dev->of_node = np; 197 cpu_dev->of_node = np;
197 198
199 cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
200 if (IS_ERR(cpu_reg)) {
201 /*
202 * If cpu0 regulator supply node is present, but regulator is
203 * not yet registered, we should try deferring probe.
204 */
205 if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
206 dev_err(cpu_dev, "cpu0 regulator not ready, retry\n");
207 ret = -EPROBE_DEFER;
208 goto out_put_node;
209 }
210 pr_warn("failed to get cpu0 regulator: %ld\n",
211 PTR_ERR(cpu_reg));
212 cpu_reg = NULL;
213 }
214
198 cpu_clk = devm_clk_get(cpu_dev, NULL); 215 cpu_clk = devm_clk_get(cpu_dev, NULL);
199 if (IS_ERR(cpu_clk)) { 216 if (IS_ERR(cpu_clk)) {
200 ret = PTR_ERR(cpu_clk); 217 ret = PTR_ERR(cpu_clk);
@@ -202,12 +219,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
202 goto out_put_node; 219 goto out_put_node;
203 } 220 }
204 221
205 cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
206 if (IS_ERR(cpu_reg)) {
207 pr_warn("failed to get cpu0 regulator\n");
208 cpu_reg = NULL;
209 }
210
211 ret = of_init_opp_table(cpu_dev); 222 ret = of_init_opp_table(cpu_dev);
212 if (ret) { 223 if (ret) {
213 pr_err("failed to init OPP table: %d\n", ret); 224 pr_err("failed to init OPP table: %d\n", ret);
@@ -264,6 +275,8 @@ out_free_table:
264 opp_free_cpufreq_table(cpu_dev, &freq_table); 275 opp_free_cpufreq_table(cpu_dev, &freq_table);
265out_put_node: 276out_put_node:
266 of_node_put(np); 277 of_node_put(np);
278out_put_parent:
279 of_node_put(parent);
267 return ret; 280 return ret;
268} 281}
269 282
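The reordering matters: the regulator lookup now happens before the clock and OPP table are set up, and -EPROBE_DEFER is treated as "supply declared in DT but provider not bound yet" rather than "no regulator". A sketch of that split, with a hypothetical wrapper name:

        #include <linux/err.h>
        #include <linux/regulator/consumer.h>

        /* Hypothetical wrapper: returns the regulator, NULL if it is
         * genuinely absent (the supply is optional), or sets *ret to
         * -EPROBE_DEFER so the caller bails out and the core retries. */
        static struct regulator *get_cpu0_regulator(struct device *cpu_dev,
                                                    int *ret)
        {
                struct regulator *cpu_reg = devm_regulator_get(cpu_dev, "cpu0");

                *ret = 0;
                if (IS_ERR(cpu_reg)) {
                        if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
                                *ret = -EPROBE_DEFER;
                                return NULL;
                        }
                        cpu_reg = NULL; /* optional: run without it */
                }
                return cpu_reg;
        }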
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1b8a48eaf90f..4b8c7f297d74 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1075,14 +1075,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1075 __func__, cpu_dev->id, cpu); 1075 __func__, cpu_dev->id, cpu);
1076 } 1076 }
1077 1077
1078 if ((cpus == 1) && (cpufreq_driver->target))
1079 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1080
1078 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); 1081 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1079 cpufreq_cpu_put(data); 1082 cpufreq_cpu_put(data);
1080 1083
1081 /* If cpu is last user of policy, free policy */ 1084 /* If cpu is last user of policy, free policy */
1082 if (cpus == 1) { 1085 if (cpus == 1) {
1083 if (cpufreq_driver->target)
1084 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1085
1086 lock_policy_rwsem_read(cpu); 1086 lock_policy_rwsem_read(cpu);
1087 kobj = &data->kobj; 1087 kobj = &data->kobj;
1088 cmp = &data->kobj_unregister; 1088 cmp = &data->kobj_unregister;
@@ -1832,15 +1832,13 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1832 if (dev) { 1832 if (dev) {
1833 switch (action) { 1833 switch (action) {
1834 case CPU_ONLINE: 1834 case CPU_ONLINE:
1835 case CPU_ONLINE_FROZEN:
1836 cpufreq_add_dev(dev, NULL); 1835 cpufreq_add_dev(dev, NULL);
1837 break; 1836 break;
1838 case CPU_DOWN_PREPARE: 1837 case CPU_DOWN_PREPARE:
1839 case CPU_DOWN_PREPARE_FROZEN: 1838 case CPU_UP_CANCELED_FROZEN:
1840 __cpufreq_remove_dev(dev, NULL); 1839 __cpufreq_remove_dev(dev, NULL);
1841 break; 1840 break;
1842 case CPU_DOWN_FAILED: 1841 case CPU_DOWN_FAILED:
1843 case CPU_DOWN_FAILED_FROZEN:
1844 cpufreq_add_dev(dev, NULL); 1842 cpufreq_add_dev(dev, NULL);
1845 break; 1843 break;
1846 } 1844 }
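For hotplug notifiers that really do want suspend-time transitions handled exactly like runtime hotplug, the usual idiom is to mask CPU_TASKS_FROZEN off the action instead of listing both variants of every case. Note that the hunk above deliberately does not do this (it gives CPU_UP_CANCELED_FROZEN its own meaning), so the sketch below shows the general pattern, not this driver's behaviour:

        #include <linux/cpu.h>
        #include <linux/notifier.h>

        static int my_cpu_callback(struct notifier_block *nfb,
                                   unsigned long action, void *hcpu)
        {
                switch (action & ~CPU_TASKS_FROZEN) {   /* fold *_FROZEN in */
                case CPU_ONLINE:
                        /* bring per-cpu state up */
                        break;
                case CPU_DOWN_PREPARE:
                        /* tear per-cpu state down */
                        break;
                }
                return NOTIFY_OK;
        }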
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 443442df113b..5af40ad82d23 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -255,6 +255,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
255 if (have_governor_per_policy()) { 255 if (have_governor_per_policy()) {
256 WARN_ON(dbs_data); 256 WARN_ON(dbs_data);
257 } else if (dbs_data) { 257 } else if (dbs_data) {
258 dbs_data->usage_count++;
258 policy->governor_data = dbs_data; 259 policy->governor_data = dbs_data;
259 return 0; 260 return 0;
260 } 261 }
@@ -266,6 +267,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
266 } 267 }
267 268
268 dbs_data->cdata = cdata; 269 dbs_data->cdata = cdata;
270 dbs_data->usage_count = 1;
269 rc = cdata->init(dbs_data); 271 rc = cdata->init(dbs_data);
270 if (rc) { 272 if (rc) {
271 pr_err("%s: POLICY_INIT: init() failed\n", __func__); 273 pr_err("%s: POLICY_INIT: init() failed\n", __func__);
@@ -294,7 +296,8 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
294 set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, 296 set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
295 latency * LATENCY_MULTIPLIER)); 297 latency * LATENCY_MULTIPLIER));
296 298
297 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { 299 if ((cdata->governor == GOV_CONSERVATIVE) &&
300 (!policy->governor->initialized)) {
298 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; 301 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
299 302
300 cpufreq_register_notifier(cs_ops->notifier_block, 303 cpufreq_register_notifier(cs_ops->notifier_block,
@@ -306,12 +309,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
306 309
307 return 0; 310 return 0;
308 case CPUFREQ_GOV_POLICY_EXIT: 311 case CPUFREQ_GOV_POLICY_EXIT:
309 if ((policy->governor->initialized == 1) || 312 if (!--dbs_data->usage_count) {
310 have_governor_per_policy()) {
311 sysfs_remove_group(get_governor_parent_kobj(policy), 313 sysfs_remove_group(get_governor_parent_kobj(policy),
312 get_sysfs_attr(dbs_data)); 314 get_sysfs_attr(dbs_data));
313 315
314 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { 316 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
317 (policy->governor->initialized == 1)) {
315 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; 318 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
316 319
317 cpufreq_unregister_notifier(cs_ops->notifier_block, 320 cpufreq_unregister_notifier(cs_ops->notifier_block,
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 8ac33538d0bd..e16a96130cb3 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -211,6 +211,7 @@ struct common_dbs_data {
211struct dbs_data { 211struct dbs_data {
212 struct common_dbs_data *cdata; 212 struct common_dbs_data *cdata;
213 unsigned int min_sampling_rate; 213 unsigned int min_sampling_rate;
214 int usage_count;
214 void *tuners; 215 void *tuners;
215 216
216 /* dbs_mutex protects dbs_enable in governor start/stop */ 217 /* dbs_mutex protects dbs_enable in governor start/stop */
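Taken together, the cpufreq_governor.c and cpufreq_governor.h hunks replace the fragile "initialized == 1 or per-policy" test with plain reference counting on the shared dbs_data. A generic sketch of the scheme (names are hypothetical; an outer lock is assumed to serialize get/put, as the governor mutex does in the driver):

        #include <linux/slab.h>

        struct shared_tuners {
                int usage_count;
                /* ...tunables... */
        };

        static struct shared_tuners *tuners;    /* one instance, shared */

        static struct shared_tuners *tuners_get(void)
        {
                if (tuners) {
                        tuners->usage_count++;  /* later user: share it */
                        return tuners;
                }
                tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
                if (tuners)
                        tuners->usage_count = 1;        /* first user */
                return tuners;
        }

        static void tuners_put(void)
        {
                if (!--tuners->usage_count) {   /* last user tears down */
                        kfree(tuners);
                        tuners = NULL;
                }
        }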
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index b0ffef96bf77..4b9bb5def6f1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -547,7 +547,6 @@ static int od_init(struct dbs_data *dbs_data)
547 tuners->io_is_busy = should_io_be_busy(); 547 tuners->io_is_busy = should_io_be_busy();
548 548
549 dbs_data->tuners = tuners; 549 dbs_data->tuners = tuners;
550 pr_info("%s: tuners %p\n", __func__, tuners);
551 mutex_init(&dbs_data->mutex); 550 mutex_init(&dbs_data->mutex);
552 return 0; 551 return 0;
553} 552}
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index bfd6273fd873..fb65decffa28 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -349,15 +349,16 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
349 349
350 switch (action) { 350 switch (action) {
351 case CPU_ONLINE: 351 case CPU_ONLINE:
352 case CPU_ONLINE_FROZEN:
353 cpufreq_update_policy(cpu); 352 cpufreq_update_policy(cpu);
354 break; 353 break;
355 case CPU_DOWN_PREPARE: 354 case CPU_DOWN_PREPARE:
356 case CPU_DOWN_PREPARE_FROZEN:
357 cpufreq_stats_free_sysfs(cpu); 355 cpufreq_stats_free_sysfs(cpu);
358 break; 356 break;
359 case CPU_DEAD: 357 case CPU_DEAD:
360 case CPU_DEAD_FROZEN: 358 cpufreq_stats_free_table(cpu);
359 break;
360 case CPU_UP_CANCELED_FROZEN:
361 cpufreq_stats_free_sysfs(cpu);
361 cpufreq_stats_free_table(cpu); 362 cpufreq_stats_free_table(cpu);
362 break; 363 break;
363 } 364 }
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cc3a8e6c92be..9c36ace92a39 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
48} 48}
49 49
50struct sample { 50struct sample {
51 ktime_t start_time;
52 ktime_t end_time;
53 int core_pct_busy; 51 int core_pct_busy;
54 int pstate_pct_busy;
55 u64 duration_us;
56 u64 idletime_us;
57 u64 aperf; 52 u64 aperf;
58 u64 mperf; 53 u64 mperf;
59 int freq; 54 int freq;
@@ -86,13 +81,9 @@ struct cpudata {
86 struct pstate_adjust_policy *pstate_policy; 81 struct pstate_adjust_policy *pstate_policy;
87 struct pstate_data pstate; 82 struct pstate_data pstate;
88 struct _pid pid; 83 struct _pid pid;
89 struct _pid idle_pid;
90 84
91 int min_pstate_count; 85 int min_pstate_count;
92 int idle_mode;
93 86
94 ktime_t prev_sample;
95 u64 prev_idle_time_us;
96 u64 prev_aperf; 87 u64 prev_aperf;
97 u64 prev_mperf; 88 u64 prev_mperf;
98 int sample_ptr; 89 int sample_ptr;
@@ -124,6 +115,8 @@ struct perf_limits {
124 int min_perf_pct; 115 int min_perf_pct;
125 int32_t max_perf; 116 int32_t max_perf;
126 int32_t min_perf; 117 int32_t min_perf;
118 int max_policy_pct;
119 int max_sysfs_pct;
127}; 120};
128 121
129static struct perf_limits limits = { 122static struct perf_limits limits = {
@@ -132,6 +125,8 @@ static struct perf_limits limits = {
132 .max_perf = int_tofp(1), 125 .max_perf = int_tofp(1),
133 .min_perf_pct = 0, 126 .min_perf_pct = 0,
134 .min_perf = 0, 127 .min_perf = 0,
128 .max_policy_pct = 100,
129 .max_sysfs_pct = 100,
135}; 130};
136 131
137static inline void pid_reset(struct _pid *pid, int setpoint, int busy, 132static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -202,19 +197,6 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
202 0); 197 0);
203} 198}
204 199
205static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
206{
207 pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
208 pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
209 pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);
210
211 pid_reset(&cpu->idle_pid,
212 75,
213 50,
214 cpu->pstate_policy->deadband,
215 0);
216}
217
218static inline void intel_pstate_reset_all_pid(void) 200static inline void intel_pstate_reset_all_pid(void)
219{ 201{
220 unsigned int cpu; 202 unsigned int cpu;
@@ -302,7 +284,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
302 if (ret != 1) 284 if (ret != 1)
303 return -EINVAL; 285 return -EINVAL;
304 286
305 limits.max_perf_pct = clamp_t(int, input, 0 , 100); 287 limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
288 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
306 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 289 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
307 return count; 290 return count;
308} 291}
@@ -408,9 +391,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
408 if (pstate == cpu->pstate.current_pstate) 391 if (pstate == cpu->pstate.current_pstate)
409 return; 392 return;
410 393
411#ifndef MODULE
412 trace_cpu_frequency(pstate * 100000, cpu->cpu); 394 trace_cpu_frequency(pstate * 100000, cpu->cpu);
413#endif 395
414 cpu->pstate.current_pstate = pstate; 396 cpu->pstate.current_pstate = pstate;
415 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); 397 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
416 398
@@ -450,48 +432,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
450 struct sample *sample) 432 struct sample *sample)
451{ 433{
452 u64 core_pct; 434 u64 core_pct;
453 sample->pstate_pct_busy = 100 - div64_u64(
454 sample->idletime_us * 100,
455 sample->duration_us);
456 core_pct = div64_u64(sample->aperf * 100, sample->mperf); 435 core_pct = div64_u64(sample->aperf * 100, sample->mperf);
457 sample->freq = cpu->pstate.max_pstate * core_pct * 1000; 436 sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
458 437
459 sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), 438 sample->core_pct_busy = core_pct;
460 100);
461} 439}
462 440
463static inline void intel_pstate_sample(struct cpudata *cpu) 441static inline void intel_pstate_sample(struct cpudata *cpu)
464{ 442{
465 ktime_t now;
466 u64 idle_time_us;
467 u64 aperf, mperf; 443 u64 aperf, mperf;
468 444
469 now = ktime_get();
470 idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
471
472 rdmsrl(MSR_IA32_APERF, aperf); 445 rdmsrl(MSR_IA32_APERF, aperf);
473 rdmsrl(MSR_IA32_MPERF, mperf); 446 rdmsrl(MSR_IA32_MPERF, mperf);
474 /* for the first sample, don't actually record a sample, just 447 cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
475 * set the baseline */ 448 cpu->samples[cpu->sample_ptr].aperf = aperf;
476 if (cpu->prev_idle_time_us > 0) { 449 cpu->samples[cpu->sample_ptr].mperf = mperf;
477 cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; 450 cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
478 cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample; 451 cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
479 cpu->samples[cpu->sample_ptr].end_time = now; 452
480 cpu->samples[cpu->sample_ptr].duration_us = 453 intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
481 ktime_us_delta(now, cpu->prev_sample);
482 cpu->samples[cpu->sample_ptr].idletime_us =
483 idle_time_us - cpu->prev_idle_time_us;
484
485 cpu->samples[cpu->sample_ptr].aperf = aperf;
486 cpu->samples[cpu->sample_ptr].mperf = mperf;
487 cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
488 cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
489
490 intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
491 }
492 454
493 cpu->prev_sample = now;
494 cpu->prev_idle_time_us = idle_time_us;
495 cpu->prev_aperf = aperf; 455 cpu->prev_aperf = aperf;
496 cpu->prev_mperf = mperf; 456 cpu->prev_mperf = mperf;
497} 457}
@@ -505,16 +465,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
505 mod_timer_pinned(&cpu->timer, jiffies + delay); 465 mod_timer_pinned(&cpu->timer, jiffies + delay);
506} 466}
507 467
508static inline void intel_pstate_idle_mode(struct cpudata *cpu)
509{
510 cpu->idle_mode = 1;
511}
512
513static inline void intel_pstate_normal_mode(struct cpudata *cpu)
514{
515 cpu->idle_mode = 0;
516}
517
518static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) 468static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
519{ 469{
520 int32_t busy_scaled; 470 int32_t busy_scaled;
@@ -547,50 +497,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
547 intel_pstate_pstate_decrease(cpu, steps); 497 intel_pstate_pstate_decrease(cpu, steps);
548} 498}
549 499
550static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
551{
552 int busy_scaled;
553 struct _pid *pid;
554 int ctl = 0;
555 int steps;
556
557 pid = &cpu->idle_pid;
558
559 busy_scaled = intel_pstate_get_scaled_busy(cpu);
560
561 ctl = pid_calc(pid, 100 - busy_scaled);
562
563 steps = abs(ctl);
564 if (ctl < 0)
565 intel_pstate_pstate_decrease(cpu, steps);
566 else
567 intel_pstate_pstate_increase(cpu, steps);
568
569 if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
570 intel_pstate_normal_mode(cpu);
571}
572
573static void intel_pstate_timer_func(unsigned long __data) 500static void intel_pstate_timer_func(unsigned long __data)
574{ 501{
575 struct cpudata *cpu = (struct cpudata *) __data; 502 struct cpudata *cpu = (struct cpudata *) __data;
576 503
577 intel_pstate_sample(cpu); 504 intel_pstate_sample(cpu);
505 intel_pstate_adjust_busy_pstate(cpu);
578 506
579 if (!cpu->idle_mode)
580 intel_pstate_adjust_busy_pstate(cpu);
581 else
582 intel_pstate_adjust_idle_pstate(cpu);
583
584#if defined(XPERF_FIX)
585 if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { 507 if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
586 cpu->min_pstate_count++; 508 cpu->min_pstate_count++;
587 if (!(cpu->min_pstate_count % 5)) { 509 if (!(cpu->min_pstate_count % 5)) {
588 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); 510 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
589 intel_pstate_idle_mode(cpu);
590 } 511 }
591 } else 512 } else
592 cpu->min_pstate_count = 0; 513 cpu->min_pstate_count = 0;
593#endif 514
594 intel_pstate_set_sample_time(cpu); 515 intel_pstate_set_sample_time(cpu);
595} 516}
596 517
@@ -631,7 +552,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
631 (unsigned long)cpu; 552 (unsigned long)cpu;
632 cpu->timer.expires = jiffies + HZ/100; 553 cpu->timer.expires = jiffies + HZ/100;
633 intel_pstate_busy_pid_reset(cpu); 554 intel_pstate_busy_pid_reset(cpu);
634 intel_pstate_idle_pid_reset(cpu);
635 intel_pstate_sample(cpu); 555 intel_pstate_sample(cpu);
636 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); 556 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
637 557
@@ -675,8 +595,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
675 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); 595 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
676 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 596 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
677 597
678 limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq; 598 limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
679 limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100); 599 limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
600 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
680 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 601 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
681 602
682 return 0; 603 return 0;
@@ -788,10 +709,9 @@ static int __init intel_pstate_init(void)
788 709
789 pr_info("Intel P-state driver initializing.\n"); 710 pr_info("Intel P-state driver initializing.\n");
790 711
791 all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); 712 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
792 if (!all_cpu_data) 713 if (!all_cpu_data)
793 return -ENOMEM; 714 return -ENOMEM;
794 memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus());
795 715
796 rc = cpufreq_register_driver(&intel_pstate_driver); 716 rc = cpufreq_register_driver(&intel_pstate_driver);
797 if (rc) 717 if (rc)
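After this hunk the busy estimate is nothing but the APERF/MPERF delta ratio; the ktime, idle-time, and idle-PID machinery is gone, and vzalloc() replaces the vmalloc()+memset() pair. A stand-alone sketch of the arithmetic that remains (the struct is a stand-in for the driver's struct sample):

        #include <linux/math64.h>

        struct busy_sample {
                int core_pct_busy;
                u64 aperf;      /* APERF delta over the sample window */
                u64 mperf;      /* MPERF delta over the sample window */
        };

        /* Percent of maximum non-idle frequency the core sustained:
         * APERF counts actual cycles, MPERF counts at the guaranteed
         * ratio, so their quotient is the scaling factor in percent. */
        static void calc_busy(struct busy_sample *s)
        {
                s->core_pct_busy = div64_u64(s->aperf * 100, s->mperf);
        }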
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index d36ea8dc96eb..b2644af985ec 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -171,10 +171,6 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
171 priv.dev = &pdev->dev; 171 priv.dev = &pdev->dev;
172 172
173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
174 if (!res) {
175 dev_err(&pdev->dev, "Cannot get memory resource\n");
176 return -ENODEV;
177 }
178 priv.base = devm_ioremap_resource(&pdev->dev, res); 174 priv.base = devm_ioremap_resource(&pdev->dev, res);
179 if (IS_ERR(priv.base)) 175 if (IS_ERR(priv.base))
180 return PTR_ERR(priv.base); 176 return PTR_ERR(priv.base);
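devm_ioremap_resource() already validates its resource argument (a NULL res comes back as ERR_PTR(-EINVAL) with its own dev_err()), which is why the explicit !res check could go. The whole pattern shrinks to:

        #include <linux/err.h>
        #include <linux/io.h>
        #include <linux/platform_device.h>

        static void __iomem *map_regs(struct platform_device *pdev)
        {
                struct resource *res;

                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                /* NULL res is handled inside devm_ioremap_resource() */
                return devm_ioremap_resource(&pdev->dev, res);
        }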
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3a8f7e6db295..e7e92429d10f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
78{ 78{
79 struct drm_crtc *crtc; 79 struct drm_crtc *crtc;
80 80
81 /* Locking is currently fubar in the panic handler. */
82 if (oops_in_progress)
83 return;
84
81 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 85 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
82 WARN_ON(!mutex_is_locked(&crtc->mutex)); 86 WARN_ON(!mutex_is_locked(&crtc->mutex));
83 87
@@ -246,6 +250,7 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
246 else 250 else
247 return "unknown"; 251 return "unknown";
248} 252}
253EXPORT_SYMBOL(drm_get_connector_status_name);
249 254
250/** 255/**
251 * drm_mode_object_get - allocate a new modeset identifier 256 * drm_mode_object_get - allocate a new modeset identifier
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index e974f9309b72..ed1334e27c33 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -121,6 +121,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
121 connector->helper_private; 121 connector->helper_private;
122 int count = 0; 122 int count = 0;
123 int mode_flags = 0; 123 int mode_flags = 0;
124 bool verbose_prune = true;
124 125
125 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 126 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
126 drm_get_connector_name(connector)); 127 drm_get_connector_name(connector));
@@ -149,6 +150,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
149 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", 150 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
150 connector->base.id, drm_get_connector_name(connector)); 151 connector->base.id, drm_get_connector_name(connector));
151 drm_mode_connector_update_edid_property(connector, NULL); 152 drm_mode_connector_update_edid_property(connector, NULL);
153 verbose_prune = false;
152 goto prune; 154 goto prune;
153 } 155 }
154 156
@@ -182,7 +184,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
182 } 184 }
183 185
184prune: 186prune:
185 drm_mode_prune_invalid(dev, &connector->modes, true); 187 drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
186 188
187 if (list_empty(&connector->modes)) 189 if (list_empty(&connector->modes))
188 return 0; 190 return 0;
@@ -1005,12 +1007,20 @@ static void output_poll_execute(struct work_struct *work)
1005 continue; 1007 continue;
1006 1008
1007 connector->status = connector->funcs->detect(connector, false); 1009 connector->status = connector->funcs->detect(connector, false);
1008 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 1010 if (old_status != connector->status) {
1009 connector->base.id, 1011 const char *old, *new;
1010 drm_get_connector_name(connector), 1012
1011 old_status, connector->status); 1013 old = drm_get_connector_status_name(old_status);
1012 if (old_status != connector->status) 1014 new = drm_get_connector_status_name(connector->status);
1015
1016 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
1017 "status updated from %s to %s\n",
1018 connector->base.id,
1019 drm_get_connector_name(connector),
1020 old, new);
1021
1013 changed = true; 1022 changed = true;
1023 }
1014 } 1024 }
1015 1025
1016 mutex_unlock(&dev->mode_config.mutex); 1026 mutex_unlock(&dev->mode_config.mutex);
@@ -1083,10 +1093,11 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
1083 old_status = connector->status; 1093 old_status = connector->status;
1084 1094
1085 connector->status = connector->funcs->detect(connector, false); 1095 connector->status = connector->funcs->detect(connector, false);
1086 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 1096 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
1087 connector->base.id, 1097 connector->base.id,
1088 drm_get_connector_name(connector), 1098 drm_get_connector_name(connector),
1089 old_status, connector->status); 1099 drm_get_connector_status_name(old_status),
1100 drm_get_connector_status_name(connector->status));
1090 if (old_status != connector->status) 1101 if (old_status != connector->status)
1091 changed = true; 1102 changed = true;
1092 } 1103 }
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8d4f29075af5..9cc247f55502 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -57,7 +57,7 @@ static int drm_version(struct drm_device *dev, void *data,
57 struct drm_file *file_priv); 57 struct drm_file *file_priv);
58 58
59#define DRM_IOCTL_DEF(ioctl, _func, _flags) \ 59#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
60 [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0} 60 [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
61 61
62/** Ioctl table */ 62/** Ioctl table */
63static const struct drm_ioctl_desc drm_ioctls[] = { 63static const struct drm_ioctl_desc drm_ioctls[] = {
@@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp,
375{ 375{
376 struct drm_file *file_priv = filp->private_data; 376 struct drm_file *file_priv = filp->private_data;
377 struct drm_device *dev; 377 struct drm_device *dev;
378 const struct drm_ioctl_desc *ioctl; 378 const struct drm_ioctl_desc *ioctl = NULL;
379 drm_ioctl_t *func; 379 drm_ioctl_t *func;
380 unsigned int nr = DRM_IOCTL_NR(cmd); 380 unsigned int nr = DRM_IOCTL_NR(cmd);
381 int retcode = -EINVAL; 381 int retcode = -EINVAL;
@@ -392,11 +392,6 @@ long drm_ioctl(struct file *filp,
392 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); 392 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
393 ++file_priv->ioctl_count; 393 ++file_priv->ioctl_count;
394 394
395 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
396 task_pid_nr(current), cmd, nr,
397 (long)old_encode_dev(file_priv->minor->device),
398 file_priv->authenticated);
399
400 if ((nr >= DRM_CORE_IOCTL_COUNT) && 395 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
401 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) 396 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
402 goto err_i1; 397 goto err_i1;
@@ -417,6 +412,11 @@ long drm_ioctl(struct file *filp,
417 } else 412 } else
418 goto err_i1; 413 goto err_i1;
419 414
415 DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
416 task_pid_nr(current),
417 (long)old_encode_dev(file_priv->minor->device),
418 file_priv->authenticated, ioctl->name);
419
420 /* Do not trust userspace, use our own definition */ 420 /* Do not trust userspace, use our own definition */
421 func = ioctl->func; 421 func = ioctl->func;
422 /* is there a local override? */ 422 /* is there a local override? */
@@ -471,6 +471,12 @@ long drm_ioctl(struct file *filp,
471 } 471 }
472 472
473 err_i1: 473 err_i1:
474 if (!ioctl)
475 DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
476 task_pid_nr(current),
477 (long)old_encode_dev(file_priv->minor->device),
478 file_priv->authenticated, cmd, nr);
479
474 if (kdata != stack_kdata) 480 if (kdata != stack_kdata)
475 kfree(kdata); 481 kfree(kdata);
476 atomic_dec(&dev->ioctl_count); 482 atomic_dec(&dev->ioctl_count);
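The .name field is filled by the preprocessor's stringize operator: the "#ioctl" in DRM_IOCTL_DEF turns the macro argument into a string literal, so the debug path can print the ioctl by name instead of by raw number. A tiny stand-alone demonstration of the operator (plain user-space C, not DRM code):

        #include <stdio.h>

        #define CMD_RESET 7

        /* The # operator stringizes the macro argument at expansion
         * time, so the table stores both the value and its name. */
        #define DEF_CMD(cmd)    { (cmd), #cmd }

        struct cmd_desc {
                int nr;
                const char *name;
        };

        static const struct cmd_desc table[] = {
                DEF_CMD(CMD_RESET),
        };

        int main(void)
        {
                printf("%d -> %s\n", table[0].nr, table[0].name);
                return 0;       /* prints: 7 -> CMD_RESET */
        }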
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 48c52f7df4e6..0cfb60f54766 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
54 struct i2c_adapter *adap, 54 struct i2c_adapter *adap,
55 const struct i2c_board_info *info) 55 const struct i2c_board_info *info)
56{ 56{
57 char modalias[sizeof(I2C_MODULE_PREFIX)
58 + I2C_NAME_SIZE];
59 struct module *module = NULL; 57 struct module *module = NULL;
60 struct i2c_client *client; 58 struct i2c_client *client;
61 struct drm_i2c_encoder_driver *encoder_drv; 59 struct drm_i2c_encoder_driver *encoder_drv;
62 int err = 0; 60 int err = 0;
63 61
64 snprintf(modalias, sizeof(modalias), 62 request_module("%s%s", I2C_MODULE_PREFIX, info->type);
65 "%s%s", I2C_MODULE_PREFIX, info->type);
66 request_module(modalias);
67 63
68 client = i2c_new_device(adap, info); 64 client = i2c_new_device(adap, info);
69 if (!client) { 65 if (!client) {
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index db1e2d6f90d7..07cf99cc8862 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
755EXPORT_SYMBOL(drm_mm_debug_table); 755EXPORT_SYMBOL(drm_mm_debug_table);
756 756
757#if defined(CONFIG_DEBUG_FS) 757#if defined(CONFIG_DEBUG_FS)
758int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) 758static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
759{ 759{
760 struct drm_mm_node *entry;
761 unsigned long total_used = 0, total_free = 0, total = 0;
762 unsigned long hole_start, hole_end, hole_size; 760 unsigned long hole_start, hole_end, hole_size;
763 761
764 hole_start = drm_mm_hole_node_start(&mm->head_node); 762 if (entry->hole_follows) {
765 hole_end = drm_mm_hole_node_end(&mm->head_node); 763 hole_start = drm_mm_hole_node_start(entry);
766 hole_size = hole_end - hole_start; 764 hole_end = drm_mm_hole_node_end(entry);
767 if (hole_size) 765 hole_size = hole_end - hole_start;
768 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", 766 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
769 hole_start, hole_end, hole_size); 767 hole_start, hole_end, hole_size);
770 total_free += hole_size; 768 return hole_size;
769 }
770
771 return 0;
772}
773
774int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
775{
776 struct drm_mm_node *entry;
777 unsigned long total_used = 0, total_free = 0, total = 0;
778
779 total_free += drm_mm_dump_hole(m, &mm->head_node);
771 780
772 drm_mm_for_each_node(entry, mm) { 781 drm_mm_for_each_node(entry, mm) {
773 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", 782 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
774 entry->start, entry->start + entry->size, 783 entry->start, entry->start + entry->size,
775 entry->size); 784 entry->size);
776 total_used += entry->size; 785 total_used += entry->size;
777 if (entry->hole_follows) { 786 total_free += drm_mm_dump_hole(m, entry);
778 hole_start = drm_mm_hole_node_start(entry);
779 hole_end = drm_mm_hole_node_end(entry);
780 hole_size = hole_end - hole_start;
781 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
782 hole_start, hole_end, hole_size);
783 total_free += hole_size;
784 }
785 } 787 }
786 total = total_free + total_used; 788 total = total_free + total_used;
787 789
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index faa79df02648..a371ff865a88 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1143,6 +1143,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
1143 was_digit = false; 1143 was_digit = false;
1144 } else 1144 } else
1145 goto done; 1145 goto done;
1146 break;
1146 case '0' ... '9': 1147 case '0' ... '9':
1147 was_digit = true; 1148 was_digit = true;
1148 break; 1149 break;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6be940effefd..6165535d15f0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1045,6 +1045,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1045 if (timeout) { 1045 if (timeout) {
1046 struct timespec sleep_time = timespec_sub(now, before); 1046 struct timespec sleep_time = timespec_sub(now, before);
1047 *timeout = timespec_sub(*timeout, sleep_time); 1047 *timeout = timespec_sub(*timeout, sleep_time);
1048 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1049 set_normalized_timespec(timeout, 0, 0);
1048 } 1050 }
1049 1051
1050 switch (end) { 1052 switch (end) {
@@ -1053,8 +1055,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1053 case -ERESTARTSYS: /* Signal */ 1055 case -ERESTARTSYS: /* Signal */
1054 return (int)end; 1056 return (int)end;
1055 case 0: /* Timeout */ 1057 case 0: /* Timeout */
1056 if (timeout)
1057 set_normalized_timespec(timeout, 0, 0);
1058 return -ETIME; 1058 return -ETIME;
1059 default: /* Completed */ 1059 default: /* Completed */
1060 WARN_ON(end < 0); /* We're not aware of other errors */ 1060 WARN_ON(end < 0); /* We're not aware of other errors */
@@ -2377,10 +2377,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2377 mutex_unlock(&dev->struct_mutex); 2377 mutex_unlock(&dev->struct_mutex);
2378 2378
2379 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); 2379 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2380 if (timeout) { 2380 if (timeout)
2381 WARN_ON(!timespec_valid(timeout));
2382 args->timeout_ns = timespec_to_ns(timeout); 2381 args->timeout_ns = timespec_to_ns(timeout);
2383 }
2384 return ret; 2382 return ret;
2385 2383
2386out: 2384out:
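Moving the clamp closes a small race: __wait_seqno() can complete successfully just after the deadline, leaving a negative remainder once the sleep time is subtracted, and user space should never see a negative wait. A sketch of the normalization using the same kernel time helpers as the hunk (the wrapper function is mine):

        #include <linux/time.h>

        static void update_remaining(struct timespec *timeout,
                                     struct timespec before,
                                     struct timespec now)
        {
                struct timespec slept = timespec_sub(now, before);

                *timeout = timespec_sub(*timeout, slept);
                if (!timespec_valid(timeout))   /* tv_sec went negative */
                        set_normalized_timespec(timeout, 0, 0);
        }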
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index dca614de71b6..bdb0d7717bc7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -709,15 +709,6 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
709 return snb_gmch_ctl << 25; /* 32 MB units */ 709 return snb_gmch_ctl << 25; /* 32 MB units */
710} 710}
711 711
712static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
713{
714 static const int stolen_decoder[] = {
715 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
716 snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
717 snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
718 return stolen_decoder[snb_gmch_ctl] << 20;
719}
720
721static int gen6_gmch_probe(struct drm_device *dev, 712static int gen6_gmch_probe(struct drm_device *dev,
722 size_t *gtt_total, 713 size_t *gtt_total,
723 size_t *stolen, 714 size_t *stolen,
@@ -747,11 +738,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
747 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 738 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
748 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); 739 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
749 740
750 if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) 741 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
751 *stolen = gen7_get_stolen_size(snb_gmch_ctl);
752 else
753 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
754
755 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; 742 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
756 743
757 /* For Modern GENs the PTEs and register space are split in the BAR */ 744 /* For Modern GENs the PTEs and register space are split in the BAR */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 83f9c26e1adb..2d6b62e42daf 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -46,8 +46,6 @@
46#define SNB_GMCH_GGMS_MASK 0x3 46#define SNB_GMCH_GGMS_MASK 0x3
47#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ 47#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
48#define SNB_GMCH_GMS_MASK 0x1f 48#define SNB_GMCH_GMS_MASK 0x1f
49#define IVB_GMCH_GMS_SHIFT 4
50#define IVB_GMCH_GMS_MASK 0xf
51 49
52 50
53/* PCI config space */ 51/* PCI config space */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 26a0a570f92e..fb961bb81903 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1265,6 +1265,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1265 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1265 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1266 intel_dp_start_link_train(intel_dp); 1266 intel_dp_start_link_train(intel_dp);
1267 intel_dp_complete_link_train(intel_dp); 1267 intel_dp_complete_link_train(intel_dp);
1268 if (port != PORT_A)
1269 intel_dp_stop_link_train(intel_dp);
1268 } 1270 }
1269} 1271}
1270 1272
@@ -1326,6 +1328,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1326 } else if (type == INTEL_OUTPUT_EDP) { 1328 } else if (type == INTEL_OUTPUT_EDP) {
1327 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1329 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1328 1330
1331 if (port == PORT_A)
1332 intel_dp_stop_link_train(intel_dp);
1333
1329 ironlake_edp_backlight_on(intel_dp); 1334 ironlake_edp_backlight_on(intel_dp);
1330 } 1335 }
1331 1336
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fb2fbc1e08b9..3d704b706a8d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -702,6 +702,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
702 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 702 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
703 * bpc in between. */ 703 * bpc in between. */
704 bpp = min_t(int, 8*3, pipe_config->pipe_bpp); 704 bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
705 if (is_edp(intel_dp) && dev_priv->edp.bpp)
706 bpp = min_t(int, bpp, dev_priv->edp.bpp);
707
705 for (; bpp >= 6*3; bpp -= 2*3) { 708 for (; bpp >= 6*3; bpp -= 2*3) {
706 mode_rate = intel_dp_link_required(target_clock, bpp); 709 mode_rate = intel_dp_link_required(target_clock, bpp);
707 710
@@ -739,6 +742,7 @@ found:
739 intel_dp->link_bw = bws[clock]; 742 intel_dp->link_bw = bws[clock];
740 intel_dp->lane_count = lane_count; 743 intel_dp->lane_count = lane_count;
741 adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 744 adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
745 pipe_config->pipe_bpp = bpp;
742 pipe_config->pixel_target_clock = target_clock; 746 pipe_config->pixel_target_clock = target_clock;
743 747
744 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", 748 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
@@ -751,20 +755,6 @@ found:
751 target_clock, adjusted_mode->clock, 755 target_clock, adjusted_mode->clock,
752 &pipe_config->dp_m_n); 756 &pipe_config->dp_m_n);
753 757
754 /*
755 * XXX: We have a strange regression where using the vbt edp bpp value
756 * for the link bw computation results in black screens, the panel only
757 * works when we do the computation at the usual 24bpp (but still
758 * requires us to use 18bpp). Until that's fully debugged, stay
759 * bug-for-bug compatible with the old code.
760 */
761 if (is_edp(intel_dp) && dev_priv->edp.bpp) {
762 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n",
763 bpp, dev_priv->edp.bpp);
764 bpp = min_t(int, bpp, dev_priv->edp.bpp);
765 }
766 pipe_config->pipe_bpp = bpp;
767
768 return true; 758 return true;
769} 759}
770 760
@@ -1389,6 +1379,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
1389 ironlake_edp_panel_on(intel_dp); 1379 ironlake_edp_panel_on(intel_dp);
1390 ironlake_edp_panel_vdd_off(intel_dp, true); 1380 ironlake_edp_panel_vdd_off(intel_dp, true);
1391 intel_dp_complete_link_train(intel_dp); 1381 intel_dp_complete_link_train(intel_dp);
1382 intel_dp_stop_link_train(intel_dp);
1392 ironlake_edp_backlight_on(intel_dp); 1383 ironlake_edp_backlight_on(intel_dp);
1393} 1384}
1394 1385
@@ -1711,10 +1702,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1711 struct drm_i915_private *dev_priv = dev->dev_private; 1702 struct drm_i915_private *dev_priv = dev->dev_private;
1712 enum port port = intel_dig_port->port; 1703 enum port port = intel_dig_port->port;
1713 int ret; 1704 int ret;
1714 uint32_t temp;
1715 1705
1716 if (HAS_DDI(dev)) { 1706 if (HAS_DDI(dev)) {
1717 temp = I915_READ(DP_TP_CTL(port)); 1707 uint32_t temp = I915_READ(DP_TP_CTL(port));
1718 1708
1719 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) 1709 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
1720 temp |= DP_TP_CTL_SCRAMBLE_DISABLE; 1710 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -1724,18 +1714,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1724 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1714 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1725 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1715 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1726 case DP_TRAINING_PATTERN_DISABLE: 1716 case DP_TRAINING_PATTERN_DISABLE:
1727
1728 if (port != PORT_A) {
1729 temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
1730 I915_WRITE(DP_TP_CTL(port), temp);
1731
1732 if (wait_for((I915_READ(DP_TP_STATUS(port)) &
1733 DP_TP_STATUS_IDLE_DONE), 1))
1734 DRM_ERROR("Timed out waiting for DP idle patterns\n");
1735
1736 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1737 }
1738
1739 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 1717 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
1740 1718
1741 break; 1719 break;
@@ -1811,6 +1789,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1811 return true; 1789 return true;
1812} 1790}
1813 1791
1792static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
1793{
1794 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1795 struct drm_device *dev = intel_dig_port->base.base.dev;
1796 struct drm_i915_private *dev_priv = dev->dev_private;
1797 enum port port = intel_dig_port->port;
1798 uint32_t val;
1799
1800 if (!HAS_DDI(dev))
1801 return;
1802
1803 val = I915_READ(DP_TP_CTL(port));
1804 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1805 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
1806 I915_WRITE(DP_TP_CTL(port), val);
1807
1808 /*
1809 * On PORT_A we can have only eDP in SST mode. There the only reason
1810 * we need to set idle transmission mode is to work around a HW issue
1811 * where we enable the pipe while not in idle link-training mode.
1813 * In this case there is a requirement to wait for a minimum number of
1813 * idle patterns to be sent.
1814 */
1815 if (port == PORT_A)
1816 return;
1817
1818 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
1819 1))
1820 DRM_ERROR("Timed out waiting for DP idle patterns\n");
1821}
1822
1814/* Enable corresponding port and start training pattern 1 */ 1823/* Enable corresponding port and start training pattern 1 */
1815void 1824void
1816intel_dp_start_link_train(struct intel_dp *intel_dp) 1825intel_dp_start_link_train(struct intel_dp *intel_dp)
@@ -1953,10 +1962,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1953 ++tries; 1962 ++tries;
1954 } 1963 }
1955 1964
1965 intel_dp_set_idle_link_train(intel_dp);
1966
1967 intel_dp->DP = DP;
1968
1956 if (channel_eq) 1969 if (channel_eq)
1957 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); 1970 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
1958 1971
1959 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); 1972}
1973
1974void intel_dp_stop_link_train(struct intel_dp *intel_dp)
1975{
1976 intel_dp_set_link_train(intel_dp, intel_dp->DP,
1977 DP_TRAINING_PATTERN_DISABLE);
1960} 1978}
1961 1979
1962static void 1980static void
@@ -2164,6 +2182,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
2164 drm_get_encoder_name(&intel_encoder->base)); 2182 drm_get_encoder_name(&intel_encoder->base));
2165 intel_dp_start_link_train(intel_dp); 2183 intel_dp_start_link_train(intel_dp);
2166 intel_dp_complete_link_train(intel_dp); 2184 intel_dp_complete_link_train(intel_dp);
2185 intel_dp_stop_link_train(intel_dp);
2167 } 2186 }
2168} 2187}
2169 2188
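The hunks above restructure DP link training: channel equalization now ends with the port transmitting idle patterns (intel_dp_set_idle_link_train()), and the final switch to normal output moves into the new intel_dp_stop_link_train(). A condensed sketch of the resulting call sequence; the wrapper function here is hypothetical and the driver plumbing around it is elided:

static void example_dp_enable_sequence(struct intel_dp *intel_dp)
{
	/* clock recovery: transmit training pattern 1 */
	intel_dp_start_link_train(intel_dp);
	/* channel EQ: patterns 2/3, then leave the port in idle mode */
	intel_dp_complete_link_train(intel_dp);
	/* the pipe can be enabled at this point, while idle patterns are
	 * still being sent (works around a HW issue on eDP port A) */
	intel_dp_stop_link_train(intel_dp);	/* DP_TRAINING_PATTERN_DISABLE */
}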
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b5b6d19e6dd3..624a9e6b8d71 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -499,6 +499,7 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
 extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
 extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 0e19e575a1b4..6b7c3ca2c035 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -262,10 +262,22 @@ void intel_fbdev_fini(struct drm_device *dev)
 void intel_fbdev_set_suspend(struct drm_device *dev, int state)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	if (!dev_priv->fbdev)
+	struct intel_fbdev *ifbdev = dev_priv->fbdev;
+	struct fb_info *info;
+
+	if (!ifbdev)
 		return;
 
-	fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+	info = ifbdev->helper.fbdev;
+
+	/* On resume from hibernation: If the object is shmemfs backed, it has
+	 * been restored from swap. If the object is stolen however, it will be
+	 * full of whatever garbage was left in there.
+	 */
+	if (!state && ifbdev->ifb.obj->stolen)
+		memset_io(info->screen_base, 0, info->screen_size);
+
+	fb_set_suspend(info, state);
 }
 
 MODULE_LICENSE("GPL and additional rights");
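Worth noting in the hunk above: info->screen_base is an __iomem mapping, so the scrub has to go through memset_io() rather than a plain memset(). A minimal sketch of the pattern, using the guard from the patch:

/* On resume (state == 0), stolen-memory framebuffers were not saved in
 * the hibernation image, so clear them through the __iomem accessor. */
if (!state && ifbdev->ifb.obj->stolen)
	memset_io(info->screen_base, 0, info->screen_size);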
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index de3b0dc5658b..aa01128ff192 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1301,17 +1301,17 @@ static void valleyview_update_wm(struct drm_device *dev)
 
 	vlv_update_drain_latency(dev);
 
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
 			    &valleyview_wm_info, latency_ns,
 			    &valleyview_cursor_wm_info, latency_ns,
 			    &planea_wm, &cursora_wm))
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
 
-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
 			    &valleyview_wm_info, latency_ns,
 			    &valleyview_cursor_wm_info, latency_ns,
 			    &planeb_wm, &cursorb_wm))
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
 
 	if (single_plane_enabled(enabled) &&
 	    g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1357,17 +1357,17 @@ static void g4x_update_wm(struct drm_device *dev)
 	int plane_sr, cursor_sr;
 	unsigned int enabled = 0;
 
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
 			    &g4x_wm_info, latency_ns,
 			    &g4x_cursor_wm_info, latency_ns,
 			    &planea_wm, &cursora_wm))
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
 
-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
 			    &g4x_wm_info, latency_ns,
 			    &g4x_cursor_wm_info, latency_ns,
 			    &planeb_wm, &cursorb_wm))
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
 
 	if (single_plane_enabled(enabled) &&
 	    g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1716,7 +1716,7 @@ static void ironlake_update_wm(struct drm_device *dev)
 	unsigned int enabled;
 
 	enabled = 0;
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
 			    &ironlake_display_wm_info,
 			    ILK_LP0_PLANE_LATENCY,
 			    &ironlake_cursor_wm_info,
@@ -1727,10 +1727,10 @@ static void ironlake_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 			      " plane %d, " "cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
 	}
 
-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
 			    &ironlake_display_wm_info,
 			    ILK_LP0_PLANE_LATENCY,
 			    &ironlake_cursor_wm_info,
@@ -1741,7 +1741,7 @@ static void ironlake_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
 	}
 
 	/*
@@ -1801,7 +1801,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
 	unsigned int enabled;
 
 	enabled = 0;
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
@@ -1812,10 +1812,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 			      " plane %d, " "cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
 	}
 
-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
@@ -1826,7 +1826,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
 	}
 
 	/*
@@ -1904,7 +1904,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
 	unsigned int enabled;
 
 	enabled = 0;
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
@@ -1915,10 +1915,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 			      " plane %d, " "cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
 	}
 
-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
@@ -1929,10 +1929,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
 	}
 
-	if (g4x_compute_wm0(dev, 2,
+	if (g4x_compute_wm0(dev, PIPE_C,
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
@@ -1943,7 +1943,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 3;
+		enabled |= 1 << PIPE_C;
 	}
 
 	/*
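Every hunk in this file swaps raw pipe numbers for enum pipe values. Besides readability, that fixes a real bug in the Ivybridge pipe C path: the old code or'ed in the literal 3, which sets the bits for pipes A and B, not pipe C. A small standalone illustration (the enum mirrors the driver's PIPE_A/B/C):

#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

int main(void)
{
	unsigned int enabled = 0;

	enabled |= 1 << PIPE_A;	/* 0x1 */
	enabled |= 1 << PIPE_C;	/* 0x4; the old "enabled |= 3" wrongly
				 * set the PIPE_A and PIPE_B bits */
	printf("enabled = 0x%x\n", enabled);	/* prints 0x5 */
	return 0;
}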
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index f9889658329b..77b8a45fb10a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -46,29 +46,26 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
 
 static inline void mga_wait_vsync(struct mga_device *mdev)
 {
-	unsigned int count = 0;
+	unsigned long timeout = jiffies + HZ/10;
 	unsigned int status = 0;
 
 	do {
 		status = RREG32(MGAREG_Status);
-		count++;
-	} while ((status & 0x08) && (count < 250000));
-	count = 0;
+	} while ((status & 0x08) && time_before(jiffies, timeout));
+	timeout = jiffies + HZ/10;
 	status = 0;
 	do {
 		status = RREG32(MGAREG_Status);
-		count++;
-	} while (!(status & 0x08) && (count < 250000));
+	} while (!(status & 0x08) && time_before(jiffies, timeout));
 }
 
 static inline void mga_wait_busy(struct mga_device *mdev)
 {
-	unsigned int count = 0;
+	unsigned long timeout = jiffies + HZ;
 	unsigned int status = 0;
 	do {
 		status = RREG8(MGAREG_Status + 2);
-		count++;
-	} while ((status & 0x01) && (count < 500000));
+	} while ((status & 0x01) && time_before(jiffies, timeout));
 }
 
 /*
@@ -189,12 +186,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-	WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_REMHEADCTL_CLKDIS;
-	WREG_DAC(MGA1064_REMHEADCTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	/* select PLL Set C */
 	tmp = RREG8(MGAREG_MEM_MISC_READ);
@@ -204,7 +201,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	udelay(500);
 
@@ -212,7 +209,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_VREF_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~0x04;
-	WREG_DAC(MGA1064_VREF_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	udelay(50);
 
@@ -236,13 +233,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
 	tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
 	tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
-	WREG_DAC(MGA1064_REMHEADCTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	/* reset dotclock rate bit */
 	WREG8(MGAREG_SEQ_INDEX, 1);
@@ -253,7 +250,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	vcount = RREG8(MGAREG_VCOUNT);
 
@@ -318,7 +315,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-	WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	tmp = RREG8(MGAREG_MEM_MISC_READ);
 	tmp |= 0x3 << 2;
@@ -326,12 +323,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 
 	WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
 	tmp = RREG8(DAC_DATA);
-	WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
+	WREG8(DAC_DATA, tmp & ~0x40);
 
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
 	WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
@@ -342,7 +339,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	udelay(500);
 
@@ -350,11 +347,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
 	tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
 	tmp = RREG8(DAC_DATA);
-	WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
+	WREG8(DAC_DATA, tmp | 0x40);
 
 	tmp = RREG8(MGAREG_MEM_MISC_READ);
 	tmp |= (0x3 << 2);
@@ -363,7 +360,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	return 0;
 }
@@ -416,7 +413,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-	WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	tmp = RREG8(MGAREG_MEM_MISC_READ);
 	tmp |= 0x3 << 2;
@@ -425,7 +422,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	udelay(500);
 
@@ -439,13 +436,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
 	tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
 	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	vcount = RREG8(MGAREG_VCOUNT);
 
@@ -515,12 +512,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-	WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_REMHEADCTL_CLKDIS;
-	WREG_DAC(MGA1064_REMHEADCTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	tmp = RREG8(MGAREG_MEM_MISC_READ);
 	tmp |= (0x3<<2) | 0xc0;
@@ -530,7 +527,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	udelay(500);
 
@@ -657,12 +654,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
 	WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
 }
 
-
+/*
+   This is how the framebuffer base address is stored in g200 cards:
+   * Assume @offset is the gpu_addr variable of the framebuffer object
+   * Then addr is the number of _pixels_ (not bytes) from the start of
+     VRAM to the first pixel we want to display. (divided by 2 for 32bit
+     framebuffers)
+   * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
+   addr<20> -> CRTCEXT0<6>
+   addr<19-16> -> CRTCEXT0<3-0>
+   addr<15-8> -> CRTCC<7-0>
+   addr<7-0> -> CRTCD<7-0>
+   CRTCEXT0 has to be programmed last to trigger an update and make the
+   new addr variable take effect.
+ */
 void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
 {
 	struct mga_device *mdev = crtc->dev->dev_private;
 	u32 addr;
 	int count;
+	u8 crtcext0;
 
 	while (RREG8(0x1fda) & 0x08);
 	while (!(RREG8(0x1fda) & 0x08));
@@ -670,10 +681,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
 	count = RREG8(MGAREG_VCOUNT) + 2;
 	while (RREG8(MGAREG_VCOUNT) < count);
 
-	addr = offset >> 2;
+	WREG8(MGAREG_CRTCEXT_INDEX, 0);
+	crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
+	crtcext0 &= 0xB0;
+	addr = offset / 8;
+	/* Can't store addresses any higher than that...
+	   but we also don't have more than 16MB of memory, so it should be fine. */
+	WARN_ON(addr > 0x1fffff);
+	crtcext0 |= (!!(addr & (1<<20)))<<6;
 	WREG_CRT(0x0d, (u8)(addr & 0xff));
 	WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
-	WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
+	WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
 }
 
 
@@ -829,11 +847,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 
 
 	for (i = 0; i < sizeof(dacvalue); i++) {
-		if ((i <= 0x03) ||
-		    (i == 0x07) ||
-		    (i == 0x0b) ||
-		    (i == 0x0f) ||
-		    ((i >= 0x13) && (i <= 0x17)) ||
+		if ((i <= 0x17) ||
 		    (i == 0x1b) ||
 		    (i == 0x1c) ||
 		    ((i >= 0x1f) && (i <= 0x29)) ||
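Two independent fixes run through this file. First, the busy-wait helpers now bound themselves by wall-clock time instead of an iteration count, so the limit no longer scales with CPU speed; a sketch of the idiom (RREG32 and MGAREG_Status are the driver's own accessors):

/* Poll a status bit for at most ~100 ms. */
static void example_wait_clear(struct mga_device *mdev)
{
	unsigned long timeout = jiffies + HZ / 10;
	unsigned int status;

	do {
		status = RREG32(MGAREG_Status);
	} while ((status & 0x08) && time_before(jiffies, timeout));
}

Second, the WREG_DAC() to WREG8(DAC_DATA, ...) conversions fix read-modify-write sequences that had already selected a register through DAC_INDEX: WREG_DAC writes the index first, and several of the removed calls even passed a value macro (e.g. MGA1064_PIX_CLK_CTL_CLK_DIS) as the index, so the data landed in the wrong register.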
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 08b0823c93d5..f86771481317 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -277,7 +277,7 @@ out_unref:
 	return 0;
 }
 
-static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
 {
 	int irq_num;
 	long addr = qdev->io_base + port;
@@ -285,20 +285,29 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
 
 	mutex_lock(&qdev->async_io_mutex);
 	irq_num = atomic_read(&qdev->irq_received_io_cmd);
-
-
 	if (qdev->last_sent_io_cmd > irq_num) {
-		ret = wait_event_interruptible(qdev->io_cmd_event,
-					       atomic_read(&qdev->irq_received_io_cmd) > irq_num);
-		if (ret)
+		if (intr)
+			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+		else
+			ret = wait_event_timeout(qdev->io_cmd_event,
+						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+		/* 0 is timeout, just bail the "hw" has gone away */
+		if (ret <= 0)
 			goto out;
 		irq_num = atomic_read(&qdev->irq_received_io_cmd);
 	}
 	outb(val, addr);
 	qdev->last_sent_io_cmd = irq_num + 1;
-	ret = wait_event_interruptible(qdev->io_cmd_event,
-				       atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+	if (intr)
+		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+	else
+		ret = wait_event_timeout(qdev->io_cmd_event,
+					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 out:
+	if (ret > 0)
+		ret = 0;
 	mutex_unlock(&qdev->async_io_mutex);
 	return ret;
 }
@@ -308,7 +317,7 @@ static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
 	int ret;
 
 restart:
-	ret = wait_for_io_cmd_user(qdev, val, port);
+	ret = wait_for_io_cmd_user(qdev, val, port, false);
 	if (ret == -ERESTARTSYS)
 		goto restart;
 }
@@ -340,7 +349,7 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
 	mutex_lock(&qdev->update_area_mutex);
 	qdev->ram_header->update_area = *area;
 	qdev->ram_header->update_surface = surface_id;
-	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
+	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
 	mutex_unlock(&qdev->update_area_mutex);
 	return ret;
 }
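The 5*HZ timeouts above keep a guest from blocking forever when the host side of the virtual hardware stops responding. The return-value handling follows the kernel convention: wait_event_timeout() yields 0 on timeout and the remaining jiffies (positive) on success, and the _interruptible variant may also return -ERESTARTSYS; hence ret <= 0 bails out, and any positive ret is folded back to 0 at out: to preserve the "0 on success, negative on error" contract. Condensed sketch (wq and cond are placeholders):

long ret;

if (intr)
	ret = wait_event_interruptible_timeout(wq, cond, 5 * HZ);
else
	ret = wait_event_timeout(wq, cond, 5 * HZ);

if (ret <= 0)	/* 0: timed out, <0: interrupted */
	goto out;
/* ... issue the I/O command ... */
out:
if (ret > 0)	/* leftover jiffies mean success */
	ret = 0;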
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index fcfd4436ceed..823d29e926ec 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -428,10 +428,10 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
 	int inc = 1;
 
 	qobj = gem_to_qxl_bo(qxl_fb->obj);
-	if (qxl_fb != qdev->active_user_framebuffer) {
-		DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n",
-			 __func__, qxl_fb, qdev->active_user_framebuffer);
-	}
+	/* if we aren't primary surface ignore this */
+	if (!qobj->is_primary)
+		return 0;
+
 	if (!num_clips) {
 		num_clips = 1;
 		clips = &norect;
@@ -604,7 +604,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
 			  mode->hdisplay,
 			  mode->vdisplay);
 	}
-	qdev->mode_set = true;
 	return 0;
 }
 
@@ -893,7 +892,6 @@ qxl_user_framebuffer_create(struct drm_device *dev,
 {
 	struct drm_gem_object *obj;
 	struct qxl_framebuffer *qxl_fb;
-	struct qxl_device *qdev = dev->dev_private;
 	int ret;
 
 	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
@@ -909,13 +907,6 @@ qxl_user_framebuffer_create(struct drm_device *dev,
 		return NULL;
 	}
 
-	if (qdev->active_user_framebuffer) {
-		DRM_INFO("%s: active_user_framebuffer %p -> %p\n",
-			 __func__,
-			 qdev->active_user_framebuffer, qxl_fb);
-	}
-	qdev->active_user_framebuffer = qxl_fb;
-
 	return &qxl_fb->base;
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 52b582c211da..43d06ab28a21 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -255,12 +255,6 @@ struct qxl_device {
 	struct qxl_gem gem;
 	struct qxl_mode_info mode_info;
 
-	/*
-	 * last created framebuffer with fb_create
-	 * only used by debugfs dumbppm
-	 */
-	struct qxl_framebuffer *active_user_framebuffer;
-
 	struct fb_info *fbdev_info;
 	struct qxl_framebuffer *fbdev_qfb;
 	void *ram_physical;
@@ -270,7 +264,6 @@ struct qxl_device {
 	struct qxl_ring *cursor_ring;
 
 	struct qxl_ram_header *ram_header;
-	bool mode_set;
 
 	bool primary_created;
 
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 04b64f9cbfdb..6db7370373ea 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -294,6 +294,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
 		goto out;
 
 	if (!qobj->pin_count) {
+		qxl_ttm_placement_from_domain(qobj, qobj->type);
 		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
 				      true, false);
 		if (unlikely(ret))
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 865e2c9980db..60170ea5e3a2 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
 		OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
 
 		for (i = 0; i < nr; ++i) {
-			if (DRM_COPY_FROM_USER_UNCHECKED
+			if (DRM_COPY_FROM_USER
 			    (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
 				DRM_ERROR("copy cliprect faulted\n");
 				return -EFAULT;
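The one-word change above is a security fix: DRM_COPY_FROM_USER_UNCHECKED maps to __copy_from_user(), which skips the access_ok() range check, and cmdbuf->boxes is a user-controlled pointer. The checked form in modern terms:

/* copy_from_user() validates the user range and returns the number of
 * bytes it could not copy, hence the non-zero test. */
if (copy_from_user(&box, &cmdbuf->boxes[n + i], sizeof(box)))
	return -EFAULT;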
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d33f484ace48..094e7e5ea39e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -147,7 +147,7 @@ static inline void radeon_unregister_atpx_handler(void) {}
 #endif
 
 int radeon_no_wb;
-int radeon_modeset = 1;
+int radeon_modeset = -1;
 int radeon_dynclks = -1;
 int radeon_r4xx_atom = 0;
 int radeon_agpmode = 0;
@@ -456,6 +456,16 @@ static struct pci_driver radeon_kms_pci_driver = {
 
 static int __init radeon_init(void)
 {
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && radeon_modeset == -1) {
+		DRM_INFO("VGACON disable radeon kernel modesetting.\n");
+		radeon_modeset = 0;
+	}
+#endif
+	/* set to modesetting by default if not nomodeset */
+	if (radeon_modeset == -1)
+		radeon_modeset = 1;
+
 	if (radeon_modeset == 1) {
 		DRM_INFO("radeon kernel modesetting enabled.\n");
 		driver = &kms_driver;
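radeon_modeset becomes a tri-state here: -1 means "decide at init time", which lets a forced VGA text console veto KMS without overriding an explicit radeon.modeset=1 from the user. The resolution logic, condensed from the hunk above:

/* -1 = auto, 0 = off, 1 = on; resolved once at module init */
#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && radeon_modeset == -1)
		radeon_modeset = 0;	/* text console requested: no KMS */
#endif
	if (radeon_modeset == -1)
		radeon_modeset = 1;	/* otherwise KMS is the default */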
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 699187ab3800..5b9ac32801c7 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -1002,6 +1002,7 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
 			kill_guest(&lg->cpus[0],
 				   "Cannot populate switcher mapping");
 		}
+		lg->pgdirs[pgdir].last_host_cpu = -1;
 	}
 }
 
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 375c109607ff..f4f3038c1df0 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1130,6 +1130,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	struct variant_data *variant = host->variant;
 	u32 pwr = 0;
 	unsigned long flags;
+	int ret;
 
 	pm_runtime_get_sync(mmc_dev(mmc));
 
@@ -1161,8 +1162,12 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		break;
 	case MMC_POWER_ON:
 		if (!IS_ERR(mmc->supply.vqmmc) &&
-		    !regulator_is_enabled(mmc->supply.vqmmc))
-			regulator_enable(mmc->supply.vqmmc);
+		    !regulator_is_enabled(mmc->supply.vqmmc)) {
+			ret = regulator_enable(mmc->supply.vqmmc);
+			if (ret < 0)
+				dev_err(mmc_dev(mmc),
+					"failed to enable vqmmc regulator\n");
+		}
 
 		pwr |= MCI_PWR_ON;
 		break;
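regulator_enable() is __must_check and can genuinely fail; the hunk above stops discarding its result. Since .set_ios has no way to report an error, the driver settles for logging. General shape of the pattern (reg and dev are placeholders):

int ret = regulator_enable(reg);
if (ret < 0)
	dev_err(dev, "failed to enable regulator: %d\n", ret);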
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 7ffc756131a2..547098086773 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -43,7 +43,7 @@ config CAIF_HSI
 
 config CAIF_VIRTIO
 	tristate "CAIF virtio transport driver"
-	depends on CAIF
+	depends on CAIF && HAS_DMA
 	select VHOST_RING
 	select VIRTIO
 	select GENERIC_ALLOCATOR
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index de570a8f8967..072c6f14e8fc 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -632,7 +632,6 @@ struct vortex_private {
 	pm_state_valid:1,	/* pci_dev->saved_config_space has sane contents */
 	open:1,
 	medialock:1,
-	must_free_region:1,	/* Flag: if zero, Cardbus owns the I/O region */
 	large_frames:1,		/* accept large frames */
 	handling_irq:1;		/* private in_irq indicator */
 	/* {get|set}_wol operations are already serialized by rtnl.
@@ -1012,6 +1011,12 @@ static int vortex_init_one(struct pci_dev *pdev,
 	if (rc < 0)
 		goto out;
 
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc < 0) {
+		pci_disable_device(pdev);
+		goto out;
+	}
+
 	unit = vortex_cards_found;
 
 	if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
@@ -1027,6 +1032,7 @@ static int vortex_init_one(struct pci_dev *pdev,
 	if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
 		ioaddr = pci_iomap(pdev, 0, 0);
 	if (!ioaddr) {
+		pci_release_regions(pdev);
 		pci_disable_device(pdev);
 		rc = -ENOMEM;
 		goto out;
@@ -1036,6 +1042,7 @@ static int vortex_init_one(struct pci_dev *pdev,
 			   ent->driver_data, unit);
 	if (rc < 0) {
 		pci_iounmap(pdev, ioaddr);
+		pci_release_regions(pdev);
 		pci_disable_device(pdev);
 		goto out;
 	}
@@ -1178,11 +1185,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 
 	/* PCI-only startup logic */
 	if (pdev) {
-		/* EISA resources already marked, so only PCI needs to do this here */
-		/* Ignore return value, because Cardbus drivers already allocate for us */
-		if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
-			vp->must_free_region = 1;
-
 		/* enable bus-mastering if necessary */
 		if (vci->flags & PCI_USES_MASTER)
 			pci_set_master(pdev);
@@ -1220,7 +1222,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 					   &vp->rx_ring_dma);
 	retval = -ENOMEM;
 	if (!vp->rx_ring)
-		goto free_region;
+		goto free_device;
 
 	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
 	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
@@ -1484,9 +1486,7 @@ free_ring:
 			    + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
 			    vp->rx_ring,
 			    vp->rx_ring_dma);
-free_region:
-	if (vp->must_free_region)
-		release_region(dev->base_addr, vci->io_size);
+free_device:
 	free_netdev(dev);
 	pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
 out:
@@ -3254,8 +3254,9 @@ static void vortex_remove_one(struct pci_dev *pdev)
 			    + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
 			    vp->rx_ring,
 			    vp->rx_ring_dma);
-	if (vp->must_free_region)
-		release_region(dev->base_addr, vp->io_size);
+
+	pci_release_regions(pdev);
+
 	free_netdev(dev);
 }
 
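This driver used to call request_region() only for non-CardBus devices and remember that in the must_free_region flag. Switching to pci_request_regions()/pci_release_regions() drops the bookkeeping, but every later failure in probe now has to unwind in reverse order. The canonical shape, as a sketch:

static int example_probe(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0)
		return rc;

	rc = pci_request_regions(pdev, "example");
	if (rc < 0)
		goto err_disable;

	/* ... iomap and register; failures past this point must call
	 * pci_release_regions() before pci_disable_device() ... */
	return 0;

err_disable:
	pci_disable_device(pdev);
	return rc;
}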
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ce4a030d3d0c..07f7ef05c3f2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3236,9 +3236,10 @@ bnad_init(struct bnad *bnad,
 
 	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
 	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
-
-	if (!bnad->work_q)
+	if (!bnad->work_q) {
+		iounmap(bnad->bar0);
 		return -ENOMEM;
+	}
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 1194446f859a..768285ec10f4 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,7 +22,7 @@ if NET_CADENCE
 
 config ARM_AT91_ETHER
 	tristate "AT91RM9200 Ethernet support"
-	depends on GENERIC_HARDIRQS
+	depends on GENERIC_HARDIRQS && HAS_DMA
 	select NET_CORE
 	select MACB
 	---help---
@@ -31,6 +31,7 @@ config ARM_AT91_ETHER
 
 config MACB
 	tristate "Cadence MACB/GEM support"
+	depends on HAS_DMA
 	select PHYLIB
 	---help---
 	  The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
index aba435c3d4ae..184a063bed5f 100644
--- a/drivers/net/ethernet/calxeda/Kconfig
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -1,6 +1,6 @@
1config NET_CALXEDA_XGMAC 1config NET_CALXEDA_XGMAC
2 tristate "Calxeda 1G/10G XGMAC Ethernet driver" 2 tristate "Calxeda 1G/10G XGMAC Ethernet driver"
3 depends on HAS_IOMEM 3 depends on HAS_IOMEM && HAS_DMA
4 select CRC32 4 select CRC32
5 help 5 help
6 This is the driver for the XGMAC Ethernet IP block found on Calxeda 6 This is the driver for the XGMAC Ethernet IP block found on Calxeda
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index aff0310a778b..ca9825ca88c9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -87,6 +87,8 @@
 #define FEC_QUIRK_HAS_GBIT		(1 << 3)
 /* Controller has extend desc buffer */
 #define FEC_QUIRK_HAS_BUFDESC_EX	(1 << 4)
+/* Controller has hardware checksum support */
+#define FEC_QUIRK_HAS_CSUM		(1 << 5)
 
 static struct platform_device_id fec_devtype[] = {
 	{
@@ -105,7 +107,7 @@ static struct platform_device_id fec_devtype[] = {
 	}, {
 		.name = "imx6q-fec",
 		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
-				FEC_QUIRK_HAS_BUFDESC_EX,
+				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM,
 	}, {
 		.name = "mvf-fec",
 		.driver_data = FEC_QUIRK_ENET_MAC,
@@ -1744,6 +1746,8 @@ static const struct net_device_ops fec_netdev_ops = {
 static int fec_enet_init(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	struct bufdesc *cbd_base;
 
 	/* Allocate memory for buffer descriptors. */
@@ -1775,12 +1779,14 @@ static int fec_enet_init(struct net_device *ndev)
 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
 	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
 
-	/* enable hw accelerator */
-	ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-			| NETIF_F_RXCSUM);
-	ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-			| NETIF_F_RXCSUM);
-	fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+	if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
+		/* enable hw accelerator */
+		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+				| NETIF_F_RXCSUM);
+		ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+				| NETIF_F_RXCSUM);
+		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+	}
 
 	fec_restart(ndev, 0);
 
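Only the imx6q variant has the checksum engine, so offload is now gated on a quirk bit carried in platform_device_id.driver_data instead of being advertised unconditionally (which broke the older SoCs). The lookup-and-test pattern, condensed from the hunks above:

const struct platform_device_id *id_entry =
			platform_get_device_id(fep->pdev);

if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			  NETIF_F_RXCSUM;
	ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			     NETIF_F_RXCSUM;
	fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}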
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 4989481c19f0..d300a0c0eafc 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -359,10 +359,26 @@ static int emac_reset(struct emac_instance *dev)
 	}
 
 #ifdef CONFIG_PPC_DCR_NATIVE
-	/* Enable internal clock source */
-	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
-		dcri_clrset(SDR0, SDR0_ETH_CFG,
-			    0, SDR0_ETH_CFG_ECS << dev->cell_index);
+	/*
+	 * PPC460EX/GT Embedded Processor Advanced User's Manual
+	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
+	 * Note: The PHY must provide a TX Clk in order to perform a soft reset
+	 * of the EMAC. If none is present, select the internal clock
+	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
+	 * After a soft reset, select the external clock.
+	 */
+	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
+		if (dev->phy_address == 0xffffffff &&
+		    dev->phy_map == 0xffffffff) {
+			/* No PHY: select internal loop clock before reset */
+			dcri_clrset(SDR0, SDR0_ETH_CFG,
+				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
+		} else {
+			/* PHY present: select external clock before reset */
+			dcri_clrset(SDR0, SDR0_ETH_CFG,
+				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+		}
+	}
 #endif
 
 	out_be32(&p->mr0, EMAC_MR0_SRST);
@@ -370,10 +386,14 @@ static int emac_reset(struct emac_instance *dev)
 		--n;
 
 #ifdef CONFIG_PPC_DCR_NATIVE
-	/* Enable external clock source */
-	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
-		dcri_clrset(SDR0, SDR0_ETH_CFG,
-			    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
+		if (dev->phy_address == 0xffffffff &&
+		    dev->phy_map == 0xffffffff) {
+			/* No PHY: restore external clock source after reset */
+			dcri_clrset(SDR0, SDR0_ETH_CFG,
+				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+		}
+	}
 #endif
 
 	if (n) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 91f2b2c43c12..d3f508697a3d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -60,7 +60,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
 	if (user_prio >= 0) {
 		context->pri_path.sched_queue |= user_prio << 3;
-		context->pri_path.feup = 1 << 6;
+		context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
 	}
 	context->pri_path.counter_index = 0xff;
 	context->cqn_send = cpu_to_be32(cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index b147bdd40768..58a8e535d698 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -131,7 +131,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[2] = "RSS XOR Hash Function support",
 		[3] = "Device manage flow steering support",
 		[4] = "Automatic MAC reassignment support",
-		[5] = "Time stamping support"
+		[5] = "Time stamping support",
+		[6] = "VST (control vlan insertion/stripping) support",
+		[7] = "FSM (MAC anti-spoofing) support"
 	};
 	int i;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index e12e0d2e0ee0..1157f028a90f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -372,24 +372,29 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
 		if (MLX4_QP_ST_RC == qp_type)
 			return -EINVAL;
 
+		/* force strip vlan by clear vsd */
+		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+		if (0 != vp_oper->state.default_vlan) {
+			qpc->pri_path.vlan_control =
+				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+		} else { /* priority tagged */
+			qpc->pri_path.vlan_control =
+				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
+		}
+
+		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
 		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
-		qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/
-		qpc->pri_path.feup |= 1 << 3; /* set fvl bit */
+		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
 		qpc->pri_path.sched_queue &= 0xC7;
 		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
-		mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
-			 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
-			 (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
-			 vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
-			 (int)(qpc->pri_path.fl));
 	}
 	if (vp_oper->state.spoofchk) {
-		qpc->pri_path.feup |= 1 << 5; /* set fsm bit */;
+		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
 		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
-		mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n",
-			 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
-			 (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
-			 vp_oper->mac_idx);
 	}
 	return 0;
 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 90c253b145ef..019c5f78732e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -429,6 +429,7 @@ struct qlcnic_hardware_context {
 
 	u16 port_type;
 	u16 board_type;
+	u16 supported_type;
 
 	u16 link_speed;
 	u16 link_duplex;
@@ -1514,6 +1515,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
 void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
 void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
 void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 
 int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
 int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index ea790a93ee7c..b4ff1e35a11d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -696,15 +696,14 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
696 return 1; 696 return 1;
697} 697}
698 698
699u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) 699u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
700{ 700{
701 u32 data; 701 u32 data;
702 unsigned long wait_time = 0;
703 struct qlcnic_hardware_context *ahw = adapter->ahw; 702 struct qlcnic_hardware_context *ahw = adapter->ahw;
704 /* wait for mailbox completion */ 703 /* wait for mailbox completion */
705 do { 704 do {
706 data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); 705 data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
707 if (++wait_time > QLCNIC_MBX_TIMEOUT) { 706 if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
708 data = QLCNIC_RCODE_TIMEOUT; 707 data = QLCNIC_RCODE_TIMEOUT;
709 break; 708 break;
710 } 709 }
@@ -720,8 +719,8 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
720 u16 opcode; 719 u16 opcode;
721 u8 mbx_err_code; 720 u8 mbx_err_code;
722 unsigned long flags; 721 unsigned long flags;
723 u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd;
724 struct qlcnic_hardware_context *ahw = adapter->ahw; 722 struct qlcnic_hardware_context *ahw = adapter->ahw;
723 u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
725 724
726 opcode = LSW(cmd->req.arg[0]); 725 opcode = LSW(cmd->req.arg[0]);
727 if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { 726 if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
@@ -754,15 +753,13 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
754 /* Signal FW about the impending command */ 753 /* Signal FW about the impending command */
755 QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); 754 QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
756poll: 755poll:
757 rsp = qlcnic_83xx_mbx_poll(adapter); 756 rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
758 if (rsp != QLCNIC_RCODE_TIMEOUT) { 757 if (rsp != QLCNIC_RCODE_TIMEOUT) {
759 /* Get the FW response data */ 758 /* Get the FW response data */
760 fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); 759 fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
761 if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { 760 if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
762 __qlcnic_83xx_process_aen(adapter); 761 __qlcnic_83xx_process_aen(adapter);
763 mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); 762 goto poll;
764 if (mbx_val)
765 goto poll;
766 } 763 }
767 mbx_err_code = QLCNIC_MBX_STATUS(fw_data); 764 mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
768 rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); 765 rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
@@ -1276,11 +1273,13 @@ out:
1276 return err; 1273 return err;
1277} 1274}
1278 1275
1279static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test) 1276static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
1277 int num_sds_ring)
1280{ 1278{
1281 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1279 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1282 struct qlcnic_host_sds_ring *sds_ring; 1280 struct qlcnic_host_sds_ring *sds_ring;
1283 struct qlcnic_host_rds_ring *rds_ring; 1281 struct qlcnic_host_rds_ring *rds_ring;
1282 u16 adapter_state = adapter->is_up;
1284 u8 ring; 1283 u8 ring;
1285 int ret; 1284 int ret;
1286 1285
@@ -1304,6 +1303,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
1304 ret = qlcnic_fw_create_ctx(adapter); 1303 ret = qlcnic_fw_create_ctx(adapter);
1305 if (ret) { 1304 if (ret) {
1306 qlcnic_detach(adapter); 1305 qlcnic_detach(adapter);
1306 if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
1307 adapter->max_sds_rings = num_sds_ring;
1308 qlcnic_attach(adapter);
1309 }
1307 netif_device_attach(netdev); 1310 netif_device_attach(netdev);
1308 return ret; 1311 return ret;
1309 } 1312 }
@@ -1596,7 +1599,8 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1596 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 1599 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1597 return -EBUSY; 1600 return -EBUSY;
1598 1601
1599 ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); 1602 ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
1603 max_sds_rings);
1600 if (ret) 1604 if (ret)
1601 goto fail_diag_alloc; 1605 goto fail_diag_alloc;
1602 1606
@@ -2830,6 +2834,23 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
 		break;
 	}
 	config = cmd.rsp.arg[3];
+	if (QLC_83XX_SFP_PRESENT(config)) {
+		switch (ahw->module_type) {
+		case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+		case LINKEVENT_MODULE_OPTICAL_SRLR:
+		case LINKEVENT_MODULE_OPTICAL_LRM:
+		case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+			ahw->supported_type = PORT_FIBRE;
+			break;
+		case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+		case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+		case LINKEVENT_MODULE_TWINAX:
+			ahw->supported_type = PORT_TP;
+			break;
+		default:
+			ahw->supported_type = PORT_OTHER;
+		}
+	}
 	if (config & 1)
 		err = 1;
 	}
@@ -2838,7 +2859,8 @@ out:
 	return config;
 }
 
-int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
+			     struct ethtool_cmd *ecmd)
 {
 	u32 config = 0;
 	int status = 0;
@@ -2851,6 +2873,54 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
 	ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
 	/* hard code until there is a way to get it from flash */
 	ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
+
+	if (netif_running(adapter->netdev) && ahw->has_link_events) {
+		ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+		ecmd->duplex = ahw->link_duplex;
+		ecmd->autoneg = ahw->link_autoneg;
+	} else {
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
+		ecmd->autoneg = AUTONEG_DISABLE;
+	}
+
+	if (ahw->port_type == QLCNIC_XGBE) {
+		ecmd->supported = SUPPORTED_1000baseT_Full;
+		ecmd->advertising = ADVERTISED_1000baseT_Full;
+	} else {
+		ecmd->supported = (SUPPORTED_10baseT_Half |
+				   SUPPORTED_10baseT_Full |
+				   SUPPORTED_100baseT_Half |
+				   SUPPORTED_100baseT_Full |
+				   SUPPORTED_1000baseT_Half |
+				   SUPPORTED_1000baseT_Full);
+		ecmd->advertising = (ADVERTISED_100baseT_Half |
+				     ADVERTISED_100baseT_Full |
+				     ADVERTISED_1000baseT_Half |
+				     ADVERTISED_1000baseT_Full);
+	}
+
+	switch (ahw->supported_type) {
+	case PORT_FIBRE:
+		ecmd->supported |= SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_FIBRE;
+		ecmd->port = PORT_FIBRE;
+		ecmd->transceiver = XCVR_EXTERNAL;
+		break;
+	case PORT_TP:
+		ecmd->supported |= SUPPORTED_TP;
+		ecmd->advertising |= ADVERTISED_TP;
+		ecmd->port = PORT_TP;
+		ecmd->transceiver = XCVR_INTERNAL;
+		break;
+	default:
+		ecmd->supported |= SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_FIBRE;
+		ecmd->port = PORT_OTHER;
+		ecmd->transceiver = XCVR_EXTERNAL;
+		break;
+	}
+	ecmd->phy_address = ahw->physical_port;
 	return status;
 }
 
@@ -3046,7 +3116,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
 	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
 		return -EIO;
 
-	ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+	ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
+					 max_sds_rings);
 	if (ret)
 		goto fail_diag_irq;
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 1f1d85e6f2af..f5db67fc9f55 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -603,7 +603,7 @@ int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
-int qlcnic_83xx_get_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
 				struct ethtool_pauseparam *);
@@ -620,7 +620,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
 int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
 int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
 u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *);
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
 void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
 void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
 #endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index ab1d8d99cbd5..c67d1eb35e8f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -435,10 +435,6 @@ static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter)
 	}
 done:
 	netif_device_attach(netdev);
-	if (netif_running(netdev)) {
-		netif_carrier_on(netdev);
-		netif_wake_queue(netdev);
-	}
 }
 
 static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
@@ -642,15 +638,21 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
 
 static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+
 	qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
-	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 	set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
 	qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
 	set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
-	adapter->ahw->idc.quiesce_req = 0;
-	adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
-	adapter->ahw->idc.err_code = 0;
-	adapter->ahw->idc.collect_dump = 0;
+
+	ahw->idc.quiesce_req = 0;
+	ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+	ahw->idc.err_code = 0;
+	ahw->idc.collect_dump = 0;
+	ahw->reset_context = 0;
+	adapter->tx_timeo_cnt = 0;
+
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 }
 
 /**
@@ -851,6 +853,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
 	/* Check for soft reset request */
 	if (ahw->reset_context &&
 	    !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+		adapter->ahw->reset_context = 0;
 		qlcnic_83xx_idc_tx_soft_reset(adapter);
 		return ret;
 	}
@@ -914,6 +917,7 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
 static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
 {
 	dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 	adapter->ahw->idc.err_code = -EIO;
 
 	return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 08efb4635007..f67652de5a63 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -131,12 +131,13 @@ static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
131 "ctx_lro_pkt_cnt", 131 "ctx_lro_pkt_cnt",
132 "ctx_ip_csum_error", 132 "ctx_ip_csum_error",
133 "ctx_rx_pkts_wo_ctx", 133 "ctx_rx_pkts_wo_ctx",
134 "ctx_rx_pkts_dropped_wo_sts", 134 "ctx_rx_pkts_drop_wo_sds_on_card",
135 "ctx_rx_pkts_drop_wo_sds_on_host",
135 "ctx_rx_osized_pkts", 136 "ctx_rx_osized_pkts",
136 "ctx_rx_pkts_dropped_wo_rds", 137 "ctx_rx_pkts_dropped_wo_rds",
137 "ctx_rx_unexpected_mcast_pkts", 138 "ctx_rx_unexpected_mcast_pkts",
138 "ctx_invalid_mac_address", 139 "ctx_invalid_mac_address",
139 "ctx_rx_rds_ring_prim_attemoted", 140 "ctx_rx_rds_ring_prim_attempted",
140 "ctx_rx_rds_ring_prim_success", 141 "ctx_rx_rds_ring_prim_success",
141 "ctx_num_lro_flows_added", 142 "ctx_num_lro_flows_added",
142 "ctx_num_lro_flows_removed", 143 "ctx_num_lro_flows_removed",
@@ -251,6 +252,18 @@ static int
 qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+	if (qlcnic_82xx_check(adapter))
+		return qlcnic_82xx_get_settings(adapter, ecmd);
+	else if (qlcnic_83xx_check(adapter))
+		return qlcnic_83xx_get_settings(adapter, ecmd);
+
+	return -EIO;
+}
+
+int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
+			     struct ethtool_cmd *ecmd)
+{
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	u32 speed, reg;
 	int check_sfp_module = 0;
@@ -276,10 +289,7 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 
 	} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
 		u32 val = 0;
-		if (qlcnic_83xx_check(adapter))
-			qlcnic_83xx_get_settings(adapter);
-		else
-			val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+		val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
 
 		if (val == QLCNIC_PORT_MODE_802_3_AP) {
 			ecmd->supported = SUPPORTED_1000baseT_Full;
@@ -289,16 +299,13 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 			ecmd->advertising = ADVERTISED_10000baseT_Full;
 		}
 
-		if (netif_running(dev) && adapter->ahw->has_link_events) {
-			if (qlcnic_82xx_check(adapter)) {
-				reg = QLCRD32(adapter,
-					      P3P_LINK_SPEED_REG(pcifn));
-				speed = P3P_LINK_SPEED_VAL(pcifn, reg);
-				ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
-			}
-			ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
-			ecmd->autoneg = adapter->ahw->link_autoneg;
-			ecmd->duplex = adapter->ahw->link_duplex;
+		if (netif_running(adapter->netdev) && ahw->has_link_events) {
+			reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
+			speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+			ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+			ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+			ecmd->autoneg = ahw->link_autoneg;
+			ecmd->duplex = ahw->link_duplex;
 			goto skip;
 		}
 
@@ -340,8 +347,8 @@ skip:
 	case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
 		ecmd->advertising |= ADVERTISED_TP;
 		ecmd->supported |= SUPPORTED_TP;
-		check_sfp_module = netif_running(dev) &&
-				   adapter->ahw->has_link_events;
+		check_sfp_module = netif_running(adapter->netdev) &&
+				   ahw->has_link_events;
 	case QLCNIC_BRDTYPE_P3P_10G_XFP:
 		ecmd->supported |= SUPPORTED_FIBRE;
 		ecmd->advertising |= ADVERTISED_FIBRE;
@@ -355,8 +362,8 @@ skip:
 			ecmd->advertising |=
 				(ADVERTISED_FIBRE | ADVERTISED_TP);
 			ecmd->port = PORT_FIBRE;
-			check_sfp_module = netif_running(dev) &&
-					   adapter->ahw->has_link_events;
+			check_sfp_module = netif_running(adapter->netdev) &&
+					   ahw->has_link_events;
 		} else {
 			ecmd->autoneg = AUTONEG_ENABLE;
 			ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
@@ -365,13 +372,6 @@ skip:
 			ecmd->port = PORT_TP;
 		}
 		break;
-	case QLCNIC_BRDTYPE_83XX_10G:
-		ecmd->autoneg = AUTONEG_DISABLE;
-		ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
-		ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP);
-		ecmd->port = PORT_FIBRE;
-		check_sfp_module = netif_running(dev) && ahw->has_link_events;
-		break;
 	default:
 		dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
 			adapter->ahw->board_type);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 95b1b5732838..b6818f4356b9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -134,7 +134,7 @@ struct qlcnic_mailbox_metadata {
 
 #define QLCNIC_SET_OWNER 1
 #define QLCNIC_CLR_OWNER 0
-#define QLCNIC_MBX_TIMEOUT 10000
+#define QLCNIC_MBX_TIMEOUT 5000
 
 #define QLCNIC_MBX_RSP_OK 1
 #define QLCNIC_MBX_PORT_RSP_OK 0x1a
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 264d5a4f8153..8fb836d4129f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -37,24 +37,24 @@ MODULE_PARM_DESC(qlcnic_mac_learn,
37 "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)"); 37 "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
38 38
39int qlcnic_use_msi = 1; 39int qlcnic_use_msi = 1;
40MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); 40MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
41module_param_named(use_msi, qlcnic_use_msi, int, 0444); 41module_param_named(use_msi, qlcnic_use_msi, int, 0444);
42 42
43int qlcnic_use_msi_x = 1; 43int qlcnic_use_msi_x = 1;
44MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); 44MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
45module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444); 45module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
46 46
47int qlcnic_auto_fw_reset = 1; 47int qlcnic_auto_fw_reset = 1;
48MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); 48MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
49module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); 49module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
50 50
51int qlcnic_load_fw_file; 51int qlcnic_load_fw_file;
52MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); 52MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
53module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); 53module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
54 54
55int qlcnic_config_npars; 55int qlcnic_config_npars;
56module_param(qlcnic_config_npars, int, 0444); 56module_param(qlcnic_config_npars, int, 0444);
57MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); 57MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
58 58
59static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 59static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
60static void qlcnic_remove(struct pci_dev *pdev); 60static void qlcnic_remove(struct pci_dev *pdev);
@@ -308,6 +308,23 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
 	return 0;
 }
 
+static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_mac_list_s *cur;
+	struct list_head *head;
+
+	list_for_each(head, &adapter->mac_list) {
+		cur = list_entry(head, struct qlcnic_mac_list_s, list);
+		if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
+			qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+						  0, QLCNIC_MAC_DEL);
+			list_del(&cur->list);
+			kfree(cur);
+			return;
+		}
+	}
+}
+
 static int qlcnic_set_mac(struct net_device *netdev, void *p)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -322,11 +339,15 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EINVAL;
 
+	if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
+		return 0;
+
 	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
 		netif_device_detach(netdev);
 		qlcnic_napi_disable(adapter);
 	}
 
+	qlcnic_delete_adapter_mac(adapter);
 	memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 	qlcnic_set_multi(adapter->netdev);
@@ -2481,12 +2502,17 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
 	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 		return;
 
-	dev_err(&netdev->dev, "transmit timeout, resetting.\n");
-
-	if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
-		adapter->need_fw_reset = 1;
-	else
-		adapter->ahw->reset_context = 1;
+	if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) {
+		netdev_info(netdev, "Tx timeout, reset the adapter.\n");
+		if (qlcnic_82xx_check(adapter))
+			adapter->need_fw_reset = 1;
+		else if (qlcnic_83xx_check(adapter))
+			qlcnic_83xx_idc_request_reset(adapter,
+						      QLCNIC_FORCE_FW_DUMP_KEY);
+	} else {
+		netdev_info(netdev, "Tx timeout, reset adapter context.\n");
+		adapter->ahw->reset_context = 1;
+	}
 }
 
 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
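
The qlcnic_tx_timeout() rewrite above escalates recovery: a cheap context reset on early timeouts, a full firmware reset (or, on 83xx, an IDC reset request) once the counter reaches QLCNIC_MAX_TX_TIMEOUTS. A compilable toy version of just the escalation decision, with an illustrative threshold:

#include <stdio.h>

#define MAX_TX_TIMEOUTS 2  /* illustrative; the driver uses QLCNIC_MAX_TX_TIMEOUTS */

/* Returns nonzero once repeated timeouts justify the heavy recovery path. */
static int tx_timeout_escalate(int *timeo_cnt)
{
	return ++*timeo_cnt >= MAX_TX_TIMEOUTS;
}

int main(void)
{
	int cnt = 0;

	for (int i = 0; i < 3; i++)
		printf("timeout %d -> %s\n", i + 1,
		       tx_timeout_escalate(&cnt) ? "firmware reset"
						 : "context reset");
	return 0;
}
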
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 44d547d78b84..3869c3864deb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -280,9 +280,9 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
 				    u32 *pay, u8 pci_func, u8 size)
 {
+	u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	unsigned long flags;
-	u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val;
 	u16 opcode;
 	u8 mbx_err_code;
 	int i, j;
@@ -330,15 +330,13 @@ static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
 	 * assume something is wrong.
 	 */
 poll:
-	rsp = qlcnic_83xx_mbx_poll(adapter);
+	rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
 	if (rsp != QLCNIC_RCODE_TIMEOUT) {
 		/* Get the FW response data */
 		fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
 		if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
 			__qlcnic_83xx_process_aen(adapter);
-			mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-			if (mbx_val)
-				goto poll;
+			goto poll;
 		}
 		mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
 		rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index c81be2da119b..1a66ccded235 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1133,9 +1133,6 @@ static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
 	if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
 		return -EINVAL;
 
-	if (!(cmd->req.arg[1] & BIT_8))
-		return -EINVAL;
-
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 87463bc701a6..50235d201592 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1106,6 +1106,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
 		if (pci_dma_mapping_error(qdev->pdev, map)) {
 			__free_pages(rx_ring->pg_chunk.page,
 				     qdev->lbq_buf_order);
+			rx_ring->pg_chunk.page = NULL;
 			netif_err(qdev, drv, qdev->ndev,
 				  "PCI mapping failed.\n");
 			return -ENOMEM;
@@ -2777,6 +2778,12 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
 		curr_idx = 0;
 
 	}
+	if (rx_ring->pg_chunk.page) {
+		pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
+			       ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+		put_page(rx_ring->pg_chunk.page);
+		rx_ring->pg_chunk.page = NULL;
+	}
 }
 
 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index f695a50bac47..43c1f3223322 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -1,6 +1,6 @@
 config STMMAC_ETH
 	tristate "STMicroelectronics 10/100/1000 Ethernet driver"
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && HAS_DMA
 	select NET_CORE
 	select MII
 	select PHYLIB
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d5a141c7c4e7..1c502bb0c916 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -229,7 +229,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 	}
 
 	if (port->passthru)
-		vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
+		vlan = list_first_or_null_rcu(&port->vlans,
+					      struct macvlan_dev, list);
 	else
 		vlan = macvlan_hash_lookup(port, eth->h_dest);
 	if (vlan == NULL)
@@ -814,7 +815,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	if (err < 0)
 		goto upper_dev_unlink;
 
-	list_add_tail(&vlan->list, &port->vlans);
+	list_add_tail_rcu(&vlan->list, &port->vlans);
 	netif_stacked_transfer_operstate(lowerdev, dev);
 
 	return 0;
@@ -842,7 +843,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 
-	list_del(&vlan->list);
+	list_del_rcu(&vlan->list);
 	unregister_netdevice_queue(dev, head);
 	netdev_upper_dev_unlink(vlan->lowerdev, dev);
 }
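
The three macvlan hunks above convert the per-port vlan list to RCU: writers use list_add_tail_rcu()/list_del_rcu(), and the receive fast path replaces list_first_entry() with list_first_or_null_rcu(), which also returns NULL instead of a bogus pointer when the passthru list is empty. A kernel-style sketch of the same read/write pattern, assuming module context and a hypothetical struct item:

#include <linux/rculist.h>
#include <linux/slab.h>

struct item {
	int val;
	struct list_head list;
	struct rcu_head rcu;
};

static LIST_HEAD(items);	/* writers serialized by an outer lock */

/* Reader: tolerates concurrent unlink; sees NULL on an empty list. */
static int peek_first(void)
{
	struct item *it;
	int val = -1;

	rcu_read_lock();
	it = list_first_or_null_rcu(&items, struct item, list);
	if (it)
		val = it->val;
	rcu_read_unlock();
	return val;
}

/* Writer: unlink now, free only after all current readers are done. */
static void remove_item(struct item *it)
{
	list_del_rcu(&it->list);
	kfree_rcu(it, rcu);
}
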
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index ed947dd76fbd..f3cdf64997d6 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -375,6 +375,8 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
 	if (dev == NULL)
 		return;
 
+	list_del(&dev->list);
+
 	ndev = dev->ndev;
 
 	unregister_netdev(ndev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3c23fdc27bf0..655bb25eed2b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -28,7 +28,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 
-static int napi_weight = 128;
+static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
 
 static bool csum = true, gso = true;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9b20d9ee2719..7f702fe3ecc2 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2369,6 +2369,9 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
 	int i;
 	bool needreset = false;
 
+	if (!test_bit(ATH_STAT_STARTED, ah->status))
+		return;
+
 	mutex_lock(&ah->lock);
 
 	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
@@ -2676,6 +2679,7 @@ done:
 	mmiowb();
 	mutex_unlock(&ah->lock);
 
+	set_bit(ATH_STAT_STARTED, ah->status);
 	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
 			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
 
@@ -2737,6 +2741,7 @@ void ath5k_stop(struct ieee80211_hw *hw)
 
 	ath5k_stop_tasklets(ah);
 
+	clear_bit(ATH_STAT_STARTED, ah->status);
 	cancel_delayed_work_sync(&ah->tx_complete_work);
 
 	if (!ath5k_modparam_no_hw_rfkill_switch)
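
The ath5k hunks above bracket the self-rearming tx_complete_work with an ATH_STAT_STARTED bit: the work bails out early while the bit is clear, and ath5k_stop() clears the bit before cancel_delayed_work_sync(), so the work can no longer re-arm itself once the device is down. A kernel-style sketch of the general shape of that pattern, with hypothetical names:

#include <linux/bitops.h>
#include <linux/workqueue.h>

static unsigned long dev_flags;		/* bit 0: device started */
static struct delayed_work poll_work;

static void poll_fn(struct work_struct *work)
{
	if (!test_bit(0, &dev_flags))
		return;			/* stopped: neither poll nor re-arm */

	/* ... inspect hardware state here ... */

	schedule_delayed_work(&poll_work, msecs_to_jiffies(1000));
}

static void start_polling(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	set_bit(0, &dev_flags);
	schedule_delayed_work(&poll_work, msecs_to_jiffies(1000));
}

static void stop_polling(void)
{
	clear_bit(0, &dev_flags);
	/* cancel_delayed_work_sync() also defeats self-requeueing works */
	cancel_delayed_work_sync(&poll_work);
}
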
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 17507dc8a1e7..f3dc124c60c7 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -17,7 +17,7 @@ config ATH9K_BTCOEX_SUPPORT
 
 config ATH9K
 	tristate "Atheros 802.11n wireless cards support"
-	depends on MAC80211
+	depends on MAC80211 && HAS_DMA
 	select ATH9K_HW
 	select MAC80211_LEDS
 	select LEDS_CLASS
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index 0c2ac0c6dc89..e85a8b076c22 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -233,9 +233,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
 	{0x00009d10, 0x01834061},
 	{0x00009d14, 0x00c00400},
 	{0x00009d18, 0x00000000},
-	{0x00009e08, 0x0078230c},
-	{0x00009e24, 0x990bb515},
-	{0x00009e28, 0x126f0000},
+	{0x00009e08, 0x0038230c},
+	{0x00009e24, 0x9907b515},
+	{0x00009e28, 0x126f0600},
 	{0x00009e30, 0x06336f77},
 	{0x00009e34, 0x6af6532f},
 	{0x00009e38, 0x0cc80c00},
@@ -337,7 +337,7 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
 
 static const u32 ar9565_1p0_baseband_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20 */
-	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d},
+	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8009},
 	{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
 	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
 	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81},
@@ -345,9 +345,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
 	{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
 	{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
 	{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
-	{0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
-	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
-	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+	{0x00009e04, 0x00802020, 0x00802020, 0x00142020, 0x00142020},
+	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
 	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
 	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
@@ -450,6 +450,8 @@ static const u32 ar9565_1p0_soc_postamble[][5] = {
 
 static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
 	/* Addr      allmodes */
+	{0x00004050, 0x00300300},
+	{0x0000406c, 0x00100000},
 	{0x0000a000, 0x00010000},
 	{0x0000a004, 0x00030002},
 	{0x0000a008, 0x00050004},
@@ -498,27 +500,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
 	{0x0000a0b4, 0x00000000},
 	{0x0000a0b8, 0x00000000},
 	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x01000101},
-	{0x0000a0c8, 0x011e011f},
-	{0x0000a0cc, 0x011c011d},
-	{0x0000a0d0, 0x02030204},
-	{0x0000a0d4, 0x02010202},
-	{0x0000a0d8, 0x021f0200},
-	{0x0000a0dc, 0x0302021e},
-	{0x0000a0e0, 0x03000301},
-	{0x0000a0e4, 0x031e031f},
-	{0x0000a0e8, 0x0402031d},
-	{0x0000a0ec, 0x04000401},
-	{0x0000a0f0, 0x041e041f},
-	{0x0000a0f4, 0x0502041d},
-	{0x0000a0f8, 0x05000501},
-	{0x0000a0fc, 0x051e051f},
-	{0x0000a100, 0x06010602},
-	{0x0000a104, 0x061f0600},
-	{0x0000a108, 0x061d061e},
-	{0x0000a10c, 0x07020703},
-	{0x0000a110, 0x07000701},
+	{0x0000a0c0, 0x00bf00a0},
+	{0x0000a0c4, 0x11a011a1},
+	{0x0000a0c8, 0x11be11bf},
+	{0x0000a0cc, 0x11bc11bd},
+	{0x0000a0d0, 0x22632264},
+	{0x0000a0d4, 0x22612262},
+	{0x0000a0d8, 0x227f2260},
+	{0x0000a0dc, 0x4322227e},
+	{0x0000a0e0, 0x43204321},
+	{0x0000a0e4, 0x433e433f},
+	{0x0000a0e8, 0x4462433d},
+	{0x0000a0ec, 0x44604461},
+	{0x0000a0f0, 0x447e447f},
+	{0x0000a0f4, 0x5582447d},
+	{0x0000a0f8, 0x55805581},
+	{0x0000a0fc, 0x559e559f},
+	{0x0000a100, 0x66816682},
+	{0x0000a104, 0x669f6680},
+	{0x0000a108, 0x669d669e},
+	{0x0000a10c, 0x77627763},
+	{0x0000a110, 0x77607761},
 	{0x0000a114, 0x00000000},
 	{0x0000a118, 0x00000000},
 	{0x0000a11c, 0x00000000},
@@ -530,27 +532,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
 	{0x0000a134, 0x00000000},
 	{0x0000a138, 0x00000000},
 	{0x0000a13c, 0x00000000},
-	{0x0000a140, 0x001f0000},
-	{0x0000a144, 0x01000101},
-	{0x0000a148, 0x011e011f},
-	{0x0000a14c, 0x011c011d},
-	{0x0000a150, 0x02030204},
-	{0x0000a154, 0x02010202},
-	{0x0000a158, 0x021f0200},
-	{0x0000a15c, 0x0302021e},
-	{0x0000a160, 0x03000301},
-	{0x0000a164, 0x031e031f},
-	{0x0000a168, 0x0402031d},
-	{0x0000a16c, 0x04000401},
-	{0x0000a170, 0x041e041f},
-	{0x0000a174, 0x0502041d},
-	{0x0000a178, 0x05000501},
-	{0x0000a17c, 0x051e051f},
-	{0x0000a180, 0x06010602},
-	{0x0000a184, 0x061f0600},
-	{0x0000a188, 0x061d061e},
-	{0x0000a18c, 0x07020703},
-	{0x0000a190, 0x07000701},
+	{0x0000a140, 0x00bf00a0},
+	{0x0000a144, 0x11a011a1},
+	{0x0000a148, 0x11be11bf},
+	{0x0000a14c, 0x11bc11bd},
+	{0x0000a150, 0x22632264},
+	{0x0000a154, 0x22612262},
+	{0x0000a158, 0x227f2260},
+	{0x0000a15c, 0x4322227e},
+	{0x0000a160, 0x43204321},
+	{0x0000a164, 0x433e433f},
+	{0x0000a168, 0x4462433d},
+	{0x0000a16c, 0x44604461},
+	{0x0000a170, 0x447e447f},
+	{0x0000a174, 0x5582447d},
+	{0x0000a178, 0x55805581},
+	{0x0000a17c, 0x559e559f},
+	{0x0000a180, 0x66816682},
+	{0x0000a184, 0x669f6680},
+	{0x0000a188, 0x669d669e},
+	{0x0000a18c, 0x77e677e7},
+	{0x0000a190, 0x77e477e5},
 	{0x0000a194, 0x00000000},
 	{0x0000a198, 0x00000000},
 	{0x0000a19c, 0x00000000},
@@ -770,7 +772,7 @@ static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
 
 static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = {
 	/* Addr      allmodes */
-	{0x00018c00, 0x18213ede},
+	{0x00018c00, 0x18212ede},
 	{0x00018c04, 0x000801d8},
 	{0x00018c08, 0x0003780c},
 };
@@ -889,8 +891,8 @@ static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = {
 	{0x0000a180, 0x66816682},
 	{0x0000a184, 0x669f6680},
 	{0x0000a188, 0x669d669e},
-	{0x0000a18c, 0x77627763},
-	{0x0000a190, 0x77607761},
+	{0x0000a18c, 0x77e677e7},
+	{0x0000a190, 0x77e477e5},
 	{0x0000a194, 0x00000000},
 	{0x0000a198, 0x00000000},
 	{0x0000a19c, 0x00000000},
@@ -1114,7 +1116,7 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = {
 	{0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
 	{0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
 	{0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
-	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df},
 	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
 	{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
 	{0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004},
@@ -1140,13 +1142,13 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = {
 	{0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5},
 	{0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9},
 	{0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb},
-	{0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
-	{0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
-	{0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
-	{0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
-	{0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
-	{0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
-	{0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+	{0x0000a564, 0x7804ff56, 0x7804ff56, 0x60001cf0, 0x60001cf0},
+	{0x0000a568, 0x7804ff56, 0x7804ff56, 0x61001cf1, 0x61001cf1},
+	{0x0000a56c, 0x7804ff56, 0x7804ff56, 0x62001cf2, 0x62001cf2},
+	{0x0000a570, 0x7804ff56, 0x7804ff56, 0x63001cf3, 0x63001cf3},
+	{0x0000a574, 0x7804ff56, 0x7804ff56, 0x64001cf4, 0x64001cf4},
+	{0x0000a578, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6},
+	{0x0000a57c, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6},
 	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1174,7 +1176,7 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
 	{0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
 	{0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
 	{0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
-	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df},
 	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
 	{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
 	{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
@@ -1200,13 +1202,13 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
 	{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
 	{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
 	{0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
-	{0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
-	{0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+	{0x0000a564, 0x7504ff56, 0x7504ff56, 0x59001cf0, 0x59001cf0},
+	{0x0000a568, 0x7504ff56, 0x7504ff56, 0x5a001cf1, 0x5a001cf1},
+	{0x0000a56c, 0x7504ff56, 0x7504ff56, 0x5b001cf2, 0x5b001cf2},
+	{0x0000a570, 0x7504ff56, 0x7504ff56, 0x5c001cf3, 0x5c001cf3},
+	{0x0000a574, 0x7504ff56, 0x7504ff56, 0x5d001cf4, 0x5d001cf4},
+	{0x0000a578, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6},
+	{0x0000a57c, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6},
 	{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6963862a1872..a18414b5948b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -227,13 +227,13 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
 	if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
 		goto work;
 
-	ath9k_set_beacon(sc);
-
 	if (ah->opmode == NL80211_IFTYPE_STATION &&
 	    test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
 		spin_lock_irqsave(&sc->sc_pm_lock, flags);
 		sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
 		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+	} else {
+		ath9k_set_beacon(sc);
 	}
 work:
 	ath_restart_work(sc);
@@ -1332,6 +1332,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_node *an = (struct ath_node *) sta->drv_priv;
 	struct ieee80211_key_conf ps_key = { };
+	int key;
 
 	ath_node_attach(sc, sta, vif);
 
@@ -1339,7 +1340,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
 	    vif->type != NL80211_IFTYPE_AP_VLAN)
 		return 0;
 
-	an->ps_key = ath_key_config(common, vif, sta, &ps_key);
+	key = ath_key_config(common, vif, sta, &ps_key);
+	if (key > 0)
+		an->ps_key = key;
 
 	return 0;
 }
@@ -1356,6 +1359,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
 		return;
 
 	ath_key_delete(common, &ps_key);
+	an->ps_key = 0;
 }
 
 static int ath9k_sta_remove(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 523355b87659..f7c70b3a6ea9 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1728,6 +1728,25 @@ drop_recycle_buffer:
 	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
 }
 
+void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
+{
+	int current_slot, previous_slot;
+
+	B43_WARN_ON(ring->tx);
+
+	/* Device has filled all buffers, drop all packets and let TCP
+	 * decrease speed.
+	 * Decrementing the RX index by one lets the device see all slots
+	 * as free again.
+	 */
+	/*
+	 * TODO: How to increase rx_drop in mac80211?
+	 */
+	current_slot = ring->ops->get_current_rxslot(ring);
+	previous_slot = prev_slot(ring, current_slot);
+	ring->ops->set_current_rxslot(ring, previous_slot);
+}
+
 void b43_dma_rx(struct b43_dmaring *ring)
 {
 	const struct b43_dma_ops *ops = ring->ops;
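
The new b43_dma_handle_rx_overflow() above recovers from an RX descriptor underrun by stepping the device's RX slot pointer one slot backwards, which makes the hardware regard every descriptor as free again, at the cost of dropping the backlog. The ring arithmetic that prev_slot() performs can be checked in isolation; the slot count here is illustrative:

#include <assert.h>
#include <stdio.h>

/* Step a ring index back by one, wrapping at the ring boundary. */
static int prev_slot(int slot, int nr_slots)
{
	return (slot - 1 + nr_slots) % nr_slots;
}

int main(void)
{
	assert(prev_slot(0, 64) == 63);	/* wraps around */
	assert(prev_slot(17, 64) == 16);
	printf("ring arithmetic ok\n");
	return 0;
}
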
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 9fdd1983079c..df8c8cdcbdb5 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -9,7 +9,7 @@
 /* DMA-Interrupt reasons. */
 #define B43_DMAIRQ_FATALMASK	((1 << 10) | (1 << 11) | (1 << 12) \
 					 | (1 << 14) | (1 << 15))
-#define B43_DMAIRQ_NONFATALMASK	(1 << 13)
+#define B43_DMAIRQ_RDESC_UFLOW		(1 << 13)
 #define B43_DMAIRQ_RX_DONE		(1 << 16)
 
 /*** 32-bit DMA Engine. ***/
@@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev,
 void b43_dma_handle_txstatus(struct b43_wldev *dev,
 			     const struct b43_txstatus *status);
 
+void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
+
 void b43_dma_rx(struct b43_dmaring *ring);
 
 void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index d377f77d30b5..6dd07e2ec595 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1902,30 +1902,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
 		}
 	}
 
-	if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK |
-					  B43_DMAIRQ_NONFATALMASK))) {
-		if (merged_dma_reason & B43_DMAIRQ_FATALMASK) {
-			b43err(dev->wl, "Fatal DMA error: "
-			       "0x%08X, 0x%08X, 0x%08X, "
-			       "0x%08X, 0x%08X, 0x%08X\n",
-			       dma_reason[0], dma_reason[1],
-			       dma_reason[2], dma_reason[3],
-			       dma_reason[4], dma_reason[5]);
-			b43err(dev->wl, "This device does not support DMA "
+	if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
+		b43err(dev->wl,
+		       "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
+		       dma_reason[0], dma_reason[1],
+		       dma_reason[2], dma_reason[3],
+		       dma_reason[4], dma_reason[5]);
+		b43err(dev->wl, "This device does not support DMA "
 		       "on your system. It will now be switched to PIO.\n");
 		/* Fall back to PIO transfers if we get fatal DMA errors! */
 		dev->use_pio = true;
 		b43_controller_restart(dev, "DMA error");
 		return;
-		}
-		if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
-			b43err(dev->wl, "DMA error: "
-			       "0x%08X, 0x%08X, 0x%08X, "
-			       "0x%08X, 0x%08X, 0x%08X\n",
-			       dma_reason[0], dma_reason[1],
-			       dma_reason[2], dma_reason[3],
-			       dma_reason[4], dma_reason[5]);
-		}
 	}
 
 	if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
@@ -1944,6 +1932,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
 	handle_irq_noise(dev);
 
 	/* Check the DMA reason registers for received data. */
+	if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
+		if (B43_DEBUG)
+			b43warn(dev->wl, "RX descriptor underrun\n");
+		b43_dma_handle_rx_overflow(dev->dma.rx_ring);
+	}
 	if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
 		if (b43_using_pio_transfers(dev))
 			b43_pio_rx(dev->pio.rx_queue);
@@ -2001,7 +1994,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev)
 		return IRQ_NONE;
 
 	dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
-			     & 0x0001DC00;
+			     & 0x0001FC00;
 	dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
 			     & 0x0000DC00;
 	dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
@@ -3130,7 +3123,7 @@ static int b43_chip_init(struct b43_wldev *dev)
 		b43_write32(dev, 0x018C, 0x02000000);
 	}
 	b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
-	b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
+	b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
 	b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
 	b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
 	b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index b8f82e688c72..9a95045c97b6 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5741,8 +5741,7 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
 	hw->flags =
 	    IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
 	    IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
-	    IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
-	    IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+	    IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
 	if (il->cfg->sku & IL_SKU_N)
 		hw->flags |=
 		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index d3c8ece980d8..e42b266a023a 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -2234,9 +2234,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 	if (wdev->netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdevice(wdev->netdev);
 
-	if (wdev->netdev->reg_state == NETREG_UNREGISTERED)
-		free_netdev(wdev->netdev);
-
 	/* Clear the priv in adapter */
 	priv->netdev = NULL;
 
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 74db0d24a579..26755d9acb55 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1191,6 +1191,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
 	adapter->if_ops.wakeup(adapter);
 	adapter->hs_activated = false;
 	adapter->is_hs_configured = false;
+	adapter->is_suspended = false;
 	mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
 						    MWIFIEX_BSS_ROLE_ANY),
 				   false);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 121443a0f2a1..2eb88ea9acf7 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -655,6 +655,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
 			      struct net_device *dev)
 {
 	dev->netdev_ops = &mwifiex_netdev_ops;
+	dev->destructor = free_netdev;
 	/* Initialize private structure */
 	priv->current_key_index = 0;
 	priv->media_connected = false;
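
Setting dev->destructor = free_netdev (added above) tells the netdev core to free the device itself once unregistration completes; that is why the manual free_netdev() call in mwifiex_del_virtual_intf() is removed earlier in this patch: keeping both would free the device twice. The registration-side shape of that pattern for this kernel era, as a sketch:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Setup callback passed to alloc_netdev(): after unregister_netdevice()
 * completes, the core invokes dev->destructor, so no manual free_netdev()
 * is needed (pre-4.12 pattern; later kernels use dev->needs_free_netdev). */
static void sketch_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->destructor = free_netdev;
}
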
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 311d0b26b81c..1a8a19dbd635 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -96,7 +96,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
 	} else {
 		/* Multicast */
 		priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
-		if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
+		if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
 			dev_dbg(priv->adapter->dev,
 				"info: Enabling All Multicast!\n");
 			priv->curr_pkt_filter |=
@@ -108,20 +108,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
 			dev_dbg(priv->adapter->dev,
 				"info: Set multicast list=%d\n",
 				mcast_list->num_multicast_addr);
-			/* Set multicast addresses to firmware */
-			if (old_pkt_filter == priv->curr_pkt_filter) {
-				/* Send request to firmware */
-				ret = mwifiex_send_cmd_async(priv,
-					HostCmd_CMD_MAC_MULTICAST_ADR,
-					HostCmd_ACT_GEN_SET, 0,
-					mcast_list);
-			} else {
-				/* Send request to firmware */
-				ret = mwifiex_send_cmd_async(priv,
-					HostCmd_CMD_MAC_MULTICAST_ADR,
-					HostCmd_ACT_GEN_SET, 0,
-					mcast_list);
-			}
+			/* Send multicast addresses to firmware */
+			ret = mwifiex_send_cmd_async(priv,
+				HostCmd_CMD_MAC_MULTICAST_ADR,
+				HostCmd_ACT_GEN_SET, 0,
+				mcast_list);
 		}
 	}
 }
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index f802e7c92356..2dacd19e1b8a 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -345,7 +345,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
  */
 void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
 {
-	if (mw > NTB_NUM_MW)
+	if (mw >= NTB_NUM_MW)
 		return NULL;
 
 	return ndev->mw[mw].vbase;
@@ -362,7 +362,7 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
362 */ 362 */
363resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) 363resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
364{ 364{
365 if (mw > NTB_NUM_MW) 365 if (mw >= NTB_NUM_MW)
366 return 0; 366 return 0;
367 367
368 return ndev->mw[mw].bar_sz; 368 return ndev->mw[mw].bar_sz;
@@ -380,7 +380,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
380 */ 380 */
381void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr) 381void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
382{ 382{
383 if (mw > NTB_NUM_MW) 383 if (mw >= NTB_NUM_MW)
384 return; 384 return;
385 385
386 dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr, 386 dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
@@ -1027,8 +1027,8 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1027 ndev->mw[i].vbase = 1027 ndev->mw[i].vbase =
1028 ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)), 1028 ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
1029 ndev->mw[i].bar_sz); 1029 ndev->mw[i].bar_sz);
1030 dev_info(&pdev->dev, "MW %d size %d\n", i, 1030 dev_info(&pdev->dev, "MW %d size %llu\n", i,
1031 (u32) pci_resource_len(pdev, MW_TO_BAR(i))); 1031 pci_resource_len(pdev, MW_TO_BAR(i)));
1032 if (!ndev->mw[i].vbase) { 1032 if (!ndev->mw[i].vbase) {
1033 dev_warn(&pdev->dev, "Cannot remap BAR %d\n", 1033 dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
1034 MW_TO_BAR(i)); 1034 MW_TO_BAR(i));
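
The three ntb_hw.c bound-check hunks are a single off-by-one fix: mw indexes an array of NTB_NUM_MW elements, so valid indexes run 0..NTB_NUM_MW-1 and the old 'mw > NTB_NUM_MW' test let mw == NTB_NUM_MW slip through to touch one element past the end. (The fourth hunk separately fixes the dev_info() format so the resource length prints as %llu instead of being truncated to u32.) A minimal sketch of the bug class, with illustrative names rather than the driver's:

        #include <stddef.h>

        #define N_WINDOWS 2                     /* stands in for NTB_NUM_MW */

        struct window { void *vbase; };
        static struct window win[N_WINDOWS];

        static struct window *get_window(unsigned int i)
        {
                if (i >= N_WINDOWS)             /* '>' alone would admit i == N_WINDOWS */
                        return NULL;
                return &win[i];
        }
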
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index e0bdfd7f9930..f8d7081ee301 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -58,7 +58,7 @@
58#include <linux/ntb.h> 58#include <linux/ntb.h>
59#include "ntb_hw.h" 59#include "ntb_hw.h"
60 60
61#define NTB_TRANSPORT_VERSION 2 61#define NTB_TRANSPORT_VERSION 3
62 62
63static unsigned int transport_mtu = 0x401E; 63static unsigned int transport_mtu = 0x401E;
64module_param(transport_mtu, uint, 0644); 64module_param(transport_mtu, uint, 0644);
@@ -173,10 +173,13 @@ struct ntb_payload_header {
173 173
174enum { 174enum {
175 VERSION = 0, 175 VERSION = 0,
176 MW0_SZ,
177 MW1_SZ,
178 NUM_QPS,
179 QP_LINKS, 176 QP_LINKS,
177 NUM_QPS,
178 NUM_MWS,
179 MW0_SZ_HIGH,
180 MW0_SZ_LOW,
181 MW1_SZ_HIGH,
182 MW1_SZ_LOW,
180 MAX_SPAD, 183 MAX_SPAD,
181}; 184};
182 185
@@ -297,7 +300,7 @@ int ntb_register_client_dev(char *device_name)
297{ 300{
298 struct ntb_transport_client_dev *client_dev; 301 struct ntb_transport_client_dev *client_dev;
299 struct ntb_transport *nt; 302 struct ntb_transport *nt;
300 int rc; 303 int rc, i = 0;
301 304
302 if (list_empty(&ntb_transport_list)) 305 if (list_empty(&ntb_transport_list))
303 return -ENODEV; 306 return -ENODEV;
@@ -315,7 +318,7 @@ int ntb_register_client_dev(char *device_name)
315 dev = &client_dev->dev; 318 dev = &client_dev->dev;
316 319
317 /* setup and register client devices */ 320 /* setup and register client devices */
318 dev_set_name(dev, "%s", device_name); 321 dev_set_name(dev, "%s%d", device_name, i);
319 dev->bus = &ntb_bus_type; 322 dev->bus = &ntb_bus_type;
320 dev->release = ntb_client_release; 323 dev->release = ntb_client_release;
321 dev->parent = &ntb_query_pdev(nt->ndev)->dev; 324 dev->parent = &ntb_query_pdev(nt->ndev)->dev;
@@ -327,6 +330,7 @@ int ntb_register_client_dev(char *device_name)
327 } 330 }
328 331
329 list_add_tail(&client_dev->entry, &nt->client_devs); 332 list_add_tail(&client_dev->entry, &nt->client_devs);
333 i++;
330 } 334 }
331 335
332 return 0; 336 return 0;
@@ -486,12 +490,13 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
486 (qp_num / NTB_NUM_MW * rx_size); 490 (qp_num / NTB_NUM_MW * rx_size);
487 rx_size -= sizeof(struct ntb_rx_info); 491 rx_size -= sizeof(struct ntb_rx_info);
488 492
489 qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info); 493 qp->rx_buff = qp->remote_rx_info + 1;
490 qp->rx_max_frame = min(transport_mtu, rx_size); 494 /* Due to housekeeping, there must be at least 2 buffers */
495 qp->rx_max_frame = min(transport_mtu, rx_size / 2);
491 qp->rx_max_entry = rx_size / qp->rx_max_frame; 496 qp->rx_max_entry = rx_size / qp->rx_max_frame;
492 qp->rx_index = 0; 497 qp->rx_index = 0;
493 498
494 qp->remote_rx_info->entry = qp->rx_max_entry; 499 qp->remote_rx_info->entry = qp->rx_max_entry - 1;
495 500
496 /* setup the hdr offsets with 0's */ 501 /* setup the hdr offsets with 0's */
497 for (i = 0; i < qp->rx_max_entry; i++) { 502 for (i = 0; i < qp->rx_max_entry; i++) {
@@ -502,6 +507,19 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
502 507
503 qp->rx_pkts = 0; 508 qp->rx_pkts = 0;
504 qp->tx_pkts = 0; 509 qp->tx_pkts = 0;
510 qp->tx_index = 0;
511}
512
513static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
514{
515 struct ntb_transport_mw *mw = &nt->mw[num_mw];
516 struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
517
518 if (!mw->virt_addr)
519 return;
520
521 dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
522 mw->virt_addr = NULL;
505} 523}
506 524
507static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) 525static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
@@ -509,12 +527,20 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
509 struct ntb_transport_mw *mw = &nt->mw[num_mw]; 527 struct ntb_transport_mw *mw = &nt->mw[num_mw];
510 struct pci_dev *pdev = ntb_query_pdev(nt->ndev); 528 struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
511 529
530 /* No need to re-setup */
531 if (mw->size == ALIGN(size, 4096))
532 return 0;
533
534 if (mw->size != 0)
535 ntb_free_mw(nt, num_mw);
536
512 /* Alloc memory for receiving data. Must be 4k aligned */ 537 /* Alloc memory for receiving data. Must be 4k aligned */
513 mw->size = ALIGN(size, 4096); 538 mw->size = ALIGN(size, 4096);
514 539
515 mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr, 540 mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
516 GFP_KERNEL); 541 GFP_KERNEL);
517 if (!mw->virt_addr) { 542 if (!mw->virt_addr) {
543 mw->size = 0;
518 dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n", 544 dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
519 (int) mw->size); 545 (int) mw->size);
520 return -ENOMEM; 546 return -ENOMEM;
@@ -604,25 +630,31 @@ static void ntb_transport_link_work(struct work_struct *work)
604 u32 val; 630 u32 val;
605 int rc, i; 631 int rc, i;
606 632
607 /* send the local info */ 633 /* send the local info, in the opposite order of the way we read it */
608 rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); 634 for (i = 0; i < NTB_NUM_MW; i++) {
609 if (rc) { 635 rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
610 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 636 ntb_get_mw_size(ndev, i) >> 32);
611 0, VERSION); 637 if (rc) {
612 goto out; 638 dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
613 } 639 (u32)(ntb_get_mw_size(ndev, i) >> 32),
640 MW0_SZ_HIGH + (i * 2));
641 goto out;
642 }
614 643
615 rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0)); 644 rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
616 if (rc) { 645 (u32) ntb_get_mw_size(ndev, i));
617 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 646 if (rc) {
618 (u32) ntb_get_mw_size(ndev, 0), MW0_SZ); 647 dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
619 goto out; 648 (u32) ntb_get_mw_size(ndev, i),
649 MW0_SZ_LOW + (i * 2));
650 goto out;
651 }
620 } 652 }
621 653
622 rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1)); 654 rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW);
623 if (rc) { 655 if (rc) {
624 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 656 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
625 (u32) ntb_get_mw_size(ndev, 1), MW1_SZ); 657 NTB_NUM_MW, NUM_MWS);
626 goto out; 658 goto out;
627 } 659 }
628 660
@@ -633,16 +665,10 @@ static void ntb_transport_link_work(struct work_struct *work)
633 goto out; 665 goto out;
634 } 666 }
635 667
636 rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val); 668 rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
637 if (rc) {
638 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
639 goto out;
640 }
641
642 rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
643 if (rc) { 669 if (rc) {
644 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 670 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
645 val, QP_LINKS); 671 NTB_TRANSPORT_VERSION, VERSION);
646 goto out; 672 goto out;
647 } 673 }
648 674
@@ -667,33 +693,43 @@ static void ntb_transport_link_work(struct work_struct *work)
667 goto out; 693 goto out;
668 dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); 694 dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
669 695
670 rc = ntb_read_remote_spad(ndev, MW0_SZ, &val); 696 rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
671 if (rc) { 697 if (rc) {
672 dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ); 698 dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
673 goto out; 699 goto out;
674 } 700 }
675 701
676 if (!val) 702 if (val != NTB_NUM_MW)
677 goto out; 703 goto out;
678 dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val); 704 dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
679 705
680 rc = ntb_set_mw(nt, 0, val); 706 for (i = 0; i < NTB_NUM_MW; i++) {
681 if (rc) 707 u64 val64;
682 goto out;
683 708
684 rc = ntb_read_remote_spad(ndev, MW1_SZ, &val); 709 rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
685 if (rc) { 710 if (rc) {
686 dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ); 711 dev_err(&pdev->dev, "Error reading remote spad %d\n",
687 goto out; 712 MW0_SZ_HIGH + (i * 2));
688 } 713 goto out1;
714 }
689 715
690 if (!val) 716 val64 = (u64) val << 32;
691 goto out;
692 dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
693 717
694 rc = ntb_set_mw(nt, 1, val); 718 rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
695 if (rc) 719 if (rc) {
696 goto out; 720 dev_err(&pdev->dev, "Error reading remote spad %d\n",
721 MW0_SZ_LOW + (i * 2));
722 goto out1;
723 }
724
725 val64 |= val;
726
727 dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
728
729 rc = ntb_set_mw(nt, i, val64);
730 if (rc)
731 goto out1;
732 }
697 733
698 nt->transport_link = NTB_LINK_UP; 734 nt->transport_link = NTB_LINK_UP;
699 735
@@ -708,6 +744,9 @@ static void ntb_transport_link_work(struct work_struct *work)
708 744
709 return; 745 return;
710 746
747out1:
748 for (i = 0; i < NTB_NUM_MW; i++)
749 ntb_free_mw(nt, i);
711out: 750out:
712 if (ntb_hw_link_status(ndev)) 751 if (ntb_hw_link_status(ndev))
713 schedule_delayed_work(&nt->link_work, 752 schedule_delayed_work(&nt->link_work,
@@ -780,10 +819,10 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
780 (qp_num / NTB_NUM_MW * tx_size); 819 (qp_num / NTB_NUM_MW * tx_size);
781 tx_size -= sizeof(struct ntb_rx_info); 820 tx_size -= sizeof(struct ntb_rx_info);
782 821
783 qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info); 822 qp->tx_mw = qp->rx_info + 1;
784 qp->tx_max_frame = min(transport_mtu, tx_size); 823 /* Due to housekeeping, there must be at least 2 buffers */
824 qp->tx_max_frame = min(transport_mtu, tx_size / 2);
785 qp->tx_max_entry = tx_size / qp->tx_max_frame; 825 qp->tx_max_entry = tx_size / qp->tx_max_frame;
786 qp->tx_index = 0;
787 826
788 if (nt->debugfs_dir) { 827 if (nt->debugfs_dir) {
789 char debugfs_name[4]; 828 char debugfs_name[4];
@@ -897,10 +936,7 @@ void ntb_transport_free(void *transport)
897 pdev = ntb_query_pdev(nt->ndev); 936 pdev = ntb_query_pdev(nt->ndev);
898 937
899 for (i = 0; i < NTB_NUM_MW; i++) 938 for (i = 0; i < NTB_NUM_MW; i++)
900 if (nt->mw[i].virt_addr) 939 ntb_free_mw(nt, i);
901 dma_free_coherent(&pdev->dev, nt->mw[i].size,
902 nt->mw[i].virt_addr,
903 nt->mw[i].dma_addr);
904 940
905 kfree(nt->qps); 941 kfree(nt->qps);
906 ntb_unregister_transport(nt->ndev); 942 ntb_unregister_transport(nt->ndev);
@@ -999,11 +1035,16 @@ out:
999static void ntb_transport_rx(unsigned long data) 1035static void ntb_transport_rx(unsigned long data)
1000{ 1036{
1001 struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data; 1037 struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
1002 int rc; 1038 int rc, i;
1003 1039
1004 do { 1040 /* Limit the number of packets processed in a single interrupt to
1041 * provide fairness to others
1042 */
1043 for (i = 0; i < qp->rx_max_entry; i++) {
1005 rc = ntb_process_rxc(qp); 1044 rc = ntb_process_rxc(qp);
1006 } while (!rc); 1045 if (rc)
1046 break;
1047 }
1007} 1048}
1008 1049
1009static void ntb_transport_rxc_db(void *data, int db_num) 1050static void ntb_transport_rxc_db(void *data, int db_num)
@@ -1210,12 +1251,14 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
1210 */ 1251 */
1211void ntb_transport_free_queue(struct ntb_transport_qp *qp) 1252void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1212{ 1253{
1213 struct pci_dev *pdev = ntb_query_pdev(qp->ndev); 1254 struct pci_dev *pdev;
1214 struct ntb_queue_entry *entry; 1255 struct ntb_queue_entry *entry;
1215 1256
1216 if (!qp) 1257 if (!qp)
1217 return; 1258 return;
1218 1259
1260 pdev = ntb_query_pdev(qp->ndev);
1261
1219 cancel_delayed_work_sync(&qp->link_work); 1262 cancel_delayed_work_sync(&qp->link_work);
1220 1263
1221 ntb_unregister_db_callback(qp->ndev, qp->qp_num); 1264 ntb_unregister_db_callback(qp->ndev, qp->qp_num);
@@ -1371,12 +1414,13 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
1371 */ 1414 */
1372void ntb_transport_link_down(struct ntb_transport_qp *qp) 1415void ntb_transport_link_down(struct ntb_transport_qp *qp)
1373{ 1416{
1374 struct pci_dev *pdev = ntb_query_pdev(qp->ndev); 1417 struct pci_dev *pdev;
1375 int rc, val; 1418 int rc, val;
1376 1419
1377 if (!qp) 1420 if (!qp)
1378 return; 1421 return;
1379 1422
1423 pdev = ntb_query_pdev(qp->ndev);
1380 qp->client_ready = NTB_LINK_DOWN; 1424 qp->client_ready = NTB_LINK_DOWN;
1381 1425
1382 rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val); 1426 rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
@@ -1408,6 +1452,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_down);
1408 */ 1452 */
1409bool ntb_transport_link_query(struct ntb_transport_qp *qp) 1453bool ntb_transport_link_query(struct ntb_transport_qp *qp)
1410{ 1454{
1455 if (!qp)
1456 return false;
1457
1411 return qp->qp_link == NTB_LINK_UP; 1458 return qp->qp_link == NTB_LINK_UP;
1412} 1459}
1413EXPORT_SYMBOL_GPL(ntb_transport_link_query); 1460EXPORT_SYMBOL_GPL(ntb_transport_link_query);
@@ -1422,6 +1469,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_query);
1422 */ 1469 */
1423unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) 1470unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
1424{ 1471{
1472 if (!qp)
1473 return 0;
1474
1425 return qp->qp_num; 1475 return qp->qp_num;
1426} 1476}
1427EXPORT_SYMBOL_GPL(ntb_transport_qp_num); 1477EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
@@ -1436,6 +1486,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
1436 */ 1486 */
1437unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) 1487unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
1438{ 1488{
1489 if (!qp)
1490 return 0;
1491
1439 return qp->tx_max_frame - sizeof(struct ntb_payload_header); 1492 return qp->tx_max_frame - sizeof(struct ntb_payload_header);
1440} 1493}
1441EXPORT_SYMBOL_GPL(ntb_transport_max_size); 1494EXPORT_SYMBOL_GPL(ntb_transport_max_size);
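
Version 3 of the scratchpad protocol above publishes each memory-window size as two 32-bit halves (MW0_SZ_HIGH/MW0_SZ_LOW and so on), since scratchpad registers are only 32 bits wide, and it writes the local info in the reverse of the order the peer reads it, so that a valid VERSION value is the last thing to land. A minimal sketch of the split and reassembly, assuming hypothetical spad_write()/spad_read() accessors and mirroring the enum layout above:

        #include <stdint.h>

        enum { MW0_SZ_HIGH = 4, MW0_SZ_LOW = 5 };       /* as in the enum above */

        void spad_write(int idx, uint32_t val);         /* hypothetical accessors */
        uint32_t spad_read(int idx);

        static void publish_mw_size(int mw, uint64_t size)
        {
                spad_write(MW0_SZ_HIGH + mw * 2, (uint32_t)(size >> 32));
                spad_write(MW0_SZ_LOW + mw * 2, (uint32_t)size);
        }

        static uint64_t fetch_mw_size(int mw)
        {
                uint64_t hi = spad_read(MW0_SZ_HIGH + mw * 2);

                return (hi << 32) | spad_read(MW0_SZ_LOW + mw * 2);
        }
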
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 0c81915b1997..b9838130a7b0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -20,7 +20,6 @@ if RTC_CLASS
20config RTC_HCTOSYS 20config RTC_HCTOSYS
21 bool "Set system time from RTC on startup and resume" 21 bool "Set system time from RTC on startup and resume"
22 default y 22 default y
23 depends on !ALWAYS_USE_PERSISTENT_CLOCK
24 help 23 help
25 If you say yes here, the system time (wall clock) will be set using 24 If you say yes here, the system time (wall clock) will be set using
26 the value read from a specified RTC device. This is useful to avoid 25 the value read from a specified RTC device. This is useful to avoid
@@ -29,7 +28,6 @@ config RTC_HCTOSYS
29config RTC_SYSTOHC 28config RTC_SYSTOHC
30 bool "Set the RTC time based on NTP synchronization" 29 bool "Set the RTC time based on NTP synchronization"
31 default y 30 default y
32 depends on !ALWAYS_USE_PERSISTENT_CLOCK
33 help 31 help
34 If you say yes here, the system time (wall clock) will be stored 32 If you say yes here, the system time (wall clock) will be stored
35 in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11 33 in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 787bd2c22bca..380387a47b1d 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -526,13 +526,17 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
526 } 526 }
527 527
528 if (xfer->tx_buf) 528 if (xfer->tx_buf)
529 spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); 529 if (xfer->bits_per_word > 8)
530 spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
531 else
532 spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
530 else 533 else
531 spi_writel(as, TDR, 0); 534 spi_writel(as, TDR, 0);
532 535
533 dev_dbg(master->dev.parent, 536 dev_dbg(master->dev.parent,
534 " start pio xfer %p: len %u tx %p rx %p\n", 537 " start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
535 xfer, xfer->len, xfer->tx_buf, xfer->rx_buf); 538 xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
539 xfer->bits_per_word);
536 540
537 /* Enable relevant interrupts */ 541 /* Enable relevant interrupts */
538 spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES)); 542 spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
@@ -950,21 +954,39 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
950{ 954{
951 u8 *txp; 955 u8 *txp;
952 u8 *rxp; 956 u8 *rxp;
957 u16 *txp16;
958 u16 *rxp16;
953 unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; 959 unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
954 960
955 if (xfer->rx_buf) { 961 if (xfer->rx_buf) {
956 rxp = ((u8 *)xfer->rx_buf) + xfer_pos; 962 if (xfer->bits_per_word > 8) {
957 *rxp = spi_readl(as, RDR); 963 rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
964 *rxp16 = spi_readl(as, RDR);
965 } else {
966 rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
967 *rxp = spi_readl(as, RDR);
968 }
958 } else { 969 } else {
959 spi_readl(as, RDR); 970 spi_readl(as, RDR);
960 } 971 }
961 972 if (xfer->bits_per_word > 8) {
962 as->current_remaining_bytes--; 973 as->current_remaining_bytes -= 2;
974 if (as->current_remaining_bytes < 0)
975 as->current_remaining_bytes = 0;
976 } else {
977 as->current_remaining_bytes--;
978 }
963 979
964 if (as->current_remaining_bytes) { 980 if (as->current_remaining_bytes) {
965 if (xfer->tx_buf) { 981 if (xfer->tx_buf) {
966 txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; 982 if (xfer->bits_per_word > 8) {
967 spi_writel(as, TDR, *txp); 983 txp16 = (u16 *)(((u8 *)xfer->tx_buf)
984 + xfer_pos + 2);
985 spi_writel(as, TDR, *txp16);
986 } else {
987 txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
988 spi_writel(as, TDR, *txp);
989 }
968 } else { 990 } else {
969 spi_writel(as, TDR, 0); 991 spi_writel(as, TDR, 0);
970 } 992 }
@@ -1378,9 +1400,16 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1378 } 1400 }
1379 } 1401 }
1380 1402
1403 if (xfer->bits_per_word > 8) {
1404 if (xfer->len % 2) {
1405 dev_dbg(&spi->dev, "buffer len should be 16-bit aligned\n");
1406 return -EINVAL;
1407 }
1408 }
1409
1381 /* FIXME implement these protocol options!! */ 1410 /* FIXME implement these protocol options!! */
1382 if (xfer->speed_hz) { 1411 if (xfer->speed_hz < spi->max_speed_hz) {
1383 dev_dbg(&spi->dev, "no protocol options yet\n"); 1412 dev_dbg(&spi->dev, "can't change speed in transfer\n");
1384 return -ENOPROTOOPT; 1413 return -ENOPROTOOPT;
1385 } 1414 }
1386 1415
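
The spi-atmel hunks make the PIO path honour bits_per_word: words wider than 8 bits move through the data register as 16-bit quantities, each consuming two buffer bytes, which is why current_remaining_bytes now steps by 2 and odd-length buffers are rejected up front. The element-size selection in miniature, with illustrative names:

        #include <stdint.h>

        void write_tdr(uint16_t v);     /* hypothetical: write the transmit data register */

        static void pio_put_one(const uint8_t *tx_buf, unsigned long pos,
                                unsigned int bits_per_word)
        {
                if (bits_per_word > 8)
                        write_tdr(*(const uint16_t *)(tx_buf + pos));   /* 2 bytes */
                else
                        write_tdr(tx_buf[pos]);                         /* 1 byte */
        }
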
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 2e8f24a1fb95..50b13c9b1ab6 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -784,7 +784,7 @@ static const struct of_device_id davinci_spi_of_match[] = {
784 }, 784 },
785 { }, 785 { },
786}; 786};
787MODULE_DEVICE_TABLE(of, davini_spi_of_match); 787MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
788 788
789/** 789/**
790 * spi_davinci_get_pdata - Get platform data from DTS binding 790 * spi_davinci_get_pdata - Get platform data from DTS binding
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 163fd802b7ac..32b7bb111eb6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -334,7 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
334 spi->dev.parent = &master->dev; 334 spi->dev.parent = &master->dev;
335 spi->dev.bus = &spi_bus_type; 335 spi->dev.bus = &spi_bus_type;
336 spi->dev.release = spidev_release; 336 spi->dev.release = spidev_release;
337 spi->cs_gpio = -EINVAL; 337 spi->cs_gpio = -ENOENT;
338 device_initialize(&spi->dev); 338 device_initialize(&spi->dev);
339 return spi; 339 return spi;
340} 340}
@@ -1067,8 +1067,11 @@ static int of_spi_register_master(struct spi_master *master)
1067 nb = of_gpio_named_count(np, "cs-gpios"); 1067 nb = of_gpio_named_count(np, "cs-gpios");
1068 master->num_chipselect = max(nb, (int)master->num_chipselect); 1068 master->num_chipselect = max(nb, (int)master->num_chipselect);
1069 1069
1070 if (nb < 1) 1070 /* Return error only for an incorrectly formed cs-gpios property */
1071 if (nb == 0 || nb == -ENOENT)
1071 return 0; 1072 return 0;
1073 else if (nb < 0)
1074 return nb;
1072 1075
1073 cs = devm_kzalloc(&master->dev, 1076 cs = devm_kzalloc(&master->dev,
1074 sizeof(int) * master->num_chipselect, 1077 sizeof(int) * master->num_chipselect,
@@ -1079,7 +1082,7 @@ static int of_spi_register_master(struct spi_master *master)
1079 return -ENOMEM; 1082 return -ENOMEM;
1080 1083
1081 for (i = 0; i < master->num_chipselect; i++) 1084 for (i = 0; i < master->num_chipselect; i++)
1082 cs[i] = -EINVAL; 1085 cs[i] = -ENOENT;
1083 1086
1084 for (i = 0; i < nb; i++) 1087 for (i = 0; i < nb; i++)
1085 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 1088 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
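
cs-gpios is an optional property, and of_gpio_named_count() reports its absence as -ENOENT while other negative values mean the property exists but is malformed; the spi.c hunk above now propagates only the latter. The same -ENOENT also replaces -EINVAL as the "no GPIO wired to this chip select" placeholder. A minimal sketch of the error discrimination, assuming a counter with the same return convention:

        #include <errno.h>

        int cs_gpio_count(void);        /* hypothetical: >= 0 entries, -ENOENT when
                                         * the property is absent, another negative
                                         * errno when it is malformed */

        static int setup_chipselects(void)
        {
                int nb = cs_gpio_count();

                if (nb == 0 || nb == -ENOENT)
                        return 0;       /* optional property missing: fine */
                if (nb < 0)
                        return nb;      /* malformed property: real error */
                /* ... allocate nb entries, default each to -ENOENT ... */
                return 0;
        }
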
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index ffbc6a94be52..262ef1f23b38 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1250,7 +1250,7 @@ static u32 iscsit_do_crypto_hash_sg(
1250 1250
1251static void iscsit_do_crypto_hash_buf( 1251static void iscsit_do_crypto_hash_buf(
1252 struct hash_desc *hash, 1252 struct hash_desc *hash,
1253 unsigned char *buf, 1253 const void *buf,
1254 u32 payload_length, 1254 u32 payload_length,
1255 u32 padding, 1255 u32 padding,
1256 u8 *pad_bytes, 1256 u8 *pad_bytes,
@@ -2524,9 +2524,8 @@ static int iscsit_send_conn_drop_async_message(
2524 if (conn->conn_ops->HeaderDigest) { 2524 if (conn->conn_ops->HeaderDigest) {
2525 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2525 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2526 2526
2527 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2527 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
2528 (unsigned char *)hdr, ISCSI_HDR_LEN, 2528 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2529 0, NULL, (u8 *)header_digest);
2530 2529
2531 cmd->tx_size += ISCSI_CRC_LEN; 2530 cmd->tx_size += ISCSI_CRC_LEN;
2532 pr_debug("Attaching CRC32C HeaderDigest to" 2531 pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2662,9 +2661,8 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2662 if (conn->conn_ops->HeaderDigest) { 2661 if (conn->conn_ops->HeaderDigest) {
2663 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2662 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2664 2663
2665 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2664 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
2666 (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, 2665 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2667 0, NULL, (u8 *)header_digest);
2668 2666
2669 iov[0].iov_len += ISCSI_CRC_LEN; 2667 iov[0].iov_len += ISCSI_CRC_LEN;
2670 tx_size += ISCSI_CRC_LEN; 2668 tx_size += ISCSI_CRC_LEN;
@@ -2841,9 +2839,8 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2841 if (conn->conn_ops->HeaderDigest) { 2839 if (conn->conn_ops->HeaderDigest) {
2842 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2840 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2843 2841
2844 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2842 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
2845 (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN, 2843 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2846 0, NULL, (u8 *)header_digest);
2847 2844
2848 iov[0].iov_len += ISCSI_CRC_LEN; 2845 iov[0].iov_len += ISCSI_CRC_LEN;
2849 tx_size += ISCSI_CRC_LEN; 2846 tx_size += ISCSI_CRC_LEN;
@@ -2900,9 +2897,8 @@ static int iscsit_send_unsolicited_nopin(
2900 if (conn->conn_ops->HeaderDigest) { 2897 if (conn->conn_ops->HeaderDigest) {
2901 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2898 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2902 2899
2903 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2900 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
2904 (unsigned char *)hdr, ISCSI_HDR_LEN, 2901 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2905 0, NULL, (u8 *)header_digest);
2906 2902
2907 tx_size += ISCSI_CRC_LEN; 2903 tx_size += ISCSI_CRC_LEN;
2908 pr_debug("Attaching CRC32C HeaderDigest to" 2904 pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2949,9 +2945,8 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2949 if (conn->conn_ops->HeaderDigest) { 2945 if (conn->conn_ops->HeaderDigest) {
2950 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2946 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2951 2947
2952 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2948 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
2953 (unsigned char *)hdr, ISCSI_HDR_LEN, 2949 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2954 0, NULL, (u8 *)header_digest);
2955 2950
2956 iov[0].iov_len += ISCSI_CRC_LEN; 2951 iov[0].iov_len += ISCSI_CRC_LEN;
2957 tx_size += ISCSI_CRC_LEN; 2952 tx_size += ISCSI_CRC_LEN;
@@ -3040,9 +3035,8 @@ static int iscsit_send_r2t(
3040 if (conn->conn_ops->HeaderDigest) { 3035 if (conn->conn_ops->HeaderDigest) {
3041 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3036 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3042 3037
3043 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3038 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3044 (unsigned char *)hdr, ISCSI_HDR_LEN, 3039 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3045 0, NULL, (u8 *)header_digest);
3046 3040
3047 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 3041 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3048 tx_size += ISCSI_CRC_LEN; 3042 tx_size += ISCSI_CRC_LEN;
@@ -3256,9 +3250,8 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3256 if (conn->conn_ops->HeaderDigest) { 3250 if (conn->conn_ops->HeaderDigest) {
3257 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3251 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3258 3252
3259 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3253 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
3260 (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, 3254 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3261 0, NULL, (u8 *)header_digest);
3262 3255
3263 iov[0].iov_len += ISCSI_CRC_LEN; 3256 iov[0].iov_len += ISCSI_CRC_LEN;
3264 tx_size += ISCSI_CRC_LEN; 3257 tx_size += ISCSI_CRC_LEN;
@@ -3329,9 +3322,8 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3329 if (conn->conn_ops->HeaderDigest) { 3322 if (conn->conn_ops->HeaderDigest) {
3330 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3323 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3331 3324
3332 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3325 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3333 (unsigned char *)hdr, ISCSI_HDR_LEN, 3326 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3334 0, NULL, (u8 *)header_digest);
3335 3327
3336 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 3328 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3337 tx_size += ISCSI_CRC_LEN; 3329 tx_size += ISCSI_CRC_LEN;
@@ -3504,9 +3496,8 @@ static int iscsit_send_text_rsp(
3504 if (conn->conn_ops->HeaderDigest) { 3496 if (conn->conn_ops->HeaderDigest) {
3505 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3497 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3506 3498
3507 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3499 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3508 (unsigned char *)hdr, ISCSI_HDR_LEN, 3500 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3509 0, NULL, (u8 *)header_digest);
3510 3501
3511 iov[0].iov_len += ISCSI_CRC_LEN; 3502 iov[0].iov_len += ISCSI_CRC_LEN;
3512 tx_size += ISCSI_CRC_LEN; 3503 tx_size += ISCSI_CRC_LEN;
@@ -3557,11 +3548,11 @@ static int iscsit_send_reject(
3557 struct iscsi_cmd *cmd, 3548 struct iscsi_cmd *cmd,
3558 struct iscsi_conn *conn) 3549 struct iscsi_conn *conn)
3559{ 3550{
3560 u32 iov_count = 0, tx_size = 0; 3551 struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
3561 struct iscsi_reject *hdr;
3562 struct kvec *iov; 3552 struct kvec *iov;
3553 u32 iov_count = 0, tx_size;
3563 3554
3564 iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]); 3555 iscsit_build_reject(cmd, conn, hdr);
3565 3556
3566 iov = &cmd->iov_misc[0]; 3557 iov = &cmd->iov_misc[0];
3567 iov[iov_count].iov_base = cmd->pdu; 3558 iov[iov_count].iov_base = cmd->pdu;
@@ -3574,9 +3565,8 @@ static int iscsit_send_reject(
3574 if (conn->conn_ops->HeaderDigest) { 3565 if (conn->conn_ops->HeaderDigest) {
3575 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3566 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3576 3567
3577 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3568 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3578 (unsigned char *)hdr, ISCSI_HDR_LEN, 3569 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3579 0, NULL, (u8 *)header_digest);
3580 3570
3581 iov[0].iov_len += ISCSI_CRC_LEN; 3571 iov[0].iov_len += ISCSI_CRC_LEN;
3582 tx_size += ISCSI_CRC_LEN; 3572 tx_size += ISCSI_CRC_LEN;
@@ -3585,9 +3575,8 @@ static int iscsit_send_reject(
3585 } 3575 }
3586 3576
3587 if (conn->conn_ops->DataDigest) { 3577 if (conn->conn_ops->DataDigest) {
3588 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3578 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
3589 (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN, 3579 ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
3590 0, NULL, (u8 *)&cmd->data_crc);
3591 3580
3592 iov[iov_count].iov_base = &cmd->data_crc; 3581 iov[iov_count].iov_base = &cmd->data_crc;
3593 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3582 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 7816af6cdd12..40d9dbca987b 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -823,7 +823,7 @@ static int iscsit_attach_ooo_cmdsn(
823 /* 823 /*
824 * CmdSN is greater than the tail of the list. 824 * CmdSN is greater than the tail of the list.
825 */ 825 */
826 if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn) 826 if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
827 list_add_tail(&ooo_cmdsn->ooo_list, 827 list_add_tail(&ooo_cmdsn->ooo_list,
828 &sess->sess_ooo_cmdsn_list); 828 &sess->sess_ooo_cmdsn_list);
829 else { 829 else {
@@ -833,11 +833,12 @@ static int iscsit_attach_ooo_cmdsn(
833 */ 833 */
834 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, 834 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
835 ooo_list) { 835 ooo_list) {
836 if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) 836 if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
837 continue; 837 continue;
838 838
839 /* Insert before this entry */
839 list_add(&ooo_cmdsn->ooo_list, 840 list_add(&ooo_cmdsn->ooo_list,
840 &ooo_tmp->ooo_list); 841 ooo_tmp->ooo_list.prev);
841 break; 842 break;
842 } 843 }
843 } 844 }
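
Both erl1.c hunks replace a plain '<' on CmdSN values with iscsi_sna_lt(): CmdSNs are 32-bit serial numbers that wrap, so ordinary comparison misorders entries once the window crosses zero (RFC 1982 serial-number arithmetic). The second hunk also inserts before, not after, the first entry that is no longer smaller, since list_add() links the new node after the node it is given. One common formulation of the wrap-safe compare, as a sketch rather than the kernel's exact macro:

        #include <stdbool.h>
        #include <stdint.h>

        /* true if a precedes b on the wrapping 32-bit circle, i.e. the
         * forward distance from a to b is nonzero and below 2^31 */
        static bool sna32_lt(uint32_t a, uint32_t b)
        {
                return a != b && (uint32_t)(b - a) < (UINT32_C(1) << 31);
        }
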
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index f690be9e5293..c2185fc31136 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -436,7 +436,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
436 /* 436 /*
437 * Extra parameters for ISER from RFC-5046 437 * Extra parameters for ISER from RFC-5046
438 */ 438 */
439 param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS, 439 param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS,
440 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, 440 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
441 TYPERANGE_BOOL_AND, USE_LEADING_ONLY); 441 TYPERANGE_BOOL_AND, USE_LEADING_ONLY);
442 if (!param) 442 if (!param)
@@ -529,7 +529,7 @@ int iscsi_set_keys_to_negotiate(
529 SET_PSTATE_NEGOTIATE(param); 529 SET_PSTATE_NEGOTIATE(param);
530 } else if (!strcmp(param->name, OFMARKINT)) { 530 } else if (!strcmp(param->name, OFMARKINT)) {
531 SET_PSTATE_NEGOTIATE(param); 531 SET_PSTATE_NEGOTIATE(param);
532 } else if (!strcmp(param->name, RDMAEXTENTIONS)) { 532 } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
533 if (iser == true) 533 if (iser == true)
534 SET_PSTATE_NEGOTIATE(param); 534 SET_PSTATE_NEGOTIATE(param);
535 } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { 535 } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
@@ -580,7 +580,7 @@ int iscsi_set_keys_irrelevant_for_discovery(
580 param->state &= ~PSTATE_NEGOTIATE; 580 param->state &= ~PSTATE_NEGOTIATE;
581 else if (!strcmp(param->name, OFMARKINT)) 581 else if (!strcmp(param->name, OFMARKINT))
582 param->state &= ~PSTATE_NEGOTIATE; 582 param->state &= ~PSTATE_NEGOTIATE;
583 else if (!strcmp(param->name, RDMAEXTENTIONS)) 583 else if (!strcmp(param->name, RDMAEXTENSIONS))
584 param->state &= ~PSTATE_NEGOTIATE; 584 param->state &= ~PSTATE_NEGOTIATE;
585 else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) 585 else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH))
586 param->state &= ~PSTATE_NEGOTIATE; 586 param->state &= ~PSTATE_NEGOTIATE;
@@ -1977,7 +1977,7 @@ void iscsi_set_session_parameters(
1977 ops->SessionType = !strcmp(param->value, DISCOVERY); 1977 ops->SessionType = !strcmp(param->value, DISCOVERY);
1978 pr_debug("SessionType: %s\n", 1978 pr_debug("SessionType: %s\n",
1979 param->value); 1979 param->value);
1980 } else if (!strcmp(param->name, RDMAEXTENTIONS)) { 1980 } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
1981 ops->RDMAExtensions = !strcmp(param->value, YES); 1981 ops->RDMAExtensions = !strcmp(param->value, YES);
1982 pr_debug("RDMAExtensions: %s\n", 1982 pr_debug("RDMAExtensions: %s\n",
1983 param->value); 1983 param->value);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index f31b9c4b83f2..915b06798505 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -91,7 +91,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
91/* 91/*
92 * Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046 92 * Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046
93 */ 93 */
94#define RDMAEXTENTIONS "RDMAExtensions" 94#define RDMAEXTENSIONS "RDMAExtensions"
95#define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength" 95#define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength"
96#define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength" 96#define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength"
97 97
@@ -142,7 +142,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
142/* 142/*
143 * Initial values for iSER parameters following RFC-5046 Section 6 143 * Initial values for iSER parameters following RFC-5046 Section 6
144 */ 144 */
145#define INITIAL_RDMAEXTENTIONS NO 145#define INITIAL_RDMAEXTENSIONS NO
146#define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144" 146#define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144"
147#define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192" 147#define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192"
148 148
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 43b7ac6c5b1c..4a8bd36d3958 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1584,6 +1584,13 @@ static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
1584 .store = target_core_store_dev_udev_path, 1584 .store = target_core_store_dev_udev_path,
1585}; 1585};
1586 1586
1587static ssize_t target_core_show_dev_enable(void *p, char *page)
1588{
1589 struct se_device *dev = p;
1590
1591 return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
1592}
1593
1587static ssize_t target_core_store_dev_enable( 1594static ssize_t target_core_store_dev_enable(
1588 void *p, 1595 void *p,
1589 const char *page, 1596 const char *page,
@@ -1609,8 +1616,8 @@ static ssize_t target_core_store_dev_enable(
1609static struct target_core_configfs_attribute target_core_attr_dev_enable = { 1616static struct target_core_configfs_attribute target_core_attr_dev_enable = {
1610 .attr = { .ca_owner = THIS_MODULE, 1617 .attr = { .ca_owner = THIS_MODULE,
1611 .ca_name = "enable", 1618 .ca_name = "enable",
1612 .ca_mode = S_IWUSR }, 1619 .ca_mode = S_IRUGO | S_IWUSR },
1613 .show = NULL, 1620 .show = target_core_show_dev_enable,
1614 .store = target_core_store_dev_enable, 1621 .store = target_core_store_dev_enable,
1615}; 1622};
1616 1623
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 2e4d655471bc..4630481b6043 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -68,7 +68,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
68 struct se_dev_entry *deve = se_cmd->se_deve; 68 struct se_dev_entry *deve = se_cmd->se_deve;
69 69
70 deve->total_cmds++; 70 deve->total_cmds++;
71 deve->total_bytes += se_cmd->data_length;
72 71
73 if ((se_cmd->data_direction == DMA_TO_DEVICE) && 72 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
74 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { 73 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
@@ -85,8 +84,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
85 else if (se_cmd->data_direction == DMA_FROM_DEVICE) 84 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
86 deve->read_bytes += se_cmd->data_length; 85 deve->read_bytes += se_cmd->data_length;
87 86
88 deve->deve_cmds++;
89
90 se_lun = deve->se_lun; 87 se_lun = deve->se_lun;
91 se_cmd->se_lun = deve->se_lun; 88 se_cmd->se_lun = deve->se_lun;
92 se_cmd->pr_res_key = deve->pr_res_key; 89 se_cmd->pr_res_key = deve->pr_res_key;
@@ -275,17 +272,6 @@ int core_free_device_list_for_node(
275 return 0; 272 return 0;
276} 273}
277 274
278void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
279{
280 struct se_dev_entry *deve;
281 unsigned long flags;
282
283 spin_lock_irqsave(&se_nacl->device_list_lock, flags);
284 deve = se_nacl->device_list[se_cmd->orig_fe_lun];
285 deve->deve_cmds--;
286 spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
287}
288
289void core_update_device_list_access( 275void core_update_device_list_access(
290 u32 mapped_lun, 276 u32 mapped_lun,
291 u32 lun_access, 277 u32 lun_access,
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 58ed683e04ae..1b1d544e927a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -153,10 +153,6 @@ static int fd_configure_device(struct se_device *dev)
153 struct request_queue *q = bdev_get_queue(inode->i_bdev); 153 struct request_queue *q = bdev_get_queue(inode->i_bdev);
154 unsigned long long dev_size; 154 unsigned long long dev_size;
155 155
156 dev->dev_attrib.hw_block_size =
157 bdev_logical_block_size(inode->i_bdev);
158 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
159
160 /* 156 /*
161 * Determine the number of bytes from i_size_read() minus 157 * Determine the number of bytes from i_size_read() minus
162 * one (1) logical sector from underlying struct block_device 158 * one (1) logical sector from underlying struct block_device
@@ -203,9 +199,6 @@ static int fd_configure_device(struct se_device *dev)
203 goto fail; 199 goto fail;
204 } 200 }
205 201
206 dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
207 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
208
209 /* 202 /*
210 * Limit UNMAP emulation to 8k Number of LBAs (NoLB) 203 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
211 */ 204 */
@@ -226,6 +219,8 @@ static int fd_configure_device(struct se_device *dev)
226 219
227 fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; 220 fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
228 221
222 dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
223 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
229 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
230 225
231 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { 226 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 07f5f94634bb..aa1620abec6d 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -615,6 +615,8 @@ iblock_execute_rw(struct se_cmd *cmd)
615 rw = WRITE_FUA; 615 rw = WRITE_FUA;
616 else if (!(q->flush_flags & REQ_FLUSH)) 616 else if (!(q->flush_flags & REQ_FLUSH))
617 rw = WRITE_FUA; 617 rw = WRITE_FUA;
618 else
619 rw = WRITE;
618 } else { 620 } else {
619 rw = WRITE; 621 rw = WRITE;
620 } 622 }
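
The iblock hunk plugs a hole in the write-flag selection: for a FUA command on a queue that advertises FLUSH but not FUA, neither branch assigned rw, so it was read uninitialized. A minimal sketch of the corrected branch structure, with illustrative flags:

        #include <stdbool.h>

        enum { WR = 0, WR_FUA = 1 };    /* stand-ins for WRITE/WRITE_FUA */

        static int pick_rw(bool cmd_fua, bool q_fua, bool q_flush)
        {
                int rw;

                if (cmd_fua) {
                        if (q_fua)
                                rw = WR_FUA;
                        else if (!q_flush)
                                rw = WR_FUA;
                        else
                                rw = WR;        /* the previously missing arm */
                } else {
                        rw = WR;
                }
                return rw;
        }
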
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 853bab60e362..18d49df4d0ac 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -8,7 +8,6 @@ extern struct t10_alua_lu_gp *default_lu_gp;
8struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); 8struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
9int core_free_device_list_for_node(struct se_node_acl *, 9int core_free_device_list_for_node(struct se_node_acl *,
10 struct se_portal_group *); 10 struct se_portal_group *);
11void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
12void core_update_device_list_access(u32, u32, struct se_node_acl *); 11void core_update_device_list_access(u32, u32, struct se_node_acl *);
13int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, 12int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
14 u32, u32, struct se_node_acl *, struct se_portal_group *); 13 u32, u32, struct se_node_acl *, struct se_portal_group *);
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index e0b3c379aa14..0921a64b5550 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -291,6 +291,11 @@ rd_execute_rw(struct se_cmd *cmd)
291 u32 src_len; 291 u32 src_len;
292 u64 tmp; 292 u64 tmp;
293 293
294 if (dev->rd_flags & RDF_NULLIO) {
295 target_complete_cmd(cmd, SAM_STAT_GOOD);
296 return 0;
297 }
298
294 tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size; 299 tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
295 rd_offset = do_div(tmp, PAGE_SIZE); 300 rd_offset = do_div(tmp, PAGE_SIZE);
296 rd_page = tmp; 301 rd_page = tmp;
@@ -373,11 +378,12 @@ rd_execute_rw(struct se_cmd *cmd)
373} 378}
374 379
375enum { 380enum {
376 Opt_rd_pages, Opt_err 381 Opt_rd_pages, Opt_rd_nullio, Opt_err
377}; 382};
378 383
379static match_table_t tokens = { 384static match_table_t tokens = {
380 {Opt_rd_pages, "rd_pages=%d"}, 385 {Opt_rd_pages, "rd_pages=%d"},
386 {Opt_rd_nullio, "rd_nullio=%d"},
381 {Opt_err, NULL} 387 {Opt_err, NULL}
382}; 388};
383 389
@@ -408,6 +414,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
408 " Count: %u\n", rd_dev->rd_page_count); 414 " Count: %u\n", rd_dev->rd_page_count);
409 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; 415 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
410 break; 416 break;
417 case Opt_rd_nullio:
418 match_int(args, &arg);
419 if (arg != 1)
420 break;
421
422 pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
423 rd_dev->rd_flags |= RDF_NULLIO;
424 break;
411 default: 425 default:
412 break; 426 break;
413 } 427 }
@@ -424,8 +438,9 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
424 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", 438 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
425 rd_dev->rd_dev_id); 439 rd_dev->rd_dev_id);
426 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" 440 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
427 " SG_table_count: %u\n", rd_dev->rd_page_count, 441 " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
428 PAGE_SIZE, rd_dev->sg_table_count); 442 PAGE_SIZE, rd_dev->sg_table_count,
443 !!(rd_dev->rd_flags & RDF_NULLIO));
429 return bl; 444 return bl;
430} 445}
431 446
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 933b38b6e563..1789d1e14395 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -22,6 +22,7 @@ struct rd_dev_sg_table {
22} ____cacheline_aligned; 22} ____cacheline_aligned;
23 23
24#define RDF_HAS_PAGE_COUNT 0x01 24#define RDF_HAS_PAGE_COUNT 0x01
25#define RDF_NULLIO 0x02
25 26
26struct rd_dev { 27struct rd_dev {
27 struct se_device dev; 28 struct se_device dev;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index f8388b4024aa..4a793362309d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2163,8 +2163,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2163 if (wait_for_tasks) 2163 if (wait_for_tasks)
2164 transport_wait_for_tasks(cmd); 2164 transport_wait_for_tasks(cmd);
2165 2165
2166 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
2167
2168 if (cmd->se_lun) 2166 if (cmd->se_lun)
2169 transport_lun_remove_cmd(cmd); 2167 transport_lun_remove_cmd(cmd);
2170 2168
@@ -2213,21 +2211,19 @@ static void target_release_cmd_kref(struct kref *kref)
2213{ 2211{
2214 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2212 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2215 struct se_session *se_sess = se_cmd->se_sess; 2213 struct se_session *se_sess = se_cmd->se_sess;
2216 unsigned long flags;
2217 2214
2218 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2219 if (list_empty(&se_cmd->se_cmd_list)) { 2215 if (list_empty(&se_cmd->se_cmd_list)) {
2220 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2216 spin_unlock(&se_sess->sess_cmd_lock);
2221 se_cmd->se_tfo->release_cmd(se_cmd); 2217 se_cmd->se_tfo->release_cmd(se_cmd);
2222 return; 2218 return;
2223 } 2219 }
2224 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 2220 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
2225 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2221 spin_unlock(&se_sess->sess_cmd_lock);
2226 complete(&se_cmd->cmd_wait_comp); 2222 complete(&se_cmd->cmd_wait_comp);
2227 return; 2223 return;
2228 } 2224 }
2229 list_del(&se_cmd->se_cmd_list); 2225 list_del(&se_cmd->se_cmd_list);
2230 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2226 spin_unlock(&se_sess->sess_cmd_lock);
2231 2227
2232 se_cmd->se_tfo->release_cmd(se_cmd); 2228 se_cmd->se_tfo->release_cmd(se_cmd);
2233} 2229}
@@ -2238,7 +2234,8 @@ static void target_release_cmd_kref(struct kref *kref)
2238 */ 2234 */
2239int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 2235int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
2240{ 2236{
2241 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2237 return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
2238 &se_sess->sess_cmd_lock);
2242} 2239}
2243EXPORT_SYMBOL(target_put_sess_cmd); 2240EXPORT_SYMBOL(target_put_sess_cmd);
2244 2241
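
The transport.c hunks close a race in target_put_sess_cmd(): the old code dropped the kref first and only then took sess_cmd_lock inside the release callback, leaving a window in which a walker of the session command list could look up a command whose refcount had already hit zero. kref_put_spinlock_irqsave() instead takes the lock before the final decrement can commit, and it enters the release callback with the lock held, which is why target_release_cmd_kref() above now drops it with plain spin_unlock(). A sketch of the shape of such a helper, assuming the usual kernel atomics:

        static int put_ref_locked(atomic_t *count, spinlock_t *lock,
                                  void (*release)(void))
        {
                unsigned long flags;

                /* fast path: decrement as long as we are not the last holder */
                if (atomic_add_unless(count, -1, 1))
                        return 0;

                /* possibly the last reference: serialize against lookups */
                spin_lock_irqsave(lock, flags);
                if (atomic_dec_and_test(count)) {
                        release();      /* runs, and unlocks, under 'lock' */
                        local_irq_restore(flags);
                        return 1;
                }
                spin_unlock_irqrestore(lock, flags);
                return 0;
        }
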
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index bff0775e258c..5174ebac288d 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Since these may be in userspace, we use (inline) accessors. 4 * Since these may be in userspace, we use (inline) accessors.
5 */ 5 */
6#include <linux/module.h>
6#include <linux/vringh.h> 7#include <linux/vringh.h>
7#include <linux/virtio_ring.h> 8#include <linux/virtio_ring.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
@@ -1005,3 +1006,5 @@ int vringh_need_notify_kern(struct vringh *vrh)
1005 return __vringh_need_notify(vrh, getu16_kern); 1006 return __vringh_need_notify(vrh, getu16_kern);
1006} 1007}
1007EXPORT_SYMBOL(vringh_need_notify_kern); 1008EXPORT_SYMBOL(vringh_need_notify_kern);
1009
1010MODULE_LICENSE("GPL");
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0aabb344b02e..5aae3d12d400 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -209,7 +209,6 @@ typedef struct ext4_io_end {
209 ssize_t size; /* size of the extent */ 209 ssize_t size; /* size of the extent */
210 struct kiocb *iocb; /* iocb struct for AIO */ 210 struct kiocb *iocb; /* iocb struct for AIO */
211 int result; /* error value for AIO */ 211 int result; /* error value for AIO */
212 atomic_t count; /* reference counter */
213} ext4_io_end_t; 212} ext4_io_end_t;
214 213
215struct ext4_io_submit { 214struct ext4_io_submit {
@@ -2651,14 +2650,11 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
2651 2650
2652/* page-io.c */ 2651/* page-io.c */
2653extern int __init ext4_init_pageio(void); 2652extern int __init ext4_init_pageio(void);
2653extern void ext4_add_complete_io(ext4_io_end_t *io_end);
2654extern void ext4_exit_pageio(void); 2654extern void ext4_exit_pageio(void);
2655extern void ext4_ioend_shutdown(struct inode *); 2655extern void ext4_ioend_shutdown(struct inode *);
2656extern void ext4_free_io_end(ext4_io_end_t *io);
2656extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); 2657extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
2657extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end);
2658extern int ext4_put_io_end(ext4_io_end_t *io_end);
2659extern void ext4_put_io_end_defer(ext4_io_end_t *io_end);
2660extern void ext4_io_submit_init(struct ext4_io_submit *io,
2661 struct writeback_control *wbc);
2662extern void ext4_end_io_work(struct work_struct *work); 2658extern void ext4_end_io_work(struct work_struct *work);
2663extern void ext4_io_submit(struct ext4_io_submit *io); 2659extern void ext4_io_submit(struct ext4_io_submit *io);
2664extern int ext4_bio_write_page(struct ext4_io_submit *io, 2660extern int ext4_bio_write_page(struct ext4_io_submit *io,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 107936db244e..bc0f1910b9cf 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3642,7 +3642,7 @@ int ext4_find_delalloc_range(struct inode *inode,
3642{ 3642{
3643 struct extent_status es; 3643 struct extent_status es;
3644 3644
3645 ext4_es_find_delayed_extent(inode, lblk_start, &es); 3645 ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
3646 if (es.es_len == 0) 3646 if (es.es_len == 0)
3647 return 0; /* there is no delay extent in this tree */ 3647 return 0; /* there is no delay extent in this tree */
3648 else if (es.es_lblk <= lblk_start && 3648 else if (es.es_lblk <= lblk_start &&
@@ -4608,9 +4608,10 @@ static int ext4_find_delayed_extent(struct inode *inode,
4608 struct extent_status es; 4608 struct extent_status es;
4609 ext4_lblk_t block, next_del; 4609 ext4_lblk_t block, next_del;
4610 4610
4611 ext4_es_find_delayed_extent(inode, newes->es_lblk, &es);
4612
4613 if (newes->es_pblk == 0) { 4611 if (newes->es_pblk == 0) {
4612 ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
4613 newes->es_lblk + newes->es_len - 1, &es);
4614
4614 /* 4615 /*
4615 * No extent in extent-tree contains block @newes->es_pblk, 4616 * No extent in extent-tree contains block @newes->es_pblk,
4616 * then the block may stay in 1)a hole or 2)delayed-extent. 4617 * then the block may stay in 1)a hole or 2)delayed-extent.
@@ -4630,7 +4631,7 @@ static int ext4_find_delayed_extent(struct inode *inode,
4630 } 4631 }
4631 4632
4632 block = newes->es_lblk + newes->es_len; 4633 block = newes->es_lblk + newes->es_len;
4633 ext4_es_find_delayed_extent(inode, block, &es); 4634 ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
4634 if (es.es_len == 0) 4635 if (es.es_len == 0)
4635 next_del = EXT_MAX_BLOCKS; 4636 next_del = EXT_MAX_BLOCKS;
4636 else 4637 else
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index fe3337a85ede..e6941e622d31 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -232,14 +232,16 @@ static struct extent_status *__es_tree_search(struct rb_root *root,
232} 232}
233 233
234/* 234/*
235 * ext4_es_find_delayed_extent: find the 1st delayed extent covering @es->lblk 235 * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
236 * if it exists, otherwise, the next extent after @es->lblk. 236 * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
237 * 237 *
238 * @inode: the inode which owns delayed extents 238 * @inode: the inode which owns delayed extents
239 * @lblk: the offset where we start to search 239 * @lblk: the offset where we start to search
240 * @end: the offset where we stop to search
240 * @es: delayed extent that we found 241 * @es: delayed extent that we found
241 */ 242 */
242void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, 243void ext4_es_find_delayed_extent_range(struct inode *inode,
244 ext4_lblk_t lblk, ext4_lblk_t end,
243 struct extent_status *es) 245 struct extent_status *es)
244{ 246{
245 struct ext4_es_tree *tree = NULL; 247 struct ext4_es_tree *tree = NULL;
@@ -247,7 +249,8 @@ void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
247 struct rb_node *node; 249 struct rb_node *node;
248 250
249 BUG_ON(es == NULL); 251 BUG_ON(es == NULL);
250 trace_ext4_es_find_delayed_extent_enter(inode, lblk); 252 BUG_ON(end < lblk);
253 trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);
251 254
252 read_lock(&EXT4_I(inode)->i_es_lock); 255 read_lock(&EXT4_I(inode)->i_es_lock);
253 tree = &EXT4_I(inode)->i_es_tree; 256 tree = &EXT4_I(inode)->i_es_tree;
@@ -270,6 +273,10 @@ out:
270 if (es1 && !ext4_es_is_delayed(es1)) { 273 if (es1 && !ext4_es_is_delayed(es1)) {
271 while ((node = rb_next(&es1->rb_node)) != NULL) { 274 while ((node = rb_next(&es1->rb_node)) != NULL) {
272 es1 = rb_entry(node, struct extent_status, rb_node); 275 es1 = rb_entry(node, struct extent_status, rb_node);
276 if (es1->es_lblk > end) {
277 es1 = NULL;
278 break;
279 }
273 if (ext4_es_is_delayed(es1)) 280 if (ext4_es_is_delayed(es1))
274 break; 281 break;
275 } 282 }
@@ -285,7 +292,7 @@ out:
285 read_unlock(&EXT4_I(inode)->i_es_lock); 292 read_unlock(&EXT4_I(inode)->i_es_lock);
286 293
287 ext4_es_lru_add(inode); 294 ext4_es_lru_add(inode);
288 trace_ext4_es_find_delayed_extent_exit(inode, es); 295 trace_ext4_es_find_delayed_extent_range_exit(inode, es);
289} 296}
290 297
291static struct extent_status * 298static struct extent_status *
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index d8e2d4dc311e..f740eb03b707 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -62,7 +62,8 @@ extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
62 unsigned long long status); 62 unsigned long long status);
63extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, 63extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
64 ext4_lblk_t len); 64 ext4_lblk_t len);
65extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, 65extern void ext4_es_find_delayed_extent_range(struct inode *inode,
66 ext4_lblk_t lblk, ext4_lblk_t end,
66 struct extent_status *es); 67 struct extent_status *es);
67extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, 68extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
68 struct extent_status *es); 69 struct extent_status *es);
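The renamed API is now range-bounded: callers pass the last block of interest so the status-tree walk can stop early. A minimal caller sketch (has_delayed_extent and map_len are hypothetical names, not from the patch):

static int has_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t map_len)
{
	struct extent_status es;

	/* search is clamped to [lblk, lblk + map_len - 1] */
	ext4_es_find_delayed_extent_range(inode, lblk,
					  lblk + map_len - 1, &es);
	return es.es_len != 0 && in_range(lblk, es.es_lblk, es.es_len);
}

Passing EXT_MAX_BLOCKS as the end, as the fiemap caller above does, preserves the old unbounded behaviour.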
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 4959e29573b6..b1b4d51b5d86 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -465,7 +465,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
465 * If there is a delay extent at this offset, 465 * If there is a delay extent at this offset,
466 * it will be treated as data. 466 * it will be treated as data.
467 */ 467 */
468 ext4_es_find_delayed_extent(inode, last, &es); 468 ext4_es_find_delayed_extent_range(inode, last, last, &es);
469 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { 469 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
470 if (last != start) 470 if (last != start)
471 dataoff = last << blkbits; 471 dataoff = last << blkbits;
@@ -548,7 +548,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
548 * If there is a delay extent at this offset, 548 * If there is a delay extent at this offset,
549 * we will skip this extent. 549 * we will skip this extent.
550 */ 550 */
551 ext4_es_find_delayed_extent(inode, last, &es); 551 ext4_es_find_delayed_extent_range(inode, last, last, &es);
552 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { 552 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
553 last = es.es_lblk + es.es_len; 553 last = es.es_lblk + es.es_len;
554 holeoff = last << blkbits; 554 holeoff = last << blkbits;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0723774bdfb5..d6382b89ecbd 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1488,10 +1488,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
1488 struct ext4_io_submit io_submit; 1488 struct ext4_io_submit io_submit;
1489 1489
1490 BUG_ON(mpd->next_page <= mpd->first_page); 1490 BUG_ON(mpd->next_page <= mpd->first_page);
1491 ext4_io_submit_init(&io_submit, mpd->wbc); 1491 memset(&io_submit, 0, sizeof(io_submit));
1492 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
1493 if (!io_submit.io_end)
1494 return -ENOMEM;
1495 /* 1492 /*
1496 * We need to start from the first_page to the next_page - 1 1493 * We need to start from the first_page to the next_page - 1
1497 * to make sure we also write the mapped dirty buffer_heads. 1494 * to make sure we also write the mapped dirty buffer_heads.
@@ -1579,8 +1576,6 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
1579 pagevec_release(&pvec); 1576 pagevec_release(&pvec);
1580 } 1577 }
1581 ext4_io_submit(&io_submit); 1578 ext4_io_submit(&io_submit);
1582 /* Drop io_end reference we got from init */
1583 ext4_put_io_end_defer(io_submit.io_end);
1584 return ret; 1579 return ret;
1585} 1580}
1586 1581
@@ -2239,16 +2234,9 @@ static int ext4_writepage(struct page *page,
2239 */ 2234 */
2240 return __ext4_journalled_writepage(page, len); 2235 return __ext4_journalled_writepage(page, len);
2241 2236
2242 ext4_io_submit_init(&io_submit, wbc); 2237 memset(&io_submit, 0, sizeof(io_submit));
2243 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
2244 if (!io_submit.io_end) {
2245 redirty_page_for_writepage(wbc, page);
2246 return -ENOMEM;
2247 }
2248 ret = ext4_bio_write_page(&io_submit, page, len, wbc); 2238 ret = ext4_bio_write_page(&io_submit, page, len, wbc);
2249 ext4_io_submit(&io_submit); 2239 ext4_io_submit(&io_submit);
2250 /* Drop io_end reference we got from init */
2251 ext4_put_io_end_defer(io_submit.io_end);
2252 return ret; 2240 return ret;
2253} 2241}
2254 2242
@@ -3079,13 +3067,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3079 struct inode *inode = file_inode(iocb->ki_filp); 3067 struct inode *inode = file_inode(iocb->ki_filp);
3080 ext4_io_end_t *io_end = iocb->private; 3068 ext4_io_end_t *io_end = iocb->private;
3081 3069
3082 /* if not async direct IO just return */ 3070 /* if not async direct IO or a 0-byte DIO write, just return */
3083 if (!io_end) { 3071 if (!io_end || !size)
3084 inode_dio_done(inode); 3072 goto out;
3085 if (is_async)
3086 aio_complete(iocb, ret, 0);
3087 return;
3088 }
3089 3073
3090 ext_debug("ext4_end_io_dio(): io_end 0x%p " 3074 ext_debug("ext4_end_io_dio(): io_end 0x%p "
3091 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", 3075 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
@@ -3093,13 +3077,25 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3093 size); 3077 size);
3094 3078
3095 iocb->private = NULL; 3079 iocb->private = NULL;
3080
3081 /* if not aio dio with unwritten extents, just free io and return */
3082 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
3083 ext4_free_io_end(io_end);
3084out:
3085 inode_dio_done(inode);
3086 if (is_async)
3087 aio_complete(iocb, ret, 0);
3088 return;
3089 }
3090
3096 io_end->offset = offset; 3091 io_end->offset = offset;
3097 io_end->size = size; 3092 io_end->size = size;
3098 if (is_async) { 3093 if (is_async) {
3099 io_end->iocb = iocb; 3094 io_end->iocb = iocb;
3100 io_end->result = ret; 3095 io_end->result = ret;
3101 } 3096 }
3102 ext4_put_io_end_defer(io_end); 3097
3098 ext4_add_complete_io(io_end);
3103} 3099}
3104 3100
3105/* 3101/*
@@ -3133,7 +3129,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3133 get_block_t *get_block_func = NULL; 3129 get_block_t *get_block_func = NULL;
3134 int dio_flags = 0; 3130 int dio_flags = 0;
3135 loff_t final_size = offset + count; 3131 loff_t final_size = offset + count;
3136 ext4_io_end_t *io_end = NULL;
3137 3132
3138 /* Use the old path for reads and writes beyond i_size. */ 3133 /* Use the old path for reads and writes beyond i_size. */
3139 if (rw != WRITE || final_size > inode->i_size) 3134 if (rw != WRITE || final_size > inode->i_size)
@@ -3172,16 +3167,13 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3172 iocb->private = NULL; 3167 iocb->private = NULL;
3173 ext4_inode_aio_set(inode, NULL); 3168 ext4_inode_aio_set(inode, NULL);
3174 if (!is_sync_kiocb(iocb)) { 3169 if (!is_sync_kiocb(iocb)) {
3175 io_end = ext4_init_io_end(inode, GFP_NOFS); 3170 ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
3176 if (!io_end) { 3171 if (!io_end) {
3177 ret = -ENOMEM; 3172 ret = -ENOMEM;
3178 goto retake_lock; 3173 goto retake_lock;
3179 } 3174 }
3180 io_end->flag |= EXT4_IO_END_DIRECT; 3175 io_end->flag |= EXT4_IO_END_DIRECT;
3181 /* 3176 iocb->private = io_end;
3182 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
3183 */
3184 iocb->private = ext4_get_io_end(io_end);
3185 /* 3177 /*
3186 * we save the io structure for current async direct 3178 * we save the io structure for current async direct
3187 * IO, so that later ext4_map_blocks() could flag the 3179 * IO, so that later ext4_map_blocks() could flag the
@@ -3205,27 +3197,26 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3205 NULL, 3197 NULL,
3206 dio_flags); 3198 dio_flags);
3207 3199
3200 if (iocb->private)
3201 ext4_inode_aio_set(inode, NULL);
3208 /* 3202 /*
3209 * Put our reference to io_end. This can free the io_end structure e.g. 3203 * The io_end structure takes a reference to the inode, that
3210 * in sync IO case or in case of error. It can even perform extent 3204 * structure needs to be destroyed and the reference to the
3211 * conversion if all bios we submitted finished before we got here. 3205 * inode needs to be dropped when IO completes, even for a 0-
3212 * Note that in that case iocb->private can be already set to NULL 3206 * byte write or a failed write.
3213 * here. 3207 *
3208 * In the successful AIO DIO case, the io_end structure will
3209 * be destroyed and the reference to the inode will be dropped
3210 * after the end_io callback function is called.
3211 *
3212 * In the 0-byte-write or error case, since VFS
3213 * direct IO won't invoke the end_io callback, we
3214 * need to free the end_io structure here.
3214 */ 3215 */
3215 if (io_end) { 3216 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3216 ext4_inode_aio_set(inode, NULL); 3217 ext4_free_io_end(iocb->private);
3217 ext4_put_io_end(io_end); 3218 iocb->private = NULL;
3218 /* 3219 } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3219 * In case of error or no write ext4_end_io_dio() was not
3220 * called so we have to put iocb's reference.
3221 */
3222 if (ret <= 0 && ret != -EIOCBQUEUED) {
3223 WARN_ON(iocb->private != io_end);
3224 ext4_put_io_end(io_end);
3225 iocb->private = NULL;
3226 }
3227 }
3228 if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3229 EXT4_STATE_DIO_UNWRITTEN)) { 3220 EXT4_STATE_DIO_UNWRITTEN)) {
3230 int err; 3221 int err;
3231 /* 3222 /*
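The rewritten comment above pins down the io_end ownership rule for direct IO: the submitter frees the structure whenever the end_io callback cannot run. A condensed sketch of that convention (names as in the hunk; error handling trimmed; the __blockdev_direct_IO argument list is abbreviated):

ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
			   offset, nr_segs, get_block_func,
			   ext4_end_io_dio, NULL, dio_flags);
if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
	/* VFS never called ext4_end_io_dio(), so clean up here */
	ext4_free_io_end(iocb->private);
	iocb->private = NULL;
}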
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index b1ed9e07434b..def84082a9a9 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2105,7 +2105,11 @@ repeat:
2105 group = ac->ac_g_ex.fe_group; 2105 group = ac->ac_g_ex.fe_group;
2106 2106
2107 for (i = 0; i < ngroups; group++, i++) { 2107 for (i = 0; i < ngroups; group++, i++) {
2108 if (group == ngroups) 2108 /*
2109 * Artificially restricted ngroups for non-extent
2110 * files makes group > ngroups possible on first loop.
2111 */
2112 if (group >= ngroups)
2109 group = 0; 2113 group = 0;
2110 2114
2111 /* This now checks without needing the buddy page */ 2115 /* This now checks without needing the buddy page */
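Since the goal group is computed before ngroups is clamped for non-extent files, the very first iteration can already be out of range; the wrap test therefore has to be ">=" rather than "==". The same defensive pattern in isolation (scan_group and start are stand-in names):

for (i = 0, group = start; i < ngroups; i++, group++) {
	if (group >= ngroups)	/* can trigger on the first pass */
		group = 0;
	scan_group(group);	/* hypothetical per-group work */
}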
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 19599bded62a..4acf1f78881b 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -62,28 +62,15 @@ void ext4_ioend_shutdown(struct inode *inode)
62 cancel_work_sync(&EXT4_I(inode)->i_unwritten_work); 62 cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
63} 63}
64 64
65static void ext4_release_io_end(ext4_io_end_t *io_end) 65void ext4_free_io_end(ext4_io_end_t *io)
66{ 66{
67 BUG_ON(!list_empty(&io_end->list)); 67 BUG_ON(!io);
68 BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN); 68 BUG_ON(!list_empty(&io->list));
69 69 BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
70 if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
71 wake_up_all(ext4_ioend_wq(io_end->inode));
72 if (io_end->flag & EXT4_IO_END_DIRECT)
73 inode_dio_done(io_end->inode);
74 if (io_end->iocb)
75 aio_complete(io_end->iocb, io_end->result, 0);
76 kmem_cache_free(io_end_cachep, io_end);
77}
78
79static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
80{
81 struct inode *inode = io_end->inode;
82 70
83 io_end->flag &= ~EXT4_IO_END_UNWRITTEN; 71 if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
84 /* Wake up anyone waiting on unwritten extent conversion */ 72 wake_up_all(ext4_ioend_wq(io->inode));
85 if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten)) 73 kmem_cache_free(io_end_cachep, io);
86 wake_up_all(ext4_ioend_wq(inode));
87} 74}
88 75
89/* check a range of space and convert unwritten extents to written. */ 76/* check a range of space and convert unwritten extents to written. */
@@ -106,8 +93,13 @@ static int ext4_end_io(ext4_io_end_t *io)
106 "(inode %lu, offset %llu, size %zd, error %d)", 93 "(inode %lu, offset %llu, size %zd, error %d)",
107 inode->i_ino, offset, size, ret); 94 inode->i_ino, offset, size, ret);
108 } 95 }
109 ext4_clear_io_unwritten_flag(io); 96 /* Wake up anyone waiting on unwritten extent conversion */
110 ext4_release_io_end(io); 97 if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
98 wake_up_all(ext4_ioend_wq(inode));
99 if (io->flag & EXT4_IO_END_DIRECT)
100 inode_dio_done(inode);
101 if (io->iocb)
102 aio_complete(io->iocb, io->result, 0);
111 return ret; 103 return ret;
112} 104}
113 105
@@ -138,7 +130,7 @@ static void dump_completed_IO(struct inode *inode)
138} 130}
139 131
140/* Add the io_end to per-inode completed end_io list. */ 132/* Add the io_end to per-inode completed end_io list. */
141static void ext4_add_complete_io(ext4_io_end_t *io_end) 133void ext4_add_complete_io(ext4_io_end_t *io_end)
142{ 134{
143 struct ext4_inode_info *ei = EXT4_I(io_end->inode); 135 struct ext4_inode_info *ei = EXT4_I(io_end->inode);
144 struct workqueue_struct *wq; 136 struct workqueue_struct *wq;
@@ -175,6 +167,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode)
175 err = ext4_end_io(io); 167 err = ext4_end_io(io);
176 if (unlikely(!ret && err)) 168 if (unlikely(!ret && err))
177 ret = err; 169 ret = err;
170 io->flag &= ~EXT4_IO_END_UNWRITTEN;
171 ext4_free_io_end(io);
178 } 172 }
179 return ret; 173 return ret;
180} 174}
@@ -206,43 +200,10 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
206 atomic_inc(&EXT4_I(inode)->i_ioend_count); 200 atomic_inc(&EXT4_I(inode)->i_ioend_count);
207 io->inode = inode; 201 io->inode = inode;
208 INIT_LIST_HEAD(&io->list); 202 INIT_LIST_HEAD(&io->list);
209 atomic_set(&io->count, 1);
210 } 203 }
211 return io; 204 return io;
212} 205}
213 206
214void ext4_put_io_end_defer(ext4_io_end_t *io_end)
215{
216 if (atomic_dec_and_test(&io_end->count)) {
217 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
218 ext4_release_io_end(io_end);
219 return;
220 }
221 ext4_add_complete_io(io_end);
222 }
223}
224
225int ext4_put_io_end(ext4_io_end_t *io_end)
226{
227 int err = 0;
228
229 if (atomic_dec_and_test(&io_end->count)) {
230 if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
231 err = ext4_convert_unwritten_extents(io_end->inode,
232 io_end->offset, io_end->size);
233 ext4_clear_io_unwritten_flag(io_end);
234 }
235 ext4_release_io_end(io_end);
236 }
237 return err;
238}
239
240ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
241{
242 atomic_inc(&io_end->count);
243 return io_end;
244}
245
246/* 207/*
247 * Print a buffer I/O error compatible with the fs/buffer.c. This 208 * Print a buffer I/O error compatible with the fs/buffer.c. This
248 * provides compatibility with dmesg scrapers that look for a specific 209 * provides compatibility with dmesg scrapers that look for a specific
@@ -325,7 +286,12 @@ static void ext4_end_bio(struct bio *bio, int error)
325 bi_sector >> (inode->i_blkbits - 9)); 286 bi_sector >> (inode->i_blkbits - 9));
326 } 287 }
327 288
328 ext4_put_io_end_defer(io_end); 289 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
290 ext4_free_io_end(io_end);
291 return;
292 }
293
294 ext4_add_complete_io(io_end);
329} 295}
330 296
331void ext4_io_submit(struct ext4_io_submit *io) 297void ext4_io_submit(struct ext4_io_submit *io)
@@ -339,37 +305,40 @@ void ext4_io_submit(struct ext4_io_submit *io)
339 bio_put(io->io_bio); 305 bio_put(io->io_bio);
340 } 306 }
341 io->io_bio = NULL; 307 io->io_bio = NULL;
342} 308 io->io_op = 0;
343
344void ext4_io_submit_init(struct ext4_io_submit *io,
345 struct writeback_control *wbc)
346{
347 io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
348 io->io_bio = NULL;
349 io->io_end = NULL; 309 io->io_end = NULL;
350} 310}
351 311
352static int io_submit_init_bio(struct ext4_io_submit *io, 312static int io_submit_init(struct ext4_io_submit *io,
353 struct buffer_head *bh) 313 struct inode *inode,
314 struct writeback_control *wbc,
315 struct buffer_head *bh)
354{ 316{
317 ext4_io_end_t *io_end;
318 struct page *page = bh->b_page;
355 int nvecs = bio_get_nr_vecs(bh->b_bdev); 319 int nvecs = bio_get_nr_vecs(bh->b_bdev);
356 struct bio *bio; 320 struct bio *bio;
357 321
322 io_end = ext4_init_io_end(inode, GFP_NOFS);
323 if (!io_end)
324 return -ENOMEM;
358 bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); 325 bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
359 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); 326 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
360 bio->bi_bdev = bh->b_bdev; 327 bio->bi_bdev = bh->b_bdev;
328 bio->bi_private = io->io_end = io_end;
361 bio->bi_end_io = ext4_end_bio; 329 bio->bi_end_io = ext4_end_bio;
362 bio->bi_private = ext4_get_io_end(io->io_end); 330
363 if (!io->io_end->size) 331 io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
364 io->io_end->offset = (bh->b_page->index << PAGE_CACHE_SHIFT) 332
365 + bh_offset(bh);
366 io->io_bio = bio; 333 io->io_bio = bio;
334 io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
367 io->io_next_block = bh->b_blocknr; 335 io->io_next_block = bh->b_blocknr;
368 return 0; 336 return 0;
369} 337}
370 338
371static int io_submit_add_bh(struct ext4_io_submit *io, 339static int io_submit_add_bh(struct ext4_io_submit *io,
372 struct inode *inode, 340 struct inode *inode,
341 struct writeback_control *wbc,
373 struct buffer_head *bh) 342 struct buffer_head *bh)
374{ 343{
375 ext4_io_end_t *io_end; 344 ext4_io_end_t *io_end;
@@ -380,18 +349,18 @@ submit_and_retry:
380 ext4_io_submit(io); 349 ext4_io_submit(io);
381 } 350 }
382 if (io->io_bio == NULL) { 351 if (io->io_bio == NULL) {
383 ret = io_submit_init_bio(io, bh); 352 ret = io_submit_init(io, inode, wbc, bh);
384 if (ret) 353 if (ret)
385 return ret; 354 return ret;
386 } 355 }
387 ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
388 if (ret != bh->b_size)
389 goto submit_and_retry;
390 io_end = io->io_end; 356 io_end = io->io_end;
391 if (test_clear_buffer_uninit(bh)) 357 if (test_clear_buffer_uninit(bh))
392 ext4_set_io_unwritten_flag(inode, io_end); 358 ext4_set_io_unwritten_flag(inode, io_end);
393 io_end->size += bh->b_size; 359 io->io_end->size += bh->b_size;
394 io->io_next_block++; 360 io->io_next_block++;
361 ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
362 if (ret != bh->b_size)
363 goto submit_and_retry;
395 return 0; 364 return 0;
396} 365}
397 366
@@ -463,7 +432,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
463 do { 432 do {
464 if (!buffer_async_write(bh)) 433 if (!buffer_async_write(bh))
465 continue; 434 continue;
466 ret = io_submit_add_bh(io, inode, bh); 435 ret = io_submit_add_bh(io, inode, wbc, bh);
467 if (ret) { 436 if (ret) {
468 /* 437 /*
469 * We only get here on ENOMEM. Not much else 438 * We only get here on ENOMEM. Not much else
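Net effect of the page-io revert: io_end objects go back to single ownership, so a completion path either destroys the structure or queues it for extent conversion, never both. Roughly (a sketch of the decision restored in ext4_end_bio above):

if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
	ext4_free_io_end(io_end);	/* nothing left to convert */
	return;
}
ext4_add_complete_io(io_end);		/* convert later via workqueue */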
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 5b3d2bd4813a..64b8c7639520 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -77,7 +77,7 @@ struct acpi_signal_fatal_info {
77/* 77/*
78 * OSL Initialization and shutdown primitives 78 * OSL Initialization and shutdown primitives
79 */ 79 */
80acpi_status __initdata acpi_os_initialize(void); 80acpi_status __init acpi_os_initialize(void);
81 81
82acpi_status acpi_os_terminate(void); 82acpi_status acpi_os_terminate(void);
83 83
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index b327b5a9296d..ea69367fdd3b 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -329,10 +329,16 @@ int acpi_processor_power_init(struct acpi_processor *pr);
329int acpi_processor_power_exit(struct acpi_processor *pr); 329int acpi_processor_power_exit(struct acpi_processor *pr);
330int acpi_processor_cst_has_changed(struct acpi_processor *pr); 330int acpi_processor_cst_has_changed(struct acpi_processor *pr);
331int acpi_processor_hotplug(struct acpi_processor *pr); 331int acpi_processor_hotplug(struct acpi_processor *pr);
332int acpi_processor_suspend(struct device *dev);
333int acpi_processor_resume(struct device *dev);
334extern struct cpuidle_driver acpi_idle_driver; 332extern struct cpuidle_driver acpi_idle_driver;
335 333
334#ifdef CONFIG_PM_SLEEP
335void acpi_processor_syscore_init(void);
336void acpi_processor_syscore_exit(void);
337#else
338static inline void acpi_processor_syscore_init(void) {}
339static inline void acpi_processor_syscore_exit(void) {}
340#endif
341
336/* in processor_thermal.c */ 342/* in processor_thermal.c */
337int acpi_processor_get_limit_info(struct acpi_processor *pr); 343int acpi_processor_get_limit_info(struct acpi_processor *pr);
338extern const struct thermal_cooling_device_ops processor_cooling_ops; 344extern const struct thermal_cooling_device_ops processor_cooling_ops;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 61196592152e..63d17ee9eb48 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -316,6 +316,7 @@ struct drm_ioctl_desc {
316 int flags; 316 int flags;
317 drm_ioctl_t *func; 317 drm_ioctl_t *func;
318 unsigned int cmd_drv; 318 unsigned int cmd_drv;
319 const char *name;
319}; 320};
320 321
321/** 322/**
@@ -324,7 +325,7 @@ struct drm_ioctl_desc {
324 */ 325 */
325 326
326#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ 327#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
327 [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl} 328 [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
328 329
329struct drm_magic_entry { 330struct drm_magic_entry {
330 struct list_head head; 331 struct list_head head;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 8230b46fdd73..471f276ce8f7 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -50,13 +50,14 @@ struct drm_fb_helper_surface_size {
50 50
51/** 51/**
52 * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library 52 * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
53 * @gamma_set: - Set the given gamma lut register on the given crtc. 53 * @gamma_set: Set the given gamma lut register on the given crtc.
54 * @gamma_get: - Read the given gamma lut register on the given crtc, used to 54 * @gamma_get: Read the given gamma lut register on the given crtc, used to
55 * save the current lut when force-restoring the fbdev for e.g. 55 * save the current lut when force-restoring the fbdev for e.g.
56 * kdbg. 56 * kdbg.
57 * @fb_probe: - Driver callback to allocate and initialize the fbdev info 57 * @fb_probe: Driver callback to allocate and initialize the fbdev info
58 * structure. Furthermore it also needs to allocate the drm 58 * structure. Furthermore it also needs to allocate the drm
59 * framebuffer used to back the fbdev. 59 * framebuffer used to back the fbdev.
60 * @initial_config: Setup an initial fbdev display configuration
60 * 61 *
61 * Driver callbacks used by the fbdev emulation helper library. 62 * Driver callbacks used by the fbdev emulation helper library.
62 */ 63 */
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 393369147a2d..675ddf4b441f 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -87,15 +87,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
87/** Other copying of data from kernel space */ 87/** Other copying of data from kernel space */
88#define DRM_COPY_TO_USER(arg1, arg2, arg3) \ 88#define DRM_COPY_TO_USER(arg1, arg2, arg3) \
89 copy_to_user(arg1, arg2, arg3) 89 copy_to_user(arg1, arg2, arg3)
90/* Macros for copyfrom user, but checking readability only once */
91#define DRM_VERIFYAREA_READ( uaddr, size ) \
92 (access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
93#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
94 __copy_from_user(arg1, arg2, arg3)
95#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
96 __copy_to_user(arg1, arg2, arg3)
97#define DRM_GET_USER_UNCHECKED(val, uaddr) \
98 __get_user(val, uaddr)
99 90
100#define DRM_HZ HZ 91#define DRM_HZ HZ
101 92
diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h
index 13a3da25ff07..98cd41bb39c8 100644
--- a/include/linux/journal-head.h
+++ b/include/linux/journal-head.h
@@ -30,15 +30,19 @@ struct journal_head {
30 30
31 /* 31 /*
32 * Journalling list for this buffer [jbd_lock_bh_state()] 32 * Journalling list for this buffer [jbd_lock_bh_state()]
33 * NOTE: We *cannot* combine this with b_modified into a bitfield
34 * as gcc would then (which the C standard allows but which is
35 * very unhelpful) make 64-bit accesses to the bitfield and clobber
36 * b_jcount if its update races with bitfield modification.
33 */ 37 */
34 unsigned b_jlist:4; 38 unsigned b_jlist;
35 39
36 /* 40 /*
37 * This flag signals the buffer has been modified by 41 * This flag signals the buffer has been modified by
38 * the currently running transaction 42 * the currently running transaction
39 * [jbd_lock_bh_state()] 43 * [jbd_lock_bh_state()]
40 */ 44 */
41 unsigned b_modified:1; 45 unsigned b_modified;
42 46
43 /* 47 /*
44 * Copy of the buffer data frozen for writing to the log. 48 * Copy of the buffer data frozen for writing to the log.
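A self-contained illustration of the hazard that comment describes (hypothetical struct, not kernel code): with adjacent bitfields the compiler is allowed to implement a one-field store as a wide read-modify-write, silently rewriting a neighbouring member that another CPU is updating.

struct packed_jh {
	unsigned a:4;	/* both written under the same lock      */
	unsigned b:1;
	int	 c;	/* updated concurrently; may share the   */
};			/* memory word the bitfield store touches */

/* "p->a = 1" may compile to: load 64 bits, patch 4 bits, store
 * 64 bits -- clobbering a racing store to p->c.  Promoting a and b
 * to full-width unsigned members keeps each store to one field. */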
diff --git a/include/linux/kref.h b/include/linux/kref.h
index e15828fd71f1..484604d184be 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -19,6 +19,7 @@
19#include <linux/atomic.h> 19#include <linux/atomic.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/spinlock.h>
22 23
23struct kref { 24struct kref {
24 atomic_t refcount; 25 atomic_t refcount;
@@ -98,6 +99,38 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
98 return kref_sub(kref, 1, release); 99 return kref_sub(kref, 1, release);
99} 100}
100 101
102/**
103 * kref_put_spinlock_irqsave - decrement refcount for object.
104 * @kref: object.
105 * @release: pointer to the function that will clean up the object when the
106 * last reference to the object is released.
107 * This pointer is required, and it is not acceptable to pass kfree
108 * in as this function.
109 * @lock: lock to take in release case
110 *
111 * Behaves identically to kref_put with one exception. If the reference count
112 * drops to zero, the lock will be taken atomically wrt dropping the reference
113 * count. The release function has to call spin_unlock() without _irqrestore.
114 */
115static inline int kref_put_spinlock_irqsave(struct kref *kref,
116 void (*release)(struct kref *kref),
117 spinlock_t *lock)
118{
119 unsigned long flags;
120
121 WARN_ON(release == NULL);
122 if (atomic_add_unless(&kref->refcount, -1, 1))
123 return 0;
124 spin_lock_irqsave(lock, flags);
125 if (atomic_dec_and_test(&kref->refcount)) {
126 release(kref);
127 local_irq_restore(flags);
128 return 1;
129 }
130 spin_unlock_irqrestore(lock, flags);
131 return 0;
132}
133
101static inline int kref_put_mutex(struct kref *kref, 134static inline int kref_put_mutex(struct kref *kref,
102 void (*release)(struct kref *kref), 135 void (*release)(struct kref *kref),
103 struct mutex *lock) 136 struct mutex *lock)
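Usage sketch for the new helper (obj_list_lock, struct my_obj and both functions are hypothetical): because kref_put_spinlock_irqsave() restores interrupt flags itself after release() returns, the release callback drops the lock with plain spin_unlock(), not spin_unlock_irqrestore().

static DEFINE_SPINLOCK(obj_list_lock);

struct my_obj {
	struct kref	 kref;
	struct list_head node;
};

static void obj_release(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, kref);

	list_del(&obj->node);		/* still under obj_list_lock */
	spin_unlock(&obj_list_lock);	/* no _irqrestore here */
	kfree(obj);
}

static void obj_put(struct my_obj *obj)
{
	kref_put_spinlock_irqsave(&obj->kref, obj_release,
				  &obj_list_lock);
}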
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 67f46ad6920a..352eec9df1b8 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -126,7 +126,7 @@ struct mlx4_rss_context {
126 126
127struct mlx4_qp_path { 127struct mlx4_qp_path {
128 u8 fl; 128 u8 fl;
129 u8 reserved1[1]; 129 u8 vlan_control;
130 u8 disable_pkey_check; 130 u8 disable_pkey_check;
131 u8 pkey_index; 131 u8 pkey_index;
132 u8 counter_index; 132 u8 counter_index;
@@ -141,11 +141,32 @@ struct mlx4_qp_path {
141 u8 sched_queue; 141 u8 sched_queue;
142 u8 vlan_index; 142 u8 vlan_index;
143 u8 feup; 143 u8 feup;
144 u8 reserved3; 144 u8 fvl_rx;
145 u8 reserved4[2]; 145 u8 reserved4[2];
146 u8 dmac[6]; 146 u8 dmac[6];
147}; 147};
148 148
149enum { /* fl */
150 MLX4_FL_CV = 1 << 6,
151 MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2
152};
153enum { /* vlan_control */
154 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6,
155 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED = 1 << 2,
156 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */
157 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED = 1 << 0
158};
159
160enum { /* feup */
161 MLX4_FEUP_FORCE_ETH_UP = 1 << 6, /* force Eth UP */
162 MLX4_FSM_FORCE_ETH_SRC_MAC = 1 << 5, /* force Source MAC */
163 MLX4_FVL_FORCE_ETH_VLAN = 1 << 3 /* force Eth vlan */
164};
165
166enum { /* fvl_rx */
167 MLX4_FVL_RX_FORCE_ETH_VLAN = 1 << 0 /* enforce Eth rx vlan */
168};
169
149struct mlx4_qp_context { 170struct mlx4_qp_context {
150 __be32 flags; 171 __be32 flags;
151 __be32 pd; 172 __be32 pd;
@@ -185,6 +206,10 @@ struct mlx4_qp_context {
185 u32 reserved5[10]; 206 u32 reserved5[10];
186}; 207};
187 208
209enum { /* param3 */
210 MLX4_STRIP_VLAN = 1 << 30
211};
212
188/* Which firmware version adds support for NEC (NoErrorCompletion) bit */ 213/* Which firmware version adds support for NEC (NoErrorCompletion) bit */
189#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232) 214#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
190 215
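How the new bits compose is not shown in this hunk; as an assumption-level sketch only, a PF enforcing a VST VLAN on a VF's QP path might combine them roughly like this (path is a struct mlx4_qp_path *):

path->vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
		     MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
		     MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
path->feup   |= MLX4_FVL_FORCE_ETH_VLAN;	/* force vlan on tx  */
path->fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;	/* enforce on rx too */
path->fl     |= MLX4_FL_CV;			/* vlan tag is valid */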
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 733eb5ee31c5..6ff26c8db7b9 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -57,7 +57,7 @@ extern struct bus_type spi_bus_type;
57 * @modalias: Name of the driver to use with this device, or an alias 57 * @modalias: Name of the driver to use with this device, or an alias
58 * for that name. This appears in the sysfs "modalias" attribute 58 * for that name. This appears in the sysfs "modalias" attribute
59 * for driver coldplugging, and in uevents used for hotplugging 59 * for driver coldplugging, and in uevents used for hotplugging
60 * @cs_gpio: gpio number of the chipselect line (optional, -EINVAL when 60 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
61 * when not using a GPIO line) 61 * when not using a GPIO line)
62 * 62 *
63 * A @spi_device is used to interchange data between an SPI slave 63 * A @spi_device is used to interchange data between an SPI slave
@@ -266,7 +266,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
266 * queue so the subsystem notifies the driver that it may relax the 266 * queue so the subsystem notifies the driver that it may relax the
267 * hardware by issuing this call 267 * hardware by issuing this call
268 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 268 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
269 * number. Any individual value may be -EINVAL for CS lines that 269 * number. Any individual value may be -ENOENT for CS lines that
270 * are not GPIOs (driven by the SPI controller itself). 270 * are not GPIOs (driven by the SPI controller itself).
271 * 271 *
272 * Each SPI master controller can communicate with one or more @spi_device 272 * Each SPI master controller can communicate with one or more @spi_device
diff --git a/include/linux/time.h b/include/linux/time.h
index 22d81b3c955b..d5d229b2e5af 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -117,14 +117,10 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
117 117
118extern bool persistent_clock_exist; 118extern bool persistent_clock_exist;
119 119
120#ifdef ALWAYS_USE_PERSISTENT_CLOCK
121#define has_persistent_clock() true
122#else
123static inline bool has_persistent_clock(void) 120static inline bool has_persistent_clock(void)
124{ 121{
125 return persistent_clock_exist; 122 return persistent_clock_exist;
126} 123}
127#endif
128 124
129extern void read_persistent_clock(struct timespec *ts); 125extern void read_persistent_clock(struct timespec *ts);
130extern void read_boot_clock(struct timespec *ts); 126extern void read_boot_clock(struct timespec *ts);
diff --git a/include/net/sock.h b/include/net/sock.h
index 5c97b0fc5623..66772cf8c3c5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -866,6 +866,18 @@ struct inet_hashinfo;
866struct raw_hashinfo; 866struct raw_hashinfo;
867struct module; 867struct module;
868 868
869/*
870 * caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
871 * nodes unmodified. Special care is taken when initializing the object to zero.
872 */
873static inline void sk_prot_clear_nulls(struct sock *sk, int size)
874{
875 if (offsetof(struct sock, sk_node.next) != 0)
876 memset(sk, 0, offsetof(struct sock, sk_node.next));
877 memset(&sk->sk_node.pprev, 0,
878 size - offsetof(struct sock, sk_node.pprev));
879}
880
869/* Networking protocol blocks we attach to sockets. 881/* Networking protocol blocks we attach to sockets.
870 * socket layer -> transport layer interface 882 * socket layer -> transport layer interface
871 * transport -> network interface is defined by struct inet_proto 883 * transport -> network interface is defined by struct inet_proto
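Call-site sketch (hypothetical, using the struct proto fields real callers pass): for an RCU-freed socket cache a lookup may still be walking a just-recycled object, so initialization zeroes everything except sk_node.next, which has to keep pointing into its old hash chain until the grace period ends.

struct sock *sk = kmem_cache_alloc(prot->slab, priority);

if (sk)
	sk_prot_clear_nulls(sk, prot->obj_size);  /* not memset(sk, 0, ...) */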
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c4af592f7057..e773dfa5f98f 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -463,7 +463,6 @@ struct se_cmd {
463#define CMD_T_ABORTED (1 << 0) 463#define CMD_T_ABORTED (1 << 0)
464#define CMD_T_ACTIVE (1 << 1) 464#define CMD_T_ACTIVE (1 << 1)
465#define CMD_T_COMPLETE (1 << 2) 465#define CMD_T_COMPLETE (1 << 2)
466#define CMD_T_QUEUED (1 << 3)
467#define CMD_T_SENT (1 << 4) 466#define CMD_T_SENT (1 << 4)
468#define CMD_T_STOP (1 << 5) 467#define CMD_T_STOP (1 << 5)
469#define CMD_T_FAILED (1 << 6) 468#define CMD_T_FAILED (1 << 6)
@@ -572,12 +571,8 @@ struct se_dev_entry {
572 bool def_pr_registered; 571 bool def_pr_registered;
573 /* See transport_lunflags_table */ 572 /* See transport_lunflags_table */
574 u32 lun_flags; 573 u32 lun_flags;
575 u32 deve_cmds;
576 u32 mapped_lun; 574 u32 mapped_lun;
577 u32 average_bytes;
578 u32 last_byte_count;
579 u32 total_cmds; 575 u32 total_cmds;
580 u32 total_bytes;
581 u64 pr_res_key; 576 u64 pr_res_key;
582 u64 creation_time; 577 u64 creation_time;
583 u32 attach_count; 578 u32 attach_count;
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index d0e686402df8..8ee15b97cd38 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -2139,7 +2139,7 @@ TRACE_EVENT(ext4_es_remove_extent,
2139 __entry->lblk, __entry->len) 2139 __entry->lblk, __entry->len)
2140); 2140);
2141 2141
2142TRACE_EVENT(ext4_es_find_delayed_extent_enter, 2142TRACE_EVENT(ext4_es_find_delayed_extent_range_enter,
2143 TP_PROTO(struct inode *inode, ext4_lblk_t lblk), 2143 TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
2144 2144
2145 TP_ARGS(inode, lblk), 2145 TP_ARGS(inode, lblk),
@@ -2161,7 +2161,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_enter,
2161 (unsigned long) __entry->ino, __entry->lblk) 2161 (unsigned long) __entry->ino, __entry->lblk)
2162); 2162);
2163 2163
2164TRACE_EVENT(ext4_es_find_delayed_extent_exit, 2164TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
2165 TP_PROTO(struct inode *inode, struct extent_status *es), 2165 TP_PROTO(struct inode *inode, struct extent_status *es),
2166 2166
2167 TP_ARGS(inode, es), 2167 TP_ARGS(inode, es),
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
index 8b86c0c68edf..d5585f5e038e 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/cpu/idle.c
@@ -40,11 +40,13 @@ __setup("hlt", cpu_idle_nopoll_setup);
40 40
41static inline int cpu_idle_poll(void) 41static inline int cpu_idle_poll(void)
42{ 42{
43 rcu_idle_enter();
43 trace_cpu_idle_rcuidle(0, smp_processor_id()); 44 trace_cpu_idle_rcuidle(0, smp_processor_id());
44 local_irq_enable(); 45 local_irq_enable();
45 while (!need_resched()) 46 while (!need_resched())
46 cpu_relax(); 47 cpu_relax();
47 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 48 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
49 rcu_idle_exit();
48 return 1; 50 return 1;
49} 51}
50 52
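The pattern being fixed, in isolation: any idle loop that can spin indefinitely must bracket the spin with rcu_idle_enter()/rcu_idle_exit(), otherwise RCU never sees the CPU as quiescent and grace periods stall until the next tick. Minimal form:

rcu_idle_enter();		/* CPU is now quiescent for RCU   */
while (!need_resched())
	cpu_relax();
rcu_idle_exit();		/* back to normal RCU bookkeeping */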
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6b41c1899a8b..9dc297faf7c0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4394,6 +4394,64 @@ perf_event_read_event(struct perf_event *event,
4394 perf_output_end(&handle); 4394 perf_output_end(&handle);
4395} 4395}
4396 4396
4397typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data);
4398typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
4399
4400static void
4401perf_event_aux_ctx(struct perf_event_context *ctx,
4402 perf_event_aux_match_cb match,
4403 perf_event_aux_output_cb output,
4404 void *data)
4405{
4406 struct perf_event *event;
4407
4408 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4409 if (event->state < PERF_EVENT_STATE_INACTIVE)
4410 continue;
4411 if (!event_filter_match(event))
4412 continue;
4413 if (match(event, data))
4414 output(event, data);
4415 }
4416}
4417
4418static void
4419perf_event_aux(perf_event_aux_match_cb match,
4420 perf_event_aux_output_cb output,
4421 void *data,
4422 struct perf_event_context *task_ctx)
4423{
4424 struct perf_cpu_context *cpuctx;
4425 struct perf_event_context *ctx;
4426 struct pmu *pmu;
4427 int ctxn;
4428
4429 rcu_read_lock();
4430 list_for_each_entry_rcu(pmu, &pmus, entry) {
4431 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4432 if (cpuctx->unique_pmu != pmu)
4433 goto next;
4434 perf_event_aux_ctx(&cpuctx->ctx, match, output, data);
4435 if (task_ctx)
4436 goto next;
4437 ctxn = pmu->task_ctx_nr;
4438 if (ctxn < 0)
4439 goto next;
4440 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4441 if (ctx)
4442 perf_event_aux_ctx(ctx, match, output, data);
4443next:
4444 put_cpu_ptr(pmu->pmu_cpu_context);
4445 }
4446
4447 if (task_ctx) {
4448 preempt_disable();
4449 perf_event_aux_ctx(task_ctx, match, output, data);
4450 preempt_enable();
4451 }
4452 rcu_read_unlock();
4453}
4454
4397/* 4455/*
4398 * task tracking -- fork/exit 4456 * task tracking -- fork/exit
4399 * 4457 *
@@ -4416,8 +4474,9 @@ struct perf_task_event {
4416}; 4474};
4417 4475
4418static void perf_event_task_output(struct perf_event *event, 4476static void perf_event_task_output(struct perf_event *event,
4419 struct perf_task_event *task_event) 4477 void *data)
4420{ 4478{
4479 struct perf_task_event *task_event = data;
4421 struct perf_output_handle handle; 4480 struct perf_output_handle handle;
4422 struct perf_sample_data sample; 4481 struct perf_sample_data sample;
4423 struct task_struct *task = task_event->task; 4482 struct task_struct *task = task_event->task;
@@ -4445,62 +4504,11 @@ out:
4445 task_event->event_id.header.size = size; 4504 task_event->event_id.header.size = size;
4446} 4505}
4447 4506
4448static int perf_event_task_match(struct perf_event *event) 4507static int perf_event_task_match(struct perf_event *event,
4449{ 4508 void *data __maybe_unused)
4450 if (event->state < PERF_EVENT_STATE_INACTIVE)
4451 return 0;
4452
4453 if (!event_filter_match(event))
4454 return 0;
4455
4456 if (event->attr.comm || event->attr.mmap ||
4457 event->attr.mmap_data || event->attr.task)
4458 return 1;
4459
4460 return 0;
4461}
4462
4463static void perf_event_task_ctx(struct perf_event_context *ctx,
4464 struct perf_task_event *task_event)
4465{ 4509{
4466 struct perf_event *event; 4510 return event->attr.comm || event->attr.mmap ||
4467 4511 event->attr.mmap_data || event->attr.task;
4468 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4469 if (perf_event_task_match(event))
4470 perf_event_task_output(event, task_event);
4471 }
4472}
4473
4474static void perf_event_task_event(struct perf_task_event *task_event)
4475{
4476 struct perf_cpu_context *cpuctx;
4477 struct perf_event_context *ctx;
4478 struct pmu *pmu;
4479 int ctxn;
4480
4481 rcu_read_lock();
4482 list_for_each_entry_rcu(pmu, &pmus, entry) {
4483 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4484 if (cpuctx->unique_pmu != pmu)
4485 goto next;
4486 perf_event_task_ctx(&cpuctx->ctx, task_event);
4487
4488 ctx = task_event->task_ctx;
4489 if (!ctx) {
4490 ctxn = pmu->task_ctx_nr;
4491 if (ctxn < 0)
4492 goto next;
4493 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4494 if (ctx)
4495 perf_event_task_ctx(ctx, task_event);
4496 }
4497next:
4498 put_cpu_ptr(pmu->pmu_cpu_context);
4499 }
4500 if (task_event->task_ctx)
4501 perf_event_task_ctx(task_event->task_ctx, task_event);
4502
4503 rcu_read_unlock();
4504} 4512}
4505 4513
4506static void perf_event_task(struct task_struct *task, 4514static void perf_event_task(struct task_struct *task,
@@ -4531,7 +4539,10 @@ static void perf_event_task(struct task_struct *task,
4531 }, 4539 },
4532 }; 4540 };
4533 4541
4534 perf_event_task_event(&task_event); 4542 perf_event_aux(perf_event_task_match,
4543 perf_event_task_output,
4544 &task_event,
4545 task_ctx);
4535} 4546}
4536 4547
4537void perf_event_fork(struct task_struct *task) 4548void perf_event_fork(struct task_struct *task)
@@ -4557,8 +4568,9 @@ struct perf_comm_event {
4557}; 4568};
4558 4569
4559static void perf_event_comm_output(struct perf_event *event, 4570static void perf_event_comm_output(struct perf_event *event,
4560 struct perf_comm_event *comm_event) 4571 void *data)
4561{ 4572{
4573 struct perf_comm_event *comm_event = data;
4562 struct perf_output_handle handle; 4574 struct perf_output_handle handle;
4563 struct perf_sample_data sample; 4575 struct perf_sample_data sample;
4564 int size = comm_event->event_id.header.size; 4576 int size = comm_event->event_id.header.size;
@@ -4585,39 +4597,16 @@ out:
4585 comm_event->event_id.header.size = size; 4597 comm_event->event_id.header.size = size;
4586} 4598}
4587 4599
4588static int perf_event_comm_match(struct perf_event *event) 4600static int perf_event_comm_match(struct perf_event *event,
4589{ 4601 void *data __maybe_unused)
4590 if (event->state < PERF_EVENT_STATE_INACTIVE)
4591 return 0;
4592
4593 if (!event_filter_match(event))
4594 return 0;
4595
4596 if (event->attr.comm)
4597 return 1;
4598
4599 return 0;
4600}
4601
4602static void perf_event_comm_ctx(struct perf_event_context *ctx,
4603 struct perf_comm_event *comm_event)
4604{ 4602{
4605 struct perf_event *event; 4603 return event->attr.comm;
4606
4607 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4608 if (perf_event_comm_match(event))
4609 perf_event_comm_output(event, comm_event);
4610 }
4611} 4604}
4612 4605
4613static void perf_event_comm_event(struct perf_comm_event *comm_event) 4606static void perf_event_comm_event(struct perf_comm_event *comm_event)
4614{ 4607{
4615 struct perf_cpu_context *cpuctx;
4616 struct perf_event_context *ctx;
4617 char comm[TASK_COMM_LEN]; 4608 char comm[TASK_COMM_LEN];
4618 unsigned int size; 4609 unsigned int size;
4619 struct pmu *pmu;
4620 int ctxn;
4621 4610
4622 memset(comm, 0, sizeof(comm)); 4611 memset(comm, 0, sizeof(comm));
4623 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 4612 strlcpy(comm, comm_event->task->comm, sizeof(comm));
@@ -4627,24 +4616,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
4627 comm_event->comm_size = size; 4616 comm_event->comm_size = size;
4628 4617
4629 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 4618 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
4630 rcu_read_lock();
4631 list_for_each_entry_rcu(pmu, &pmus, entry) {
4632 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4633 if (cpuctx->unique_pmu != pmu)
4634 goto next;
4635 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
4636 4619
4637 ctxn = pmu->task_ctx_nr; 4620 perf_event_aux(perf_event_comm_match,
4638 if (ctxn < 0) 4621 perf_event_comm_output,
4639 goto next; 4622 comm_event,
4640 4623 NULL);
4641 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4642 if (ctx)
4643 perf_event_comm_ctx(ctx, comm_event);
4644next:
4645 put_cpu_ptr(pmu->pmu_cpu_context);
4646 }
4647 rcu_read_unlock();
4648} 4624}
4649 4625
4650void perf_event_comm(struct task_struct *task) 4626void perf_event_comm(struct task_struct *task)
@@ -4706,8 +4682,9 @@ struct perf_mmap_event {
4706}; 4682};
4707 4683
4708static void perf_event_mmap_output(struct perf_event *event, 4684static void perf_event_mmap_output(struct perf_event *event,
4709 struct perf_mmap_event *mmap_event) 4685 void *data)
4710{ 4686{
4687 struct perf_mmap_event *mmap_event = data;
4711 struct perf_output_handle handle; 4688 struct perf_output_handle handle;
4712 struct perf_sample_data sample; 4689 struct perf_sample_data sample;
4713 int size = mmap_event->event_id.header.size; 4690 int size = mmap_event->event_id.header.size;
@@ -4734,46 +4711,24 @@ out:
4734} 4711}
4735 4712
4736static int perf_event_mmap_match(struct perf_event *event, 4713static int perf_event_mmap_match(struct perf_event *event,
4737 struct perf_mmap_event *mmap_event, 4714 void *data)
4738 int executable)
4739{
4740 if (event->state < PERF_EVENT_STATE_INACTIVE)
4741 return 0;
4742
4743 if (!event_filter_match(event))
4744 return 0;
4745
4746 if ((!executable && event->attr.mmap_data) ||
4747 (executable && event->attr.mmap))
4748 return 1;
4749
4750 return 0;
4751}
4752
4753static void perf_event_mmap_ctx(struct perf_event_context *ctx,
4754 struct perf_mmap_event *mmap_event,
4755 int executable)
4756{ 4715{
4757 struct perf_event *event; 4716 struct perf_mmap_event *mmap_event = data;
4717 struct vm_area_struct *vma = mmap_event->vma;
4718 int executable = vma->vm_flags & VM_EXEC;
4758 4719
4759 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 4720 return (!executable && event->attr.mmap_data) ||
4760 if (perf_event_mmap_match(event, mmap_event, executable)) 4721 (executable && event->attr.mmap);
4761 perf_event_mmap_output(event, mmap_event);
4762 }
4763} 4722}
4764 4723
4765static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 4724static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4766{ 4725{
4767 struct perf_cpu_context *cpuctx;
4768 struct perf_event_context *ctx;
4769 struct vm_area_struct *vma = mmap_event->vma; 4726 struct vm_area_struct *vma = mmap_event->vma;
4770 struct file *file = vma->vm_file; 4727 struct file *file = vma->vm_file;
4771 unsigned int size; 4728 unsigned int size;
4772 char tmp[16]; 4729 char tmp[16];
4773 char *buf = NULL; 4730 char *buf = NULL;
4774 const char *name; 4731 const char *name;
4775 struct pmu *pmu;
4776 int ctxn;
4777 4732
4778 memset(tmp, 0, sizeof(tmp)); 4733 memset(tmp, 0, sizeof(tmp));
4779 4734
@@ -4829,27 +4784,10 @@ got_name:
4829 4784
4830 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 4785 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4831 4786
4832 rcu_read_lock(); 4787 perf_event_aux(perf_event_mmap_match,
4833 list_for_each_entry_rcu(pmu, &pmus, entry) { 4788 perf_event_mmap_output,
4834 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4789 mmap_event,
4835 if (cpuctx->unique_pmu != pmu) 4790 NULL);
4836 goto next;
4837 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4838 vma->vm_flags & VM_EXEC);
4839
4840 ctxn = pmu->task_ctx_nr;
4841 if (ctxn < 0)
4842 goto next;
4843
4844 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4845 if (ctx) {
4846 perf_event_mmap_ctx(ctx, mmap_event,
4847 vma->vm_flags & VM_EXEC);
4848 }
4849next:
4850 put_cpu_ptr(pmu->pmu_cpu_context);
4851 }
4852 rcu_read_unlock();
4853 4791
4854 kfree(buf); 4792 kfree(buf);
4855} 4793}
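With the iteration factored into perf_event_aux(), wiring up a further side-band record type reduces to a predicate plus an emitter. A hedged sketch (every "foo" name is hypothetical):

static int perf_event_foo_match(struct perf_event *event, void *data)
{
	return event->attr.comm;	/* reuse an existing attr bit */
}

static void perf_event_foo_output(struct perf_event *event, void *data)
{
	struct perf_foo_event *foo_event = data;	/* hypothetical */
	/* perf_output_begin()/perf_output_end() as in the emitters above */
}

static void perf_event_foo(struct perf_foo_event *foo_event)
{
	perf_event_aux(perf_event_foo_match,
		       perf_event_foo_output,
		       foo_event,
		       NULL);		/* no pinned task context */
}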
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 1296e72e4161..8241906c4b61 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -569,6 +569,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
569 int retval = 0; 569 int retval = 0;
570 570
571 helper_lock(); 571 helper_lock();
572 if (!sub_info->path) {
573 retval = -EINVAL;
574 goto out;
575 }
576
572 if (sub_info->path[0] == '\0') 577 if (sub_info->path[0] == '\0')
573 goto out; 578 goto out;
574 579
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 170814dc418f..3db5a375d8dd 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -88,7 +88,7 @@ static void __init rcu_bootup_announce_oddness(void)
88#ifdef CONFIG_RCU_NOCB_CPU 88#ifdef CONFIG_RCU_NOCB_CPU
89#ifndef CONFIG_RCU_NOCB_CPU_NONE 89#ifndef CONFIG_RCU_NOCB_CPU_NONE
90 if (!have_rcu_nocb_mask) { 90 if (!have_rcu_nocb_mask) {
91 alloc_bootmem_cpumask_var(&rcu_nocb_mask); 91 zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
92 have_rcu_nocb_mask = true; 92 have_rcu_nocb_mask = true;
93 } 93 }
94#ifdef CONFIG_RCU_NOCB_CPU_ZERO 94#ifdef CONFIG_RCU_NOCB_CPU_ZERO
@@ -1667,7 +1667,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
1667 rdtp->last_accelerate = jiffies; 1667 rdtp->last_accelerate = jiffies;
1668 1668
1669 /* Request timer delay depending on laziness, and round. */ 1669 /* Request timer delay depending on laziness, and round. */
1670 if (rdtp->all_lazy) { 1670 if (!rdtp->all_lazy) {
1671 *dj = round_up(rcu_idle_gp_delay + jiffies, 1671 *dj = round_up(rcu_idle_gp_delay + jiffies,
1672 rcu_idle_gp_delay) - jiffies; 1672 rcu_idle_gp_delay) - jiffies;
1673 } else { 1673 } else {
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index e4c07b0692bb..70f27e89012b 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -12,11 +12,6 @@ config CLOCKSOURCE_WATCHDOG
12config ARCH_CLOCKSOURCE_DATA 12config ARCH_CLOCKSOURCE_DATA
13 bool 13 bool
14 14
15# Platforms has a persistent clock
16config ALWAYS_USE_PERSISTENT_CLOCK
17 bool
18 default n
19
20# Timekeeping vsyscall support 15# Timekeeping vsyscall support
21config GENERIC_TIME_VSYSCALL 16config GENERIC_TIME_VSYSCALL
22 bool 17 bool
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 206bbfb34e09..24938d577669 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -786,11 +786,11 @@ bool tick_broadcast_oneshot_available(void)
786 786
787void __init tick_broadcast_init(void) 787void __init tick_broadcast_init(void)
788{ 788{
789 alloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT); 789 zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
790 alloc_cpumask_var(&tmpmask, GFP_NOWAIT); 790 zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
791#ifdef CONFIG_TICK_ONESHOT 791#ifdef CONFIG_TICK_ONESHOT
792 alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT); 792 zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
793 alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT); 793 zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
794 alloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT); 794 zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
795#endif 795#endif
796} 796}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index bc67d4245e1d..f4208138fbf4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -717,6 +717,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
717 if (unlikely(!cpu_online(cpu))) { 717 if (unlikely(!cpu_online(cpu))) {
718 if (cpu == tick_do_timer_cpu) 718 if (cpu == tick_do_timer_cpu)
719 tick_do_timer_cpu = TICK_DO_TIMER_NONE; 719 tick_do_timer_cpu = TICK_DO_TIMER_NONE;
720 return false;
720 } 721 }
721 722
722 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) 723 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -1168,7 +1169,7 @@ void tick_cancel_sched_timer(int cpu)
1168 hrtimer_cancel(&ts->sched_timer); 1169 hrtimer_cancel(&ts->sched_timer);
1169# endif 1170# endif
1170 1171
1171 ts->nohz_mode = NOHZ_MODE_INACTIVE; 1172 memset(ts, 0, sizeof(*ts));
1172} 1173}
1173#endif 1174#endif
1174 1175
diff --git a/kernel/timer.c b/kernel/timer.c
index a860bba34412..15ffdb3f1948 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1539,12 +1539,12 @@ static int __cpuinit init_timers_cpu(int cpu)
1539 boot_done = 1; 1539 boot_done = 1;
1540 base = &boot_tvec_bases; 1540 base = &boot_tvec_bases;
1541 } 1541 }
1542 spin_lock_init(&base->lock);
1542 tvec_base_done[cpu] = 1; 1543 tvec_base_done[cpu] = 1;
1543 } else { 1544 } else {
1544 base = per_cpu(tvec_bases, cpu); 1545 base = per_cpu(tvec_bases, cpu);
1545 } 1546 }
1546 1547
1547 spin_lock_init(&base->lock);
1548 1548
1549 for (j = 0; j < TVN_SIZE; j++) { 1549 for (j = 0; j < TVN_SIZE; j++) {
1550 INIT_LIST_HEAD(base->tv5.vec + j); 1550 INIT_LIST_HEAD(base->tv5.vec + j);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index a6361178de5a..e1b653f7e1ca 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -750,7 +750,11 @@ static int filter_set_pred(struct event_filter *filter,
750 750
751static void __free_preds(struct event_filter *filter) 751static void __free_preds(struct event_filter *filter)
752{ 752{
753 int i;
754
753 if (filter->preds) { 755 if (filter->preds) {
756 for (i = 0; i < filter->n_preds; i++)
757 kfree(filter->preds[i].ops);
754 kfree(filter->preds); 758 kfree(filter->preds);
755 filter->preds = NULL; 759 filter->preds = NULL;
756 } 760 }
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 636d45fe69b3..9f46e98ba8f2 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -35,7 +35,7 @@ struct trace_probe {
35 const char *symbol; /* symbol name */ 35 const char *symbol; /* symbol name */
36 struct ftrace_event_class class; 36 struct ftrace_event_class class;
37 struct ftrace_event_call call; 37 struct ftrace_event_call call;
38 struct ftrace_event_file **files; 38 struct ftrace_event_file * __rcu *files;
39 ssize_t size; /* trace entry size */ 39 ssize_t size; /* trace entry size */
40 unsigned int nr_args; 40 unsigned int nr_args;
41 struct probe_arg args[]; 41 struct probe_arg args[];
@@ -185,9 +185,14 @@ static struct trace_probe *find_trace_probe(const char *event,
185 185
186static int trace_probe_nr_files(struct trace_probe *tp) 186static int trace_probe_nr_files(struct trace_probe *tp)
187{ 187{
188 struct ftrace_event_file **file = tp->files; 188 struct ftrace_event_file **file;
189 int ret = 0; 189 int ret = 0;
190 190
191 /*
192 * Since all tp->files updates are protected by probe_enable_lock,
193 * we don't need to take rcu_read_lock.
194 */
195 file = rcu_dereference_raw(tp->files);
191 if (file) 196 if (file)
192 while (*(file++)) 197 while (*(file++))
193 ret++; 198 ret++;
@@ -209,9 +214,10 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
209 mutex_lock(&probe_enable_lock); 214 mutex_lock(&probe_enable_lock);
210 215
211 if (file) { 216 if (file) {
212 struct ftrace_event_file **new, **old = tp->files; 217 struct ftrace_event_file **new, **old;
213 int n = trace_probe_nr_files(tp); 218 int n = trace_probe_nr_files(tp);
214 219
220 old = rcu_dereference_raw(tp->files);
215 /* 1 is for new one and 1 is for stopper */ 221 /* 1 is for new one and 1 is for stopper */
216 new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *), 222 new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
217 GFP_KERNEL); 223 GFP_KERNEL);
@@ -251,11 +257,17 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
251static int 257static int
252trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file) 258trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
253{ 259{
260 struct ftrace_event_file **files;
254 int i; 261 int i;
255 262
256 if (tp->files) { 263 /*
257 for (i = 0; tp->files[i]; i++) 264 * Since all tp->files updates are protected by probe_enable_lock,
258 if (tp->files[i] == file) 265 * we don't need to take rcu_read_lock.
266 */
267 files = rcu_dereference_raw(tp->files);
268 if (files) {
269 for (i = 0; files[i]; i++)
270 if (files[i] == file)
259 return i; 271 return i;
260 } 272 }
261 273
@@ -274,10 +286,11 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
274 mutex_lock(&probe_enable_lock); 286 mutex_lock(&probe_enable_lock);
275 287
276 if (file) { 288 if (file) {
277 struct ftrace_event_file **new, **old = tp->files; 289 struct ftrace_event_file **new, **old;
278 int n = trace_probe_nr_files(tp); 290 int n = trace_probe_nr_files(tp);
279 int i, j; 291 int i, j;
280 292
293 old = rcu_dereference_raw(tp->files);
281 if (n == 0 || trace_probe_file_index(tp, file) < 0) { 294 if (n == 0 || trace_probe_file_index(tp, file) < 0) {
282 ret = -EINVAL; 295 ret = -EINVAL;
283 goto out_unlock; 296 goto out_unlock;
@@ -872,9 +885,16 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
872static __kprobes void 885static __kprobes void
873kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs) 886kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
874{ 887{
875 struct ftrace_event_file **file = tp->files; 888 /*
889 * Note: preempt is already disabled around the kprobe handler.
890 * However, we still need an smp_read_barrier_depends() corresponding
891 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
892 */
893 struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
894
895 if (unlikely(!file))
896 return;
876 897
877 /* Note: preempt is already disabled around the kprobe handler */
878 while (*file) { 898 while (*file) {
879 __kprobe_trace_func(tp, regs, *file); 899 __kprobe_trace_func(tp, regs, *file);
880 file++; 900 file++;
@@ -925,9 +945,16 @@ static __kprobes void
 kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 		     struct pt_regs *regs)
 {
-	struct ftrace_event_file **file = tp->files;
+	/*
+	 * Note: preempt is already disabled around the kprobe handler.
+	 * However, we still need an smp_read_barrier_depends() corresponding
+	 * to the smp_wmb() in rcu_assign_pointer() to access the pointer.
+	 */
+	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
+
+	if (unlikely(!file))
+		return;
 
-	/* Note: preempt is already disabled around the kprobe handler */
 	while (*file) {
 		__kretprobe_trace_func(tp, ri, regs, *file);
 		file++;
@@ -935,7 +962,7 @@ kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 }
 
 /* Event entry printers */
-enum print_line_t
+static enum print_line_t
 print_kprobe_event(struct trace_iterator *iter, int flags,
 		   struct trace_event *event)
 {
@@ -971,7 +998,7 @@ partial:
 	return TRACE_TYPE_PARTIAL_LINE;
 }
 
-enum print_line_t
+static enum print_line_t
 print_kretprobe_event(struct trace_iterator *iter, int flags,
 		      struct trace_event *event)
 {
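
The tp->files hunks above all follow one pattern: every updater serializes on probe_enable_lock and publishes a new NULL-terminated array with rcu_assign_pointer(), so a reader that holds the lock (or runs preempt-disabled) may use rcu_dereference_raw(). A minimal sketch of that publish/read discipline, with hypothetical names (struct item, add_item) that are not part of the patch:

	#include <linux/mutex.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct item;

	static DEFINE_MUTEX(list_lock);
	static struct item **items;	/* NULL-terminated array, RCU-published */

	/* Writer side: all updaters hold list_lock, so reading the old
	 * pointer with rcu_dereference_raw() here is safe.
	 */
	static int add_item(struct item *it)
	{
		struct item **new, **old;
		int n = 0;

		mutex_lock(&list_lock);
		old = rcu_dereference_raw(items);
		if (old)
			while (old[n])
				n++;
		/* one slot for the new entry, one for the NULL terminator */
		new = kcalloc(n + 2, sizeof(*new), GFP_KERNEL);
		if (!new) {
			mutex_unlock(&list_lock);
			return -ENOMEM;
		}
		if (old)
			memcpy(new, old, n * sizeof(*new));
		new[n] = it;
		rcu_assign_pointer(items, new);	/* smp_wmb() pairs with readers */
		mutex_unlock(&list_lock);
		synchronize_rcu();		/* let readers drain before freeing */
		kfree(old);
		return 0;
	}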
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4aa9f5bc6b2d..ee8e29a2320c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -296,7 +296,7 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 
 struct workqueue_struct *system_wq __read_mostly;
-EXPORT_SYMBOL_GPL(system_wq);
+EXPORT_SYMBOL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_highpri_wq);
 struct workqueue_struct *system_long_wq __read_mostly;
@@ -1411,7 +1411,7 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
 	local_irq_restore(flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(queue_work_on);
+EXPORT_SYMBOL(queue_work_on);
 
 void delayed_work_timer_fn(unsigned long __data)
 {
@@ -1485,7 +1485,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	local_irq_restore(flags);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+EXPORT_SYMBOL(queue_delayed_work_on);
 
 /**
  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
@@ -2059,6 +2059,7 @@ static bool manage_workers(struct worker *worker)
 	if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
 		spin_unlock_irq(&pool->lock);
 		mutex_lock(&pool->manager_mutex);
+		spin_lock_irq(&pool->lock);
 		ret = true;
 	}
 
@@ -4311,6 +4312,12 @@ bool current_is_workqueue_rescuer(void)
  * no synchronization around this function and the test result is
  * unreliable and only useful as advisory hints or for debugging.
  *
+ * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
+ * Note that both per-cpu and unbound workqueues may be associated with
+ * multiple pool_workqueues which have separate congested states.  A
+ * workqueue being congested on one CPU doesn't mean the workqueue is also
+ * congested on other CPUs / NUMA nodes.
+ *
  * RETURNS:
  * %true if congested, %false otherwise.
  */
@@ -4321,6 +4328,9 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 
 	rcu_read_lock_sched();
 
+	if (cpu == WORK_CPU_UNBOUND)
+		cpu = smp_processor_id();
+
 	if (!(wq->flags & WQ_UNBOUND))
 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
 	else
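
With the WORK_CPU_UNBOUND branch above, a caller can ask about the CPU it is currently running on. A hedged usage sketch (the wq and work here are hypothetical, not from the patch); as the comment notes, the result is advisory only and may already be stale when acted on:

	#include <linux/workqueue.h>

	static bool try_queue_stats_work(struct workqueue_struct *wq,
					 struct work_struct *work)
	{
		/* Probe the local CPU's pool_workqueue; purely advisory. */
		if (workqueue_congested(WORK_CPU_UNBOUND, wq))
			return false;	/* defer or drop low-priority work */
		return queue_work(wq, work);
	}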
@@ -4895,7 +4905,8 @@ static void __init wq_numa_init(void)
 	BUG_ON(!tbl);
 
 	for_each_node(node)
-		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node));
+		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+				node_online(node) ? node : NUMA_NO_NODE));
 
 	for_each_possible_cpu(cpu) {
 		node = cpu_to_node(cpu);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 8e15d966d9b0..239992021b1d 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -837,6 +837,19 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 
 	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
 	if (dat_entry) {
+		/* If the ARP request is destined for a local client the local
+		 * client will answer itself. DAT would only generate a
+		 * duplicate packet.
+		 *
+		 * Moreover, if the soft-interface is enslaved into a bridge, an
+		 * additional DAT answer may trigger kernel warnings about
+		 * a packet coming from the wrong port.
+		 */
+		if (batadv_is_my_client(bat_priv, dat_entry->mac_addr)) {
+			ret = true;
+			goto out;
+		}
+
 		skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
 				     bat_priv->soft_iface, ip_dst, hw_src,
 				     dat_entry->mac_addr, hw_src);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 3e30a0f1b908..1240f07ad31d 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -163,14 +163,22 @@ void batadv_mesh_free(struct net_device *soft_iface)
 	batadv_vis_quit(bat_priv);
 
 	batadv_gw_node_purge(bat_priv);
-	batadv_originator_free(bat_priv);
 	batadv_nc_free(bat_priv);
+	batadv_dat_free(bat_priv);
+	batadv_bla_free(bat_priv);
 
+	/* Free the TT and the originator tables only after having terminated
+	 * all the other dependent components which may use these structures
+	 * for their purposes.
+	 */
 	batadv_tt_free(bat_priv);
 
-	batadv_bla_free(bat_priv);
-
-	batadv_dat_free(bat_priv);
+	/* Since the originator table clean-up routine accesses the TT tables
+	 * as well, it has to be invoked after the TT tables have been freed
+	 * and marked as empty. This ensures that no cleanup RCU callbacks
+	 * accessing the TT data are scheduled for later execution.
+	 */
+	batadv_originator_free(bat_priv);
 
 	free_percpu(bat_priv->bat_counters);
 
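
The reordering above encodes a teardown rule worth stating on its own: any component whose cleanup (including RCU callbacks it may schedule) dereferences another component's tables must be torn down only after those tables have been emptied. A generic sketch of the same shape, with hypothetical names and an assumed rcu_barrier() step that is not in the patch:

	#include <linux/rcupdate.h>

	struct subsys_priv;
	void consumer_free(struct subsys_priv *priv);
	void table_free(struct subsys_priv *priv);
	void core_free(struct subsys_priv *priv);

	/* Hypothetical teardown honouring the same dependency ordering:
	 * consumers first, then the tables they read, then the component
	 * whose cleanup may still scan those (now empty) tables.
	 */
	static void subsys_free(struct subsys_priv *priv)
	{
		consumer_free(priv);	/* may schedule RCU callbacks into the tables */
		table_free(priv);	/* empty the tables; no new callbacks after this */
		rcu_barrier();		/* assumption: flush pending callbacks first */
		core_free(priv);	/* safe: tables are empty and quiescent */
	}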
@@ -475,7 +483,7 @@ static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
 	char *algo_name = (char *)val;
 	size_t name_len = strlen(algo_name);
 
-	if (algo_name[name_len - 1] == '\n')
+	if (name_len > 0 && algo_name[name_len - 1] == '\n')
 		algo_name[name_len - 1] = '\0';
 
 	bat_algo_ops = batadv_algo_get(algo_name);
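
The name_len > 0 guard above matters because module parameters can legitimately be empty strings, and indexing algo_name[name_len - 1] would then touch one byte before the buffer. The same defensive shape as a standalone, hypothetical helper (not part of the patch):

	#include <linux/string.h>

	/* Strip one trailing newline in place; never index s[len - 1]
	 * when len == 0, which would underflow to s[-1].
	 */
	static void strip_trailing_newline(char *s)
	{
		size_t len = strlen(s);

		if (len > 0 && s[len - 1] == '\n')
			s[len - 1] = '\0';
	}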
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index f7c54305a918..e84629ece9b7 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1514,6 +1514,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 	struct ethhdr *ethhdr, ethhdr_tmp;
 	uint8_t *orig_dest, ttl, ttvn;
 	unsigned int coding_len;
+	int err;
 
 	/* Save headers temporarily */
 	memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp));
@@ -1568,8 +1569,11 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 			   coding_len);
 
 	/* Resize decoded skb if decoded with larger packet */
-	if (nc_packet->skb->len > coding_len + h_size)
-		pskb_trim_rcsum(skb, coding_len + h_size);
+	if (nc_packet->skb->len > coding_len + h_size) {
+		err = pskb_trim_rcsum(skb, coding_len + h_size);
+		if (err)
+			return NULL;
+	}
 
 	/* Create decoded unicast packet */
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index a3395fdfbd4f..d5953b87918c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1204,6 +1204,7 @@ void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
 	mutex_lock(&osdc->request_mutex);
 	if (req->r_linger) {
 		__unregister_linger_request(osdc, req);
+		req->r_linger = 0;
 		ceph_osdc_put_request(req);
 	}
 	mutex_unlock(&osdc->request_mutex);
@@ -2120,7 +2121,9 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 	down_read(&osdc->map_sem);
 	mutex_lock(&osdc->request_mutex);
 	__register_request(osdc, req);
-	WARN_ON(req->r_sent);
+	req->r_sent = 0;
+	req->r_got_reply = 0;
+	req->r_completed = 0;
 	rc = __map_request(osdc, req, 0);
 	if (rc < 0) {
 		if (nofail) {
diff --git a/net/core/sock.c b/net/core/sock.c
index d4f4cea726e7..6ba327da79e1 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1217,18 +1217,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
 #endif
 }
 
-/*
- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
- * un-modified. Special care is taken when initializing object to zero.
- */
-static inline void sk_prot_clear_nulls(struct sock *sk, int size)
-{
-	if (offsetof(struct sock, sk_node.next) != 0)
-		memset(sk, 0, offsetof(struct sock, sk_node.next));
-	memset(&sk->sk_node.pprev, 0,
-	       size - offsetof(struct sock, sk_node.pprev));
-}
-
 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
 {
 	unsigned long nulls1, nulls2;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 147abf5275aa..4bcabf3ab4ca 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -84,7 +84,7 @@ int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
 EXPORT_SYMBOL(sysctl_ip_default_ttl);
 
 /* Generate a checksum for an outgoing IP datagram. */
-__inline__ void ip_send_check(struct iphdr *iph)
+void ip_send_check(struct iphdr *iph)
 {
 	iph->check = 0;
 	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index d3ddd8400354..ecd60733e5e2 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1081,6 +1081,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
 		}
 		if (t == NULL)
 			t = netdev_priv(dev);
+		memset(&p, 0, sizeof(p));
 		ip6gre_tnl_parm_to_user(&p, &t->parms);
 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
 			err = -EFAULT;
@@ -1128,6 +1129,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
 		if (t) {
 			err = 0;
 
+			memset(&p, 0, sizeof(p));
 			ip6gre_tnl_parm_to_user(&p, &t->parms);
 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
 				err = -EFAULT;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 71167069b394..0a17ed9eaf39 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1890,6 +1890,17 @@ void tcp6_proc_exit(struct net *net)
 }
 #endif
 
+static void tcp_v6_clear_sk(struct sock *sk, int size)
+{
+	struct inet_sock *inet = inet_sk(sk);
+
+	/* we do not want to clear pinet6 field, because of RCU lookups */
+	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
+
+	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
+	memset(&inet->pinet6 + 1, 0, size);
+}
+
 struct proto tcpv6_prot = {
 	.name		= "TCPv6",
 	.owner		= THIS_MODULE,
@@ -1933,6 +1944,7 @@ struct proto tcpv6_prot = {
 #ifdef CONFIG_MEMCG_KMEM
 	.proto_cgroup	= tcp_proto_cgroup,
 #endif
+	.clear_sk	= tcp_v6_clear_sk,
 };
 
 static const struct inet6_protocol tcpv6_protocol = {
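
The clear_sk callbacks added in these hunks rely on a two-memset idiom: zero the object up to the field that must survive, skip it, and zero the remainder, using offsetof() for the split points. The same arithmetic in isolation, with a hypothetical struct (not struct sock):

	#include <linux/stddef.h>
	#include <linux/string.h>

	struct conn {
		int a;
		void *keep;	/* must survive the clear, e.g. seen by RCU readers */
		int b;
		char buf[32];
	};

	/* Zero everything in *c except 'keep'; size is the full object size. */
	static void conn_clear(struct conn *c, size_t size)
	{
		memset(c, 0, offsetof(struct conn, keep));
		size -= offsetof(struct conn, keep) + sizeof(c->keep);
		memset(&c->keep + 1, 0, size);
	}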
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d4defdd44937..42923b14dfa6 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1432,6 +1432,17 @@ void udp6_proc_exit(struct net *net) {
 }
 #endif /* CONFIG_PROC_FS */
 
+void udp_v6_clear_sk(struct sock *sk, int size)
+{
+	struct inet_sock *inet = inet_sk(sk);
+
+	/* we do not want to clear pinet6 field, because of RCU lookups */
+	sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
+
+	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
+	memset(&inet->pinet6 + 1, 0, size);
+}
+
 /* ------------------------------------------------------------------------ */
 
 struct proto udpv6_prot = {
@@ -1462,7 +1473,7 @@ struct proto udpv6_prot = {
 	.compat_setsockopt = compat_udpv6_setsockopt,
 	.compat_getsockopt = compat_udpv6_getsockopt,
 #endif
-	.clear_sk	   = sk_prot_clear_portaddr_nulls,
+	.clear_sk	   = udp_v6_clear_sk,
 };
 
 static struct inet_protosw udpv6_protosw = {
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index d7571046bfc4..4691ed50a928 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -31,6 +31,8 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 extern int	udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
 extern void	udpv6_destroy_sock(struct sock *sk);
 
+extern void udp_v6_clear_sk(struct sock *sk, int size);
+
 #ifdef CONFIG_PROC_FS
 extern int	udp6_seq_show(struct seq_file *seq, void *v);
 #endif
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 1d08e21d9f69..dfcc4be46898 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -56,7 +56,7 @@ struct proto udplitev6_prot = {
 	.compat_setsockopt = compat_udpv6_setsockopt,
 	.compat_getsockopt = compat_udpv6_getsockopt,
 #endif
-	.clear_sk	   = sk_prot_clear_portaddr_nulls,
+	.clear_sk	   = udp_v6_clear_sk,
 };
 
 static struct inet_protosw udplite6_protosw = {
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4ef7bdb65440..23ed03d786c8 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -103,8 +103,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 	dev_hold(dev);
 
 	xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
-	if (!xdst->u.rt6.rt6i_idev)
+	if (!xdst->u.rt6.rt6i_idev) {
+		dev_put(dev);
 		return -ENODEV;
+	}
 
 	rt6_transfer_peer(&xdst->u.rt6, rt);
 
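
The dev_put() above restores hold/put balance: once dev_hold() has been taken, every subsequent failure exit must drop it, or the netdevice can never be unregistered. The pattern reduced to its core, with hypothetical names (rt6_like, rt6_like_attach):

	#include <linux/netdevice.h>
	#include <net/addrconf.h>

	struct rt6_like {
		struct inet6_dev *idev;
	};

	/* Hypothetical sketch: balance dev_hold() on every error path. */
	static int rt6_like_attach(struct rt6_like *rt, struct net_device *dev)
	{
		dev_hold(dev);

		rt->idev = in6_dev_get(dev);	/* takes its own reference */
		if (!rt->idev) {
			dev_put(dev);		/* undo the hold taken above */
			return -ENODEV;
		}
		return 0;
	}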
diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c
index 552b97afbca5..61ab640e195f 100644
--- a/sound/aoa/fabrics/layout.c
+++ b/sound/aoa/fabrics/layout.c
@@ -113,6 +113,7 @@ MODULE_ALIAS("sound-layout-100");
 MODULE_ALIAS("aoa-device-id-14");
 MODULE_ALIAS("aoa-device-id-22");
 MODULE_ALIAS("aoa-device-id-35");
+MODULE_ALIAS("aoa-device-id-44");
 
 /* onyx with all but microphone connected */
 static struct codec_connection onyx_connections_nomic[] = {
@@ -361,6 +362,13 @@ static struct layout layouts[] = {
 		.connections = tas_connections_nolineout,
 	  },
 	},
+	/* PowerBook6,5 */
+	{ .device_id = 44,
+	  .codecs[0] = {
+		.name = "tas",
+		.connections = tas_connections_all,
+	  },
+	},
 	/* PowerBook6,7 */
 	{ .layout_id = 80,
 	  .codecs[0] = {
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index 010658335881..15e76131b501 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -200,7 +200,8 @@ static int i2sbus_add_dev(struct macio_dev *macio,
 	 * We probably cannot handle all device-id machines,
 	 * so restrict to those we do handle for now.
 	 */
-	if (id && (*id == 22 || *id == 14 || *id == 35)) {
+	if (id && (*id == 22 || *id == 14 || *id == 35 ||
+		   *id == 44)) {
 		snprintf(dev->sound.modalias, 32,
 			 "aoa-device-id-%d", *id);
 		ok = 1;
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 51c4ba95a32d..1a9640254433 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -250,7 +250,7 @@ config MSND_FIFOSIZE
 menuconfig SOUND_OSS
 	tristate "OSS sound modules"
 	depends on ISA_DMA_API && VIRT_TO_BUS
-	depends on !ISA_DMA_SUPPORT_BROKEN
+	depends on !GENERIC_ISA_DMA_SUPPORT_BROKEN
 	help
 	  OSS is the Open Sound System suite of sound card drivers. They make
 	  sound programming easier since they provide a common API. Say Y or
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index ac079f93c535..ae85bbd2e6f8 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -606,6 +606,10 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid,
 	return false;
 }
 
+/* check whether the NID is referenced by any active paths */
+#define is_active_nid_for_any(codec, nid) \
+	is_active_nid(codec, nid, HDA_OUTPUT, 0)
+
 /* get the default amp value for the target state */
 static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
 				   int dir, unsigned int caps, bool enable)
@@ -759,7 +763,8 @@ static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path)
 
 	for (i = 0; i < path->depth; i++) {
 		hda_nid_t nid = path->path[i];
-		if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3)) {
+		if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3) &&
+		    !is_active_nid_for_any(codec, nid)) {
 			snd_hda_codec_write(codec, nid, 0,
 					    AC_VERB_SET_POWER_STATE,
 					    AC_PWRST_D3);
@@ -4157,7 +4162,7 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
 		return power_state;
 	if (get_wcaps_type(get_wcaps(codec, nid)) >= AC_WID_POWER)
 		return power_state;
-	if (is_active_nid(codec, nid, HDA_OUTPUT, 0))
+	if (is_active_nid_for_any(codec, nid))
 		return power_state;
 	return AC_PWRST_D3;
 }
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6bf47f7326ad..59d2e91a9ab6 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3482,6 +3482,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x05c9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
diff --git a/sound/soc/codecs/ab8500-codec.h b/sound/soc/codecs/ab8500-codec.h
index 114f69a0c629..306d0bc8455f 100644
--- a/sound/soc/codecs/ab8500-codec.h
+++ b/sound/soc/codecs/ab8500-codec.h
@@ -348,25 +348,25 @@
 
 /* AB8500_ADSLOTSELX */
 #define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_ODD		0x00
-#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD		0x01
-#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD		0x02
-#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD		0x03
-#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD		0x04
-#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD		0x05
-#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD		0x06
-#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD		0x07
-#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD		0x08
-#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD		0x0F
+#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD		0x10
+#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD		0x20
+#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD		0x30
+#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD		0x40
+#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD		0x50
+#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD		0x60
+#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD		0x70
+#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD		0x80
+#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD		0xF0
 #define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_EVEN		0x00
-#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN		0x10
-#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN		0x20
-#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN		0x30
-#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN		0x40
-#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN		0x50
-#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN		0x60
-#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN		0x70
-#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN		0x80
-#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN		0xF0
+#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN		0x01
+#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN		0x02
+#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN		0x03
+#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN		0x04
+#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN		0x05
+#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN		0x06
+#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN		0x07
+#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN		0x08
+#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN		0x0F
 #define AB8500_ADSLOTSELX_EVEN_SHIFT		0
 #define AB8500_ADSLOTSELX_ODD_SHIFT		4
 
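
After this swap, each ADSLOTSELX register appears to pack two 4-bit source selectors: the even slot in bits 3:0 (EVEN_SHIFT 0) and the odd slot in bits 7:4 (ODD_SHIFT 4), so the corrected values already sit in their nibble. A hedged example of composing one register value under that reading (the routing choice itself is hypothetical):

	/* AD_OUT2 -> odd slot (bits 7:4), AD_OUT3 -> even slot (bits 3:0) */
	u8 val = AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD |	/* 0x10 */
		 AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN;	/* 0x02 */
	/* val == 0x12 */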
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 41230ad1c3e0..4a6f1daf911f 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -1488,17 +1488,17 @@ static int da7213_probe(struct snd_soc_codec *codec)
 			     DA7213_DMIC_DATA_SEL_SHIFT);
 		break;
 	}
-	switch (pdata->dmic_data_sel) {
+	switch (pdata->dmic_samplephase) {
 	case DA7213_DMIC_SAMPLE_ON_CLKEDGE:
 	case DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE:
-		dmic_cfg |= (pdata->dmic_data_sel <<
+		dmic_cfg |= (pdata->dmic_samplephase <<
 			     DA7213_DMIC_SAMPLEPHASE_SHIFT);
 		break;
 	}
-	switch (pdata->dmic_data_sel) {
+	switch (pdata->dmic_clk_rate) {
 	case DA7213_DMIC_CLK_3_0MHZ:
 	case DA7213_DMIC_CLK_1_5MHZ:
-		dmic_cfg |= (pdata->dmic_data_sel <<
+		dmic_cfg |= (pdata->dmic_clk_rate <<
 			     DA7213_DMIC_CLK_RATE_SHIFT);
 		break;
 	}
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index 8df2b6e1a1a6..370af0cbcc9a 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -667,6 +667,7 @@ static int wm0010_boot(struct snd_soc_codec *codec)
 	/* On wm0010 only the CLKCTRL1 value is used */
 	pll_rec.clkctrl1 = wm0010->pll_clkctrl1;
 
+	ret = -ENOMEM;
 	len = pll_rec.length + 8;
 	out = kzalloc(len, GFP_KERNEL);
 	if (!out) {
diff --git a/sound/usb/proc.c b/sound/usb/proc.c
index 135c76871063..5f761ab34c01 100644
--- a/sound/usb/proc.c
+++ b/sound/usb/proc.c
@@ -116,21 +116,22 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s
 }
 
 static void proc_dump_ep_status(struct snd_usb_substream *subs,
-				struct snd_usb_endpoint *ep,
+				struct snd_usb_endpoint *data_ep,
+				struct snd_usb_endpoint *sync_ep,
 				struct snd_info_buffer *buffer)
 {
-	if (!ep)
+	if (!data_ep)
 		return;
-	snd_iprintf(buffer, "    Packet Size = %d\n", ep->curpacksize);
+	snd_iprintf(buffer, "    Packet Size = %d\n", data_ep->curpacksize);
 	snd_iprintf(buffer, "    Momentary freq = %u Hz (%#x.%04x)\n",
 		    subs->speed == USB_SPEED_FULL
-		    ? get_full_speed_hz(ep->freqm)
-		    : get_high_speed_hz(ep->freqm),
-		    ep->freqm >> 16, ep->freqm & 0xffff);
-	if (ep->freqshift != INT_MIN) {
-		int res = 16 - ep->freqshift;
+		    ? get_full_speed_hz(data_ep->freqm)
+		    : get_high_speed_hz(data_ep->freqm),
+		    data_ep->freqm >> 16, data_ep->freqm & 0xffff);
+	if (sync_ep && data_ep->freqshift != INT_MIN) {
+		int res = 16 - data_ep->freqshift;
 		snd_iprintf(buffer, "    Feedback Format = %d.%d\n",
-			    (ep->syncmaxsize > 3 ? 32 : 24) - res, res);
+			    (sync_ep->syncmaxsize > 3 ? 32 : 24) - res, res);
 	}
 }
 
@@ -140,8 +141,7 @@ static void proc_dump_substream_status(struct snd_usb_substream *subs, struct sn
 		snd_iprintf(buffer, "  Status: Running\n");
 		snd_iprintf(buffer, "    Interface = %d\n", subs->interface);
 		snd_iprintf(buffer, "    Altset = %d\n", subs->altset_idx);
-		proc_dump_ep_status(subs, subs->data_endpoint, buffer);
-		proc_dump_ep_status(subs, subs->sync_endpoint, buffer);
+		proc_dump_ep_status(subs, subs->data_endpoint, subs->sync_endpoint, buffer);
 	} else {
 		snd_iprintf(buffer, "  Status: Stop\n");
 	}